diff --git a/Cargo.lock b/Cargo.lock index b331a798b1..ea6441f797 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -275,7 +275,7 @@ dependencies = [ "async-lock", "async-task", "concurrent-queue", - "fastrand", + "fastrand 1.9.0", "futures-lite", "slab", ] @@ -575,7 +575,7 @@ dependencies = [ "async-lock", "async-task", "atomic-waker", - "fastrand", + "fastrand 1.9.0", "futures-lite", "log", ] @@ -1092,26 +1092,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] -name = "env_filter" -version = "0.1.0" +name = "encoding_rs" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a009aa4810eb158359dda09d0c87378e4bbb89b5a801f016885a4707ba24f7ea" +checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" dependencies = [ - "log", - "regex", -] - -[[package]] -name = "env_logger" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c012a26a7f605efc424dd53697843a72be7dc86ad2d01f7814337794a12231d" -dependencies = [ - "anstream", - "anstyle", - "env_filter", - "humantime", - "log", + "cfg-if 1.0.0", ] [[package]] @@ -1142,13 +1128,12 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.3" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136526188508e25c6fef639d7927dfb3e0e3084488bf202267829cf7fc23dbdd" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" dependencies = [ - "errno-dragonfly", "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -1197,6 +1182,12 @@ dependencies = [ "instant", ] +[[package]] +name = "fastrand" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" + [[package]] name = "femme" version = "2.2.1" @@ -1247,6 
+1238,21 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" version = "1.2.0" @@ -1320,7 +1326,7 @@ version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" dependencies = [ - "fastrand", + "fastrand 1.9.0", "futures-core", "futures-io", "memchr", @@ -1454,6 +1460,25 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "h2" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 0.2.12", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "half" version = "1.8.2" @@ -1539,6 +1564,17 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "http" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + [[package]] name = "http" version = "1.0.0" @@ -1550,6 +1586,17 @@ dependencies = [ "itoa", ] +[[package]] +name = "http-body" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +dependencies = [ + "bytes", + "http 0.2.12", + "pin-project-lite 0.2.13", +] + [[package]] name = "http-client" version = "6.5.3" @@ -1590,12 +1637,55 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + [[package]] name = "humantime" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +[[package]] +name = "hyper" +version = "0.14.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http 0.2.12", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite 0.2.13", + "socket2 0.5.6", + "tokio", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper", + "native-tls", + "tokio", + "tokio-native-tls", +] + [[package]] name = "iana-time-zone" version = "0.1.57" @@ -1674,6 +1764,12 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "ipnet" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" + [[package]] name = "ipnetwork" version = "0.20.0" @@ -1690,7 +1786,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", - "rustix 0.38.13", + "rustix 0.38.32", "windows-sys 0.48.0", ] @@ -1832,9 +1928,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.7" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] name = "lock_api" @@ -1856,6 +1952,16 @@ dependencies = [ "value-bag", ] +[[package]] +name = "loki-api" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f56d36f573486ba7f462b62cbae597fef7d5d93665e7047956b457531b8a1ced" +dependencies = [ + "prost", + "prost-types", +] + [[package]] name = "lz4_flex" version = "0.11.1" @@ -1883,6 +1989,15 @@ dependencies = [ "libc", ] +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + [[package]] name = "memchr" version = "2.6.3" @@ -1916,6 +2031,12 @@ dependencies = [ "autocfg", ] +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -1965,6 +2086,24 @@ dependencies = [ "getrandom 0.2.10", ] +[[package]] +name = "native-tls" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + 
"openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + [[package]] name = "nix" version = "0.23.2" @@ -2019,6 +2158,16 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + [[package]] name = "num" version = "0.4.1" @@ -2156,12 +2305,50 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +[[package]] +name = "openssl" +version = "0.10.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" +dependencies = [ + "bitflags 2.4.0", + "cfg-if 1.0.0", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.33", +] + [[package]] name = "openssl-probe" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +[[package]] +name = "openssl-sys" +version = "0.9.102" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "option-ext" version = "0.2.0" @@ -2177,6 +2364,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "overload" +version = "0.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + [[package]] name = "parking" version = "2.1.0" @@ -2351,6 +2544,12 @@ dependencies = [ "spki", ] +[[package]] +name = "pkg-config" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" + [[package]] name = "plotters" version = "0.3.5" @@ -2459,6 +2658,38 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "prost" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "prost-types" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" +dependencies = [ + "prost", +] + [[package]] name = "quinn" version = "0.10.2" @@ -2658,8 +2889,17 @@ checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" dependencies = [ "aho-corasick", "memchr", - "regex-automata", - "regex-syntax", + "regex-automata 0.3.8", + "regex-syntax 0.7.5", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", ] [[package]] @@ -2670,15 +2910,61 @@ checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" dependencies = [ 
"aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.7.5", ] +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + [[package]] name = "regex-syntax" version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" +[[package]] +name = "reqwest" +version = "0.11.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" +dependencies = [ + "base64 0.21.4", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2", + "http 0.2.12", + "http-body", + "hyper", + "hyper-tls", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite 0.2.13", + "rustls-pemfile 1.0.3", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "system-configuration", + "tokio", + "tokio-native-tls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg", +] + [[package]] name = "ring" version = "0.16.20" @@ -2783,7 +3069,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4eb579851244c2c03e7c24f501c3432bed80b8f720af1d6e5b0e0f01555a035" dependencies = [ "bitflags 1.3.2", - "errno 0.3.3", + "errno 0.3.8", "io-lifetimes", "libc", "linux-raw-sys 0.3.8", @@ -2792,15 +3078,15 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.13" +version = "0.38.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7db8590df6dfcd144d22afd1b83b36c21a18d7cbc1dc4bb5295a8712e9eb662" +checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" dependencies = [ "bitflags 2.4.0", - "errno 0.3.3", + "errno 0.3.8", "libc", - "linux-raw-sys 0.4.7", - 
"windows-sys 0.48.0", + "linux-raw-sys 0.4.13", + "windows-sys 0.52.0", ] [[package]] @@ -3182,6 +3468,15 @@ dependencies = [ "keccak", ] +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + [[package]] name = "shared_memory" version = "0.12.4" @@ -3257,6 +3552,12 @@ version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" +[[package]] +name = "snap" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" + [[package]] name = "socket2" version = "0.4.9" @@ -3480,6 +3781,45 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + +[[package]] +name = "system-configuration" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "tempfile" +version = "3.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +dependencies = [ + "cfg-if 1.0.0", + "fastrand 2.0.2", + "rustix 0.38.32", + "windows-sys 
0.52.0", +] + [[package]] name = "thiserror" version = "1.0.48" @@ -3500,6 +3840,16 @@ dependencies = [ "syn 2.0.33", ] +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if 1.0.0", + "once_cell", +] + [[package]] name = "tide" version = "0.16.0" @@ -3650,6 +4000,16 @@ dependencies = [ "syn 2.0.33", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.24.1" @@ -3684,6 +4044,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-stream" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +dependencies = [ + "futures-core", + "pin-project-lite 0.2.13", + "tokio", +] + [[package]] name = "tokio-tungstenite" version = "0.21.0" @@ -3709,6 +4080,7 @@ dependencies = [ "hashbrown 0.14.0", "pin-project-lite 0.2.13", "tokio", + "tracing", ] [[package]] @@ -3724,6 +4096,12 @@ dependencies = [ "vsock", ] +[[package]] +name = "tower-service" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" + [[package]] name = "tracing" version = "0.1.37" @@ -3755,8 +4133,89 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f751112709b4e791d8ce53e32c4ed2d353565a795ce84da2285393f41557bdf2" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-loki" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49bbc87d08020d7c2a9f4bb0b7d10da5381d3867f8ae57fcc54621b34567e963" +dependencies = [ + "loki-api", + "reqwest", + "serde", + "serde_json", + "snap", + "tokio", + "tokio-stream", + "tracing", + "tracing-core", + "tracing-log 0.1.4", + "tracing-serde", + "tracing-subscriber", + "url", +] + +[[package]] +name = "tracing-serde" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log 0.2.0", + "tracing-serde", ] +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + [[package]] name = "tungstenite" version = "0.21.0" @@ -3766,7 +4225,7 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http", + "http 1.0.0", "httparse", "log", "rand 0.8.5", @@ -3951,6 +4410,12 @@ dependencies = [ "unzip-n", ] +[[package]] +name = "valuable" 
+version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + [[package]] name = "value-bag" version = "1.4.1" @@ -3987,6 +4452,12 @@ dependencies = [ "sval_serde", ] +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + [[package]] name = "vec_map" version = "0.8.2" @@ -4025,6 +4496,15 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + [[package]] name = "wasi" version = "0.9.0+wasi-snapshot-preview1" @@ -4348,6 +4828,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +[[package]] +name = "winreg" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +dependencies = [ + "cfg-if 1.0.0", + "windows-sys 0.48.0", +] + [[package]] name = "yasna" version = "0.5.2" @@ -4377,14 +4867,12 @@ dependencies = [ "async-trait", "base64 0.21.4", "const_format", - "env_logger", "event-listener 4.0.0", "flume", "form_urlencoded", "futures", "git-version", "lazy_static", - "log", "ordered-float", "paste", "petgraph", @@ -4397,6 +4885,7 @@ dependencies = [ "stop-token", "tokio", "tokio-util", + "tracing", "uhlc", "uuid", "vec_map", @@ -4429,8 +4918,8 @@ dependencies = [ "const_format", "futures", "git-version", - "log", "serde_json", + "tracing", "zenoh", "zenoh-core", "zenoh-plugin-trait", @@ -4452,14 +4941,14 @@ name = "zenoh-codec" version = "0.11.0-dev" dependencies = [ "criterion", - 
"env_logger", - "log", "rand 0.8.5", "serde", + "tracing", "uhlc", "zenoh-buffers", "zenoh-protocol", "zenoh-shm", + "zenoh-util", ] [[package]] @@ -4472,12 +4961,12 @@ version = "0.11.0-dev" dependencies = [ "flume", "json5", - "log", "num_cpus", "secrecy", "serde", "serde_json", "serde_yaml", + "tracing", "validated_struct", "zenoh-core", "zenoh-protocol", @@ -4513,17 +5002,17 @@ name = "zenoh-examples" version = "0.11.0-dev" dependencies = [ "clap", - "env_logger", "flume", "futures", "git-version", "json5", - "log", "rand 0.8.5", "rustc_version 0.4.0", "tokio", + "tracing", "zenoh", "zenoh-ext", + "zenoh-util", ] [[package]] @@ -4532,12 +5021,11 @@ version = "0.11.0-dev" dependencies = [ "bincode", "clap", - "env_logger", "flume", "futures", - "log", "serde", "tokio", + "tracing", "zenoh", "zenoh-core", "zenoh-macros", @@ -4592,12 +5080,12 @@ dependencies = [ "async-trait", "flume", "futures", - "log", "rustls 0.22.2", "rustls-webpki 0.102.2", "serde", "tokio", "tokio-util", + "tracing", "zenoh-buffers", "zenoh-codec", "zenoh-core", @@ -4614,7 +5102,6 @@ dependencies = [ "async-trait", "base64 0.21.4", "futures", - "log", "quinn", "rustls 0.21.7", "rustls-native-certs 0.7.0", @@ -4624,6 +5111,7 @@ dependencies = [ "tokio", "tokio-rustls 0.24.1", "tokio-util", + "tracing", "zenoh-config", "zenoh-core", "zenoh-link-commons", @@ -4640,9 +5128,9 @@ version = "0.11.0-dev" dependencies = [ "async-trait", "futures", - "log", "tokio", "tokio-util", + "tracing", "uuid", "z-serial", "zenoh-collections", @@ -4660,9 +5148,9 @@ name = "zenoh-link-tcp" version = "0.11.0-dev" dependencies = [ "async-trait", - "log", "tokio", "tokio-util", + "tracing", "zenoh-core", "zenoh-link-commons", "zenoh-protocol", @@ -4679,7 +5167,6 @@ dependencies = [ "async-trait", "base64 0.21.4", "futures", - "log", "rustls 0.22.2", "rustls-pemfile 2.0.0", "rustls-pki-types", @@ -4688,6 +5175,7 @@ dependencies = [ "tokio", "tokio-rustls 0.25.0", "tokio-util", + "tracing", "webpki-roots", 
"zenoh-config", "zenoh-core", @@ -4704,10 +5192,10 @@ name = "zenoh-link-udp" version = "0.11.0-dev" dependencies = [ "async-trait", - "log", "socket2 0.5.6", "tokio", "tokio-util", + "tracing", "zenoh-buffers", "zenoh-collections", "zenoh-core", @@ -4726,11 +5214,11 @@ dependencies = [ "advisory-lock", "async-trait", "filepath", - "log", "nix 0.27.1", "rand 0.8.5", "tokio", "tokio-util", + "tracing", "unix-named-pipe", "zenoh-buffers", "zenoh-config", @@ -4747,10 +5235,10 @@ version = "0.11.0-dev" dependencies = [ "async-trait", "futures", - "log", "nix 0.27.1", "tokio", "tokio-util", + "tracing", "uuid", "zenoh-core", "zenoh-link-commons", @@ -4766,10 +5254,10 @@ version = "0.11.0-dev" dependencies = [ "async-trait", "libc", - "log", "tokio", "tokio-util", "tokio-vsock", + "tracing", "zenoh-core", "zenoh-link-commons", "zenoh-protocol", @@ -4785,10 +5273,10 @@ version = "0.11.0-dev" dependencies = [ "async-trait", "futures-util", - "log", "tokio", "tokio-tungstenite", "tokio-util", + "tracing", "url", "zenoh-core", "zenoh-link-commons", @@ -4815,11 +5303,10 @@ version = "0.11.0-dev" dependencies = [ "async-std", "const_format", - "env_logger", "futures", "git-version", - "log", "serde_json", + "tracing", "zenoh", "zenoh-core", "zenoh-plugin-trait", @@ -4836,19 +5323,18 @@ dependencies = [ "base64 0.21.4", "clap", "const_format", - "env_logger", "flume", "futures", "git-version", "http-types", "jsonschema", "lazy_static", - "log", "rustc_version 0.4.0", "schemars", "serde", "serde_json", "tide", + "tracing", "zenoh", "zenoh-plugin-trait", "zenoh-result", @@ -4865,17 +5351,16 @@ dependencies = [ "const_format", "crc", "derive-new", - "env_logger", "flume", "futures", "git-version", "jsonschema", "libloading", - "log", "rustc_version 0.4.0", "schemars", "serde", "serde_json", + "tracing", "urlencoding", "zenoh", "zenoh-collections", @@ -4893,9 +5378,9 @@ version = "0.11.0-dev" dependencies = [ "const_format", "libloading", - "log", "serde", "serde_json", + 
"tracing", "zenoh-keyexpr", "zenoh-macros", "zenoh-result", @@ -4938,9 +5423,9 @@ dependencies = [ name = "zenoh-shm" version = "0.11.0-dev" dependencies = [ - "log", "serde", "shared_memory", + "tracing", "zenoh-buffers", "zenoh-result", ] @@ -4964,9 +5449,9 @@ name = "zenoh-task" version = "0.11.0-dev" dependencies = [ "futures", - "log", "tokio", "tokio-util", + "tracing", "zenoh-core", "zenoh-runtime", ] @@ -4976,11 +5461,9 @@ name = "zenoh-transport" version = "0.11.0-dev" dependencies = [ "async-trait", - "env_logger", "flume", "futures", "futures-util", - "log", "lz4_flex", "paste", "rand 0.8.5", @@ -4990,6 +5473,7 @@ dependencies = [ "sha3", "tokio", "tokio-util", + "tracing", "zenoh-buffers", "zenoh-codec", "zenoh-collections", @@ -5018,10 +5502,11 @@ dependencies = [ "lazy_static", "libc", "libloading", - "log", "pnet_datalink", "shellexpand", "tokio", + "tracing", + "tracing-subscriber", "winapi", "zenoh-core", "zenoh-result", @@ -5048,16 +5533,19 @@ name = "zenohd" version = "0.11.0-dev" dependencies = [ "clap", - "env_logger", "futures", "git-version", "json5", "lazy_static", - "log", "rand 0.8.5", "rustc_version 0.4.0", "tokio", + "tracing", + "tracing-loki", + "tracing-subscriber", + "url", "zenoh", + "zenoh-util", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index da99cb1fdc..b82735b72a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -90,7 +90,8 @@ crc = "3.0.1" criterion = "0.5" derive_more = "0.99.17" derive-new = "0.6.0" -env_logger = "0.11.0" +tracing-subscriber = {version = "0.3", features = ["json", "env-filter"]} +tracing-loki = "0.2" event-listener = "4.0.0" flume = "0.11" form_urlencoded = "1.1.0" @@ -109,7 +110,7 @@ keyed-set = "0.4.4" lazy_static = "1.4.0" libc = "0.2.139" libloading = "0.8" -log = "0.4.17" +tracing = "0.1" lz4_flex = "0.11" nix = { version = "0.27", features = ["fs"] } num_cpus = "1.15.0" diff --git a/ci/valgrind-check/Cargo.toml b/ci/valgrind-check/Cargo.toml index cf6f6a844b..72e7f6473a 100644 --- 
a/ci/valgrind-check/Cargo.toml +++ b/ci/valgrind-check/Cargo.toml @@ -23,10 +23,11 @@ description = "Internal crate for zenoh." [dependencies] tokio = { version = "1.35.1", features = ["rt-multi-thread", "time", "io-std"] } -env_logger = "0.11.0" +tracing-subscriber = {version = "0.3", features = ["json", "env-filter"]} futures = "0.3.25" zenoh = { path = "../../zenoh/" } zenoh-runtime = { path = "../../commons/zenoh-runtime/" } +zenoh-util = { path = "../../commons/zenoh-util/" } [[bin]] name = "pub_sub" diff --git a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs index fac3437f39..a5fabcc705 100644 --- a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs +++ b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs @@ -14,11 +14,13 @@ use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; - +use zenoh_util::init_log; #[tokio::main] async fn main() { + + init_log(); + let _z = zenoh_runtime::ZRuntimePoolGuard; - env_logger::init(); let pub_key_expr = KeyExpr::try_from("test/valgrind/data").unwrap(); let sub_key_expr = KeyExpr::try_from("test/valgrind/**").unwrap(); diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index 102b6a036c..892f76e6d6 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -15,11 +15,13 @@ use std::convert::TryFrom; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; +use zenoh_util::init_log; #[tokio::main] async fn main() { + init_log(); + let _z = zenoh_runtime::ZRuntimePoolGuard; - env_logger::init(); let queryable_key_expr = KeyExpr::try_from("test/valgrind/data").unwrap(); let get_selector = Selector::try_from("test/valgrind/**").unwrap(); diff --git a/commons/zenoh-codec/Cargo.toml b/commons/zenoh-codec/Cargo.toml index 72f507a596..6258213743 100644 --- 
a/commons/zenoh-codec/Cargo.toml +++ b/commons/zenoh-codec/Cargo.toml @@ -31,7 +31,7 @@ description = "Internal crate for zenoh." [features] default = ["std"] std = [ - "log", + "tracing", "serde/std", "uhlc/std", "zenoh-protocol/std" @@ -44,7 +44,7 @@ shared-memory = [ complete_n = ["zenoh-protocol/complete_n"] [dependencies] -log = { workspace = true, optional = true } +tracing = {workspace = true, optional = true } serde = { workspace = true, features = ["alloc"] } uhlc = { workspace = true } zenoh-buffers = { workspace = true, default-features = false } @@ -54,9 +54,10 @@ zenoh-shm = { workspace = true, optional = true } # INFO: May cause problems when testing no_std stuff. Check this tool: https://docs.rs/crate/cargo-no-dev-deps/0.1.0 [dev-dependencies] criterion = { workspace = true } -env_logger = { workspace = true } + rand = { workspace = true, features = ["default"] } zenoh-protocol = { workspace = true, features = ["test"] } +zenoh-util = {workspace = true } [[bench]] name = "codec" diff --git a/commons/zenoh-codec/src/common/extension.rs b/commons/zenoh-codec/src/common/extension.rs index b31cfc19bc..6c22f8ff01 100644 --- a/commons/zenoh-codec/src/common/extension.rs +++ b/commons/zenoh-codec/src/common/extension.rs @@ -30,11 +30,11 @@ where let (u, has_ext): (ZExtUnknown, bool) = codec.read(&mut *reader)?; if u.is_mandatory() { #[cfg(feature = "std")] - log::error!("Unknown {_s} ext: {u:?}"); + tracing::error!("Unknown {_s} ext: {u:?}"); return Err(DidntRead); } else { #[cfg(feature = "std")] - log::debug!("Unknown {_s} ext: {u:?}"); + tracing::debug!("Unknown {_s} ext: {u:?}"); } Ok((u, has_ext)) } diff --git a/commons/zenoh-codec/tests/codec.rs b/commons/zenoh-codec/tests/codec.rs index 3fdb95e1b5..1e4ea22491 100644 --- a/commons/zenoh-codec/tests/codec.rs +++ b/commons/zenoh-codec/tests/codec.rs @@ -342,7 +342,7 @@ fn codec_shm_info() { // Common #[test] fn codec_extension() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); 
macro_rules! run_extension_single { ($ext:ty, $buff:expr) => { diff --git a/commons/zenoh-config/Cargo.toml b/commons/zenoh-config/Cargo.toml index feade8cc10..cb502881d9 100644 --- a/commons/zenoh-config/Cargo.toml +++ b/commons/zenoh-config/Cargo.toml @@ -24,7 +24,7 @@ categories = { workspace = true } description = "Internal crate for zenoh." [dependencies] -log = { workspace = true } +tracing = {workspace = true} flume = { workspace = true } json5 = { workspace = true } num_cpus = { workspace = true } diff --git a/commons/zenoh-core/src/macros.rs b/commons/zenoh-core/src/macros.rs index 5e8cefcf5a..d8f2f1fdc3 100644 --- a/commons/zenoh-core/src/macros.rs +++ b/commons/zenoh-core/src/macros.rs @@ -170,7 +170,7 @@ macro_rules! zasync_executor_init { .await .unwrap(); - log::trace!( + tracing::trace!( "Spawned {} additional threads in the async global executor", count ); @@ -186,7 +186,7 @@ macro_rules! zparse { "Failed to read configuration: {} is not a valid value", $str ); - log::warn!("{}", e); + tracing::warn!("{}", e); e }) }; @@ -204,7 +204,7 @@ macro_rules! zparse_default { "Failed to read configuration: {} is not a valid value", $str ); - log::warn!("{}", e); + tracing::warn!("{}", e); $default } } diff --git a/commons/zenoh-shm/Cargo.toml b/commons/zenoh-shm/Cargo.toml index ccf23a8911..e6107b9a13 100644 --- a/commons/zenoh-shm/Cargo.toml +++ b/commons/zenoh-shm/Cargo.toml @@ -29,7 +29,7 @@ description = "Internal crate for zenoh." 
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -log = { workspace = true } +tracing = {workspace = true} serde = { workspace = true, features = ["default"] } shared_memory = { workspace = true } zenoh-buffers = { workspace = true } diff --git a/commons/zenoh-shm/src/lib.rs b/commons/zenoh-shm/src/lib.rs index 33409ce20a..82f3614380 100644 --- a/commons/zenoh-shm/src/lib.rs +++ b/commons/zenoh-shm/src/lib.rs @@ -147,7 +147,7 @@ impl SharedMemoryBuf { } pub fn as_slice(&self) -> &[u8] { - log::trace!("SharedMemoryBuf::as_slice() == len = {:?}", self.len); + tracing::trace!("SharedMemoryBuf::as_slice() == len = {:?}", self.len); let bp = self.buf.load(Ordering::SeqCst); unsafe { std::slice::from_raw_parts(bp, self.len) } } @@ -218,7 +218,7 @@ impl SharedMemoryReader { info.shm_manager, e ); - log::trace!("{}", e); + tracing::trace!("{}", e); Err(ShmError(e).into()) } } @@ -243,7 +243,7 @@ impl SharedMemoryReader { } None => { let e = zerror!("Unable to find shared memory segment: {}", info.shm_manager); - log::trace!("{}", e); + tracing::trace!("{}", e); Err(ShmError(e).into()) } } @@ -298,7 +298,7 @@ impl SharedMemoryManager { .to_str() .ok_or_else(|| ShmError(zerror!("Unable to parse tmp directory: {:?}", temp_dir)))? 
.to_string(); - log::trace!("Creating file at: {}", path); + tracing::trace!("Creating file at: {}", path); let real_size = size + ACCOUNTED_OVERHEAD; let shmem = match ShmemConf::new() .size(real_size) @@ -335,7 +335,7 @@ impl SharedMemoryManager { busy_list, alignment: mem::align_of::(), }; - log::trace!( + tracing::trace!( "Created SharedMemoryManager for {:?}", shm.own_segment.as_ptr() ); @@ -361,7 +361,7 @@ impl SharedMemoryManager { } pub fn alloc(&mut self, len: usize) -> ZResult { - log::trace!("SharedMemoryManager::alloc({})", len); + tracing::trace!("SharedMemoryManager::alloc({})", len); // Always allocate a size that will keep the proper alignment requirements let required_len = align_addr_at(len + CHUNK_HEADER_SIZE, self.alignment); if self.available < required_len { @@ -374,20 +374,23 @@ impl SharedMemoryManager { match self.free_list.pop() { Some(mut chunk) if chunk.size >= required_len => { self.available -= required_len; - log::trace!("Allocator selected Chunk ({:?})", &chunk); + tracing::trace!("Allocator selected Chunk ({:?})", &chunk); if chunk.size - required_len >= MIN_FREE_CHUNK_SIZE { let free_chunk = Chunk { base_addr: unsafe { chunk.base_addr.add(required_len) }, offset: chunk.offset + required_len, size: chunk.size - required_len, }; - log::trace!("The allocation will leave a Free Chunk: {:?}", &free_chunk); + tracing::trace!( + "The allocation will leave a Free Chunk: {:?}", + &free_chunk + ); self.free_list.push(free_chunk); } chunk.size = required_len; let shm_buf = self.free_chunk_map_to_shmbuf(&chunk); - log::trace!("The allocated Chunk is ({:?})", &chunk); - log::trace!("Allocated Shared Memory Buffer: {:?}", &shm_buf); + tracing::trace!("The allocated Chunk is ({:?})", &chunk); + tracing::trace!("Allocated Shared Memory Buffer: {:?}", &shm_buf); self.busy_list.push(chunk); Ok(shm_buf) } @@ -398,13 +401,13 @@ impl SharedMemoryManager { } None => { let e = zerror!("SharedMemoryManager::alloc({}) cannot find any available 
chunk\nSharedMemoryManager::free_list = {:?}", len, self.free_list); - log::trace!("{}", e); + tracing::trace!("{}", e); Err(e.into()) } } } else { let e = zerror!( "SharedMemoryManager does not have sufficient free memory to allocate {} bytes, try de-fragmenting!", len); - log::warn!("{}", e); + tracing::warn!("{}", e); Err(e.into()) } } @@ -465,7 +468,7 @@ impl SharedMemoryManager { /// Returns the amount of memory freed pub fn garbage_collect(&mut self) -> usize { - log::trace!("Running Garbage Collector"); + tracing::trace!("Running Garbage Collector"); let mut freed = 0; let (free, busy) = self @@ -476,7 +479,7 @@ impl SharedMemoryManager { for f in free { freed += f.size; - log::trace!("Garbage Collecting Chunk: {:?}", f); + tracing::trace!("Garbage Collecting Chunk: {:?}", f); self.free_list.push(f) } self.available += freed; diff --git a/commons/zenoh-task/Cargo.toml b/commons/zenoh-task/Cargo.toml index bf52f13735..a9f5ab07c1 100644 --- a/commons/zenoh-task/Cargo.toml +++ b/commons/zenoh-task/Cargo.toml @@ -27,7 +27,7 @@ description = "Internal crate for zenoh." 
[dependencies] tokio = { workspace = true, features = ["default", "sync"] } futures = { workspace = true } -log = { workspace = true } +tracing = {workspace = true} zenoh-core = { workspace = true } zenoh-runtime = { workspace = true } tokio-util = { workspace = true, features = ["rt"] } \ No newline at end of file diff --git a/commons/zenoh-task/src/lib.rs b/commons/zenoh-task/src/lib.rs index 7b305cee75..5f7c3c26d2 100644 --- a/commons/zenoh-task/src/lib.rs +++ b/commons/zenoh-task/src/lib.rs @@ -122,7 +122,7 @@ impl TaskController { .await .is_err() { - log::error!("Failed to terminate {} tasks", self.tracker.len()); + tracing::error!("Failed to terminate {} tasks", self.tracker.len()); return self.tracker.len(); } 0 @@ -183,7 +183,7 @@ impl TerminatableTask { pub async fn terminate_async(self, timeout: Duration) -> bool { self.token.cancel(); if tokio::time::timeout(timeout, self.handle).await.is_err() { - log::error!("Failed to terminate the task"); + tracing::error!("Failed to terminate the task"); return false; }; true diff --git a/commons/zenoh-util/Cargo.toml b/commons/zenoh-util/Cargo.toml index 7a66600e79..909ba12f6a 100644 --- a/commons/zenoh-util/Cargo.toml +++ b/commons/zenoh-util/Cargo.toml @@ -44,7 +44,8 @@ home = { workspace = true } humantime = { workspace = true } lazy_static = { workspace = true } libloading = { workspace = true } -log = { workspace = true } +tracing = {workspace = true} +tracing-subscriber = {workspace = true} shellexpand = { workspace = true } zenoh-core = { workspace = true } zenoh-result = { workspace = true, features = ["default"] } diff --git a/commons/zenoh-util/src/std_only/lib_loader.rs b/commons/zenoh-util/src/std_only/lib_loader.rs index 12a6da8489..dec5bc07af 100644 --- a/commons/zenoh-util/src/std_only/lib_loader.rs +++ b/commons/zenoh-util/src/std_only/lib_loader.rs @@ -12,11 +12,11 @@ // ZettaScale Zenoh Team, // use libloading::Library; -use log::{debug, warn}; use std::env::consts::{DLL_PREFIX, DLL_SUFFIX}; use 
std::ffi::OsString; use std::ops::Deref; use std::path::PathBuf; +use tracing::{debug, warn}; use zenoh_core::zconfigurable; use zenoh_result::{bail, ZResult}; @@ -111,7 +111,7 @@ impl LibLoader { pub unsafe fn search_and_load(&self, name: &str) -> ZResult<(Library, PathBuf)> { let filename = format!("{}{}{}", *LIB_PREFIX, name, *LIB_SUFFIX); let filename_ostr = OsString::from(&filename); - log::debug!( + tracing::debug!( "Search for library {} to load in {:?}", filename, self.search_paths @@ -150,7 +150,7 @@ impl LibLoader { prefix: Option<&str>, ) -> Vec<(Library, PathBuf, String)> { let lib_prefix = format!("{}{}", *LIB_PREFIX, prefix.unwrap_or("")); - log::debug!( + tracing::debug!( "Search for libraries {}*{} to load in {:?}", lib_prefix, *LIB_SUFFIX, diff --git a/commons/zenoh-util/src/std_only/log.rs b/commons/zenoh-util/src/std_only/log.rs new file mode 100644 index 0000000000..2587fa877f --- /dev/null +++ b/commons/zenoh-util/src/std_only/log.rs @@ -0,0 +1,36 @@ +use tracing_subscriber::EnvFilter; + +/// This is a utility function to enable the tracing formatting subscriber from +/// the `RUST_LOG` environment variable. +/// +/// # Safety +/// Calling this function initializes a `lazy_static` in the `tracing` crate +/// such static is not deallocated prior to process exiting, thus tools such as `valgrind` +/// will report a memory leak. 
+/// Refer to this issue: https://github.com/tokio-rs/tracing/issues/2069 +pub fn init_log_from_env() { + let env_filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("z=info")); + + let subscriber = tracing_subscriber::fmt() + .with_env_filter(env_filter) + .with_thread_ids(true) + .with_thread_names(true) + .with_level(true) + .with_target(true); + + let subscriber = subscriber.finish(); + let _ = tracing::subscriber::set_global_default(subscriber); +} + +/// This is a utility function to enable the default tracing subscriber with INFO level +pub fn init_log() { + let subscriber = tracing_subscriber::fmt() + .with_max_level(tracing::Level::INFO) + .with_thread_ids(true) + .with_thread_names(true) + .with_level(true) + .with_target(true); + + let subscriber = subscriber.finish(); + let _ = tracing::subscriber::set_global_default(subscriber); +} diff --git a/commons/zenoh-util/src/std_only/mod.rs b/commons/zenoh-util/src/std_only/mod.rs index 21b171e8e5..3ccd2152a0 100644 --- a/commons/zenoh-util/src/std_only/mod.rs +++ b/commons/zenoh-util/src/std_only/mod.rs @@ -5,6 +5,10 @@ pub mod time_range; pub use lib_loader::*; pub mod timer; pub use timer::*; +pub mod log; +pub use log::init_log; +pub use log::init_log_from_env; + /// The "ZENOH_HOME" environement variable name pub const ZENOH_HOME_ENV_VAR: &str = "ZENOH_HOME"; diff --git a/commons/zenoh-util/src/std_only/net/mod.rs b/commons/zenoh-util/src/std_only/net/mod.rs index dd570b364d..83ab08d678 100644 --- a/commons/zenoh-util/src/std_only/net/mod.rs +++ b/commons/zenoh-util/src/std_only/net/mod.rs @@ -437,12 +437,12 @@ pub fn set_bind_to_device_udp_socket(socket: &UdpSocket, iface: &str) -> ZResult #[cfg(any(target_os = "macos", target_os = "windows"))] pub fn set_bind_to_device_tcp_socket(socket: &TcpSocket, iface: &str) -> ZResult<()> { - log::warn!("Binding the socket {socket:?} to the interface {iface} is not supported on macOS and Windows"); + tracing::warn!("Binding the socket 
{socket:?} to the interface {iface} is not supported on macOS and Windows"); Ok(()) } #[cfg(any(target_os = "macos", target_os = "windows"))] pub fn set_bind_to_device_udp_socket(socket: &UdpSocket, iface: &str) -> ZResult<()> { - log::warn!("Binding the socket {socket:?} to the interface {iface} is not supported on macOS and Windows"); + tracing::warn!("Binding the socket {socket:?} to the interface {iface} is not supported on macOS and Windows"); Ok(()) } diff --git a/commons/zenoh-util/src/std_only/timer.rs b/commons/zenoh-util/src/std_only/timer.rs index 909bebe6a4..6e7dde065a 100644 --- a/commons/zenoh-util/src/std_only/timer.rs +++ b/commons/zenoh-util/src/std_only/timer.rs @@ -158,7 +158,7 @@ async fn timer_task( } Err(_) => { // Channel error - log::trace!("{}", e); + tracing::trace!("{}", e); return Ok(()); } } @@ -170,7 +170,7 @@ async fn timer_task( } Err(_) => { // Channel error - log::trace!("{}", e); + tracing::trace!("{}", e); return Ok(()); } }, @@ -205,7 +205,7 @@ impl Timer { .recv_async() .race(timer_task(c_e, ev_receiver)) .await; - log::trace!("A - Timer task no longer running..."); + tracing::trace!("A - Timer task no longer running..."); }; if spawn_blocking { task::spawn_blocking(|| task::block_on(fut)); @@ -235,7 +235,7 @@ impl Timer { .recv_async() .race(timer_task(c_e, ev_receiver)) .await; - log::trace!("A - Timer task no longer running..."); + tracing::trace!("A - Timer task no longer running..."); }; if spawn_blocking { task::spawn_blocking(|| task::block_on(fut)); @@ -255,7 +255,7 @@ impl Timer { // Stop the timer task let _ = sl_sender.send(()); - log::trace!("Stopping timer..."); + tracing::trace!("Stopping timer..."); // Remove the channels handlers self.sl_sender = None; self.ev_sender = None; @@ -267,7 +267,7 @@ impl Timer { // Stop the timer task let _ = sl_sender.send_async(()).await; - log::trace!("Stopping timer..."); + tracing::trace!("Stopping timer..."); // Remove the channels handlers self.sl_sender = None; self.ev_sender 
= None; diff --git a/examples/Cargo.toml b/examples/Cargo.toml index b0cf6a0ece..b08ace3cbc 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -43,12 +43,12 @@ transport_unixpipe = ["zenoh/transport_unixpipe"] [dependencies] tokio = { workspace = true, features = ["rt-multi-thread", "time", "io-std"] } clap = { workspace = true, features = ["derive"] } -env_logger = { workspace = true } +zenoh-util = {workspace = true } flume = { workspace = true } futures = { workspace = true } git-version = { workspace = true } json5 = { workspace = true } -log = { workspace = true } +tracing = {workspace = true} zenoh = { workspace = true, default-features = true } zenoh-ext = { workspace = true } diff --git a/examples/examples/z_delete.rs b/examples/examples/z_delete.rs index a090458c71..ad87227fcd 100644 --- a/examples/examples/z_delete.rs +++ b/examples/examples/z_delete.rs @@ -19,7 +19,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (config, key_expr) = parse_args(); diff --git a/examples/examples/z_forward.rs b/examples/examples/z_forward.rs index 486ccc4fdb..9541511e84 100644 --- a/examples/examples/z_forward.rs +++ b/examples/examples/z_forward.rs @@ -20,7 +20,7 @@ use zenoh_ext::SubscriberForward; #[tokio::main] async fn main() { // Initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (config, key_expr, forward) = parse_args(); diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 0603b4f9fb..a71cc7fed0 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -21,7 +21,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (config, selector, value, target, timeout) = parse_args(); diff --git a/examples/examples/z_get_liveliness.rs b/examples/examples/z_get_liveliness.rs index 3538b7a05c..cbeea17164 
100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -21,7 +21,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (config, key_expr, timeout) = parse_args(); diff --git a/examples/examples/z_info.rs b/examples/examples/z_info.rs index 1d047f9454..9525a7abf1 100644 --- a/examples/examples/z_info.rs +++ b/examples/examples/z_info.rs @@ -19,7 +19,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let config = parse_args(); diff --git a/examples/examples/z_liveliness.rs b/examples/examples/z_liveliness.rs index 937868e091..f82d62e8bd 100644 --- a/examples/examples/z_liveliness.rs +++ b/examples/examples/z_liveliness.rs @@ -19,7 +19,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // Initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (config, key_expr) = parse_args(); diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index fe5ed4d46b..404272f1fa 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -20,7 +20,7 @@ use zenoh_examples::CommonArgs; fn main() { // initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (config, warmup, size, n) = parse_args(); let session = zenoh::open(config).res().unwrap(); diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index 53ff03b778..865734abe6 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -19,7 +19,7 @@ use zenoh_examples::CommonArgs; fn main() { // initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let config = parse_args(); diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 4863387df0..39f788272e 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -20,7 +20,7 
@@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // Initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (config, key_expr, value, attachment) = parse_args(); diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 2aadcf33de..fa72714741 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -24,7 +24,7 @@ const K: u32 = 3; #[tokio::main] async fn main() -> Result<(), zenoh::Error> { // Initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (mut config, path, value) = parse_args(); diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index c8a33f98fa..8917eda74d 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -21,7 +21,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (mut config, sm_size, size) = parse_args(); // A probing procedure for shared memory is performed upon session opening. 
To enable `z_pub_shm_thr` to operate diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 89b8b9b55c..43464f709a 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -20,7 +20,7 @@ use zenoh_examples::CommonArgs; fn main() { // initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let args = Args::parse(); let mut prio = Priority::default(); diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index bd59be7dee..855502cea9 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -20,7 +20,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (config, key_expr) = parse_args(); diff --git a/examples/examples/z_put.rs b/examples/examples/z_put.rs index 7b38490507..eeab4647d8 100644 --- a/examples/examples/z_put.rs +++ b/examples/examples/z_put.rs @@ -19,7 +19,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (config, key_expr, value) = parse_args(); diff --git a/examples/examples/z_put_float.rs b/examples/examples/z_put_float.rs index 33482e4680..ff34a7a4ba 100644 --- a/examples/examples/z_put_float.rs +++ b/examples/examples/z_put_float.rs @@ -19,7 +19,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (config, key_expr, value) = parse_args(); diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 2feac12a8e..a83187ea24 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -19,7 +19,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (config, key_expr, value, complete) = parse_args(); diff --git 
a/examples/examples/z_scout.rs b/examples/examples/z_scout.rs index bc778cfc0f..2f4e0bf7d1 100644 --- a/examples/examples/z_scout.rs +++ b/examples/examples/z_scout.rs @@ -18,7 +18,7 @@ use zenoh::scouting::WhatAmI; #[tokio::main] async fn main() { // initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); println!("Scouting..."); let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, Config::default()) diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index 161db6819f..f85a11cdae 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -23,7 +23,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (config, key_expr, complete) = parse_args(); diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index d2d86bea8b..291030062c 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -19,7 +19,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // Initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (mut config, key_expr) = parse_args(); diff --git a/examples/examples/z_sub_liveliness.rs b/examples/examples/z_sub_liveliness.rs index 0d0f9fc5ac..b0c9965749 100644 --- a/examples/examples/z_sub_liveliness.rs +++ b/examples/examples/z_sub_liveliness.rs @@ -19,7 +19,7 @@ use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // Initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (config, key_expr) = parse_args(); diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs index 0a8426edf0..e93893a49f 100644 --- a/examples/examples/z_sub_thr.rs +++ b/examples/examples/z_sub_thr.rs @@ -69,7 +69,7 @@ impl Drop for Stats { fn main() { // initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (mut config, m, n) = parse_args(); diff --git 
a/io/zenoh-link-commons/Cargo.toml b/io/zenoh-link-commons/Cargo.toml index ea21228a4b..f2e10616c1 100644 --- a/io/zenoh-link-commons/Cargo.toml +++ b/io/zenoh-link-commons/Cargo.toml @@ -32,7 +32,7 @@ async-trait = { workspace = true } rustls = { workspace = true } rustls-webpki = { workspace = true } flume = { workspace = true } -log = { workspace = true } +tracing = {workspace = true} serde = { workspace = true, features = ["default"] } zenoh-buffers = { workspace = true } zenoh-codec = { workspace = true } diff --git a/io/zenoh-link-commons/src/unicast.rs b/io/zenoh-link-commons/src/unicast.rs index fe87e70e94..cce12858e8 100644 --- a/io/zenoh-link-commons/src/unicast.rs +++ b/io/zenoh-link-commons/src/unicast.rs @@ -105,11 +105,11 @@ impl From> for LinkUnicast { pub fn get_ip_interface_names(addr: &SocketAddr) -> Vec { match zenoh_util::net::get_interface_names_by_addr(addr.ip()) { Ok(interfaces) => { - log::trace!("get_interface_names for {:?}: {:?}", addr.ip(), interfaces); + tracing::trace!("get_interface_names for {:?}: {:?}", addr.ip(), interfaces); interfaces } Err(e) => { - log::debug!("get_interface_names for {:?} failed: {:?}", addr.ip(), e); + tracing::debug!("get_interface_names for {:?} failed: {:?}", addr.ip(), e); vec![] } } diff --git a/io/zenoh-links/zenoh-link-quic/Cargo.toml b/io/zenoh-links/zenoh-link-quic/Cargo.toml index 496830b5ef..a10e18fd43 100644 --- a/io/zenoh-links/zenoh-link-quic/Cargo.toml +++ b/io/zenoh-links/zenoh-link-quic/Cargo.toml @@ -28,7 +28,7 @@ description = "Internal crate for zenoh." 
async-trait = { workspace = true } base64 = { workspace = true } futures = { workspace = true } -log = { workspace = true } +tracing = {workspace = true} quinn = { workspace = true } rustls-native-certs = { workspace = true } rustls-pemfile = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-quic/src/unicast.rs b/io/zenoh-links/zenoh-link-quic/src/unicast.rs index 33953d666d..8fd7777137 100644 --- a/io/zenoh-links/zenoh-link-quic/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-quic/src/unicast.rs @@ -68,11 +68,11 @@ impl LinkUnicastQuic { #[async_trait] impl LinkUnicastTrait for LinkUnicastQuic { async fn close(&self) -> ZResult<()> { - log::trace!("Closing QUIC link: {}", self); + tracing::trace!("Closing QUIC link: {}", self); // Flush the QUIC stream let mut guard = zasynclock!(self.send); if let Err(e) = guard.finish().await { - log::trace!("Error closing QUIC stream {}: {}", self, e); + tracing::trace!("Error closing QUIC stream {}: {}", self, e); } self.connection.close(quinn::VarInt::from_u32(0), &[0]); Ok(()) @@ -81,7 +81,7 @@ impl LinkUnicastTrait for LinkUnicastQuic { async fn write(&self, buffer: &[u8]) -> ZResult { let mut guard = zasynclock!(self.send); guard.write(buffer).await.map_err(|e| { - log::trace!("Write error on QUIC link {}: {}", self, e); + tracing::trace!("Write error on QUIC link {}: {}", self, e); zerror!(e).into() }) } @@ -89,7 +89,7 @@ impl LinkUnicastTrait for LinkUnicastQuic { async fn write_all(&self, buffer: &[u8]) -> ZResult<()> { let mut guard = zasynclock!(self.send); guard.write_all(buffer).await.map_err(|e| { - log::trace!("Write error on QUIC link {}: {}", self, e); + tracing::trace!("Write error on QUIC link {}: {}", self, e); zerror!(e).into() }) } @@ -101,7 +101,7 @@ impl LinkUnicastTrait for LinkUnicastQuic { .await .map_err(|e| { let e = zerror!("Read error on QUIC link {}: {}", self, e); - log::trace!("{}", &e); + tracing::trace!("{}", &e); e })? 
.ok_or_else(|| { @@ -110,7 +110,7 @@ impl LinkUnicastTrait for LinkUnicastQuic { self, guard.id() ); - log::trace!("{}", &e); + tracing::trace!("{}", &e); e.into() }) } @@ -119,7 +119,7 @@ impl LinkUnicastTrait for LinkUnicastQuic { let mut guard = zasynclock!(self.recv); guard.read_exact(buffer).await.map_err(|e| { let e = zerror!("Read error on QUIC link {}: {}", self, e); - log::trace!("{}", &e); + tracing::trace!("{}", &e); e.into() }) } @@ -215,7 +215,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { .parse()?; if !server_name_verification { - log::warn!("Skipping name verification of servers"); + tracing::warn!("Skipping name verification of servers"); } // Initialize the QUIC connection @@ -440,7 +440,7 @@ async fn accept_task( let conn = qc.await.map_err(|e| { let e = zerror!("QUIC acceptor failed: {:?}", e); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; @@ -452,7 +452,7 @@ async fn accept_task( .map_err(|e| zerror!("Can not accept QUIC connections: {}", e))?; // The accept future - log::trace!("Ready to accept QUIC connections on: {:?}", src_addr); + tracing::trace!("Ready to accept QUIC connections on: {:?}", src_addr); loop { tokio::select! 
{ @@ -465,13 +465,13 @@ async fn accept_task( let (send, recv) = match quic_conn.accept_bi().await { Ok(stream) => stream, Err(e) => { - log::warn!("QUIC connection has no streams: {:?}", e); + tracing::warn!("QUIC connection has no streams: {:?}", e); continue; } }; let dst_addr = quic_conn.remote_address(); - log::debug!("Accepted QUIC connection on {:?}: {:?}", src_addr, dst_addr); + tracing::debug!("Accepted QUIC connection on {:?}: {:?}", src_addr, dst_addr); // Create the new link object let link = Arc::new(LinkUnicastQuic::new( quic_conn, @@ -483,12 +483,12 @@ async fn accept_task( // Communicate the new link to the initial transport manager if let Err(e) = manager.send_async(LinkUnicast(link)).await { - log::error!("{}-{}: {}", file!(), line!(), e) + tracing::error!("{}-{}: {}", file!(), line!(), e) } } Err(e) => { - log::warn!("{} Hint: increase the system open file limit.", e); + tracing::warn!("{} Hint: increase the system open file limit.", e); // Throttle the accept loop upon an error // NOTE: This might be due to various factors. However, the most common case is that // the process has reached the maximum number of open files in the system. On diff --git a/io/zenoh-links/zenoh-link-serial/Cargo.toml b/io/zenoh-links/zenoh-link-serial/Cargo.toml index 6fc3aba97e..d89e1a0e78 100644 --- a/io/zenoh-links/zenoh-link-serial/Cargo.toml +++ b/io/zenoh-links/zenoh-link-serial/Cargo.toml @@ -34,7 +34,7 @@ description = "Internal crate for zenoh." 
[dependencies] async-trait = { workspace = true } futures = { workspace = true } -log = { workspace = true } +tracing = {workspace = true} tokio = { workspace = true, features = ["io-std", "macros", "net", "rt-multi-thread", "time"] } tokio-util = { workspace = true, features = ["rt"] } uuid = { workspace = true, default-features = true } diff --git a/io/zenoh-links/zenoh-link-serial/src/unicast.rs b/io/zenoh-links/zenoh-link-serial/src/unicast.rs index 0efa40ee90..3e79ac1eac 100644 --- a/io/zenoh-links/zenoh-link-serial/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-serial/src/unicast.rs @@ -94,7 +94,7 @@ impl LinkUnicastSerial { let res = match self.get_port_mut().bytes_to_read() { Ok(b) => b, Err(e) => { - log::warn!( + tracing::warn!( "Unable to check if there are bytes to read in serial {}: {}", self.src_locator, e @@ -112,11 +112,11 @@ impl LinkUnicastSerial { #[async_trait] impl LinkUnicastTrait for LinkUnicastSerial { async fn close(&self) -> ZResult<()> { - log::trace!("Closing Serial link: {}", self); + tracing::trace!("Closing Serial link: {}", self); let _guard = zasynclock!(self.write_lock); self.get_port_mut().clear().map_err(|e| { let e = zerror!("Unable to close Serial link {}: {}", self, e); - log::error!("{}", e); + tracing::error!("{}", e); e })?; self.is_connected.store(false, Ordering::Release); @@ -127,7 +127,7 @@ impl LinkUnicastTrait for LinkUnicastSerial { let _guard = zasynclock!(self.write_lock); self.get_port_mut().write(buffer).await.map_err(|e| { let e = zerror!("Unable to write on Serial link {}: {}", self, e); - log::error!("{}", e); + tracing::error!("{}", e); e })?; Ok(buffer.len()) @@ -148,7 +148,7 @@ impl LinkUnicastTrait for LinkUnicastSerial { Ok(read) => return Ok(read), Err(e) => { let e = zerror!("Read error on Serial link {}: {}", self, e); - log::error!("{}", e); + tracing::error!("{}", e); drop(_guard); tokio::time::sleep(std::time::Duration::from_millis(1)).await; continue; @@ -187,11 +187,11 @@ impl LinkUnicastTrait for 
LinkUnicastSerial { // e.g. for serial port "/dev/ttyUSB0" interface name will be "ttyUSB0" match z_serial::get_available_port_names() { Ok(interfaces) => { - log::trace!("get_interface_names for serial: {:?}", interfaces); + tracing::trace!("get_interface_names for serial: {:?}", interfaces); interfaces } Err(e) => { - log::debug!("get_interface_names for serial failed: {:?}", e); + tracing::debug!("get_interface_names for serial failed: {:?}", e); vec![] } } @@ -272,14 +272,14 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastSerial { let path = get_unix_path_as_string(endpoint.address()); let baud_rate = get_baud_rate(&endpoint); let exclusive = get_exclusive(&endpoint); - log::trace!("Opening Serial Link on device {path:?}, with baudrate {baud_rate} and exclusive set as {exclusive}"); + tracing::trace!("Opening Serial Link on device {path:?}, with baudrate {baud_rate} and exclusive set as {exclusive}"); let port = ZSerial::new(path.clone(), baud_rate, exclusive).map_err(|e| { let e = zerror!( "Can not create a new Serial link bound to {:?}: {}", path, e ); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; @@ -298,14 +298,14 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastSerial { let path = get_unix_path_as_string(endpoint.address()); let baud_rate = get_baud_rate(&endpoint); let exclusive = get_exclusive(&endpoint); - log::trace!("Creating Serial listener on device {path:?}, with baudrate {baud_rate} and exclusive set as {exclusive}"); + tracing::trace!("Creating Serial listener on device {path:?}, with baudrate {baud_rate} and exclusive set as {exclusive}"); let port = ZSerial::new(path.clone(), baud_rate, exclusive).map_err(|e| { let e = zerror!( "Can not create a new Serial link bound to {:?}: {}", path, e ); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; @@ -354,7 +354,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastSerial { "Can not delete the Serial listener because it has not been found: {}", path ); - 
log::trace!("{}", e); + tracing::trace!("{}", e); e })?; @@ -395,12 +395,12 @@ async fn accept_read_task( tokio::time::sleep(Duration::from_micros(*SERIAL_ACCEPT_THROTTLE_TIME)).await; } - log::trace!("Creating serial link from {:?}", src_path); + tracing::trace!("Creating serial link from {:?}", src_path); is_connected.store(true, Ordering::Release); Ok(link.clone()) } - log::trace!("Ready to accept Serial connections on: {:?}", src_path); + tracing::trace!("Ready to accept Serial connections on: {:?}", src_path); loop { tokio::select! { @@ -413,14 +413,14 @@ async fn accept_read_task( Ok(link) => { // Communicate the new link to the initial transport manager if let Err(e) = manager.send_async(LinkUnicast(link.clone())).await { - log::error!("{}-{}: {}", file!(), line!(), e) + tracing::error!("{}-{}: {}", file!(), line!(), e) } // Ensure the creation of this link is only once break; } Err(e) => { - log::warn!("{}. Hint: Is the serial cable connected?", e); + tracing::warn!("{}. Hint: Is the serial cable connected?", e); tokio::time::sleep(Duration::from_micros(*SERIAL_ACCEPT_THROTTLE_TIME)).await; continue; diff --git a/io/zenoh-links/zenoh-link-tcp/Cargo.toml b/io/zenoh-links/zenoh-link-tcp/Cargo.toml index b638f97443..ca94412382 100644 --- a/io/zenoh-links/zenoh-link-tcp/Cargo.toml +++ b/io/zenoh-links/zenoh-link-tcp/Cargo.toml @@ -28,7 +28,7 @@ description = "Internal crate for zenoh." 
async-trait = { workspace = true } tokio = { workspace = true, features = ["net", "io-util", "rt", "time"] } tokio-util = { workspace = true, features = ["rt"] } -log = { workspace = true } +tracing = {workspace = true} zenoh-core = { workspace = true } zenoh-link-commons = { workspace = true } zenoh-protocol = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index 361f4fe69e..3ef4f235ed 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -50,7 +50,7 @@ impl LinkUnicastTcp { fn new(socket: TcpStream, src_addr: SocketAddr, dst_addr: SocketAddr) -> LinkUnicastTcp { // Set the TCP nodelay option if let Err(err) = socket.set_nodelay(true) { - log::warn!( + tracing::warn!( "Unable to set NODEALY option on TCP link {} => {}: {}", src_addr, dst_addr, @@ -62,7 +62,7 @@ impl LinkUnicastTcp { if let Err(err) = socket.set_linger(Some(Duration::from_secs( (*TCP_LINGER_TIMEOUT).try_into().unwrap(), ))) { - log::warn!( + tracing::warn!( "Unable to set LINGER option on TCP link {} => {}: {}", src_addr, dst_addr, @@ -88,11 +88,11 @@ impl LinkUnicastTcp { #[async_trait] impl LinkUnicastTrait for LinkUnicastTcp { async fn close(&self) -> ZResult<()> { - log::trace!("Closing TCP link: {}", self); + tracing::trace!("Closing TCP link: {}", self); // Close the underlying TCP socket self.get_mut_socket().shutdown().await.map_err(|e| { let e = zerror!("TCP link shutdown {}: {:?}", self, e); - log::trace!("{}", e); + tracing::trace!("{}", e); e.into() }) } @@ -100,7 +100,7 @@ impl LinkUnicastTrait for LinkUnicastTcp { async fn write(&self, buffer: &[u8]) -> ZResult { self.get_mut_socket().write(buffer).await.map_err(|e| { let e = zerror!("Write error on TCP link {}: {}", self, e); - log::trace!("{}", e); + tracing::trace!("{}", e); e.into() }) } @@ -108,7 +108,7 @@ impl LinkUnicastTrait for LinkUnicastTcp { async fn write_all(&self, buffer: &[u8]) -> 
ZResult<()> { self.get_mut_socket().write_all(buffer).await.map_err(|e| { let e = zerror!("Write error on TCP link {}: {}", self, e); - log::trace!("{}", e); + tracing::trace!("{}", e); e.into() }) } @@ -116,7 +116,7 @@ impl LinkUnicastTrait for LinkUnicastTcp { async fn read(&self, buffer: &mut [u8]) -> ZResult { self.get_mut_socket().read(buffer).await.map_err(|e| { let e = zerror!("Read error on TCP link {}: {}", self, e); - log::trace!("{}", e); + tracing::trace!("{}", e); e.into() }) } @@ -128,7 +128,7 @@ impl LinkUnicastTrait for LinkUnicastTcp { .await .map_err(|e| { let e = zerror!("Read error on TCP link {}: {}", self, e); - log::trace!("{}", e); + tracing::trace!("{}", e); e })?; Ok(()) @@ -398,28 +398,28 @@ async fn accept_task( let src_addr = socket.local_addr().map_err(|e| { let e = zerror!("Can not accept TCP connections: {}", e); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; - log::trace!("Ready to accept TCP connections on: {:?}", src_addr); + tracing::trace!("Ready to accept TCP connections on: {:?}", src_addr); loop { tokio::select! { _ = token.cancelled() => break, res = accept(&socket) => { match res { Ok((stream, dst_addr)) => { - log::debug!("Accepted TCP connection on {:?}: {:?}", src_addr, dst_addr); + tracing::debug!("Accepted TCP connection on {:?}: {:?}", src_addr, dst_addr); // Create the new link object let link = Arc::new(LinkUnicastTcp::new(stream, src_addr, dst_addr)); // Communicate the new link to the initial transport manager if let Err(e) = manager.send_async(LinkUnicast(link)).await { - log::error!("{}-{}: {}", file!(), line!(), e) + tracing::error!("{}-{}: {}", file!(), line!(), e) } }, Err(e) => { - log::warn!("{}. Hint: increase the system open file limit.", e); + tracing::warn!("{}. Hint: increase the system open file limit.", e); // Throttle the accept loop upon an error // NOTE: This might be due to various factors. 
However, the most common case is that // the process has reached the maximum number of open files in the system. On diff --git a/io/zenoh-links/zenoh-link-tls/Cargo.toml b/io/zenoh-links/zenoh-link-tls/Cargo.toml index 975fa49467..11d00d96d8 100644 --- a/io/zenoh-links/zenoh-link-tls/Cargo.toml +++ b/io/zenoh-links/zenoh-link-tls/Cargo.toml @@ -28,7 +28,7 @@ description = "Internal crate for zenoh." async-trait = { workspace = true } base64 = { workspace = true } futures = { workspace = true } -log = { workspace = true } +tracing = {workspace = true} rustls = { workspace = true } rustls-pemfile = { workspace = true } rustls-pki-types = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index b24ce4ac31..9eec2feb2a 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -79,7 +79,7 @@ impl LinkUnicastTls { let (tcp_stream, _) = socket.get_ref(); // Set the TLS nodelay option if let Err(err) = tcp_stream.set_nodelay(true) { - log::warn!( + tracing::warn!( "Unable to set NODEALY option on TLS link {} => {}: {}", src_addr, dst_addr, @@ -91,7 +91,7 @@ impl LinkUnicastTls { if let Err(err) = tcp_stream.set_linger(Some(Duration::from_secs( (*TLS_LINGER_TIMEOUT).try_into().unwrap(), ))) { - log::warn!( + tracing::warn!( "Unable to set LINGER option on TLS link {} => {}: {}", src_addr, dst_addr, @@ -123,23 +123,23 @@ impl LinkUnicastTls { #[async_trait] impl LinkUnicastTrait for LinkUnicastTls { async fn close(&self) -> ZResult<()> { - log::trace!("Closing TLS link: {}", self); + tracing::trace!("Closing TLS link: {}", self); // Flush the TLS stream let _guard = zasynclock!(self.write_mtx); let tls_stream = self.get_sock_mut(); let res = tls_stream.flush().await; - log::trace!("TLS link flush {}: {:?}", self, res); + tracing::trace!("TLS link flush {}: {:?}", self, res); // Close the underlying TCP stream let (tcp_stream, _) = 
tls_stream.get_mut(); let res = tcp_stream.shutdown().await; - log::trace!("TLS link shutdown {}: {:?}", self, res); + tracing::trace!("TLS link shutdown {}: {:?}", self, res); res.map_err(|e| zerror!(e).into()) } async fn write(&self, buffer: &[u8]) -> ZResult { let _guard = zasynclock!(self.write_mtx); self.get_sock_mut().write(buffer).await.map_err(|e| { - log::trace!("Write error on TLS link {}: {}", self, e); + tracing::trace!("Write error on TLS link {}: {}", self, e); zerror!(e).into() }) } @@ -147,7 +147,7 @@ impl LinkUnicastTrait for LinkUnicastTls { async fn write_all(&self, buffer: &[u8]) -> ZResult<()> { let _guard = zasynclock!(self.write_mtx); self.get_sock_mut().write_all(buffer).await.map_err(|e| { - log::trace!("Write error on TLS link {}: {}", self, e); + tracing::trace!("Write error on TLS link {}: {}", self, e); zerror!(e).into() }) } @@ -155,7 +155,7 @@ impl LinkUnicastTrait for LinkUnicastTls { async fn read(&self, buffer: &mut [u8]) -> ZResult { let _guard = zasynclock!(self.read_mtx); self.get_sock_mut().read(buffer).await.map_err(|e| { - log::trace!("Read error on TLS link {}: {}", self, e); + tracing::trace!("Read error on TLS link {}: {}", self, e); zerror!(e).into() }) } @@ -163,7 +163,7 @@ impl LinkUnicastTrait for LinkUnicastTls { async fn read_exact(&self, buffer: &mut [u8]) -> ZResult<()> { let _guard = zasynclock!(self.read_mtx); let _ = self.get_sock_mut().read_exact(buffer).await.map_err(|e| { - log::trace!("Read error on TLS link {}: {}", self, e); + tracing::trace!("Read error on TLS link {}: {}", self, e); zerror!(e) })?; Ok(()) @@ -370,11 +370,11 @@ async fn accept_task( let src_addr = socket.local_addr().map_err(|e| { let e = zerror!("Can not accept TLS connections: {}", e); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; - log::trace!("Ready to accept TLS connections on: {:?}", src_addr); + tracing::trace!("Ready to accept TLS connections on: {:?}", src_addr); loop { tokio::select! 
{ _ = token.cancelled() => break, @@ -387,22 +387,22 @@ async fn accept_task( Ok(stream) => TlsStream::Server(stream), Err(e) => { let e = format!("Can not accept TLS connection: {e}"); - log::warn!("{}", e); + tracing::warn!("{}", e); continue; } }; - log::debug!("Accepted TLS connection on {:?}: {:?}", src_addr, dst_addr); + tracing::debug!("Accepted TLS connection on {:?}: {:?}", src_addr, dst_addr); // Create the new link object let link = Arc::new(LinkUnicastTls::new(tls_stream, src_addr, dst_addr)); // Communicate the new link to the initial transport manager if let Err(e) = manager.send_async(LinkUnicast(link)).await { - log::error!("{}-{}: {}", file!(), line!(), e) + tracing::error!("{}-{}: {}", file!(), line!(), e) } } Err(e) => { - log::warn!("{}. Hint: increase the system open file limit.", e); + tracing::warn!("{}. Hint: increase the system open file limit.", e); // Throttle the accept loop upon an error // NOTE: This might be due to various factors. However, the most common case is that // the process has reached the maximum number of open files in the system. On @@ -526,7 +526,7 @@ impl TlsClientConfig { .parse() .map_err(|_| zerror!("Unknown server name verification argument: {}", s))?; if s { - log::warn!("Skipping name verification of servers"); + tracing::warn!("Skipping name verification of servers"); } s } @@ -534,18 +534,18 @@ impl TlsClientConfig { }; // Allows mixed user-generated CA and webPKI CA - log::debug!("Loading default Web PKI certificates."); + tracing::debug!("Loading default Web PKI certificates."); let mut root_cert_store = RootCertStore { roots: webpki_roots::TLS_SERVER_ROOTS.to_vec(), }; if let Some(custom_root_cert) = load_trust_anchors(config)? 
{ - log::debug!("Loading user-generated certificates."); + tracing::debug!("Loading user-generated certificates."); root_cert_store.extend(custom_root_cert.roots); } let cc = if tls_client_server_auth { - log::debug!("Loading client authentication key and certificate..."); + tracing::debug!("Loading client authentication key and certificate..."); let tls_client_private_key = TlsClientConfig::load_tls_private_key(config).await?; let tls_client_certificate = TlsClientConfig::load_tls_certificate(config).await?; diff --git a/io/zenoh-links/zenoh-link-udp/Cargo.toml b/io/zenoh-links/zenoh-link-udp/Cargo.toml index bcc0f16ee4..f1bc3365f1 100644 --- a/io/zenoh-links/zenoh-link-udp/Cargo.toml +++ b/io/zenoh-links/zenoh-link-udp/Cargo.toml @@ -28,7 +28,7 @@ description = "Internal crate for zenoh." tokio = { workspace = true, features = ["net", "io-util", "rt", "time"] } tokio-util = { workspace = true, features = ["rt"] } async-trait = { workspace = true } -log = { workspace = true } +tracing = {workspace = true} socket2 = { workspace = true } zenoh-buffers = { workspace = true } zenoh-collections = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-udp/src/multicast.rs b/io/zenoh-links/zenoh-link-udp/src/multicast.rs index bc894bd296..59848b95c1 100644 --- a/io/zenoh-links/zenoh-link-udp/src/multicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/multicast.rs @@ -57,7 +57,7 @@ impl LinkMulticastUdp { #[async_trait] impl LinkMulticastTrait for LinkMulticastUdp { async fn close(&self) -> ZResult<()> { - log::trace!("Closing UDP link: {}", self); + tracing::trace!("Closing UDP link: {}", self); match self.multicast_addr.ip() { IpAddr::V4(dst_ip4) => match self.multicast_addr.ip() { IpAddr::V4(src_ip4) => self.mcast_sock.leave_multicast_v4(dst_ip4, src_ip4), @@ -67,7 +67,7 @@ impl LinkMulticastTrait for LinkMulticastUdp { } .map_err(|e| { let e = zerror!("Close error on UDP link {}: {}", self, e); - log::trace!("{}", e); + tracing::trace!("{}", e); e.into() }) } @@ 
-78,7 +78,7 @@ impl LinkMulticastTrait for LinkMulticastUdp { .await .map_err(|e| { let e = zerror!("Write error on UDP link {}: {}", self, e); - log::trace!("{}", e); + tracing::trace!("{}", e); e.into() }) } @@ -95,7 +95,7 @@ impl LinkMulticastTrait for LinkMulticastUdp { loop { let (n, addr) = self.mcast_sock.recv_from(buffer).await.map_err(|e| { let e = zerror!("Read error on UDP link {}: {}", self, e); - log::trace!("{}", e); + tracing::trace!("{}", e); e })?; diff --git a/io/zenoh-links/zenoh-link-udp/src/unicast.rs b/io/zenoh-links/zenoh-link-udp/src/unicast.rs index 1cd4a0b1ec..1fa9f9a7f4 100644 --- a/io/zenoh-links/zenoh-link-udp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/unicast.rs @@ -149,7 +149,7 @@ impl LinkUnicastUdp { #[async_trait] impl LinkUnicastTrait for LinkUnicastUdp { async fn close(&self) -> ZResult<()> { - log::trace!("Closing UDP link: {}", self); + tracing::trace!("Closing UDP link: {}", self); match &self.variant { LinkUnicastUdpVariant::Connected(link) => link.close().await, LinkUnicastUdpVariant::Unconnected(link) => { @@ -273,7 +273,7 @@ impl LinkManagerUnicastUdp { .await .map_err(|e| { let e = zerror!("Can not create a new UDP link bound to {}: {}", dst_addr, e); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; @@ -284,20 +284,20 @@ impl LinkManagerUnicastUdp { // Connect the socket to the remote address socket.connect(dst_addr).await.map_err(|e| { let e = zerror!("Can not create a new UDP link bound to {}: {}", dst_addr, e); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; // Get source and destination UDP addresses let src_addr = socket.local_addr().map_err(|e| { let e = zerror!("Can not create a new UDP link bound to {}: {}", dst_addr, e); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; let dst_addr = socket.peer_addr().map_err(|e| { let e = zerror!("Can not create a new UDP link bound to {}: {}", dst_addr, e); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; @@ -312,7 +312,7 @@ impl 
LinkManagerUnicastUdp { // Bind the UDP socket let socket = UdpSocket::bind(addr).await.map_err(|e| { let e = zerror!("Can not create a new UDP listener on {}: {}", addr, e); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; @@ -322,7 +322,7 @@ impl LinkManagerUnicastUdp { let local_addr = socket.local_addr().map_err(|e| { let e = zerror!("Can not create a new UDP listener on {}: {}", addr, e); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; @@ -492,11 +492,11 @@ async fn accept_read_task( let src_addr = socket.local_addr().map_err(|e| { let e = zerror!("Can not accept UDP connections: {}", e); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; - log::trace!("Ready to accept UDP connections on: {:?}", src_addr); + tracing::trace!("Ready to accept UDP connections on: {:?}", src_addr); loop { // Buffers for deserialization @@ -514,7 +514,7 @@ async fn accept_read_task( Some(link) => break link.upgrade(), None => { // A new peers has sent data to this socket - log::debug!("Accepted UDP connection on {}: {}", src_addr, dst_addr); + tracing::debug!("Accepted UDP connection on {}: {}", src_addr, dst_addr); let unconnected = Arc::new(LinkUnicastUdpUnconnected { socket: Arc::downgrade(&socket), links: links.clone(), @@ -530,7 +530,7 @@ async fn accept_read_task( )); // Add the new link to the set of connected peers if let Err(e) = manager.send_async(LinkUnicast(link)).await { - log::error!("{}-{}: {}", file!(), line!(), e) + tracing::error!("{}-{}: {}", file!(), line!(), e) } } } @@ -547,7 +547,7 @@ async fn accept_read_task( } Err(e) => { - log::warn!("{}. Hint: increase the system open file limit.", e); + tracing::warn!("{}. Hint: increase the system open file limit.", e); // Throttle the accept loop upon an error // NOTE: This might be due to various factors. However, the most common case is that // the process has reached the maximum number of open files in the system. 
On diff --git a/io/zenoh-links/zenoh-link-unixpipe/Cargo.toml b/io/zenoh-links/zenoh-link-unixpipe/Cargo.toml index 66784728f9..22ff335d25 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/Cargo.toml +++ b/io/zenoh-links/zenoh-link-unixpipe/Cargo.toml @@ -29,7 +29,7 @@ transport_unixpipe = [] [dependencies] async-trait = { workspace = true } -log = { workspace = true } +tracing = {workspace = true} rand = { workspace = true, features = ["default"] } zenoh-buffers = { workspace = true } zenoh-core = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs index eb8ee05d87..090ef0a340 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs @@ -471,7 +471,7 @@ impl Drop for UnicastPipe { #[async_trait] impl LinkUnicastTrait for UnicastPipe { async fn close(&self) -> ZResult<()> { - log::trace!("Closing Unix Pipe link: {}", self); + tracing::trace!("Closing Unix Pipe link: {}", self); Ok(()) } @@ -509,7 +509,7 @@ impl LinkUnicastTrait for UnicastPipe { #[inline(always)] fn get_interface_names(&self) -> Vec { // @TODO: Not supported for now - log::debug!("The get_interface_names for UnicastPipe is not supported"); + tracing::debug!("The get_interface_names for UnicastPipe is not supported"); vec![] } diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/Cargo.toml b/io/zenoh-links/zenoh-link-unixsock_stream/Cargo.toml index 1e2bba789c..b915767a19 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/Cargo.toml +++ b/io/zenoh-links/zenoh-link-unixsock_stream/Cargo.toml @@ -34,7 +34,7 @@ description = "Internal crate for zenoh." 
[dependencies] async-trait = { workspace = true } futures = { workspace = true } -log = { workspace = true } +tracing = {workspace = true} nix = { workspace = true } tokio = { workspace = true, features = ["io-std", "macros", "net", "rt-multi-thread", "time"] } tokio-util = { workspace = true, features = ["rt"] } diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs index b85cee9c66..fa1c2d9d0f 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs @@ -65,17 +65,17 @@ impl LinkUnicastUnixSocketStream { #[async_trait] impl LinkUnicastTrait for LinkUnicastUnixSocketStream { async fn close(&self) -> ZResult<()> { - log::trace!("Closing UnixSocketStream link: {}", self); + tracing::trace!("Closing UnixSocketStream link: {}", self); // Close the underlying UnixSocketStream socket let res = self.get_mut_socket().shutdown().await; - log::trace!("UnixSocketStream link shutdown {}: {:?}", self, res); + tracing::trace!("UnixSocketStream link shutdown {}: {:?}", self, res); res.map_err(|e| zerror!(e).into()) } async fn write(&self, buffer: &[u8]) -> ZResult { self.get_mut_socket().write(buffer).await.map_err(|e| { let e = zerror!("Write error on UnixSocketStream link {}: {}", self, e); - log::trace!("{}", e); + tracing::trace!("{}", e); e.into() }) } @@ -83,7 +83,7 @@ impl LinkUnicastTrait for LinkUnicastUnixSocketStream { async fn write_all(&self, buffer: &[u8]) -> ZResult<()> { self.get_mut_socket().write_all(buffer).await.map_err(|e| { let e = zerror!("Write error on UnixSocketStream link {}: {}", self, e); - log::trace!("{}", e); + tracing::trace!("{}", e); e.into() }) } @@ -91,7 +91,7 @@ impl LinkUnicastTrait for LinkUnicastUnixSocketStream { async fn read(&self, buffer: &mut [u8]) -> ZResult { self.get_mut_socket().read(buffer).await.map_err(|e| { let e = zerror!("Read error on UnixSocketStream link {}: {}", self, e); - 
log::trace!("{}", e); + tracing::trace!("{}", e); e.into() }) } @@ -103,7 +103,7 @@ impl LinkUnicastTrait for LinkUnicastUnixSocketStream { .map(|_len| ()) .map_err(|e| { let e = zerror!("Read error on UnixSocketStream link {}: {}", self, e); - log::trace!("{}", e); + tracing::trace!("{}", e); e.into() }) } @@ -126,7 +126,7 @@ impl LinkUnicastTrait for LinkUnicastUnixSocketStream { #[inline(always)] fn get_interface_names(&self) -> Vec { // @TODO: Not supported for now - log::debug!("The get_interface_names for LinkUnicastUnixSocketStream is not supported"); + tracing::debug!("The get_interface_names for LinkUnicastUnixSocketStream is not supported"); vec![] } @@ -221,7 +221,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { path, e ); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; @@ -231,7 +231,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { path, e ); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; @@ -242,7 +242,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { path, e ); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; @@ -250,7 +250,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { Some(path) => PathBuf::from(path), None => { let e = format!("Can not create a new UnixSocketStream link bound to {path:?}"); - log::warn!("{}", e); + tracing::warn!("{}", e); PathBuf::from(format!("{}", Uuid::new_v4())) } }; @@ -260,7 +260,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { "Can not create a new UnixSocketStream link bound to {:?}", path ); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; @@ -314,7 +314,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { "Can not create a new UnixSocketStream listener on {} - Unable to open lock file: {}", path, e ); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; @@ -326,7 +326,7 @@ impl LinkManagerUnicastTrait for 
LinkManagerUnicastUnixSocketStream { path, e ); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; @@ -342,7 +342,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { path, e ); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; @@ -352,19 +352,19 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { path, e ); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; let local_path = PathBuf::from(local_addr.as_pathname().ok_or_else(|| { let e = zerror!("Can not create a new UnixSocketStream listener on {}", path); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?); let local_path_str = local_path.to_str().ok_or_else(|| { let e = zerror!("Can not create a new UnixSocketStream listener on {}", path); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; @@ -409,7 +409,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { "Can not delete the UnixSocketStream listener because it has not been found: {}", path ); - log::trace!("{}", e); + tracing::trace!("{}", e); e })?; @@ -425,7 +425,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { // Remove the Unix Domain Socket file let lock_file_path = format!("{path}.lock"); let tmp = remove_file(lock_file_path); - log::trace!("UnixSocketStream Domain Socket removal result: {:?}", tmp); + tracing::trace!("UnixSocketStream Domain Socket removal result: {:?}", tmp); Ok(()) } @@ -457,7 +457,7 @@ async fn accept_task( let src_addr = socket.local_addr().map_err(|e| { zerror!("Can not accept UnixSocketStream connections: {}", e); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; @@ -466,7 +466,7 @@ async fn accept_task( "Can not create a new UnixSocketStream link bound to {:?}", src_addr ); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?); @@ -475,12 +475,12 @@ async fn accept_task( "Can not create a new UnixSocketStream link bound to {:?}", src_addr ); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; // 
The accept future - log::trace!( + tracing::trace!( "Ready to accept UnixSocketStream connections on: {}", src_path ); @@ -494,7 +494,7 @@ async fn accept_task( Ok(stream) => { let dst_path = format!("{}", Uuid::new_v4()); - log::debug!("Accepted UnixSocketStream connection on: {:?}", src_addr,); + tracing::debug!("Accepted UnixSocketStream connection on: {:?}", src_addr,); // Create the new link object let link = Arc::new(LinkUnicastUnixSocketStream::new( @@ -503,12 +503,12 @@ async fn accept_task( // Communicate the new link to the initial transport manager if let Err(e) = manager.send_async(LinkUnicast(link)).await { - log::error!("{}-{}: {}", file!(), line!(), e) + tracing::error!("{}-{}: {}", file!(), line!(), e) } } Err(e) => { - log::warn!("{}. Hint: increase the system open file limit.", e); + tracing::warn!("{}. Hint: increase the system open file limit.", e); // Throttle the accept loop upon an error // NOTE: This might be due to various factors. However, the most common case is that // the process has reached the maximum number of open files in the system. On diff --git a/io/zenoh-links/zenoh-link-vsock/Cargo.toml b/io/zenoh-links/zenoh-link-vsock/Cargo.toml index c9b451b5b9..4bf709b8c2 100644 --- a/io/zenoh-links/zenoh-link-vsock/Cargo.toml +++ b/io/zenoh-links/zenoh-link-vsock/Cargo.toml @@ -28,7 +28,7 @@ description = "Internal crate for zenoh." 
async-trait = { workspace = true } tokio = { workspace = true, features = ["net", "io-util", "rt", "time"] } tokio-util = { workspace = true, features = ["rt"] } -log = { workspace = true } +tracing = {workspace = true} libc = { workspace = true } zenoh-core = { workspace = true } zenoh-link-commons = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-vsock/src/unicast.rs b/io/zenoh-links/zenoh-link-vsock/src/unicast.rs index ced7b9dc15..2700fcf04d 100644 --- a/io/zenoh-links/zenoh-link-vsock/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-vsock/src/unicast.rs @@ -114,10 +114,10 @@ impl LinkUnicastVsock { #[async_trait] impl LinkUnicastTrait for LinkUnicastVsock { async fn close(&self) -> ZResult<()> { - log::trace!("Closing vsock link: {}", self); + tracing::trace!("Closing vsock link: {}", self); self.get_mut_socket().shutdown().await.map_err(|e| { let e = zerror!("vsock link shutdown {}: {:?}", self, e); - log::trace!("{}", e); + tracing::trace!("{}", e); e.into() }) } @@ -125,7 +125,7 @@ impl LinkUnicastTrait for LinkUnicastVsock { async fn write(&self, buffer: &[u8]) -> ZResult { self.get_mut_socket().write(buffer).await.map_err(|e| { let e = zerror!("Write error on vsock link {}: {}", self, e); - log::trace!("{}", e); + tracing::trace!("{}", e); e.into() }) } @@ -133,7 +133,7 @@ impl LinkUnicastTrait for LinkUnicastVsock { async fn write_all(&self, buffer: &[u8]) -> ZResult<()> { self.get_mut_socket().write_all(buffer).await.map_err(|e| { let e = zerror!("Write error on vsock link {}: {}", self, e); - log::trace!("{}", e); + tracing::trace!("{}", e); e.into() }) } @@ -141,7 +141,7 @@ impl LinkUnicastTrait for LinkUnicastVsock { async fn read(&self, buffer: &mut [u8]) -> ZResult { self.get_mut_socket().read(buffer).await.map_err(|e| { let e = zerror!("Read error on vsock link {}: {}", self, e); - log::trace!("{}", e); + tracing::trace!("{}", e); e.into() }) } @@ -153,7 +153,7 @@ impl LinkUnicastTrait for LinkUnicastVsock { .await .map_err(|e| { let e = 
zerror!("Read error on vsock link {}: {}", self, e); - log::trace!("{}", e); + tracing::trace!("{}", e); e })?; Ok(()) @@ -334,28 +334,28 @@ async fn accept_task( let src_addr = socket.local_addr().map_err(|e| { let e = zerror!("Can not accept vsock connections: {}", e); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; - log::trace!("Ready to accept vsock connections on: {:?}", src_addr); + tracing::trace!("Ready to accept vsock connections on: {:?}", src_addr); loop { tokio::select! { _ = token.cancelled() => break, res = accept(&mut socket) => { match res { Ok((stream, dst_addr)) => { - log::debug!("Accepted vsock connection on {:?}: {:?}", src_addr, dst_addr); + tracing::debug!("Accepted vsock connection on {:?}: {:?}", src_addr, dst_addr); // Create the new link object let link = Arc::new(LinkUnicastVsock::new(stream, src_addr, dst_addr)); // Communicate the new link to the initial transport manager if let Err(e) = manager.send_async(LinkUnicast(link)).await { - log::error!("{}-{}: {}", file!(), line!(), e) + tracing::error!("{}-{}: {}", file!(), line!(), e) } }, Err(e) => { - log::warn!("{}. Hint: increase the system open file limit.", e); + tracing::warn!("{}. Hint: increase the system open file limit.", e); // Throttle the accept loop upon an error // NOTE: This might be due to various factors. However, the most common case is that // the process has reached the maximum number of open files in the system. On diff --git a/io/zenoh-links/zenoh-link-ws/Cargo.toml b/io/zenoh-links/zenoh-link-ws/Cargo.toml index 0a1027b9bd..63d78a5cb5 100644 --- a/io/zenoh-links/zenoh-link-ws/Cargo.toml +++ b/io/zenoh-links/zenoh-link-ws/Cargo.toml @@ -34,7 +34,7 @@ description = "Internal crate for zenoh." 
[dependencies] async-trait = { workspace = true } futures-util = { workspace = true, features = ["sink", "std"] } -log = { workspace = true } +tracing = {workspace = true} tokio = { workspace = true, features = ["io-std", "macros", "net", "rt-multi-thread", "time"] } tokio-util = { workspace = true, features = ["rt"] } tokio-tungstenite = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-ws/src/unicast.rs b/io/zenoh-links/zenoh-link-ws/src/unicast.rs index 1a6d0fecf3..e94e4b6868 100644 --- a/io/zenoh-links/zenoh-link-ws/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-ws/src/unicast.rs @@ -61,7 +61,7 @@ impl LinkUnicastWs { ) -> LinkUnicastWs { // Set the TCP nodelay option if let Err(err) = get_stream(&socket).set_nodelay(true) { - log::warn!( + tracing::warn!( "Unable to set NODEALY option on TCP link {} => {}: {}", src_addr, dst_addr, @@ -123,12 +123,12 @@ impl LinkUnicastWs { #[async_trait] impl LinkUnicastTrait for LinkUnicastWs { async fn close(&self) -> ZResult<()> { - log::trace!("Closing WebSocket link: {}", self); + tracing::trace!("Closing WebSocket link: {}", self); let mut guard = zasynclock!(self.send); // Close the underlying TCP socket guard.close().await.map_err(|e| { let e = zerror!("WebSocket link shutdown {}: {:?}", self, e); - log::trace!("{}", e); + tracing::trace!("{}", e); e.into() }) } @@ -139,7 +139,7 @@ impl LinkUnicastTrait for LinkUnicastWs { guard.send(msg).await.map_err(|e| { let e = zerror!("Write error on WebSocket link {}: {}", self, e); - log::trace!("{}", e); + tracing::trace!("{}", e); e })?; @@ -207,7 +207,7 @@ impl LinkUnicastTrait for LinkUnicastWs { #[inline(always)] fn get_interface_names(&self) -> Vec { // @TODO: Not supported for now - log::debug!("The get_interface_names for LinkUnicastWs is not supported"); + tracing::debug!("The get_interface_names for LinkUnicastWs is not supported"); vec![] } @@ -228,7 +228,7 @@ impl Drop for LinkUnicastWs { let mut guard = zasynclock!(self.send); // Close the underlying TCP 
socket guard.close().await.unwrap_or_else(|e| { - log::warn!("`LinkUnicastWs::Drop` error when closing WebSocket {}", e) + tracing::warn!("`LinkUnicastWs::Drop` error when closing WebSocket {}", e) }); }) } @@ -387,7 +387,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastWs { "Can not delete the TCP (WebSocket) listener because it has not been found: {}", addr ); - log::trace!("{}", e); + tracing::trace!("{}", e); e })?; @@ -426,7 +426,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastWs { } } } - Err(err) => log::error!("Unable to get local addresses: {}", err), + Err(err) => tracing::error!("Unable to get local addresses: {}", err), } } else if key.ip() == default_ipv6 { match zenoh_util::net::get_local_addresses(None) { @@ -443,7 +443,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastWs { } } } - Err(err) => log::error!("Unable to get local addresses: {}", err), + Err(err) => tracing::error!("Unable to get local addresses: {}", err), } } else { locators.push(listener_locator.clone()); @@ -467,11 +467,11 @@ async fn accept_task( let src_addr = socket.local_addr().map_err(|e| { let e = zerror!("Can not accept TCP (WebSocket) connections: {}", e); - log::warn!("{}", e); + tracing::warn!("{}", e); e })?; - log::trace!( + tracing::trace!( "Ready to accept TCP (WebSocket) connections on: {:?}", src_addr ); @@ -482,7 +482,7 @@ async fn accept_task( match res { Ok(res) => res, Err(e) => { - log::warn!("{}. Hint: increase the system open file limit.", e); + tracing::warn!("{}. Hint: increase the system open file limit.", e); // Throttle the accept loop upon an error // NOTE: This might be due to various factors. However, the most common case is that // the process has reached the maximum number of open files in the system. 
On @@ -498,7 +498,7 @@ async fn accept_task( _ = token.cancelled() => break, }; - log::debug!( + tracing::debug!( "Accepted TCP (WebSocket) connection on {:?}: {:?}", src_addr, dst_addr @@ -508,7 +508,7 @@ async fn accept_task( .await .map_err(|e| { let e = zerror!("Error when creating the WebSocket session: {}", e); - log::trace!("{}", e); + tracing::trace!("{}", e); e })?; // Create the new link object @@ -516,7 +516,7 @@ async fn accept_task( // Communicate the new link to the initial transport manager if let Err(e) = manager.send_async(LinkUnicast(link)).await { - log::error!("{}-{}: {}", file!(), line!(), e) + tracing::error!("{}-{}: {}", file!(), line!(), e) } } diff --git a/io/zenoh-transport/Cargo.toml b/io/zenoh-transport/Cargo.toml index 5304a9fa17..b3a299e8be 100644 --- a/io/zenoh-transport/Cargo.toml +++ b/io/zenoh-transport/Cargo.toml @@ -62,7 +62,7 @@ tokio = { workspace = true, features = [ ] } tokio-util = { workspace = true, features = ["rt"]} flume = { workspace = true } -log = { workspace = true } +tracing = {workspace = true} lz4_flex = { workspace = true } paste = { workspace = true } rand = { workspace = true, features = ["default"] } @@ -89,6 +89,6 @@ zenoh-task = { workspace = true } [dev-dependencies] futures-util = { workspace = true } -env_logger = { workspace = true } +zenoh-util = {workspace = true } zenoh-protocol = { workspace = true, features = ["test"] } futures = { workspace = true } diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index 9df7632f7a..832cabd207 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -264,7 +264,7 @@ impl StageIn { tch.sn.set(sn).unwrap(); // Reinsert the batch *c_guard = Some(batch); - log::warn!( + tracing::warn!( "Zenoh message dropped because it can not be fragmented: {:?}", msg ); @@ -374,7 +374,7 @@ impl Backoff { } None => { self.retry_time = NanoSeconds::MAX; - log::warn!( + tracing::warn!( 
"Pipeline pull backoff overflow detected! Retrying in {}ns.", self.retry_time ); diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index bfbdd3af61..aede7ae1fb 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -342,7 +342,7 @@ impl TransportLinkMulticastUniversal { ) .await; if let Err(e) = res { - log::debug!("{}", e); + tracing::debug!("{}", e); // Spawn a task to avoid a deadlock waiting for this same task // to finish in the close() joining its handle zenoh_runtime::ZRuntime::Net.spawn(async move { c_transport.delete().await }); @@ -378,7 +378,7 @@ impl TransportLinkMulticastUniversal { .await; c_signal.trigger(); if let Err(e) = res { - log::debug!("{}", e); + tracing::debug!("{}", e); // Spawn a task to avoid a deadlock waiting for this same task // to finish in the close() joining its handle zenoh_runtime::ZRuntime::Net.spawn(async move { c_transport.delete().await }); @@ -393,7 +393,7 @@ impl TransportLinkMulticastUniversal { } pub(super) async fn close(mut self) -> ZResult<()> { - log::trace!("{}: closing", self.link); + tracing::trace!("{}: closing", self.link); self.stop_rx(); if let Some(handle) = self.handle_rx.take() { // It is safe to unwrap the Arc since we have the ownership of the whole link diff --git a/io/zenoh-transport/src/multicast/manager.rs b/io/zenoh-transport/src/multicast/manager.rs index b9b594205f..033daa1791 100644 --- a/io/zenoh-transport/src/multicast/manager.rs +++ b/io/zenoh-transport/src/multicast/manager.rs @@ -180,7 +180,7 @@ impl TransportManager { } pub async fn close_multicast(&self) { - log::trace!("TransportManagerMulticast::clear())"); + tracing::trace!("TransportManagerMulticast::clear())"); zasynclock!(self.state.multicast.protocols).clear(); @@ -300,7 +300,7 @@ impl TransportManager { res.map(|_| ()).ok_or_else(|| { let e = zerror!("Can not delete the transport for locator: {}", locator); - log::trace!("{}", e); + 
tracing::trace!("{}", e); e.into() }) } @@ -331,7 +331,7 @@ impl TransportManager { res.map(|_| ()).ok_or_else(|| { let e = zerror!("Can not delete the transport for locator: {}", locator); - log::trace!("{}", e); + tracing::trace!("{}", e); e.into() }) } diff --git a/io/zenoh-transport/src/multicast/rx.rs b/io/zenoh-transport/src/multicast/rx.rs index 5cf714210f..4927c179d7 100644 --- a/io/zenoh-transport/src/multicast/rx.rs +++ b/io/zenoh-transport/src/multicast/rx.rs @@ -66,7 +66,7 @@ impl TransportMulticastInner { "Ingoring Join on {} of peer: {}. Inconsistent parameters.", peer.locator, peer.zid, ); - log::debug!("{}", e); + tracing::debug!("{}", e); bail!("{}", e); } @@ -80,7 +80,7 @@ impl TransportMulticastInner { batch_size: BatchSize, ) -> ZResult<()> { if zread!(self.peers).len() >= self.manager.config.multicast.max_sessions { - log::debug!( + tracing::debug!( "Ingoring Join on {} from peer: {}. Max sessions reached: {}.", locator, join.zid, @@ -90,7 +90,7 @@ impl TransportMulticastInner { } if join.version != self.manager.config.version { - log::debug!( + tracing::debug!( "Ingoring Join on {} from peer: {}. Unsupported version: {}. Expected: {}.", locator, join.zid, @@ -101,7 +101,7 @@ impl TransportMulticastInner { } if join.resolution != self.manager.config.resolution { - log::debug!( + tracing::debug!( "Ingoring Join on {} from peer: {}. Unsupported SN resolution: {:?}. Expected: {:?}.", locator, join.zid, @@ -112,7 +112,7 @@ impl TransportMulticastInner { } if join.batch_size != batch_size { - log::debug!( + tracing::debug!( "Ingoring Join on {} from peer: {}. Unsupported Batch Size: {:?}. Expected: {:?}.", locator, join.zid, @@ -123,7 +123,7 @@ impl TransportMulticastInner { } if !self.manager.config.multicast.is_qos && join.ext_qos.is_some() { - log::debug!( + tracing::debug!( "Ingoring Join on {} from peer: {}. 
QoS is not supported.", locator, join.zid, @@ -226,7 +226,7 @@ impl TransportMulticastInner { ) -> ZResult<()> { let precedes = guard.sn.precedes(sn)?; if !precedes { - log::debug!( + tracing::debug!( "Transport: {}. Frame with invalid SN dropped: {}. Expected: {}.", self.manager.config.zid, sn, @@ -259,7 +259,7 @@ impl TransportMulticastInner { .decode() .map_err(|_| zerror!("{}: decoding error", locator))?; - log::trace!("Received: {:?}", msg); + tracing::trace!("Received: {:?}", msg); #[cfg(feature = "stats")] { @@ -282,7 +282,7 @@ impl TransportMulticastInner { self.del_peer(&locator, reason)?; } _ => { - log::debug!( + tracing::debug!( "Transport: {}. Message handling not implemented: {:?}", self.manager.config.zid, msg diff --git a/io/zenoh-transport/src/multicast/transport.rs b/io/zenoh-transport/src/multicast/transport.rs index 2e7f54098d..155b6b5568 100644 --- a/io/zenoh-transport/src/multicast/transport.rs +++ b/io/zenoh-transport/src/multicast/transport.rs @@ -162,7 +162,7 @@ impl TransportMulticastInner { /* TERMINATION */ /*************************************/ pub(super) async fn delete(&self) -> ZResult<()> { - log::debug!("Closing multicast transport on {:?}", self.locator); + tracing::debug!("Closing multicast transport on {:?}", self.locator); // Notify the callback that we are going to close the transport let callback = zwrite!(self.callback).take(); @@ -192,7 +192,7 @@ impl TransportMulticastInner { } pub(crate) async fn close(&self, reason: u8) -> ZResult<()> { - log::trace!( + tracing::trace!( "Closing multicast transport of peer {}: {}", self.manager.config.zid, self.locator @@ -355,7 +355,7 @@ impl TransportMulticastInner { } let priority_rx = priority_rx.into_boxed_slice(); - log::debug!( + tracing::debug!( "New transport joined on {}: zid {}, whatami {}, resolution {:?}, locator {}, is_qos {}, is_shm {}, initial sn: {:?}", self.locator, peer.zid, @@ -416,7 +416,7 @@ impl TransportMulticastInner { pub(super) fn del_peer(&self, locator: 
&Locator, reason: u8) -> ZResult<()> { let mut guard = zwrite!(self.peers); if let Some(peer) = guard.remove(locator) { - log::debug!( + tracing::debug!( "Peer {}/{}/{} has left multicast {} with reason: {}", peer.zid, peer.whatami, diff --git a/io/zenoh-transport/src/multicast/tx.rs b/io/zenoh-transport/src/multicast/tx.rs index 74238c7e71..3b58277402 100644 --- a/io/zenoh-transport/src/multicast/tx.rs +++ b/io/zenoh-transport/src/multicast/tx.rs @@ -37,7 +37,7 @@ impl TransportMulticastInner { } } None => { - log::trace!( + tracing::trace!( "Message dropped because the transport has no links: {}", msg ); @@ -59,7 +59,7 @@ impl TransportMulticastInner { crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.state.multicast.shm.reader) }; if let Err(e) = res { - log::trace!("Failed SHM conversion: {}", e); + tracing::trace!("Failed SHM conversion: {}", e); return false; } } diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index c1a1a8c16c..ce9229db4d 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -394,8 +394,8 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { self.link, ); match reason { - close::reason::MAX_LINKS => log::debug!("{}", e), - _ => log::error!("{}", e), + close::reason::MAX_LINKS => tracing::debug!("{}", e), + _ => tracing::error!("{}", e), } return Err((e.into(), None)); } @@ -405,7 +405,7 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { self.link, msg.body ); - log::error!("{}", e); + tracing::error!("{}", e); return Err((e.into(), Some(close::reason::INVALID))); } }; @@ -621,7 +621,7 @@ pub(crate) async fn accept_link(link: LinkUnicast, manager: &TransportManager) - match $res { Ok(output) => output, Err((e, reason)) => { - log::debug!("{}", e); + tracing::debug!("{}", e); let _ = link.close(reason).await; return Err(e); } @@ -731,7 +731,7 @@ pub(crate) async fn 
accept_link(link: LinkUnicast, manager: &TransportManager) - ) .await?; - log::debug!( + tracing::debug!( "New transport link accepted from {} to {}: {}.", osyn_out.other_zid, manager.config.zid, diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs index 878b058f31..9a7c3d8f32 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs @@ -373,7 +373,7 @@ impl<'a> OpenFsm for &'a AuthPubKeyFsm<'a> { _input: Self::SendInitSynIn, ) -> Result { const S: &str = "PubKey extension - Send InitSyn."; - log::trace!("{S}"); + tracing::trace!("{S}"); let init_syn = InitSyn { alice_pubkey: zasyncread!(self.inner).pub_key.clone(), @@ -396,7 +396,7 @@ impl<'a> OpenFsm for &'a AuthPubKeyFsm<'a> { input: Self::RecvInitAckIn, ) -> Result { const S: &str = "PubKey extension - Recv InitAck."; - log::trace!("{S}"); + tracing::trace!("{S}"); let (state, mut ext) = input; @@ -442,7 +442,7 @@ impl<'a> OpenFsm for &'a AuthPubKeyFsm<'a> { state: Self::SendOpenSynIn, ) -> Result { const S: &str = "PubKey extension - Send OpenSyn."; - log::trace!("{S}"); + tracing::trace!("{S}"); let open_syn = OpenSyn { nonce_encrypted_with_bob_pubkey: state.nonce.clone(), @@ -549,7 +549,7 @@ impl<'a> AcceptFsm for &'a AuthPubKeyFsm<'a> { input: Self::RecvInitSynIn, ) -> Result { const S: &str = "PubKey extension - Recv InitSyn."; - log::trace!("{S}"); + tracing::trace!("{S}"); let (state, mut ext) = input; @@ -587,7 +587,7 @@ impl<'a> AcceptFsm for &'a AuthPubKeyFsm<'a> { state: Self::SendInitAckIn, ) -> Result { const S: &str = "PubKey extension - Send InitAck."; - log::trace!("{S}"); + tracing::trace!("{S}"); let init_ack = InitAck { bob_pubkey: zasyncread!(self.inner).pub_key.clone(), @@ -611,7 +611,7 @@ impl<'a> AcceptFsm for &'a AuthPubKeyFsm<'a> { input: Self::RecvOpenSynIn, ) -> Result { const S: &str = "PubKey extension - 
Recv OpenSyn."; - log::trace!("{S}"); + tracing::trace!("{S}"); let (state, mut ext) = input; @@ -650,7 +650,7 @@ impl<'a> AcceptFsm for &'a AuthPubKeyFsm<'a> { _input: Self::SendOpenAckIn, ) -> Result { const S: &str = "PubKey extension - Send OpenAck."; - log::trace!("{S}"); + tracing::trace!("{S}"); Ok(Some(ZExtUnit::new())) } diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs index f66a8fd53d..23560e307e 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs @@ -96,19 +96,19 @@ impl AuthUsrPwd { } lookup.insert(user, password); } - log::debug!("{S} User-password dictionary has been configured."); + tracing::debug!("{S} User-password dictionary has been configured."); } let mut credentials: Option<(User, Password)> = None; if let Some(user) = config.user() { if let Some(password) = config.password() { - log::debug!("{S} User-password has been configured."); + tracing::debug!("{S} User-password has been configured."); credentials = Some((user.as_bytes().to_owned(), password.as_bytes().to_owned())); } } if !lookup.is_empty() || credentials.is_some() { - log::debug!("{S} User-password authentication is enabled."); + tracing::debug!("{S} User-password authentication is enabled."); Ok(Some(Self { lookup, credentials, diff --git a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs index f2d6fe4dd0..2aec0cf508 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs @@ -203,7 +203,7 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { let codec = Zenoh080::new(); let mut reader = ext.value.reader(); let Ok(init_ack): Result = codec.read(&mut reader) else { - log::trace!("{} Decoding error.", S); + tracing::trace!("{} Decoding error.", S); state.is_shm = false; 
return Ok(0); }; @@ -219,7 +219,7 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { // Verify that Bob has correctly read Alice challenge if challenge != init_ack.alice_challenge { - log::trace!( + tracing::trace!( "{} Challenge mismatch: {} != {}.", S, init_ack.alice_challenge, @@ -233,7 +233,7 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { let shm_buff = match zasyncwrite!(self.inner.reader).read_shmbuf(&init_ack.bob_info) { Ok(buff) => buff, Err(e) => { - log::trace!("{} {}", S, e); + tracing::trace!("{} {}", S, e); state.is_shm = false; return Ok(0); } @@ -243,7 +243,7 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { let bytes: [u8; std::mem::size_of::()] = match shm_buff.as_slice().try_into() { Ok(bytes) => bytes, Err(_) => { - log::trace!("{} Failed to read remote Shm.", S); + tracing::trace!("{} Failed to read remote Shm.", S); state.is_shm = false; return Ok(0); } @@ -288,7 +288,7 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { }; if ext.value != 1 { - log::trace!("{} Invalid value.", S); + tracing::trace!("{} Invalid value.", S); state.is_shm = false; return Ok(()); } @@ -377,7 +377,7 @@ impl<'a> AcceptFsm for &'a ShmFsm<'a> { let codec = Zenoh080::new(); let mut reader = ext.value.reader(); let Ok(init_syn): Result = codec.read(&mut reader) else { - log::trace!("{} Decoding error.", S); + tracing::trace!("{} Decoding error.", S); state.is_shm = false; return Ok(0); }; @@ -386,7 +386,7 @@ impl<'a> AcceptFsm for &'a ShmFsm<'a> { let shm_buff = match zasyncwrite!(self.inner.reader).read_shmbuf(&init_syn.alice_info) { Ok(buff) => buff, Err(e) => { - log::trace!("{} {}", S, e); + tracing::trace!("{} {}", S, e); state.is_shm = false; return Ok(0); } @@ -396,7 +396,7 @@ impl<'a> AcceptFsm for &'a ShmFsm<'a> { let bytes: [u8; std::mem::size_of::()] = match shm_buff.as_slice().try_into() { Ok(bytes) => bytes, Err(_) => { - log::trace!("{} Failed to read remote Shm.", S); + tracing::trace!("{} Failed to read remote Shm.", S); state.is_shm = false; return Ok(0); } @@ -464,7 +464,7 @@ impl<'a> 
AcceptFsm for &'a ShmFsm<'a> { // Verify that Alice has correctly read Bob challenge let bob_challnge = ext.value; if challenge != bob_challnge { - log::trace!( + tracing::trace!( "{} Challenge mismatch: {} != {}.", S, bob_challnge, diff --git a/io/zenoh-transport/src/unicast/establishment/open.rs b/io/zenoh-transport/src/unicast/establishment/open.rs index c3f1bfbb8a..bb5db2336e 100644 --- a/io/zenoh-transport/src/unicast/establishment/open.rs +++ b/io/zenoh-transport/src/unicast/establishment/open.rs @@ -229,8 +229,8 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { link, ); match reason { - close::reason::MAX_LINKS => log::debug!("{}", e), - _ => log::error!("{}", e), + close::reason::MAX_LINKS => tracing::debug!("{}", e), + _ => tracing::error!("{}", e), } return Err((e.into(), None)); } @@ -240,7 +240,7 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { link, msg.body ); - log::error!("{}", e); + tracing::error!("{}", e); return Err((e.into(), Some(close::reason::INVALID))); } }; @@ -260,7 +260,7 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { i_fsn_res, m_fsn_res ); - log::error!("{}", e); + tracing::error!("{}", e); return Err((e.into(), Some(close::reason::INVALID))); } res.set(Field::FrameSN, i_fsn_res); @@ -276,7 +276,7 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { i_rid_res, m_rid_res ); - log::error!("{}", e); + tracing::error!("{}", e); return Err((e.into(), Some(close::reason::INVALID))); } res.set(Field::RequestID, i_rid_res); @@ -447,8 +447,8 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { link, ); match reason { - close::reason::MAX_LINKS => log::debug!("{}", e), - _ => log::error!("{}", e), + close::reason::MAX_LINKS => tracing::debug!("{}", e), + _ => tracing::error!("{}", e), } return Err((e.into(), None)); } @@ -458,7 +458,7 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { link, msg.body ); - log::error!("{}", e); + tracing::error!("{}", e); return Err((e.into(), Some(close::reason::INVALID))); } }; @@ 
-644,7 +644,7 @@ pub(crate) async fn open_link( ) .await?; - log::debug!( + tracing::debug!( "New transport link opened from {} to {}: {}.", manager.config.zid, iack_out.other_zid, diff --git a/io/zenoh-transport/src/unicast/link.rs b/io/zenoh-transport/src/unicast/link.rs index bd756d6396..54543b8e6e 100644 --- a/io/zenoh-transport/src/unicast/link.rs +++ b/io/zenoh-transport/src/unicast/link.rs @@ -147,7 +147,7 @@ impl TransportLinkUnicastTx { pub(crate) async fn send_batch(&mut self, batch: &mut WBatch) -> ZResult<()> { const ERR: &str = "Write error on link: "; - // log::trace!("WBatch: {:?}", batch); + // tracing::trace!("WBatch: {:?}", batch); let res = batch .finalize(self.buffer.as_mut()) @@ -162,7 +162,7 @@ impl TransportLinkUnicastTx { .as_slice(), }; - // log::trace!("WBytes: {:02x?}", bytes); + // tracing::trace!("WBytes: {:02x?}", bytes); // Send the message on the link self.inner.link.write_all(bytes).await?; @@ -230,7 +230,7 @@ impl TransportLinkUnicastRx { self.link.read(into.as_mut_slice()).await? }; - // log::trace!("RBytes: {:02x?}", &into.as_slice()[0..end]); + // tracing::trace!("RBytes: {:02x?}", &into.as_slice()[0..end]); let buffer = ZSlice::make(Arc::new(into), 0, end) .map_err(|_| zerror!("{ERR}{self}. ZSlice index(es) out of bounds"))?; @@ -239,7 +239,7 @@ impl TransportLinkUnicastRx { .initialize(buff) .map_err(|e| zerror!("{ERR}{self}. 
{e}."))?; - // log::trace!("RBatch: {:?}", batch); + // tracing::trace!("RBatch: {:?}", batch); Ok(batch) } diff --git a/io/zenoh-transport/src/unicast/lowlatency/link.rs b/io/zenoh-transport/src/unicast/lowlatency/link.rs index 43e4516aa5..7eb39092eb 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/link.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/link.rs @@ -62,7 +62,7 @@ pub(crate) async fn send_with_link( } link.write_all(&buffer).await?; } - log::trace!("Sent: {:?}", msg); + tracing::trace!("Sent: {:?}", msg); #[cfg(feature = "stats")] { @@ -122,13 +122,13 @@ impl TransportUnicastLowlatency { c_transport.stats.clone(), ) .await; - log::debug!( + tracing::debug!( "[{}] Keepalive task finished with result {:?}", c_transport.manager.config.zid, res ); if res.is_err() { - log::debug!( + tracing::debug!( "[{}] finalizing transport with peer: {}", c_transport.manager.config.zid, c_transport.config.zid @@ -190,13 +190,13 @@ impl TransportUnicastLowlatency { } }; - log::debug!( + tracing::debug!( "[{}] Rx task finished with result {:?}", c_transport.manager.config.zid, res ); if res.is_err() { - log::debug!( + tracing::debug!( "[{}] finalizing transport with peer: {}", c_transport.manager.config.zid, c_transport.config.zid diff --git a/io/zenoh-transport/src/unicast/lowlatency/rx.rs b/io/zenoh-transport/src/unicast/lowlatency/rx.rs index 87c03dde56..4be94cc1a0 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/rx.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/rx.rs @@ -41,7 +41,7 @@ impl TransportUnicastLowlatency { } callback.handle_message(msg) } else { - log::debug!( + tracing::debug!( "Transport: {}. 
No callback available, dropping message: {}", self.config.zid, msg @@ -62,7 +62,7 @@ impl TransportUnicastLowlatency { .read(&mut reader) .map_err(|_| zerror!("{}: decoding error", link))?; - log::trace!("Received: {:?}", msg); + tracing::trace!("Received: {:?}", msg); #[cfg(feature = "stats")] { diff --git a/io/zenoh-transport/src/unicast/lowlatency/transport.rs b/io/zenoh-transport/src/unicast/lowlatency/transport.rs index 283c143499..dcc9fc8476 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/transport.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/transport.rs @@ -87,7 +87,7 @@ impl TransportUnicastLowlatency { /* TERMINATION */ /*************************************/ pub(super) async fn finalize(&self, reason: u8) -> ZResult<()> { - log::debug!( + tracing::debug!( "[{}] Finalizing transport with peer: {}", self.manager.config.zid, self.config.zid @@ -107,7 +107,7 @@ impl TransportUnicastLowlatency { } pub(super) async fn delete(&self) -> ZResult<()> { - log::debug!( + tracing::debug!( "[{}] Deleting transport with peer: {}", self.manager.config.zid, self.config.zid @@ -150,7 +150,7 @@ impl TransportUnicastLowlatency { let mut a_guard = zasynclock!(self.alive); if *a_guard { let e = zerror!("Transport already synched with peer: {}", self.config.zid); - log::trace!("{}", e); + tracing::trace!("{}", e); return Err(e.into()); } @@ -229,7 +229,7 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { other_initial_sn: TransportSn, other_lease: Duration, ) -> AddLinkResult { - log::trace!("Adding link: {}", link); + tracing::trace!("Adding link: {}", link); let _ = self.sync(other_initial_sn).await; @@ -263,12 +263,12 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { /* TERMINATION */ /*************************************/ async fn close_link(&self, link: Link, reason: u8) -> ZResult<()> { - log::trace!("Closing link {} with peer: {}", link, self.config.zid); + tracing::trace!("Closing link {} with peer: {}", link, self.config.zid); 
self.finalize(reason).await } async fn close(&self, reason: u8) -> ZResult<()> { - log::trace!("Closing transport with peer: {}", self.config.zid); + tracing::trace!("Closing transport with peer: {}", self.config.zid); self.finalize(reason).await } } diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index 8a63f4f630..2cce7299b0 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -294,7 +294,7 @@ impl TransportManager { } pub async fn close_unicast(&self) { - log::trace!("TransportManagerUnicast::clear())"); + tracing::trace!("TransportManagerUnicast::clear())"); let mut pl_guard = zasynclock!(self.state.unicast.protocols) .drain() @@ -435,7 +435,7 @@ impl TransportManager { config, existing_config ); - log::trace!("{}", e); + tracing::trace!("{}", e); return Err(InitTransportError::Link(( e.into(), link.fail(), @@ -524,7 +524,7 @@ impl TransportManager { self.config.unicast.max_sessions, config.zid ); - log::trace!("{e}"); + tracing::trace!("{e}"); return Err(InitTransportError::Link(( e.into(), link.fail(), @@ -537,10 +537,10 @@ impl TransportManager { // Select and create transport implementation depending on the cfg and enabled features let t = if config.is_lowlatency { - log::debug!("Will use LowLatency transport!"); + tracing::debug!("Will use LowLatency transport!"); TransportUnicastLowlatency::make(self.clone(), config.clone()) } else { - log::debug!("Will use Universal transport!"); + tracing::debug!("Will use Universal transport!"); link_error!( TransportUnicastUniversal::make(self.clone(), config.clone()), close::reason::INVALID @@ -589,7 +589,7 @@ impl TransportManager { zcondfeat!( "shared-memory", { - log::debug!( + tracing::debug!( "New transport opened between {} and {} - whatami: {}, sn resolution: {:?}, initial sn: {:?}, qos: {}, shm: {}, multilink: {}, lowlatency: {}", self.config.zid, config.zid, @@ -603,7 +603,7 @@ impl TransportManager { ); }, 
{ - log::debug!( + tracing::debug!( "New transport opened between {} and {} - whatami: {}, sn resolution: {:?}, initial sn: {:?}, qos: {}, multilink: {}, lowlatency: {}", self.config.zid, config.zid, @@ -719,7 +719,7 @@ impl TransportManager { .remove(peer) .ok_or_else(|| { let e = zerror!("Can not delete the transport of peer: {}", peer); - log::trace!("{}", e); + tracing::trace!("{}", e); e })?; Ok(()) @@ -733,13 +733,13 @@ impl TransportManager { // are too small for the scenario zenoh is deployed in; // - there is a tentative of DoS attack. // In both cases, let's close the link straight away with no additional notification - log::trace!("Closing link for preventing potential DoS: {}", link); + tracing::trace!("Closing link for preventing potential DoS: {}", link); let _ = link.close().await; return; } // A new link is available - log::trace!("Accepting link... {}", link); + tracing::trace!("Accepting link... {}", link); self.state.unicast.incoming.fetch_add(1, SeqCst); // Spawn a task to accept the link @@ -752,7 +752,7 @@ impl TransportManager { ) .await { - log::debug!("{}", e); + tracing::debug!("{}", e); } incoming_counter.fetch_sub(1, SeqCst); }); diff --git a/io/zenoh-transport/src/unicast/universal/link.rs b/io/zenoh-transport/src/unicast/universal/link.rs index fe4e8c8691..9a85ee9a46 100644 --- a/io/zenoh-transport/src/unicast/universal/link.rs +++ b/io/zenoh-transport/src/unicast/universal/link.rs @@ -97,7 +97,7 @@ impl TransportLinkUnicastUniversal { .await; if let Err(e) = res { - log::debug!("{}", e); + tracing::debug!("{}", e); // Spawn a task to avoid a deadlock waiting for this same task // to finish in the close() joining its handle // TODO(yuyuan): do more study to check which ZRuntime should be used or refine the @@ -125,7 +125,7 @@ impl TransportLinkUnicastUniversal { // TODO(yuyuan): improve this callback if let Err(e) = res { - log::debug!("{}", e); + tracing::debug!("{}", e); // Spawn a task to avoid a deadlock waiting for this same task 
// to finish in the close() joining its handle @@ -146,7 +146,7 @@ impl TransportLinkUnicastUniversal { } pub(super) async fn close(self) -> ZResult<()> { - log::trace!("{}: closing", self.link); + tracing::trace!("{}: closing", self.link); self.tracker.close(); self.token.cancel(); diff --git a/io/zenoh-transport/src/unicast/universal/reliability.rs b/io/zenoh-transport/src/unicast/universal/reliability.rs index 2f5f6db5c3..b3637bee27 100644 --- a/io/zenoh-transport/src/unicast/universal/reliability.rs +++ b/io/zenoh-transport/src/unicast/universal/reliability.rs @@ -120,7 +120,7 @@ impl ReliabilityQueue { self.sn.get(), self.capacity() ); - log::trace!("{}", e); + tracing::trace!("{}", e); return zerror!(ZErrorKind::Other { descr: e }); } @@ -152,7 +152,7 @@ impl ReliabilityQueue { self.sn.get(), self.capacity() ); - log::trace!("{}", e); + tracing::trace!("{}", e); return zerror!(ZErrorKind::Other { descr: e }); } @@ -222,7 +222,7 @@ impl ReliabilityQueue { self.sn.get(), self.capacity() ); - log::trace!("{}", e); + tracing::trace!("{}", e); return zerror!(ZErrorKind::Other { descr: e }); } diff --git a/io/zenoh-transport/src/unicast/universal/rx.rs b/io/zenoh-transport/src/unicast/universal/rx.rs index 9dfe075956..027a11c796 100644 --- a/io/zenoh-transport/src/unicast/universal/rx.rs +++ b/io/zenoh-transport/src/unicast/universal/rx.rs @@ -100,7 +100,7 @@ impl TransportUnicastUniversal { self.trigger_callback(callback.as_ref(), msg)?; } } else { - log::debug!( + tracing::debug!( "Transport: {}. No callback available, dropping messages: {:?}", self.config.zid, payload @@ -152,7 +152,7 @@ impl TransportUnicastUniversal { if let Some(callback) = callback.as_ref() { return self.trigger_callback(callback.as_ref(), msg); } else { - log::debug!( + tracing::debug!( "Transport: {}. 
No callback available, dropping messages: {:?}", self.config.zid, msg @@ -170,7 +170,7 @@ impl TransportUnicastUniversal { ) -> ZResult<()> { let precedes = guard.sn.roll(sn)?; if !precedes { - log::debug!( + tracing::debug!( "Transport: {}. Frame with invalid SN dropped: {}. Expected: {}.", self.config.zid, sn, @@ -193,7 +193,7 @@ impl TransportUnicastUniversal { .decode() .map_err(|_| zerror!("{}: decoding error", link))?; - log::trace!("Received: {:?}", msg); + tracing::trace!("Received: {:?}", msg); #[cfg(feature = "stats")] { @@ -208,7 +208,7 @@ impl TransportUnicastUniversal { } TransportBody::KeepAlive(KeepAlive { .. }) => {} _ => { - log::debug!( + tracing::debug!( "Transport: {}. Message handling not implemented: {:?}", self.config.zid, msg diff --git a/io/zenoh-transport/src/unicast/universal/transport.rs b/io/zenoh-transport/src/unicast/universal/transport.rs index 30e1bd2ecd..58acd5c4b2 100644 --- a/io/zenoh-transport/src/unicast/universal/transport.rs +++ b/io/zenoh-transport/src/unicast/universal/transport.rs @@ -128,7 +128,7 @@ impl TransportUnicastUniversal { /* TERMINATION */ /*************************************/ pub(super) async fn delete(&self) -> ZResult<()> { - log::debug!( + tracing::debug!( "[{}] Closing transport with peer: {}", self.manager.config.zid, self.config.zid @@ -217,7 +217,7 @@ impl TransportUnicastUniversal { let mut a_guard = zasynclock!(self.alive); if *a_guard { let e = zerror!("Transport already synched with peer: {}", self.config.zid); - log::trace!("{}", e); + tracing::trace!("{}", e); return Err(e.into()); } @@ -357,7 +357,7 @@ impl TransportUnicastTrait for TransportUnicastUniversal { /* TERMINATION */ /*************************************/ async fn close_link(&self, link: Link, reason: u8) -> ZResult<()> { - log::trace!("Closing link {} with peer: {}", link, self.config.zid); + tracing::trace!("Closing link {} with peer: {}", link, self.config.zid); let transport_link_pipeline = zlinkget!(zread!(self.links), link) 
.ok_or_else(|| zerror!("Cannot close Link {:?}: not found", link))? @@ -378,7 +378,7 @@ impl TransportUnicastTrait for TransportUnicastUniversal { } async fn close(&self, reason: u8) -> ZResult<()> { - log::trace!("Closing transport with peer: {}", self.config.zid); + tracing::trace!("Closing transport with peer: {}", self.config.zid); let mut pipelines = zread!(self.links) .iter() diff --git a/io/zenoh-transport/src/unicast/universal/tx.rs b/io/zenoh-transport/src/unicast/universal/tx.rs index 67c783c530..ffc162c0b4 100644 --- a/io/zenoh-transport/src/unicast/universal/tx.rs +++ b/io/zenoh-transport/src/unicast/universal/tx.rs @@ -24,7 +24,7 @@ impl TransportUnicastUniversal { // block for fairly long time let pl = $pipeline.clone(); drop($guard); - log::trace!("Scheduled: {:?}", $msg); + tracing::trace!("Scheduled: {:?}", $msg); return pl.push_network_message($msg); }; } @@ -47,7 +47,7 @@ impl TransportUnicastUniversal { } // No Link found - log::trace!( + tracing::trace!( "Message dropped because the transport has no links: {}", msg ); @@ -67,7 +67,7 @@ impl TransportUnicastUniversal { crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shm().reader) }; if let Err(e) = res { - log::trace!("Failed SHM conversion: {}", e); + tracing::trace!("Failed SHM conversion: {}", e); return false; } } diff --git a/io/zenoh-transport/tests/endpoints.rs b/io/zenoh-transport/tests/endpoints.rs index 13a605a588..ca24aa8f60 100644 --- a/io/zenoh-transport/tests/endpoints.rs +++ b/io/zenoh-transport/tests/endpoints.rs @@ -99,7 +99,7 @@ async fn run(endpoints: &[EndPoint]) { #[cfg(feature = "transport_tcp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn endpoint_tcp() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locators let endpoints: Vec = vec![ format!("tcp/127.0.0.1:{}", 7000).parse().unwrap(), @@ -112,7 +112,7 @@ async fn endpoint_tcp() { #[cfg(feature = "transport_udp")] #[tokio::test(flavor = "multi_thread", 
worker_threads = 4)] async fn endpoint_udp() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locators let endpoints: Vec = vec![ format!("udp/127.0.0.1:{}", 7010).parse().unwrap(), @@ -125,7 +125,7 @@ async fn endpoint_udp() { #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn endpoint_unix() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Remove the files if they still exists let f1 = "zenoh-test-unix-socket-0.sock"; let f2 = "zenoh-test-unix-socket-1.sock"; @@ -146,7 +146,7 @@ async fn endpoint_unix() { #[cfg(feature = "transport_ws")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn endpoint_ws() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locators let endpoints: Vec = vec![ format!("ws/127.0.0.1:{}", 7020).parse().unwrap(), @@ -159,7 +159,7 @@ async fn endpoint_ws() { #[cfg(feature = "transport_unixpipe")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn endpoint_unixpipe() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locators let endpoints: Vec = vec![ "unixpipe/endpoint_unixpipe".parse().unwrap(), @@ -173,7 +173,7 @@ async fn endpoint_unixpipe() { #[cfg(all(feature = "transport_tcp", feature = "transport_udp"))] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn endpoint_tcp_udp() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locators let endpoints: Vec = vec![ format!("tcp/127.0.0.1:{}", 7030).parse().unwrap(), @@ -192,7 +192,7 @@ async fn endpoint_tcp_udp() { ))] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn endpoint_tcp_udp_unix() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Remove the file if it still exists let f1 = "zenoh-test-unix-socket-2.sock"; let _ = 
std::fs::remove_file(f1); @@ -216,7 +216,7 @@ async fn endpoint_tcp_udp_unix() { ))] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn endpoint_tcp_unix() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Remove the file if it still exists let f1 = "zenoh-test-unix-socket-3.sock"; let _ = std::fs::remove_file(f1); @@ -238,7 +238,7 @@ async fn endpoint_tcp_unix() { ))] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn endpoint_udp_unix() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Remove the file if it still exists let f1 = "zenoh-test-unix-socket-4.sock"; let _ = std::fs::remove_file(f1); // Define the locators @@ -257,7 +257,7 @@ async fn endpoint_udp_unix() { async fn endpoint_tls() { use zenoh_link::tls::config::*; - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // NOTE: this an auto-generated pair of certificate and key. // The target domain is localhost, so it has no real @@ -336,7 +336,7 @@ AXVFFIgCSluyrolaD6CWD9MqOex4YOfJR2bNxI7lFvuK4AwjyUJzT1U1HXib17mM async fn endpoint_quic() { use zenoh_link::quic::config::*; - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // NOTE: this an auto-generated pair of certificate and key. 
// The target domain is localhost, so it has no real @@ -412,7 +412,7 @@ AXVFFIgCSluyrolaD6CWD9MqOex4YOfJR2bNxI7lFvuK4AwjyUJzT1U1HXib17mM #[cfg(all(feature = "transport_vsock", target_os = "linux"))] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn endpoint_vsock() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locators let endpoints: Vec = vec![ "vsock/-1:1234".parse().unwrap(), diff --git a/io/zenoh-transport/tests/multicast_compression.rs b/io/zenoh-transport/tests/multicast_compression.rs index 5d0c9ef9ae..e2ed01469c 100644 --- a/io/zenoh-transport/tests/multicast_compression.rs +++ b/io/zenoh-transport/tests/multicast_compression.rs @@ -331,7 +331,7 @@ mod tests { #[cfg(feature = "transport_udp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_multicast_compression_udp_only() { - env_logger::init(); + zenoh_util::init_log_from_env(); // Define the locator let endpoints: Vec = vec![ diff --git a/io/zenoh-transport/tests/multicast_transport.rs b/io/zenoh-transport/tests/multicast_transport.rs index 96525c263c..9f0ecdff8d 100644 --- a/io/zenoh-transport/tests/multicast_transport.rs +++ b/io/zenoh-transport/tests/multicast_transport.rs @@ -327,7 +327,7 @@ mod tests { #[cfg(all(feature = "transport_compression", feature = "transport_udp"))] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_multicast_udp_only() { - env_logger::init(); + zenoh_util::init_log_from_env(); // Define the locator let endpoints: Vec = vec![ diff --git a/io/zenoh-transport/tests/transport_whitelist.rs b/io/zenoh-transport/tests/transport_whitelist.rs index ccc74e679e..399a9dea45 100644 --- a/io/zenoh-transport/tests/transport_whitelist.rs +++ b/io/zenoh-transport/tests/transport_whitelist.rs @@ -117,7 +117,7 @@ async fn run(endpoints: &[EndPoint]) { #[cfg(feature = "transport_tcp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn 
transport_whitelist_tcp() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locators let endpoints: Vec = vec![ @@ -132,7 +132,7 @@ async fn transport_whitelist_tcp() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn transport_whitelist_unixpipe() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locators let endpoints: Vec = vec![ @@ -146,7 +146,7 @@ async fn transport_whitelist_unixpipe() { #[cfg(all(feature = "transport_vsock", target_os = "linux"))] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_whitelist_vsock() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locators let endpoints: Vec = vec![ diff --git a/io/zenoh-transport/tests/unicast_authenticator.rs b/io/zenoh-transport/tests/unicast_authenticator.rs index d94ade1ce1..8acb604528 100644 --- a/io/zenoh-transport/tests/unicast_authenticator.rs +++ b/io/zenoh-transport/tests/unicast_authenticator.rs @@ -637,7 +637,7 @@ async fn run_with_lowlatency_transport(endpoint: &EndPoint) { #[cfg(feature = "transport_tcp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn authenticator_tcp() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 8000).parse().unwrap(); run_with_universal_transport(&endpoint).await; } @@ -645,7 +645,7 @@ async fn authenticator_tcp() { #[cfg(feature = "transport_tcp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn authenticator_tcp_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 8100).parse().unwrap(); run_with_lowlatency_transport(&endpoint).await; } @@ -653,7 +653,7 @@ async fn authenticator_tcp_with_lowlatency_transport() { #[cfg(feature = "transport_udp")] #[tokio::test(flavor = "multi_thread", 
worker_threads = 4)] async fn authenticator_udp() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 8010).parse().unwrap(); run_with_universal_transport(&endpoint).await; } @@ -661,7 +661,7 @@ async fn authenticator_udp() { #[cfg(feature = "transport_udp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn authenticator_udp_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 8110).parse().unwrap(); run_with_lowlatency_transport(&endpoint).await; } @@ -670,7 +670,7 @@ async fn authenticator_udp_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn authenticator_unixpipe() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = "unixpipe/authenticator_unixpipe_test".parse().unwrap(); run_with_universal_transport(&endpoint).await; } @@ -679,7 +679,7 @@ async fn authenticator_unixpipe() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn authenticator_unixpipe_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = "unixpipe/authenticator_unixpipe_with_lowlatency_transport" .parse() .unwrap(); @@ -690,7 +690,7 @@ async fn authenticator_unixpipe_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn authenticator_ws() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 8020).parse().unwrap(); run_with_universal_transport(&endpoint).await; } @@ -699,7 +699,7 @@ async fn authenticator_ws() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn authenticator_ws_with_lowlatency_transport() { - let _ = env_logger::try_init(); + 
zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 8120).parse().unwrap(); run_with_lowlatency_transport(&endpoint).await; } @@ -707,7 +707,7 @@ async fn authenticator_ws_with_lowlatency_transport() { #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn authenticator_unix() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let f1 = "zenoh-test-unix-socket-10.sock"; let _ = std::fs::remove_file(f1); let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap(); @@ -721,7 +721,7 @@ async fn authenticator_unix() { async fn authenticator_tls() { use zenoh_link::tls::config::*; - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // NOTE: this an auto-generated pair of certificate and key. // The target domain is localhost, so it has no real @@ -821,7 +821,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== async fn authenticator_quic() { use zenoh_link::quic::config::*; - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // NOTE: this an auto-generated pair of certificate and key. 
// The target domain is localhost, so it has no real diff --git a/io/zenoh-transport/tests/unicast_compression.rs b/io/zenoh-transport/tests/unicast_compression.rs index 7707da57de..e4fbae4862 100644 --- a/io/zenoh-transport/tests/unicast_compression.rs +++ b/io/zenoh-transport/tests/unicast_compression.rs @@ -422,7 +422,7 @@ mod tests { #[cfg(feature = "transport_tcp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_compression_tcp_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locators let endpoints: Vec = vec![ @@ -447,7 +447,7 @@ mod tests { #[cfg(feature = "transport_tcp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_compression_tcp_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locators let endpoints: Vec = vec![format!("tcp/127.0.0.1:{}", 19100).parse().unwrap()]; @@ -469,7 +469,7 @@ mod tests { #[cfg(feature = "transport_udp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_compression_udp_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locator let endpoints: Vec = vec![ @@ -494,7 +494,7 @@ mod tests { #[cfg(feature = "transport_udp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_compression_udp_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locator let endpoints: Vec = vec![format!("udp/127.0.0.1:{}", 19110).parse().unwrap()]; diff --git a/io/zenoh-transport/tests/unicast_concurrent.rs b/io/zenoh-transport/tests/unicast_concurrent.rs index ae17ae3f99..1cbaab75c7 100644 --- a/io/zenoh-transport/tests/unicast_concurrent.rs +++ b/io/zenoh-transport/tests/unicast_concurrent.rs @@ -348,7 +348,7 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec = vec![ 
format!("tcp/127.0.0.1:{}", 9000).parse().unwrap(), @@ -378,7 +378,7 @@ async fn transport_tcp_concurrent() { #[tokio::test] #[ignore] async fn transport_ws_concurrent() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint01: Vec = vec![ format!("ws/127.0.0.1:{}", 9020).parse().unwrap(), @@ -408,7 +408,7 @@ async fn transport_ws_concurrent() { #[tokio::test] #[ignore] async fn transport_unixpipe_concurrent() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint01: Vec = vec![ "unixpipe/transport_unixpipe_concurrent".parse().unwrap(), diff --git a/io/zenoh-transport/tests/unicast_defragmentation.rs b/io/zenoh-transport/tests/unicast_defragmentation.rs index 4b09bac0f4..9a3b656cdc 100644 --- a/io/zenoh-transport/tests/unicast_defragmentation.rs +++ b/io/zenoh-transport/tests/unicast_defragmentation.rs @@ -131,7 +131,7 @@ async fn run(endpoint: &EndPoint, channel: Channel, msg_size: usize) { #[cfg(feature = "transport_tcp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_defragmentation_tcp_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locators let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 11000).parse().unwrap(); @@ -164,7 +164,7 @@ async fn transport_unicast_defragmentation_tcp_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn transport_unicast_defragmentation_ws_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locators let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 11010).parse().unwrap(); @@ -197,7 +197,7 @@ async fn transport_unicast_defragmentation_ws_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn transport_unicast_defragmentation_unixpipe_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locators let endpoint: EndPoint = 
"unixpipe/transport_unicast_defragmentation_unixpipe_only" diff --git a/io/zenoh-transport/tests/unicast_intermittent.rs b/io/zenoh-transport/tests/unicast_intermittent.rs index 6d9f889d8c..c076cf5fa7 100644 --- a/io/zenoh-transport/tests/unicast_intermittent.rs +++ b/io/zenoh-transport/tests/unicast_intermittent.rs @@ -414,7 +414,7 @@ async fn lowlatency_transport_intermittent(endpoint: &EndPoint) { #[cfg(feature = "transport_tcp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_tcp_intermittent() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 12000).parse().unwrap(); universal_transport_intermittent(&endpoint).await; } @@ -422,7 +422,7 @@ async fn transport_tcp_intermittent() { #[cfg(feature = "transport_tcp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_tcp_intermittent_for_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 12100).parse().unwrap(); lowlatency_transport_intermittent(&endpoint).await; } @@ -431,7 +431,7 @@ async fn transport_tcp_intermittent_for_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn transport_ws_intermittent() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 12010).parse().unwrap(); universal_transport_intermittent(&endpoint).await; } @@ -440,7 +440,7 @@ async fn transport_ws_intermittent() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn transport_ws_intermittent_for_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 12110).parse().unwrap(); lowlatency_transport_intermittent(&endpoint).await; } @@ -449,7 +449,7 @@ async fn 
transport_ws_intermittent_for_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn transport_unixpipe_intermittent() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = "unixpipe/transport_unixpipe_intermittent".parse().unwrap(); universal_transport_intermittent(&endpoint).await; } @@ -458,7 +458,7 @@ async fn transport_unixpipe_intermittent() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn transport_unixpipe_intermittent_for_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = "unixpipe/transport_unixpipe_intermittent_for_lowlatency_transport" .parse() .unwrap(); @@ -468,7 +468,7 @@ async fn transport_unixpipe_intermittent_for_lowlatency_transport() { #[cfg(all(feature = "transport_vsock", target_os = "linux"))] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_vsock_intermittent() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = "vsock/VMADDR_CID_LOCAL:17000".parse().unwrap(); universal_transport_intermittent(&endpoint).await; } diff --git a/io/zenoh-transport/tests/unicast_multilink.rs b/io/zenoh-transport/tests/unicast_multilink.rs index 5e4499be2a..c2d188e16a 100644 --- a/io/zenoh-transport/tests/unicast_multilink.rs +++ b/io/zenoh-transport/tests/unicast_multilink.rs @@ -477,7 +477,7 @@ mod tests { #[cfg(feature = "transport_tcp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn multilink_tcp_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 18000).parse().unwrap(); multilink_transport(&endpoint).await; @@ -486,7 +486,7 @@ mod tests { #[cfg(feature = "transport_udp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn multilink_udp_only() { - let _ = env_logger::try_init(); + 
zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 18010).parse().unwrap(); multilink_transport(&endpoint).await; @@ -496,7 +496,7 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn multilink_ws_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 18020).parse().unwrap(); multilink_transport(&endpoint).await; @@ -506,7 +506,7 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn multilink_unixpipe_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = "unixpipe/multilink_unixpipe_only".parse().unwrap(); multilink_transport(&endpoint).await; @@ -516,7 +516,7 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn multilink_unix_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let f1 = "zenoh-test-unix-socket-9.sock"; let _ = std::fs::remove_file(f1); @@ -531,7 +531,7 @@ mod tests { async fn multilink_tls_only() { use zenoh_link::tls::config::*; - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // NOTE: this an auto-generated pair of certificate and key. 
// The target domain is localhost, so it has no real @@ -726,7 +726,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== #[cfg(all(feature = "transport_vsock", target_os = "linux"))] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn multilink_vsock_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = "vsock/VMADDR_CID_LOCAL:17000".parse().unwrap(); multilink_transport(&endpoint).await; diff --git a/io/zenoh-transport/tests/unicast_openclose.rs b/io/zenoh-transport/tests/unicast_openclose.rs index 56e4a1b140..6eec9a4cb1 100644 --- a/io/zenoh-transport/tests/unicast_openclose.rs +++ b/io/zenoh-transport/tests/unicast_openclose.rs @@ -475,7 +475,7 @@ async fn openclose_lowlatency_transport(endpoint: &EndPoint) { #[cfg(feature = "transport_tcp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn openclose_tcp_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13000).parse().unwrap(); openclose_universal_transport(&endpoint).await; } @@ -483,7 +483,7 @@ async fn openclose_tcp_only() { #[cfg(feature = "transport_tcp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn openclose_tcp_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13100).parse().unwrap(); openclose_lowlatency_transport(&endpoint).await; } @@ -491,7 +491,7 @@ async fn openclose_tcp_only_with_lowlatency_transport() { #[cfg(feature = "transport_udp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn openclose_udp_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13010).parse().unwrap(); openclose_universal_transport(&endpoint).await; } @@ -499,7 +499,7 @@ async fn openclose_udp_only() { #[cfg(feature = "transport_udp")] 
#[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn openclose_udp_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13110).parse().unwrap(); openclose_lowlatency_transport(&endpoint).await; } @@ -508,7 +508,7 @@ async fn openclose_udp_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn openclose_ws_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13020).parse().unwrap(); openclose_universal_transport(&endpoint).await; } @@ -517,7 +517,7 @@ async fn openclose_ws_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn openclose_ws_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13120).parse().unwrap(); openclose_lowlatency_transport(&endpoint).await; } @@ -526,7 +526,7 @@ async fn openclose_ws_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn openclose_unixpipe_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = "unixpipe/openclose_unixpipe_only".parse().unwrap(); openclose_universal_transport(&endpoint).await; } @@ -535,7 +535,7 @@ async fn openclose_unixpipe_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn openclose_unixpipe_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = "unixpipe/openclose_unixpipe_only_with_lowlatency_transport" .parse() .unwrap(); @@ -546,7 +546,7 @@ async fn openclose_unixpipe_only_with_lowlatency_transport() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn openclose_unix_only() { - 
let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let f1 = "zenoh-test-unix-socket-9.sock"; let _ = std::fs::remove_file(f1); let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap(); @@ -560,7 +560,7 @@ async fn openclose_unix_only() { async fn openclose_tls_only() { use zenoh_link::tls::config::*; - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // NOTE: this an auto-generated pair of certificate and key. // The target domain is localhost, so it has no real // mapping to any existing domain. The certificate and key @@ -758,7 +758,7 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== async fn openclose_tcp_only_connect_with_interface_restriction() { let addrs = get_ipv4_ipaddrs(None); - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let listen_endpoint: EndPoint = format!("tcp/{}:{}", addrs[0], 13001).parse().unwrap(); @@ -777,7 +777,7 @@ async fn openclose_tcp_only_connect_with_interface_restriction() { async fn openclose_tcp_only_listen_with_interface_restriction() { let addrs = get_ipv4_ipaddrs(None); - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let listen_endpoint: EndPoint = format!("tcp/{}:{}#iface=lo", addrs[0], 13002) .parse() @@ -796,7 +796,7 @@ async fn openclose_tcp_only_listen_with_interface_restriction() { async fn openclose_udp_only_connect_with_interface_restriction() { let addrs = get_ipv4_ipaddrs(None); - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let listen_endpoint: EndPoint = format!("udp/{}:{}", addrs[0], 13003).parse().unwrap(); @@ -815,7 +815,7 @@ async fn openclose_udp_only_connect_with_interface_restriction() { async fn openclose_udp_only_listen_with_interface_restriction() { let addrs = get_ipv4_ipaddrs(None); - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let listen_endpoint: EndPoint = format!("udp/{}:{}#iface=lo", addrs[0], 13004) .parse() .unwrap(); @@ -829,7 +829,7 @@ async fn 
openclose_udp_only_listen_with_interface_restriction() { #[cfg(all(feature = "transport_vsock", target_os = "linux"))] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn openclose_vsock() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = "vsock/VMADDR_CID_LOCAL:17000".parse().unwrap(); openclose_lowlatency_transport(&endpoint).await; } diff --git a/io/zenoh-transport/tests/unicast_priorities.rs b/io/zenoh-transport/tests/unicast_priorities.rs index 9c851a0510..7d2e295a5a 100644 --- a/io/zenoh-transport/tests/unicast_priorities.rs +++ b/io/zenoh-transport/tests/unicast_priorities.rs @@ -332,7 +332,7 @@ async fn run(endpoints: &[EndPoint]) { #[cfg(feature = "transport_tcp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn priorities_tcp_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locators let endpoints: Vec = vec![format!("tcp/127.0.0.1:{}", 10000).parse().unwrap()]; // Run @@ -343,7 +343,7 @@ async fn priorities_tcp_only() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn conduits_unixpipe_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locators let endpoints: Vec = vec!["unixpipe/conduits_unixpipe_only" .to_string() @@ -356,7 +356,7 @@ async fn conduits_unixpipe_only() { #[cfg(feature = "transport_ws")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn priorities_ws_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locators let endpoints: Vec = vec![format!("ws/127.0.0.1:{}", 10010).parse().unwrap()]; // Run diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index 6796f803ca..e7a4191cc2 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ b/io/zenoh-transport/tests/unicast_shm.rs @@ -378,7 +378,7 @@ mod tests { #[cfg(feature = "transport_tcp")] 
#[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_tcp_shm() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 14000).parse().unwrap(); run(&endpoint, false).await; } @@ -386,7 +386,7 @@ mod tests { #[cfg(feature = "transport_tcp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_tcp_shm_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 14001).parse().unwrap(); run(&endpoint, true).await; } @@ -394,7 +394,7 @@ mod tests { #[cfg(feature = "transport_ws")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_ws_shm() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 14010).parse().unwrap(); run(&endpoint, false).await; } @@ -402,7 +402,7 @@ mod tests { #[cfg(feature = "transport_ws")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_ws_shm_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 14011).parse().unwrap(); run(&endpoint, true).await; } @@ -410,7 +410,7 @@ mod tests { #[cfg(feature = "transport_unixpipe")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unixpipe_shm() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = "unixpipe/transport_unixpipe_shm".parse().unwrap(); run(&endpoint, false).await; } @@ -418,7 +418,7 @@ mod tests { #[cfg(feature = "transport_unixpipe")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unixpipe_shm_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint: EndPoint = 
"unixpipe/transport_unixpipe_shm_with_lowlatency_transport" .parse() .unwrap(); diff --git a/io/zenoh-transport/tests/unicast_simultaneous.rs b/io/zenoh-transport/tests/unicast_simultaneous.rs index 83c3d98dce..277c9efda3 100644 --- a/io/zenoh-transport/tests/unicast_simultaneous.rs +++ b/io/zenoh-transport/tests/unicast_simultaneous.rs @@ -299,7 +299,7 @@ mod tests { #[cfg(feature = "transport_tcp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_tcp_simultaneous() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint01: Vec = vec![ format!("tcp/127.0.0.1:{}", 15000).parse().unwrap(), format!("tcp/127.0.0.1:{}", 15001).parse().unwrap(), @@ -320,7 +320,7 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn transport_unixpipe_simultaneous() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint01: Vec = vec![ "unixpipe/transport_unixpipe_simultaneous".parse().unwrap(), "unixpipe/transport_unixpipe_simultaneous2".parse().unwrap(), @@ -341,7 +341,7 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] async fn transport_ws_simultaneous() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let endpoint01: Vec = vec![ format!("ws/127.0.0.1:{}", 15020).parse().unwrap(), diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index 38534a1a17..63ece1eab9 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -594,7 +594,7 @@ async fn run_with_lowlatency_transport( #[cfg(feature = "transport_tcp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_tcp_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locators let endpoints: Vec = vec![ @@ -619,7 +619,7 @@ async fn transport_unicast_tcp_only() { #[cfg(feature 
= "transport_tcp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_tcp_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locators let endpoints: Vec = vec![format!("tcp/127.0.0.1:{}", 16100).parse().unwrap()]; @@ -641,7 +641,7 @@ async fn transport_unicast_tcp_only_with_lowlatency_transport() { #[cfg(feature = "transport_udp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_udp_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locator let endpoints: Vec = vec![ @@ -666,7 +666,7 @@ async fn transport_unicast_udp_only() { #[cfg(feature = "transport_udp")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_udp_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locator let endpoints: Vec = vec![format!("udp/127.0.0.1:{}", 16110).parse().unwrap()]; @@ -688,7 +688,7 @@ async fn transport_unicast_udp_only_with_lowlatency_transport() { #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_unix_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let f1 = "zenoh-test-unix-socket-5.sock"; let _ = std::fs::remove_file(f1); @@ -714,7 +714,7 @@ async fn transport_unicast_unix_only() { #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_unix_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let f1 = "zenoh-test-unix-socket-5-lowlatency.sock"; let _ = std::fs::remove_file(f1); @@ -740,7 +740,7 @@ async fn transport_unicast_unix_only_with_lowlatency_transport() { #[cfg(feature = "transport_ws")] 
#[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_ws_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locators let endpoints: Vec = vec![ @@ -773,7 +773,7 @@ async fn transport_unicast_ws_only() { #[cfg(feature = "transport_ws")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_ws_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locators let endpoints: Vec = vec![format!("ws/127.0.0.1:{}", 16120).parse().unwrap()]; @@ -803,7 +803,7 @@ async fn transport_unicast_ws_only_with_lowlatency_transport() { #[cfg(feature = "transport_unixpipe")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_unixpipe_only() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locator let endpoints: Vec = vec![ @@ -828,7 +828,7 @@ async fn transport_unicast_unixpipe_only() { #[cfg(feature = "transport_unixpipe")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_unixpipe_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locator let endpoints: Vec = vec![ @@ -854,7 +854,7 @@ async fn transport_unicast_unixpipe_only_with_lowlatency_transport() { #[cfg(all(feature = "transport_tcp", feature = "transport_udp"))] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_tcp_udp() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locator let endpoints: Vec = vec![ @@ -885,7 +885,7 @@ async fn transport_unicast_tcp_udp() { ))] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_tcp_unix() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let f1 = "zenoh-test-unix-socket-6.sock"; let _ = std::fs::remove_file(f1); @@ -919,7 
+919,7 @@ async fn transport_unicast_tcp_unix() { ))] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_udp_unix() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let f1 = "zenoh-test-unix-socket-7.sock"; let _ = std::fs::remove_file(f1); @@ -954,7 +954,7 @@ async fn transport_unicast_udp_unix() { ))] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_tcp_udp_unix() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let f1 = "zenoh-test-unix-socket-8.sock"; let _ = std::fs::remove_file(f1); @@ -988,7 +988,7 @@ async fn transport_unicast_tcp_udp_unix() { async fn transport_unicast_tls_only_server() { use zenoh_link::tls::config::*; - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locator let mut endpoint: EndPoint = format!("tls/localhost:{}", 16070).parse().unwrap(); @@ -1034,7 +1034,7 @@ async fn transport_unicast_tls_only_server() { async fn transport_unicast_quic_only_server() { use zenoh_link::quic::config::*; - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locator let mut endpoint: EndPoint = format!("quic/localhost:{}", 16080).parse().unwrap(); endpoint @@ -1079,7 +1079,7 @@ async fn transport_unicast_quic_only_server() { async fn transport_unicast_tls_only_mutual_success() { use zenoh_link::tls::config::*; - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let client_auth = "true"; @@ -1151,7 +1151,7 @@ async fn transport_unicast_tls_only_mutual_no_client_certs_failure() { use std::vec; use zenoh_link::tls::config::*; - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); // Define the locator let mut client_endpoint: EndPoint = ("tls/localhost:10462").parse().unwrap(); @@ -1219,7 +1219,7 @@ async fn transport_unicast_tls_only_mutual_no_client_certs_failure() { fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { use 
zenoh_link::tls::config::*; - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let client_auth = "true"; diff --git a/plugins/zenoh-backend-example/Cargo.toml b/plugins/zenoh-backend-example/Cargo.toml index 445483c9aa..eab4e8edb3 100644 --- a/plugins/zenoh-backend-example/Cargo.toml +++ b/plugins/zenoh-backend-example/Cargo.toml @@ -33,7 +33,7 @@ async-std = { workspace = true, features = ["default"] } const_format = { workspace = true } futures = { workspace = true } git-version = { workspace = true } -log = { workspace = true } +tracing = {workspace = true} serde_json = { workspace = true } zenoh = { workspace = true } zenoh-core = { workspace = true } diff --git a/plugins/zenoh-plugin-example/Cargo.toml b/plugins/zenoh-plugin-example/Cargo.toml index 441c796507..8e6814590f 100644 --- a/plugins/zenoh-plugin-example/Cargo.toml +++ b/plugins/zenoh-plugin-example/Cargo.toml @@ -36,13 +36,12 @@ crate-type = ["cdylib"] [dependencies] async-std = { workspace = true, features = ["default"] } const_format = { workspace = true } -env_logger = { workspace = true } +zenoh-util = {workspace = true } futures = { workspace = true } git-version = { workspace = true } -log = { workspace = true } +tracing = {workspace = true} serde_json = { workspace = true } zenoh = { workspace = true, features = ["unstable"] } zenoh-core = { workspace = true } zenoh-plugin-trait = { workspace = true } zenoh-result = { workspace = true } -zenoh-util = { workspace = true } diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index c2f083827d..777dbb74d4 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -14,13 +14,13 @@ #![recursion_limit = "256"] use futures::select; -use log::{debug, info}; use std::collections::HashMap; use std::convert::TryFrom; use std::sync::{ atomic::{AtomicBool, Ordering::Relaxed}, Arc, Mutex, }; +use tracing::{debug, info}; use 
zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; use zenoh::runtime::Runtime; @@ -109,7 +109,7 @@ impl RunningPluginTrait for RunningPlugin { guard.flag.store(false, Relaxed); guard.flag = Arc::new(AtomicBool::new(true)); match KeyExpr::try_from(selector.clone()) { - Err(e) => log::error!("{}", e), + Err(e) => tracing::error!("{}", e), Ok(selector) => { async_std::task::spawn(run( guard.runtime.clone(), @@ -140,7 +140,7 @@ impl Drop for RunningPlugin { } async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { - env_logger::init(); + zenoh_util::init_log_from_env(); // create a zenoh Session that shares the same Runtime than zenohd let session = zenoh::init(runtime).res().await.unwrap(); diff --git a/plugins/zenoh-plugin-rest/Cargo.toml b/plugins/zenoh-plugin-rest/Cargo.toml index 8459bb5172..19fa9eafdc 100644 --- a/plugins/zenoh-plugin-rest/Cargo.toml +++ b/plugins/zenoh-plugin-rest/Cargo.toml @@ -36,13 +36,13 @@ anyhow = { workspace = true, features = ["default"] } async-std = { workspace = true, features = ["default", "attributes"] } base64 = { workspace = true } const_format = { workspace = true } -env_logger = { workspace = true } +zenoh-util = {workspace = true } flume = { workspace = true } futures = { workspace = true } git-version = { workspace = true } http-types = { workspace = true } lazy_static = { workspace = true } -log = { workspace = true } +tracing = {workspace = true} schemars = { workspace = true } serde = { workspace = true, features = ["default"] } serde_json = { workspace = true } @@ -50,7 +50,6 @@ tide = { workspace = true } zenoh = { workspace = true, features = ["unstable"] } zenoh-plugin-trait = { workspace = true } zenoh-result = { workspace = true } -zenoh-util = { workspace = true } [build-dependencies] rustc_version = { workspace = true } diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index 0c6eb4357b..324a900dda 100644 --- 
a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -33,7 +33,7 @@ if(typeof(EventSource) !== "undefined") { #[async_std::main] async fn main() { // initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let config = parse_args(); let key = keyexpr::new("demo/sse").unwrap(); diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 6f4e80f4eb..ca4cf6c174 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -227,8 +227,8 @@ impl Plugin for RestPlugin { // Try to initiate login. // Required in case of dynamic lib, otherwise no logs. // But cannot be done twice in case of static link. - let _ = env_logger::try_init(); - log::debug!("REST plugin {}", LONG_VERSION.as_str()); + zenoh_util::init_log_from_env(); + tracing::debug!("REST plugin {}", LONG_VERSION.as_str()); let runtime_conf = runtime.config().lock(); let plugin_conf = runtime_conf @@ -299,7 +299,7 @@ fn with_extended_string R>( } async fn query(mut req: Request<(Arc, String)>) -> tide::Result { - log::trace!("Incoming GET request: {:?}", req); + tracing::trace!("Incoming GET request: {:?}", req); let first_accept = match req.header("accept") { Some(accept) => accept[0] @@ -327,7 +327,7 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result, String)>) -> tide::Result {} Ok(Err(e)) => { - log::debug!( + tracing::debug!( "SSE error ({})! Unsubscribe and terminate (task {})", e, async_std::task::current().id() ); if let Err(e) = sub.undeclare().res().await { - log::error!("Error undeclaring subscriber: {}", e); + tracing::error!("Error undeclaring subscriber: {}", e); } break; } Err(_) => { - log::debug!( + tracing::debug!( "SSE timeout! 
Unsubscribe and terminate (task {})", async_std::task::current().id() ); if let Err(e) = sub.undeclare().res().await { - log::error!("Error undeclaring subscriber: {}", e); + tracing::error!("Error undeclaring subscriber: {}", e); } break; } @@ -428,7 +428,7 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result, String)>) -> tide::Result { - log::trace!("Incoming PUT request: {:?}", req); + tracing::trace!("Incoming PUT request: {:?}", req); match req.body_bytes().await { Ok(bytes) => { let key_expr = match path_to_key_expr(req.url().path(), &req.state().1) { @@ -476,7 +476,7 @@ pub async fn run(runtime: Runtime, conf: Config) -> ZResult<()> { // Try to initiate login. // Required in case of dynamic lib, otherwise no logs. // But cannot be done twice in case of static link. - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let zid = runtime.zid().to_string(); let session = zenoh::init(runtime).res().await.unwrap(); @@ -507,7 +507,7 @@ pub async fn run(runtime: Runtime, conf: Config) -> ZResult<()> { .delete(write); if let Err(e) = app.listen(conf.http_port).await { - log::error!("Unable to start http server for REST: {:?}", e); + tracing::error!("Unable to start http server for REST: {:?}", e); return Err(e.into()); } Ok(()) diff --git a/plugins/zenoh-plugin-storage-manager/Cargo.toml b/plugins/zenoh-plugin-storage-manager/Cargo.toml index 65b15686f7..35a5232cf0 100644 --- a/plugins/zenoh-plugin-storage-manager/Cargo.toml +++ b/plugins/zenoh-plugin-storage-manager/Cargo.toml @@ -37,12 +37,12 @@ async-trait = { workspace = true } crc = { workspace = true } const_format = { workspace = true } derive-new = { workspace = true } -env_logger = { workspace = true } +zenoh-util = {workspace = true } flume = { workspace = true } futures = { workspace = true } git-version = { workspace = true } libloading = { workspace = true } -log = { workspace = true } +tracing = {workspace = true} serde = { workspace = true, features = ["default"] } 
serde_json = { workspace = true } urlencoding = { workspace = true } @@ -52,7 +52,6 @@ zenoh-core = { workspace = true } zenoh-keyexpr = { workspace = true } zenoh-plugin-trait = { workspace = true } zenoh-result = { workspace = true } -zenoh-util = { workspace = true } zenoh_backend_traits = { workspace = true } [build-dependencies] diff --git a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs index aa7260e868..e39929ecce 100644 --- a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs @@ -35,7 +35,7 @@ pub(crate) async fn create_and_start_storage( out_interceptor: Option Sample + Send + Sync>>, zenoh: Arc, ) -> ZResult> { - log::trace!("Create storage '{}'", &admin_key); + tracing::trace!("Create storage '{}'", &admin_key); let capability = backend.get_capability(); let storage = backend.create_storage(config.clone()).await?; let store_intercept = StoreIntercept { diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 0db30bbd6a..9f11141fea 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -66,8 +66,8 @@ impl Plugin for StoragesPlugin { type Instance = zenoh::plugins::RunningPlugin; fn start(name: &str, runtime: &Self::StartArgs) -> ZResult { - std::mem::drop(env_logger::try_init()); - log::debug!("StorageManager plugin {}", Self::PLUGIN_VERSION); + zenoh_util::init_log_from_env(); + tracing::debug!("StorageManager plugin {}", Self::PLUGIN_VERSION); let config = { PluginConfig::try_from((name, runtime.config().lock().plugin(name).unwrap())) }?; Ok(Box::new(StorageRuntime::from(StorageRuntimeInner::new( @@ -99,7 +99,7 @@ impl StorageRuntimeInner { // Try to initiate login. // Required in case of dynamic lib, otherwise no logs. // But cannot be done twice in case of static link. 
- let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let PluginConfig { name, backend_search_dirs, @@ -135,7 +135,7 @@ impl StorageRuntimeInner { }) .map_or_else( |e| { - log::error!( + tracing::error!( "Cannot spawn static volume '{}': {}", MEMORY_BACKEND_NAME, e @@ -145,13 +145,13 @@ impl StorageRuntimeInner { ); for volume in &volumes { new_self.spawn_volume(volume).map_or_else( - |e| log::error!("Cannot spawn volume '{}': {}", volume.name(), e), + |e| tracing::error!("Cannot spawn volume '{}': {}", volume.name(), e), |_| (), ); } for storage in &storages { new_self.spawn_storage(storage).map_or_else( - |e| log::error!("Cannot spawn storage '{}': {}", storage.name(), e), + |e| tracing::error!("Cannot spawn storage '{}': {}", storage.name(), e), |_| (), ); } @@ -172,7 +172,7 @@ impl StorageRuntimeInner { } fn kill_volume>(&mut self, name: T) -> ZResult<()> { let name = name.as_ref(); - log::info!("Killing volume '{}'", name); + tracing::info!("Killing volume '{}'", name); if let Some(storages) = self.storages.remove(name) { async_std::task::block_on(futures::future::join_all( storages @@ -189,7 +189,7 @@ impl StorageRuntimeInner { fn spawn_volume(&mut self, config: &VolumeConfig) -> ZResult<()> { let volume_id = config.name(); let backend_name = config.backend(); - log::info!( + tracing::info!( "Spawning volume '{}' with backend '{}'", volume_id, backend_name @@ -209,10 +209,10 @@ impl StorageRuntimeInner { } fn kill_storage(&mut self, config: &StorageConfig) { let volume = &config.volume_id; - log::info!("Killing storage '{}' from volume '{}'", config.name, volume); + tracing::info!("Killing storage '{}' from volume '{}'", config.name, volume); if let Some(storages) = self.storages.get_mut(volume) { if let Some(storage) = storages.get_mut(&config.name) { - log::debug!( + tracing::debug!( "Closing storage '{}' from volume '{}'", config.name, config.volume_id @@ -233,7 +233,7 @@ impl StorageRuntimeInner { volume_id, storage.name ))?; let 
storage_name = storage.name.clone(); - log::info!( + tracing::info!( "Spawning storage '{}' from volume '{}' with backend '{}'", storage_name, volume_id, @@ -287,13 +287,13 @@ impl RunningPluginTrait for StorageRuntime { let name = { zlock!(self.0).name.clone() }; let old = PluginConfig::try_from((&name, old))?; let new = PluginConfig::try_from((&name, new))?; - log::debug!("config change requested for plugin '{}'", name); - log::debug!("old config: {:?}", &old); - log::debug!("new config: {:?}", &new); + tracing::debug!("config change requested for plugin '{}'", name); + tracing::debug!("old config: {:?}", &old); + tracing::debug!("new config: {:?}", &new); let diffs = ConfigDiff::diffs(old, new); - log::debug!("applying diff: {:?}", &diffs); + tracing::debug!("applying diff: {:?}", &diffs); { zlock!(self.0).update(diffs) }?; - log::debug!("applying diff done"); + tracing::debug!("applying diff done"); Ok(None) } diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index ebb4922c9d..8b37094ffd 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -58,7 +58,7 @@ impl Volume for MemoryBackend { } async fn create_storage(&self, properties: StorageConfig) -> ZResult> { - log::debug!("Create Memory Storage with configuration: {:?}", properties); + tracing::debug!("Create Memory Storage with configuration: {:?}", properties); Ok(Box::new(MemoryStorage::new(properties).await?)) } @@ -86,7 +86,7 @@ impl Volume for MemoryBackend { impl Drop for MemoryBackend { fn drop(&mut self) { // nothing to do in case of memory backend - log::trace!("MemoryBackend::drop()"); + tracing::trace!("MemoryBackend::drop()"); } } @@ -116,7 +116,7 @@ impl Storage for MemoryStorage { value: Value, timestamp: Timestamp, ) -> ZResult { - log::trace!("put for {:?}", key); + tracing::trace!("put for {:?}", key); let mut 
map = self.map.write().await; match map.entry(key) { std::collections::hash_map::Entry::Occupied(mut e) => { @@ -135,7 +135,7 @@ impl Storage for MemoryStorage { key: Option, _timestamp: Timestamp, ) -> ZResult { - log::trace!("delete for {:?}", key); + tracing::trace!("delete for {:?}", key); self.map.write().await.remove_entry(&key); return Ok(StorageInsertionResult::Deleted); } @@ -145,7 +145,7 @@ impl Storage for MemoryStorage { key: Option, _parameters: &str, ) -> ZResult> { - log::trace!("get for {:?}", key); + tracing::trace!("get for {:?}", key); // @TODO: use parameters??? match self.map.read().await.get(&key) { Some(v) => Ok(vec![v.clone()]), @@ -166,6 +166,6 @@ impl Storage for MemoryStorage { impl Drop for MemoryStorage { fn drop(&mut self) { // nothing to do in case of memory backend - log::trace!("MemoryStorage::drop()"); + tracing::trace!("MemoryStorage::drop()"); } } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 7295367a06..39480a3912 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -62,7 +62,7 @@ impl AlignQueryable { } async fn start(&self) -> Self { - log::debug!( + tracing::debug!( "[ALIGN QUERYABLE] Declaring Queryable on '{}'...", self.digest_key ); @@ -78,19 +78,19 @@ impl AlignQueryable { let query = match queryable.recv_async().await { Ok(query) => query, Err(e) => { - log::error!("Error in receiving query: {}", e); + tracing::error!("Error in receiving query: {}", e); continue; } }; - log::trace!("[ALIGN QUERYABLE] Received Query '{}'", query.selector()); + tracing::trace!("[ALIGN QUERYABLE] Received Query '{}'", query.selector()); let diff_required = self.parse_selector(query.selector()); - log::trace!( + tracing::trace!( "[ALIGN QUERYABLE] Parsed selector diff_required:{:?}", diff_required ); if diff_required.is_some() { 
let values = self.get_value(diff_required.unwrap()).await; - log::trace!("[ALIGN QUERYABLE] value for the query is {:?}", values); + tracing::trace!("[ALIGN QUERYABLE] value for the query is {:?}", values); for value in values { match value { AlignData::Interval(i, c) => { @@ -176,7 +176,7 @@ impl AlignQueryable { fn parse_selector(&self, selector: Selector) -> Option { let properties = selector.parameters_stringmap().unwrap(); // note: this is a hashmap - log::trace!("[ALIGN QUERYABLE] Properties are: {:?}", properties); + tracing::trace!("[ALIGN QUERYABLE] Properties are: {:?}", properties); if properties.get(super::ERA).is_some() { Some(AlignComponent::Era( EraType::from_str(properties.get(super::ERA).unwrap()).unwrap(), @@ -218,7 +218,7 @@ impl AlignQueryable { if let Ok(reply) = replies.recv_async().await { match reply.sample { Ok(sample) => { - log::trace!( + tracing::trace!( "[ALIGN QUERYABLE] Received ('{}': '{}')", sample.key_expr.as_str(), sample.value @@ -227,7 +227,7 @@ impl AlignQueryable { match timestamp.cmp(&logentry.timestamp) { Ordering::Greater => return None, Ordering::Less => { - log::error!( + tracing::error!( "[ALIGN QUERYABLE] Data in the storage is older than requested." 
); return None; @@ -237,7 +237,7 @@ impl AlignQueryable { } } Err(err) => { - log::error!( + tracing::error!( "[ALIGN QUERYABLE] Error when requesting storage: {:?}.", err ); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 041567ae27..58abc7e05a 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -54,20 +54,20 @@ impl Aligner { pub async fn start(&self) { while let Ok((from, incoming_digest)) = self.rx_digest.recv_async().await { if self.in_processed(incoming_digest.checksum).await { - log::trace!( + tracing::trace!( "[ALIGNER]Skipping already processed digest: {}", incoming_digest.checksum ); continue; } else if self.snapshotter.get_digest().await.checksum == incoming_digest.checksum { - log::trace!( + tracing::trace!( "[ALIGNER]Skipping matching digest: {}", incoming_digest.checksum ); continue; } else { // process this digest - log::debug!( + tracing::debug!( "[ALIGNER]Processing digest: {:?} from {}", incoming_digest, from @@ -87,7 +87,7 @@ impl Aligner { let checksum = other.checksum; let timestamp = other.timestamp; let (missing_content, no_content_err) = self.get_missing_content(&other, from).await; - log::debug!( + tracing::debug!( "[ALIGNER] Missing {} entries; query corresponding samples", missing_content.len() ); @@ -100,14 +100,14 @@ impl Aligner { .await; // Missing data might be empty since some samples in digest might be outdated - log::debug!("[ALIGNER] Received {} queried samples", missing_data.len()); - log::trace!("[ALIGNER] Received queried samples: {missing_data:?}"); + tracing::debug!("[ALIGNER] Received {} queried samples", missing_data.len()); + tracing::trace!("[ALIGNER] Received queried samples: {missing_data:?}"); for (key, (ts, value)) in missing_data { let sample = Sample::new(key, value).with_timestamp(ts); - log::debug!("[ALIGNER] Adding {:?} to storage", 
sample); + tracing::debug!("[ALIGNER] Adding {:?} to storage", sample); self.tx_sample.send_async(sample).await.unwrap_or_else(|e| { - log::error!("[ALIGNER] Error adding sample to storage: {}", e) + tracing::error!("[ALIGNER] Error adding sample to storage: {}", e) }); } @@ -143,7 +143,7 @@ impl Aligner { } async fn get_missing_content(&self, other: &Digest, from: &str) -> (Vec, bool) { - log::debug!("[ALIGNER] Get missing content from {from} ..."); + tracing::debug!("[ALIGNER] Get missing content from {from} ..."); // get my digest let this = &self.snapshotter.get_digest().await; @@ -156,9 +156,9 @@ impl Aligner { let ((cold_data, no_cold_err), (warm_data, no_warm_err), (hot_data, no_hot_err)) = futures::join!(cold_alignment, warm_alignment, hot_alignment); - log::debug!("[ALIGNER] Missing content from {from} in Cold era: {cold_data:?}"); - log::debug!("[ALIGNER] Missing content from {from} in Warm era: {warm_data:?}"); - log::debug!("[ALIGNER] Missing content from {from} in Hot era: {hot_data:?}"); + tracing::debug!("[ALIGNER] Missing content from {from} in Cold era: {cold_data:?}"); + tracing::debug!("[ALIGNER] Missing content from {from} in Warm era: {warm_data:?}"); + tracing::debug!("[ALIGNER] Missing content from {from} in Hot era: {hot_data:?}"); ( [cold_data, warm_data, hot_data].concat(), no_cold_err && no_warm_err && no_hot_err, @@ -209,7 +209,7 @@ impl Aligner { other_intervals.insert(i, c); } Err(e) => { - log::error!("[ALIGNER] Error decoding reply: {}", e); + tracing::error!("[ALIGNER] Error decoding reply: {}", e); no_err = false; } }; @@ -255,7 +255,7 @@ impl Aligner { other_subintervals.insert(i, c); } Err(e) => { - log::error!("[ALIGNER] Error decoding reply: {}", e); + tracing::error!("[ALIGNER] Error decoding reply: {}", e); no_err = false; } }; @@ -296,7 +296,7 @@ impl Aligner { other_content.insert(i, c); } Err(e) => { - log::error!("[ALIGNER] Error decoding reply: {}", e); + tracing::error!("[ALIGNER] Error decoding reply: {}", e); no_err = 
false; } }; @@ -315,7 +315,7 @@ impl Aligner { .join(&from) .unwrap() .with_parameters(&properties); - log::trace!("[ALIGNER] Sending Query '{}'...", selector); + tracing::trace!("[ALIGNER] Sending Query '{}'...", selector); let mut return_val = Vec::new(); match self .session @@ -329,7 +329,7 @@ impl Aligner { while let Ok(reply) = replies.recv_async().await { match reply.sample { Ok(sample) => { - log::trace!( + tracing::trace!( "[ALIGNER] Received ('{}': '{}')", sample.key_expr.as_str(), sample.value @@ -337,7 +337,7 @@ impl Aligner { return_val.push(sample); } Err(err) => { - log::error!( + tracing::error!( "[ALIGNER] Received error for query on selector {} :{}", selector, err @@ -348,11 +348,13 @@ impl Aligner { } } Err(err) => { - log::error!("[ALIGNER] Query failed on selector `{}`: {}", selector, err); + tracing::error!("[ALIGNER] Query failed on selector `{}`: {}", selector, err); no_err = false; } }; - log::trace!("[ALIGNER] On Query '{selector}' received: {return_val:?} (no_err:{no_err})"); + tracing::trace!( + "[ALIGNER] On Query '{selector}' received: {return_val:?} (no_err:{no_err})" + ); (return_val, no_err) } } diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index b743a70451..3382320610 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -80,7 +80,7 @@ impl Replica { name: &str, rx: Receiver, ) { - log::trace!("[REPLICA] Opening session..."); + tracing::trace!("[REPLICA] Opening session..."); let startup_entries = match store_intercept.storage.get_all_entries().await { Ok(entries) => { let mut result = Vec::new(); @@ -89,7 +89,7 @@ impl Replica { if let Some(prefix) = storage_config.clone().strip_prefix { result.push((prefix, entry.1)); } else { - log::error!("Empty key found with timestamp `{}`", entry.1); + tracing::error!("Empty key found with timestamp `{}`", entry.1); } } else { 
result.push(( @@ -104,7 +104,7 @@ impl Replica { result } Err(e) => { - log::error!("[REPLICA] Error fetching entries from storage: {}", e); + tracing::error!("[REPLICA] Error fetching entries from storage: {}", e); return; } }; @@ -181,12 +181,12 @@ impl Replica { ); select!( - () = digest_sub => log::trace!("[REPLICA] Exiting digest subscriber"), - () = align_q => log::trace!("[REPLICA] Exiting align queryable"), - () = aligner => log::trace!("[REPLICA] Exiting aligner"), - () = digest_pub => log::trace!("[REPLICA] Exiting digest publisher"), - () = snapshot_task => log::trace!("[REPLICA] Exiting snapshot task"), - () = storage_task => log::trace!("[REPLICA] Exiting storage task"), + () = digest_sub => tracing::trace!("[REPLICA] Exiting digest subscriber"), + () = align_q => tracing::trace!("[REPLICA] Exiting align queryable"), + () = aligner => tracing::trace!("[REPLICA] Exiting aligner"), + () = digest_pub => tracing::trace!("[REPLICA] Exiting digest publisher"), + () = snapshot_task => tracing::trace!("[REPLICA] Exiting snapshot task"), + () = storage_task => tracing::trace!("[REPLICA] Exiting storage task"), ) } @@ -199,7 +199,7 @@ impl Replica { .join("**") .unwrap(); - log::debug!( + tracing::debug!( "[DIGEST_SUB] Declaring Subscriber named {} on '{}'", self.name, digest_key @@ -215,13 +215,13 @@ impl Replica { let sample = match subscriber.recv_async().await { Ok(sample) => sample, Err(e) => { - log::error!("[DIGEST_SUB] Error receiving sample: {}", e); + tracing::error!("[DIGEST_SUB] Error receiving sample: {}", e); continue; } }; let from = &sample.key_expr.as_str() [Replica::get_digest_key(&self.key_expr, ALIGN_PREFIX).len() + 1..]; - log::trace!( + tracing::trace!( "[DIGEST_SUB] From {} Received {} ('{}': '{}')", from, sample.kind, @@ -231,7 +231,7 @@ impl Replica { let digest: Digest = match serde_json::from_str(&format!("{}", sample.value)) { Ok(digest) => digest, Err(e) => { - log::error!("[DIGEST_SUB] Error in decoding the digest: {}", e); + 
tracing::error!("[DIGEST_SUB] Error in decoding the digest: {}", e); continue; } }; @@ -246,10 +246,12 @@ impl Replica { ) .await; if to_be_processed { - log::trace!("[DIGEST_SUB] sending {} to aligner", digest.checksum); + tracing::trace!("[DIGEST_SUB] sending {} to aligner", digest.checksum); match tx.send_async((from.to_string(), digest)).await { Ok(()) => {} - Err(e) => log::error!("[DIGEST_SUB] Error sending digest to aligner: {}", e), + Err(e) => { + tracing::error!("[DIGEST_SUB] Error sending digest to aligner: {}", e) + } } }; received.insert(from.to_string(), ts); @@ -263,7 +265,7 @@ impl Replica { .join(&self.name) .unwrap(); - log::debug!("[DIGEST_PUB] Declaring Publisher on '{}'...", digest_key); + tracing::debug!("[DIGEST_PUB] Declaring Publisher on '{}'...", digest_key); let publisher = self .session .declare_publisher(digest_key) @@ -282,10 +284,10 @@ impl Replica { drop(digests_published); drop(digest); - log::trace!("[DIGEST_PUB] Putting Digest: {} ...", digest_json); + tracing::trace!("[DIGEST_PUB] Putting Digest: {} ...", digest_json); match publisher.put(digest_json).res().await { Ok(()) => {} - Err(e) => log::error!("[DIGEST_PUB] Digest publication failed: {}", e), + Err(e) => tracing::error!("[DIGEST_PUB] Digest publication failed: {}", e), } } } @@ -304,13 +306,13 @@ impl Replica { } let digests_published = self.digests_published.read().await; if digests_published.contains(&checksum) { - log::trace!("[DIGEST_SUB] Dropping since matching digest already seen"); + tracing::trace!("[DIGEST_SUB] Dropping since matching digest already seen"); return false; } // TODO: test this part if received.contains_key(from) && *received.get(from).unwrap() > ts { // not the latest from that replica - log::trace!("[DIGEST_SUB] Dropping older digest at {} from {}", ts, from); + tracing::trace!("[DIGEST_SUB] Dropping older digest at {} from {}", ts, from); return false; } // TODO: test this part @@ -321,7 +323,7 @@ impl Replica { self.replica_config.delta, ) { - 
log::error!("[DIGEST_SUB] Mismatching digest configs, cannot be aligned"); + tracing::error!("[DIGEST_SUB] Mismatching digest configs, cannot be aligned"); return false; } true diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs b/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs index 10f11c47e2..ae9090c544 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs @@ -318,9 +318,9 @@ impl Snapshotter { let stable = replica_data.stable_log.read().await; let volatile = replica_data.volatile_log.read().await; let digest = replica_data.digest.read().await; - log::trace!("Stable log:: {:?}", stable); - log::trace!("Volatile log:: {:?}", volatile); - log::trace!("Digest:: {:?}", digest); + tracing::trace!("Stable log:: {:?}", stable); + tracing::trace!("Volatile log:: {:?}", volatile); + tracing::trace!("Digest:: {:?}", digest); } // Expose digest diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 9c419c6d31..4131471977 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -144,7 +144,7 @@ impl StorageService { let storage_sub = match self.session.declare_subscriber(&self.key_expr).res().await { Ok(storage_sub) => storage_sub, Err(e) => { - log::error!("Error starting storage '{}': {}", self.name, e); + tracing::error!("Error starting storage '{}': {}", self.name, e); return; } }; @@ -159,7 +159,7 @@ impl StorageService { { Ok(storage_queryable) => storage_queryable, Err(e) => { - log::error!("Error starting storage '{}': {}", self.name, e); + tracing::error!("Error starting storage '{}': {}", self.name, e); return; } }; @@ -173,14 +173,14 @@ impl StorageService { let sample = match sample { Ok(sample) => sample, Err(e) => { - log::error!("Error in sample: {}", e); + 
tracing::error!("Error in sample: {}", e); continue; } }; // log error if the sample is not timestamped // This is to reduce down the line inconsistencies of having duplicate samples stored if sample.get_timestamp().is_none() { - log::error!("Sample {} is not timestamped. Please timestamp samples meant for replicated storage.", sample); + tracing::error!("Sample {} is not timestamped. Please timestamp samples meant for replicated storage.", sample); } else { self.process_sample(sample).await; @@ -195,7 +195,7 @@ impl StorageService { match update { Ok(sample) => self.process_sample(sample).await, Err(e) => { - log::error!("Error in receiving aligner update: {}", e); + tracing::error!("Error in receiving aligner update: {}", e); } } }, @@ -203,7 +203,7 @@ impl StorageService { message = rx.recv_async() => { match message { Ok(StorageMessage::Stop) => { - log::trace!("Dropping storage '{}'", self.name); + tracing::trace!("Dropping storage '{}'", self.name); return }, Ok(StorageMessage::GetStatus(tx)) => { @@ -212,7 +212,7 @@ impl StorageService { drop(storage); } Err(e) => { - log::error!("Storage Message Channel Error: {}", e); + tracing::error!("Storage Message Channel Error: {}", e); }, }; } @@ -226,7 +226,7 @@ impl StorageService { let mut sample = match sample { Ok(sample) => sample, Err(e) => { - log::error!("Error in sample: {}", e); + tracing::error!("Error in sample: {}", e); continue; } }; @@ -241,7 +241,7 @@ impl StorageService { message = rx.recv_async() => { match message { Ok(StorageMessage::Stop) => { - log::trace!("Dropping storage '{}'", self.name); + tracing::trace!("Dropping storage '{}'", self.name); return }, Ok(StorageMessage::GetStatus(tx)) => { @@ -250,7 +250,7 @@ impl StorageService { drop(storage); } Err(e) => { - log::error!("Storage Message Channel Error: {}", e); + tracing::error!("Storage Message Channel Error: {}", e); }, }; }, @@ -262,7 +262,7 @@ impl StorageService { // The storage should only simply save the key, sample pair while 
put and retrieve the same during get // the trimming during PUT and GET should be handled by the plugin async fn process_sample(&self, sample: Sample) { - log::trace!("[STORAGE] Processing sample: {}", sample); + tracing::trace!("[STORAGE] Processing sample: {}", sample); // Call incoming data interceptor (if any) let sample = if let Some(ref interceptor) = self.in_interceptor { interceptor(sample) @@ -280,7 +280,7 @@ impl StorageService { } else { vec![sample.key_expr.clone().into()] }; - log::trace!( + tracing::trace!( "The list of keys matching `{}` is : {:?}", sample.key_expr, matching_keys @@ -294,7 +294,7 @@ impl StorageService { || (self.capability.history.eq(&History::Latest) && self.is_latest(&k, sample.get_timestamp().unwrap()).await)) { - log::trace!( + tracing::trace!( "Sample `{}` identified as neded processing for key {}", sample, k @@ -324,7 +324,7 @@ impl StorageService { let stripped_key = match self.strip_prefix(&sample_to_store.key_expr) { Ok(stripped) => stripped, Err(e) => { - log::error!("{}", e); + tracing::error!("{}", e); return; } }; @@ -361,7 +361,7 @@ impl StorageService { match sending { Ok(_) => (), Err(e) => { - log::error!("Error in sending the sample to the log: {}", e); + tracing::error!("Error in sending the sample to the log: {}", e); } } } @@ -383,7 +383,7 @@ impl StorageService { zenoh_home().join(TOMBSTONE_FILENAME), serde_json::to_string_pretty(&serialized_data).unwrap(), ) { - log::error!("Saving tombstones failed: {}", e); + tracing::error!("Saving tombstones failed: {}", e); } } } @@ -412,7 +412,7 @@ impl StorageService { zenoh_home().join(WILDCARD_UPDATES_FILENAME), serde_json::to_string_pretty(&serialized_data).unwrap(), ) { - log::error!("Saving wildcard updates failed: {}", e); + tracing::error!("Saving wildcard updates failed: {}", e); } } } @@ -441,7 +441,7 @@ impl StorageService { let stripped_key = match self.strip_prefix(&key_expr.into()) { Ok(stripped) => stripped, Err(e) => { - log::error!("{}", e); + 
tracing::error!("{}", e); break; } }; @@ -455,7 +455,7 @@ impl StorageService { } } Err(e) => { - log::warn!( + tracing::warn!( "Storage '{}' raised an error fetching a query on key {} : {}", self.name, key_expr, @@ -476,7 +476,7 @@ impl StorageService { let stripped_key = match self.strip_prefix(&key_expr.into()) { Ok(stripped) => stripped, Err(e) => { - log::error!("{}", e); + tracing::error!("{}", e); return false; } }; @@ -494,11 +494,11 @@ impl StorageService { let q = match query { Ok(q) => q, Err(e) => { - log::error!("Error in query: {}", e); + tracing::error!("Error in query: {}", e); return; } }; - log::trace!("[STORAGE] Processing query on key_expr: {}", q.key_expr()); + tracing::trace!("[STORAGE] Processing query on key_expr: {}", q.key_expr()); if q.key_expr().is_wild() { // resolve key expr into individual keys let matching_keys = self.get_matching_keys(q.key_expr()).await; @@ -507,7 +507,7 @@ impl StorageService { let stripped_key = match self.strip_prefix(&key.clone().into()) { Ok(k) => k, Err(e) => { - log::error!("{}", e); + tracing::error!("{}", e); // @TODO: return error when it is supported return; } @@ -524,7 +524,7 @@ impl StorageService { sample }; if let Err(e) = q.reply(Ok(sample)).res().await { - log::warn!( + tracing::warn!( "Storage '{}' raised an error replying a query: {}", self.name, e @@ -532,7 +532,9 @@ impl StorageService { } } } - Err(e) => log::warn!("Storage'{}' raised an error on query: {}", self.name, e), + Err(e) => { + tracing::warn!("Storage'{}' raised an error on query: {}", self.name, e) + } }; } drop(storage); @@ -540,7 +542,7 @@ impl StorageService { let stripped_key = match self.strip_prefix(q.key_expr()) { Ok(k) => k, Err(e) => { - log::error!("{}", e); + tracing::error!("{}", e); // @TODO: return error when it is supported return; } @@ -558,7 +560,7 @@ impl StorageService { sample }; if let Err(e) = q.reply(Ok(sample)).res().await { - log::warn!( + tracing::warn!( "Storage '{}' raised an error replying a query: {}", 
self.name, e @@ -567,7 +569,7 @@ impl StorageService { } } Err(e) => { - log::warn!("Storage '{}' raised an error on query: {e}", self.name); + tracing::warn!("Storage '{}' raised an error on query: {e}", self.name); } }; } @@ -590,7 +592,7 @@ impl StorageService { } } } - Err(e) => log::warn!( + Err(e) => tracing::warn!( "Storage '{}' raised an error while retrieving keys: {}", self.name, e @@ -648,7 +650,7 @@ impl StorageService { { Ok(replies) => replies, Err(e) => { - log::error!("Error aligning storage '{}': {}", self.name, e); + tracing::error!("Error aligning storage '{}': {}", self.name, e); return; } }; @@ -657,7 +659,7 @@ impl StorageService { Ok(sample) => { self.process_sample(sample).await; } - Err(e) => log::warn!( + Err(e) => tracing::warn!( "Storage '{}' received an error to align query: {}", self.name, e @@ -707,7 +709,7 @@ struct GarbageCollectionEvent { #[async_trait] impl Timed for GarbageCollectionEvent { async fn run(&mut self) { - log::trace!("Start garbage collection"); + tracing::trace!("Start garbage collection"); let time_limit = NTP64::from(SystemTime::now().duration_since(UNIX_EPOCH).unwrap()) - NTP64::from(self.config.lifespan); @@ -738,6 +740,6 @@ impl Timed for GarbageCollectionEvent { wildcard_updates.remove(&k); } - log::trace!("End garbage collection of obsolete data-infos"); + tracing::trace!("End garbage collection of obsolete data-infos"); } } diff --git a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs index 6de5e2f2ca..ff3bec3e02 100644 --- a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs @@ -35,7 +35,7 @@ pub(crate) async fn start_storage( let storage_name = parts[7]; let name = format!("{uuid}/{storage_name}"); - log::trace!("Start storage '{}' on keyexpr '{}'", name, config.key_expr); + tracing::trace!("Start storage '{}' on keyexpr '{}'", name, config.key_expr); let (tx, rx) = 
flume::bounded(1); diff --git a/plugins/zenoh-plugin-trait/Cargo.toml b/plugins/zenoh-plugin-trait/Cargo.toml index d930437232..f78967fe3d 100644 --- a/plugins/zenoh-plugin-trait/Cargo.toml +++ b/plugins/zenoh-plugin-trait/Cargo.toml @@ -28,7 +28,7 @@ name = "zenoh_plugin_trait" [dependencies] libloading = { workspace = true } -log = { workspace = true } +tracing = {workspace = true} serde = { workspace = true } serde_json = { workspace = true } zenoh-macros = { workspace = true } diff --git a/plugins/zenoh-plugin-trait/src/manager.rs b/plugins/zenoh-plugin-trait/src/manager.rs index d975fa0e25..359d854d56 100644 --- a/plugins/zenoh-plugin-trait/src/manager.rs +++ b/plugins/zenoh-plugin-trait/src/manager.rs @@ -129,7 +129,7 @@ impl ) -> Self { let plugin_loader: StaticPlugin = StaticPlugin::new(); self.plugins.push(PluginRecord::new(plugin_loader)); - log::debug!( + tracing::debug!( "Declared static plugin {}", self.plugins.last().unwrap().name() ); @@ -149,7 +149,7 @@ impl .as_ref() .ok_or("Dynamic plugin loading is disabled")? 
.clone(); - log::debug!("Declared dynamic plugin {} by name {}", &name, &plugin_name); + tracing::debug!("Declared dynamic plugin {} by name {}", &name, &plugin_name); let loader = DynamicPlugin::new(name, DynamicPluginSource::ByName((libloader, plugin_name))); self.plugins.push(PluginRecord::new(loader)); @@ -164,7 +164,7 @@ impl ) -> ZResult<&mut dyn DeclaredPlugin> { let name = name.into(); let paths = paths.iter().map(|p| p.as_ref().into()).collect(); - log::debug!("Declared dynamic plugin {} by paths {:?}", &name, &paths); + tracing::debug!("Declared dynamic plugin {} by paths {:?}", &name, &paths); let loader = DynamicPlugin::new(name, DynamicPluginSource::ByPaths(paths)); self.plugins.push(PluginRecord::new(loader)); Ok(self.plugins.last_mut().unwrap()) @@ -269,7 +269,7 @@ impl P for PluginsManager { fn plugins_status(&self, names: &keyexpr) -> Vec { - log::debug!( + tracing::debug!( "Plugin manager with prefix `{}` : requested plugins_status {:?}", self.default_lib_prefix, names diff --git a/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs b/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs index 1153f8a6ed..1b3168d1cf 100644 --- a/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs +++ b/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs @@ -36,7 +36,7 @@ impl DynamicPluginSource { for path in paths { match unsafe { LibLoader::load_file(path) } { Ok((l, p)) => return Ok((l, p)), - Err(e) => log::debug!("Attempt to load {} failed: {}", path, e), + Err(e) => tracing::debug!("Attempt to load {} failed: {}", path, e), } } bail!("Plugin not found in {:?}", &paths) @@ -55,11 +55,11 @@ impl DynamicPluginStarter { fn get_vtable(lib: &Library, path: &Path) -> ZResult> { - log::debug!("Loading plugin {}", path.to_str().unwrap(),); + tracing::debug!("Loading plugin {}", path.to_str().unwrap(),); let get_plugin_loader_version = unsafe { lib.get:: PluginLoaderVersion>(b"get_plugin_loader_version")? 
}; let plugin_loader_version = get_plugin_loader_version(); - log::debug!("Plugin loader version: {}", &plugin_loader_version); + tracing::debug!("Plugin loader version: {}", &plugin_loader_version); if plugin_loader_version != PLUGIN_LOADER_VERSION { bail!( "Plugin loader version mismatch: host = {}, plugin = {}", @@ -71,7 +71,7 @@ impl let mut plugin_compatibility_record = get_compatibility(); let mut host_compatibility_record = Compatibility::with_empty_plugin_version::(); - log::debug!( + tracing::debug!( "Plugin compativilty record: {:?}", &plugin_compatibility_record ); @@ -173,10 +173,10 @@ impl DeclaredPlugin LoadedPlugin StartedPlugin ZResult<&mut dyn StartedPlugin> { if self.instance.is_none() { - log::debug!("Plugin `{}` started", self.name()); + tracing::debug!("Plugin `{}` started", self.name()); self.instance = Some(P::start(self.name(), args)?); } else { - log::warn!("Plugin `{}` already started", self.name()); + tracing::warn!("Plugin `{}` already started", self.name()); } Ok(self) } @@ -126,7 +126,7 @@ where self } fn stop(&mut self) { - log::debug!("Plugin `{}` stopped", self.name()); + tracing::debug!("Plugin `{}` stopped", self.name()); self.instance = None; } fn instance(&self) -> &Instance { diff --git a/zenoh-ext/Cargo.toml b/zenoh-ext/Cargo.toml index 6a0488cb54..99b6ecf5c1 100644 --- a/zenoh-ext/Cargo.toml +++ b/zenoh-ext/Cargo.toml @@ -33,17 +33,16 @@ default = [] [dependencies] tokio = { workspace = true, features = ["rt", "sync", "time", "macros", "io-std"] } bincode = { workspace = true } -env_logger = { workspace = true } +zenoh-util = {workspace = true } flume = { workspace = true } futures = { workspace = true } -log = { workspace = true } +tracing = {workspace = true} serde = { workspace = true, features = ["default"] } zenoh = { workspace = true, features = ["unstable"], default-features = false } zenoh-core = { workspace = true } zenoh-macros = { workspace = true } zenoh-result = { workspace = true } zenoh-sync = { workspace = 
true } -zenoh-util = { workspace = true } zenoh-runtime = { workspace = true } zenoh-task = { workspace = true } diff --git a/zenoh-ext/examples/z_member.rs b/zenoh-ext/examples/z_member.rs index fb10ac4cd8..f660482a3c 100644 --- a/zenoh-ext/examples/z_member.rs +++ b/zenoh-ext/examples/z_member.rs @@ -20,7 +20,7 @@ use zenoh_ext::group::*; #[tokio::main] async fn main() { - env_logger::init(); + zenoh_util::init_log_from_env(); let z = Arc::new(zenoh::open(Config::default()).res().await.unwrap()); let member = Member::new(z.zid().to_string()) .unwrap() diff --git a/zenoh-ext/examples/z_pub_cache.rs b/zenoh-ext/examples/z_pub_cache.rs index e564ffb8f1..34c13dbce8 100644 --- a/zenoh-ext/examples/z_pub_cache.rs +++ b/zenoh-ext/examples/z_pub_cache.rs @@ -20,7 +20,7 @@ use zenoh_ext::*; #[tokio::main] async fn main() { // Initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (config, key_expr, value, history, prefix, complete) = parse_args(); diff --git a/zenoh-ext/examples/z_query_sub.rs b/zenoh-ext/examples/z_query_sub.rs index 570d15ac15..c7491cfa61 100644 --- a/zenoh-ext/examples/z_query_sub.rs +++ b/zenoh-ext/examples/z_query_sub.rs @@ -21,7 +21,7 @@ use zenoh_ext::*; #[tokio::main] async fn main() { // Initiate logging - env_logger::init(); + zenoh_util::init_log_from_env(); let (config, key_expr, query) = parse_args(); diff --git a/zenoh-ext/examples/z_view_size.rs b/zenoh-ext/examples/z_view_size.rs index 64e7b3ea4c..e23b122928 100644 --- a/zenoh-ext/examples/z_view_size.rs +++ b/zenoh-ext/examples/z_view_size.rs @@ -20,7 +20,7 @@ use zenoh_ext::group::*; #[tokio::main] async fn main() { - env_logger::init(); + zenoh_util::init_log_from_env(); let (config, group_name, id, size, timeout) = parse_args(); diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 3595ccad08..6bd9841c29 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -187,7 +187,7 @@ async fn keep_alive_task(state: Arc) { 
.mul_f32(state.local_member.refresh_ratio); loop { tokio::time::sleep(period).await; - log::trace!("Sending Keep Alive for: {}", &state.local_member.mid); + tracing::trace!("Sending Keep Alive for: {}", &state.local_member.mid); let _ = state.group_publisher.put(buf.clone()).res().await; } } @@ -205,11 +205,11 @@ fn spawn_watchdog(s: Arc, period: Duration) -> JoinHandle<()> { .collect(); for e in &expired_members { - log::debug!("Member with lease expired: {}", e); + tracing::debug!("Member with lease expired: {}", e); ms.remove(e); } if !expired_members.is_empty() { - log::debug!("Other members list: {:?}", ms.keys()); + tracing::debug!("Other members list: {:?}", ms.keys()); drop(ms); let u_evt = &*s.user_events_tx.lock().await; for e in expired_members { @@ -231,12 +231,12 @@ async fn query_handler(z: Arc, state: Arc) { ) .try_into() .unwrap(); - log::debug!("Started query handler for: {}", &qres); + tracing::debug!("Started query handler for: {}", &qres); let buf = bincode::serialize(&state.local_member).unwrap(); let queryable = z.declare_queryable(&qres).res().await.unwrap(); while let Ok(query) = queryable.recv_async().await { - log::trace!("Serving query for: {}", &qres); + tracing::trace!("Serving query for: {}", &qres); query .reply(Ok(Sample::new(qres.clone(), buf.clone()))) .res() @@ -255,11 +255,11 @@ async fn net_event_handler(z: Arc, state: Arc) { match bincode::deserialize::(&(s.value.payload.contiguous())) { Ok(evt) => match evt { GroupNetEvent::Join(je) => { - log::debug!("Member join: {:?}", &je.member); + tracing::debug!("Member join: {:?}", &je.member); let alive_till = Instant::now().add(je.member.lease); let mut ms = state.members.lock().await; ms.insert(je.member.mid.clone(), (je.member.clone(), alive_till)); - log::debug!("Other members list: {:?}", ms.keys()); + tracing::debug!("Other members list: {:?}", ms.keys()); state.cond.notify_all(); drop(ms); let u_evt = &*state.user_events_tx.lock().await; @@ -268,10 +268,10 @@ async fn 
net_event_handler(z: Arc, state: Arc) { } } GroupNetEvent::Leave(le) => { - log::debug!("Member leave: {:?}", &le.mid); + tracing::debug!("Member leave: {:?}", &le.mid); let mut ms = state.members.lock().await; ms.remove(&le.mid); - log::debug!("Other members list: {:?}", ms.keys()); + tracing::debug!("Other members list: {:?}", ms.keys()); drop(ms); let u_evt = &*state.user_events_tx.lock().await; if let Some(tx) = u_evt { @@ -279,7 +279,7 @@ async fn net_event_handler(z: Arc, state: Arc) { } } GroupNetEvent::KeepAlive(kae) => { - log::debug!( + tracing::debug!( "KeepAlive from {} ({})", &kae.mid, if kae.mid.ne(&state.local_member.mid) { @@ -293,19 +293,19 @@ async fn net_event_handler(z: Arc, state: Arc) { let v = mm.remove(&kae.mid); match v { Some((m, _)) => { - log::trace!("Updating leasefor: {:?}", &kae.mid); + tracing::trace!("Updating leasefor: {:?}", &kae.mid); let alive_till = Instant::now().add(m.lease); mm.insert(m.mid.clone(), (m, alive_till)); } None => { - log::debug!( + tracing::debug!( "Received Keep Alive from unknown member: {}", &kae.mid ); let qres = format!("{}/{}/{}", GROUP_PREFIX, &state.gid, kae.mid); // @TODO: we could also send this member info let qc = ConsolidationMode::None; - log::trace!("Issuing Query for {}", &qres); + tracing::trace!("Issuing Query for {}", &qres); let receiver = z.get(&qres).consolidation(qc).res().await.unwrap(); while let Ok(reply) = receiver.recv_async().await { @@ -317,12 +317,12 @@ async fn net_event_handler(z: Arc, state: Arc) { Ok(m) => { let mut expiry = Instant::now(); expiry = expiry.add(m.lease); - log::debug!( + tracing::debug!( "Received member information: {:?}", &m ); mm.insert(kae.mid.clone(), (m.clone(), expiry)); - log::debug!( + tracing::debug!( "Other members list: {:?}", mm.keys() ); @@ -336,13 +336,13 @@ async fn net_event_handler(z: Arc, state: Arc) { } } Err(e) => { - log::warn!( + tracing::warn!( "Unable to deserialize the Member info received: {}", e); } } } Err(e) => { - 
log::warn!("Error received: {}", e); + tracing::warn!("Error received: {}", e); } } } @@ -350,12 +350,12 @@ async fn net_event_handler(z: Arc, state: Arc) { } } } else { - log::trace!("KeepAlive from Local Participant -- Ignoring"); + tracing::trace!("KeepAlive from Local Participant -- Ignoring"); } } }, Err(e) => { - log::warn!("Failed decoding net-event due to: {:?}", e); + tracing::warn!("Failed decoding net-event due to: {:?}", e); } } } @@ -390,7 +390,7 @@ impl Group { let is_auto_liveliness = matches!(with.liveliness, MemberLiveliness::Auto); // announce the member: - log::debug!("Sending Join Message for local member: {:?}", &with); + tracing::debug!("Sending Join Message for local member: {:?}", &with); let join_evt = GroupNetEvent::Join(JoinEvent { member: with }); let buf = bincode::serialize(&join_evt).unwrap(); let _ = state.group_publisher.put(buf).res().await; diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index aede6a2ee4..344fe99d37 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -127,7 +127,7 @@ impl<'a> PublicationCache<'a> { } Some(Err(e)) => bail!("Invalid key expression for queryable_prefix: {}", e), }; - log::debug!( + tracing::debug!( "Create PublicationCache on {} with history={} resource_limit={:?}", &key_expr, conf.history, @@ -192,7 +192,7 @@ impl<'a> PublicationCache<'a> { } queue.push_back(sample); } else if cache.len() >= limit { - log::error!("PublicationCache on {}: resource_limit exceeded - can't cache publication for a new resource", + tracing::error!("PublicationCache on {}: resource_limit exceeded - can't cache publication for a new resource", pub_key_expr); } else { let mut queue: VecDeque = VecDeque::new(); @@ -214,7 +214,7 @@ impl<'a> PublicationCache<'a> { } } if let Err(e) = query.reply(Ok(sample.clone())).res_async().await { - log::warn!("Error replying to query: {}", e); + tracing::warn!("Error replying to query: {}", e); } } } @@ -228,7 
+228,7 @@ impl<'a> PublicationCache<'a> { } } if let Err(e) = query.reply(Ok(sample.clone())).res_async().await { - log::warn!("Error replying to query: {}", e); + tracing::warn!("Error replying to query: {}", e); } } } diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 978d348da1..4d97670e1e 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -667,7 +667,9 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { if state.pending_fetches == 0 { callback(s); } else { - log::trace!("Sample received while fetch in progress: push it to merge_queue"); + tracing::trace!( + "Sample received while fetch in progress: push it to merge_queue" + ); // ensure the sample has a timestamp, thus it will always be sorted into the MergeQueue // after any timestamped Sample possibly coming from a fetch reply. s.ensure_timestamp(); @@ -792,12 +794,12 @@ impl Drop for RepliesHandler { fn drop(&mut self) { let mut state = zlock!(self.state); state.pending_fetches -= 1; - log::trace!( + tracing::trace!( "Fetch done - {} fetches still in progress", state.pending_fetches ); if state.pending_fetches == 0 { - log::debug!( + tracing::debug!( "All fetches done. 
Replies and live publications merged - {} samples to propagate", state.merge_queue.len() ); @@ -912,13 +914,13 @@ where TryIntoSample: TryInto, >::Error: Into, { - log::debug!("Fetch data for FetchingSubscriber"); + tracing::debug!("Fetch data for FetchingSubscriber"); (fetch)(Box::new(move |s: TryIntoSample| match s.try_into() { Ok(s) => { let mut state = zlock!(handler.state); - log::trace!("Fetched sample received: push it to merge_queue"); + tracing::trace!("Fetched sample received: push it to merge_queue"); state.merge_queue.push(s); } - Err(e) => log::debug!("Received error fetching data: {}", e.into()), + Err(e) => tracing::debug!("Received error fetching data: {}", e.into()), })) } diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 144e5dbf72..7b890fcb7a 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -69,14 +69,14 @@ tokio-util = { workspace = true } async-trait = { workspace = true } base64 = { workspace = true } const_format = { workspace = true } -env_logger = { workspace = true } + event-listener = { workspace = true } flume = { workspace = true } form_urlencoded = { workspace = true } futures = { workspace = true } git-version = { workspace = true } lazy_static = { workspace = true } -log = { workspace = true } +tracing = {workspace = true} ordered-float = { workspace = true } paste = { workspace = true } petgraph = { workspace = true } diff --git a/zenoh/src/handlers.rs b/zenoh/src/handlers.rs index 69828a5d7f..cf187298a9 100644 --- a/zenoh/src/handlers.rs +++ b/zenoh/src/handlers.rs @@ -49,7 +49,7 @@ impl IntoCallbackReceiverPair<'static, T> ( Dyn::new(move |t| { if let Err(e) = sender.send(t) { - log::error!("{}", e) + tracing::error!("{}", e) } }), receiver, @@ -72,7 +72,7 @@ impl IntoCallbackReceiverPair<'static, T> ( Dyn::new(move |t| { if let Err(e) = sender.send(t) { - log::error!("{}", e) + tracing::error!("{}", e) } }), receiver, diff --git a/zenoh/src/key_expr.rs b/zenoh/src/key_expr.rs index 628f07611a..b8837ba31e 100644 --- 
a/zenoh/src/key_expr.rs +++ b/zenoh/src/key_expr.rs @@ -657,7 +657,7 @@ impl SyncResolve for KeyExprUndeclaration<'_> { } _ => return Err(zerror!("Failed to undeclare {}, make sure you use the result of `Session::declare_keyexpr` to call `Session::undeclare`", expr).into()), }; - log::trace!("undeclare_keyexpr({:?})", expr_id); + tracing::trace!("undeclare_keyexpr({:?})", expr_id); let mut state = zwrite!(session.state); state.local_resources.remove(&expr_id); diff --git a/zenoh/src/net/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs index 5c473e8ad8..4ef0b9eb44 100644 --- a/zenoh/src/net/primitives/mux.rs +++ b/zenoh/src/net/primitives/mux.rs @@ -60,7 +60,7 @@ impl Primitives for Mux { let _ = self.handler.schedule(ctx.msg); } } else { - log::error!("Uninitialized multiplexer!"); + tracing::error!("Uninitialized multiplexer!"); } } @@ -84,7 +84,7 @@ impl Primitives for Mux { let _ = self.handler.schedule(ctx.msg); } } else { - log::error!("Uninitialized multiplexer!"); + tracing::error!("Uninitialized multiplexer!"); } } @@ -108,7 +108,7 @@ impl Primitives for Mux { let _ = self.handler.schedule(ctx.msg); } } else { - log::error!("Uninitialized multiplexer!"); + tracing::error!("Uninitialized multiplexer!"); } } @@ -132,7 +132,7 @@ impl Primitives for Mux { let _ = self.handler.schedule(ctx.msg); } } else { - log::error!("Uninitialized multiplexer!"); + tracing::error!("Uninitialized multiplexer!"); } } @@ -156,7 +156,7 @@ impl Primitives for Mux { let _ = self.handler.schedule(ctx.msg); } } else { - log::error!("Uninitialized multiplexer!"); + tracing::error!("Uninitialized multiplexer!"); } } @@ -211,7 +211,7 @@ impl EPrimitives for Mux { let _ = self.handler.schedule(ctx.msg); } } else { - log::error!("Uninitialized multiplexer!"); + tracing::error!("Uninitialized multiplexer!"); } } @@ -336,7 +336,7 @@ impl Primitives for McastMux { let _ = self.handler.schedule(ctx.msg); } } else { - log::error!("Uninitialized multiplexer!"); + 
tracing::error!("Uninitialized multiplexer!"); } } @@ -360,7 +360,7 @@ impl Primitives for McastMux { let _ = self.handler.schedule(ctx.msg); } } else { - log::error!("Uninitialized multiplexer!"); + tracing::error!("Uninitialized multiplexer!"); } } @@ -384,7 +384,7 @@ impl Primitives for McastMux { let _ = self.handler.schedule(ctx.msg); } } else { - log::error!("Uninitialized multiplexer!"); + tracing::error!("Uninitialized multiplexer!"); } } @@ -408,7 +408,7 @@ impl Primitives for McastMux { let _ = self.handler.schedule(ctx.msg); } } else { - log::error!("Uninitialized multiplexer!"); + tracing::error!("Uninitialized multiplexer!"); } } @@ -432,7 +432,7 @@ impl Primitives for McastMux { let _ = self.handler.schedule(ctx.msg); } } else { - log::error!("Uninitialized multiplexer!"); + tracing::error!("Uninitialized multiplexer!"); } } @@ -487,7 +487,7 @@ impl EPrimitives for McastMux { let _ = self.handler.schedule(ctx.msg); } } else { - log::error!("Uninitialized multiplexer!"); + tracing::error!("Uninitialized multiplexer!"); } } diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 765779ee40..653849ee5a 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -272,7 +272,7 @@ impl Primitives for Face { pull_data(&self.tables.tables, &self.state.clone(), msg.wire_expr); } _ => { - log::error!("Unsupported request"); + tracing::error!("Unsupported request"); } } } diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index da6ae0c371..46a00bd382 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -38,7 +38,7 @@ pub(crate) fn declare_subscription( sub_info: &SubscriberInfo, node_id: NodeId, ) { - log::debug!("Declare subscription {}", face); + tracing::debug!("Declare subscription {}", face); let rtables = zread!(tables.tables); match rtables .get_mapping(face, 
&expr.scope, expr.mapping) @@ -86,7 +86,7 @@ pub(crate) fn declare_subscription( } drop(wtables); } - None => log::error!("Declare subscription for unknown scope {}!", expr.scope), + None => tracing::error!("Declare subscription for unknown scope {}!", expr.scope), } } @@ -97,7 +97,7 @@ pub(crate) fn undeclare_subscription( expr: &WireExpr, node_id: NodeId, ) { - log::debug!("Undeclare subscription {}", face); + tracing::debug!("Undeclare subscription {}", face); let rtables = zread!(tables.tables); match rtables.get_mapping(face, &expr.scope, expr.mapping) { Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { @@ -126,9 +126,9 @@ pub(crate) fn undeclare_subscription( Resource::clean(&mut res); drop(wtables); } - None => log::error!("Undeclare unknown subscription!"), + None => tracing::error!("Undeclare unknown subscription!"), }, - None => log::error!("Undeclare subscription with unknown scope!"), + None => tracing::error!("Undeclare subscription with unknown scope!"), } } @@ -267,14 +267,14 @@ macro_rules! treat_timestamp { Ok(()) => (), Err(e) => { if $drop { - log::error!( + tracing::error!( "Error treating timestamp for received Data ({}). Drop it!", e ); return; } else { data.timestamp = Some(hlc.new_timestamp()); - log::error!( + tracing::error!( "Error treating timestamp for received Data ({}). Replace timestamp: {:?}", e, data.timestamp); @@ -284,7 +284,7 @@ macro_rules! 
treat_timestamp { } else { // Timestamp not present; add one data.timestamp = Some(hlc.new_timestamp()); - log::trace!("Adding timestamp to DataInfo: {:?}", data.timestamp); + tracing::trace!("Adding timestamp to DataInfo: {:?}", data.timestamp); } } } @@ -435,7 +435,7 @@ pub fn full_reentrant_route_data( let tables = zread!(tables_ref.tables); match tables.get_mapping(face, &expr.scope, expr.mapping).cloned() { Some(prefix) => { - log::trace!( + tracing::trace!( "Route data for res {}{}", prefix.expr(), expr.suffix.as_ref() @@ -552,7 +552,7 @@ pub fn full_reentrant_route_data( } } None => { - log::error!("Route data with unknown scope {}!", expr.scope); + tracing::error!("Route data with unknown scope {}!", expr.scope); } } } @@ -592,14 +592,14 @@ pub fn pull_data(tables_ref: &RwLock, face: &Arc, expr: WireE } } None => { - log::error!( + tracing::error!( "Pull data for unknown subscription {} (no info)!", prefix.expr() + expr.suffix.as_ref() ); } }, None => { - log::error!( + tracing::error!( "Pull data for unknown subscription {} (no context)!", prefix.expr() + expr.suffix.as_ref() ); @@ -607,14 +607,14 @@ pub fn pull_data(tables_ref: &RwLock, face: &Arc, expr: WireE } } None => { - log::error!( + tracing::error!( "Pull data for unknown subscription {} (no resource)!", prefix.expr() + expr.suffix.as_ref() ); } }, None => { - log::error!("Pull data with unknown scope {}!", expr.scope); + tracing::error!("Pull data with unknown scope {}!", expr.scope); } }; } diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 570377acd1..62eae0703e 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -50,7 +50,7 @@ pub(crate) fn declare_queryable( qabl_info: &QueryableInfo, node_id: NodeId, ) { - log::debug!("Register queryable {}", face); + tracing::debug!("Register queryable {}", face); let rtables = zread!(tables.tables); match rtables .get_mapping(face, 
&expr.scope, expr.mapping) @@ -95,7 +95,7 @@ pub(crate) fn declare_queryable( } drop(wtables); } - None => log::error!("Declare queryable for unknown scope {}!", expr.scope), + None => tracing::error!("Declare queryable for unknown scope {}!", expr.scope), } } @@ -131,9 +131,9 @@ pub(crate) fn undeclare_queryable( Resource::clean(&mut res); drop(wtables); } - None => log::error!("Undeclare unknown queryable!"), + None => tracing::error!("Undeclare unknown queryable!"), }, - None => log::error!("Undeclare queryable with unknown scope!"), + None => tracing::error!("Undeclare queryable with unknown scope!"), } } @@ -402,7 +402,7 @@ impl Timed for QueryCleanup { .remove(&self.qid) { drop(tables_lock); - log::warn!( + tracing::warn!( "Didn't receive final reply {}:{} from {}: Timeout!", query.0.src_face, self.qid, @@ -525,7 +525,7 @@ pub fn route_query( let rtables = zread!(tables_ref.tables); match rtables.get_mapping(face, &expr.scope, expr.mapping) { Some(prefix) => { - log::debug!( + tracing::debug!( "Route query {}:{} for res {}{}", face, qid, @@ -605,7 +605,7 @@ pub fn route_query( } if route.is_empty() { - log::debug!( + tracing::debug!( "Send final reply {}:{} (no matching queryables or not master)", face, qid @@ -634,7 +634,7 @@ pub fn route_query( inc_req_stats!(outface, tx, admin, body) } - log::trace!("Propagate query {}:{} to {}", face, qid, outface); + tracing::trace!("Propagate query {}:{} to {}", face, qid, outface); outface.primitives.send_request(RoutingContext::with_expr( Request { id: *qid, @@ -665,7 +665,7 @@ pub fn route_query( inc_req_stats!(outface, tx, admin, body) } - log::trace!("Propagate query {}:{} to {}", face, qid, outface); + tracing::trace!("Propagate query {}:{} to {}", face, qid, outface); outface.primitives.send_request(RoutingContext::with_expr( Request { id: *qid, @@ -684,7 +684,7 @@ pub fn route_query( } } } else { - log::debug!("Send final reply {}:{} (not master)", face, qid); + tracing::debug!("Send final reply {}:{} (not 
master)", face, qid); drop(rtables); face.primitives .clone() @@ -699,7 +699,7 @@ pub fn route_query( } } None => { - log::error!( + tracing::error!( "Route query with unknown scope {}! Send final reply.", expr.scope ); @@ -763,7 +763,7 @@ pub(crate) fn route_send_response( "".to_string(), // @TODO provide the proper key expression of the response for interceptors )); } - None => log::warn!( + None => tracing::warn!( "Route reply {}:{} from {}: Query nof found!", face, qid, @@ -781,7 +781,7 @@ pub(crate) fn route_send_response_final( match get_mut_unchecked(face).pending_queries.remove(&qid) { Some(query) => { drop(queries_lock); - log::debug!( + tracing::debug!( "Received final reply {}:{} from {}", query.0.src_face, qid, @@ -789,7 +789,7 @@ pub(crate) fn route_send_response_final( ); finalize_pending_query(query); } - None => log::warn!( + None => tracing::warn!( "Route final reply {}:{} from {}: Query nof found!", face, qid, @@ -810,7 +810,7 @@ pub(crate) fn finalize_pending_query(query: (Arc, CancellationToken)) { let (query, cancellation_token) = query; cancellation_token.cancel(); if let Some(query) = Arc::into_inner(query) { - log::debug!("Propagate final reply {}:{}", query.src_face, query.src_qid); + tracing::debug!("Propagate final reply {}:{}", query.src_face, query.src_qid); query .src_face .primitives diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 1762ff2cb4..88c6908028 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -294,7 +294,7 @@ impl Resource { if let Some(ref mut parent) = mutres.parent { if Arc::strong_count(res) <= 3 && res.childs.is_empty() { // consider only childless resource held by only one external object (+ 1 strong count for resclone, + 1 strong count for res.parent to a total of 3 ) - log::debug!("Unregister resource {}", res.expr()); + tracing::debug!("Unregister resource {}", res.expr()); if let 
Some(context) = mutres.context.as_mut() { for match_ in &mut context.matches { let mut match_ = match_.upgrade().unwrap(); @@ -355,8 +355,8 @@ impl Resource { Some(res) => Resource::make_resource(tables, res, rest), None => { let mut new = Arc::new(Resource::new(from, chunk, None)); - if log::log_enabled!(log::Level::Debug) && rest.is_empty() { - log::debug!("Register resource {}", new.expr()); + if tracing::enabled!(tracing::Level::DEBUG) && rest.is_empty() { + tracing::debug!("Register resource {}", new.expr()); } let res = Resource::make_resource(tables, &mut new, rest); get_mut_unchecked(from) @@ -380,8 +380,8 @@ impl Resource { Some(res) => Resource::make_resource(tables, res, rest), None => { let mut new = Arc::new(Resource::new(from, chunk, None)); - if log::log_enabled!(log::Level::Debug) && rest.is_empty() { - log::debug!("Register resource {}", new.expr()); + if tracing::enabled!(tracing::Level::DEBUG) && rest.is_empty() { + tracing::debug!("Register resource {}", new.expr()); } let res = Resource::make_resource(tables, &mut new, rest); get_mut_unchecked(from) @@ -641,7 +641,7 @@ impl Resource { } get_mut_unchecked(res).context_mut().matches = matches; } else { - log::error!("Call match_resource() on context less res {}", res.expr()); + tracing::error!("Call match_resource() on context less res {}", res.expr()); } } @@ -680,7 +680,7 @@ pub fn register_expr( let mut fullexpr = prefix.expr(); fullexpr.push_str(expr.suffix.as_ref()); if res.expr() != fullexpr { - log::error!("Resource {} remapped. Remapping unsupported!", expr_id); + tracing::error!("Resource {} remapped. 
Remapping unsupported!", expr_id); } } None => { @@ -731,7 +731,7 @@ pub fn register_expr( drop(wtables); } }, - None => log::error!("Declare resource with unknown scope {}!", expr.scope), + None => tracing::error!("Declare resource with unknown scope {}!", expr.scope), } } @@ -739,7 +739,7 @@ pub fn unregister_expr(tables: &TablesLock, face: &mut Arc, expr_id: let wtables = zwrite!(tables.tables); match get_mut_unchecked(face).remote_mappings.remove(&expr_id) { Some(mut res) => Resource::clean(&mut res), - None => log::error!("Undeclare unknown resource!"), + None => tracing::error!("Undeclare unknown resource!"), } drop(wtables); } diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index 10605b25b1..2d5eb436e7 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -171,12 +171,12 @@ impl Tables { pub fn close_face(tables: &TablesLock, face: &Weak) { match face.upgrade() { Some(mut face) => { - log::debug!("Close {}", face); + tracing::debug!("Close {}", face); face.task_controller.terminate_all(Duration::from_secs(10)); finalize_pending_queries(tables, &mut face); zlock!(tables.ctrl_lock).close_face(tables, &mut face); } - None => log::error!("Face already closed!"), + None => tracing::error!("Face already closed!"), } } diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 8968ec8fc6..3845917240 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -89,7 +89,7 @@ fn register_client_subscription( // Register subscription { let res = get_mut_unchecked(res); - log::debug!("Register subscription {} for {}", res.expr(), face); + tracing::debug!("Register subscription {} for {}", res.expr(), face); match res.session_ctxs.get_mut(&face.id) { Some(ctx) => match &ctx.subs { Some(info) => { @@ -194,7 +194,7 @@ pub(super) fn undeclare_client_subscription( face: &mut 
Arc, res: &mut Arc, ) { - log::debug!("Unregister client subscription {} for {}", res.expr(), face); + tracing::debug!("Unregister client subscription {} for {}", res.expr(), face); if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { get_mut_unchecked(ctx).subs = None; } @@ -296,7 +296,7 @@ impl HatPubSubTrait for HatCode { if key_expr.ends_with('/') { return Arc::new(route); } - log::trace!( + tracing::trace!( "compute_data_route({}, {:?}, {:?})", key_expr, source, @@ -305,7 +305,7 @@ impl HatPubSubTrait for HatCode { let key_expr = match OwnedKeyExpr::try_from(key_expr) { Ok(ke) => ke, Err(e) => { - log::warn!("Invalid KE reached the system: {}", e); + tracing::warn!("Invalid KE reached the system: {}", e); return Arc::new(route); } }; diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index e89cfb174d..609d6e0b04 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -120,7 +120,7 @@ fn register_client_queryable( // Register queryable { let res = get_mut_unchecked(res); - log::debug!("Register queryable {} (face: {})", res.expr(), face,); + tracing::debug!("Register queryable {} (face: {})", res.expr(), face,); get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { Arc::new(SessionContext { face: face.clone(), @@ -189,7 +189,7 @@ pub(super) fn undeclare_client_queryable( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister client queryable {} for {}", res.expr(), face); + tracing::debug!("Unregister client queryable {} for {}", res.expr(), face); if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { get_mut_unchecked(ctx).qabl = None; if ctx.qabl.is_none() { @@ -294,7 +294,7 @@ impl HatQueriesTrait for HatCode { if key_expr.ends_with('/') { return EMPTY_ROUTE.clone(); } - log::trace!( + tracing::trace!( "compute_query_route({}, {:?}, {:?})", key_expr, source, @@ -303,7 +303,7 @@ impl 
HatQueriesTrait for HatCode { let key_expr = match OwnedKeyExpr::try_from(key_expr) { Ok(ke) => ke, Err(e) => { - log::warn!("Invalid KE reached the system: {}", e); + tracing::warn!("Invalid KE reached the system: {}", e); return EMPTY_ROUTE.clone(); } }; @@ -352,7 +352,7 @@ impl HatQueriesTrait for HatCode { let key_expr = match OwnedKeyExpr::try_from(key_expr) { Ok(ke) => ke, Err(e) => { - log::warn!("Invalid KE reached the system: {}", e); + tracing::warn!("Invalid KE reached the system: {}", e); return result; } }; diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 35afaf30d7..2bf22489de 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -136,7 +136,7 @@ impl HatTables { } fn schedule_compute_trees(&mut self, tables_ref: Arc) { - log::trace!("Schedule computations"); + tracing::trace!("Schedule computations"); if self.peers_trees_task.is_none() { let task = TerminatableTask::spawn( zenoh_runtime::ZRuntime::Net, @@ -147,14 +147,14 @@ impl HatTables { .await; let mut tables = zwrite!(tables_ref.tables); - log::trace!("Compute trees"); + tracing::trace!("Compute trees"); let new_childs = hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(); - log::trace!("Compute routes"); + tracing::trace!("Compute routes"); pubsub::pubsub_tree_change(&mut tables, &new_childs); queries::queries_tree_change(&mut tables, &new_childs); - log::trace!("Computations completed"); + tracing::trace!("Computations completed"); hat_mut!(tables).peers_trees_task = None; }, TerminatableTask::create_cancellation_token(), @@ -426,7 +426,7 @@ impl HatBaseTrait for HatCode { hat_mut!(tables).schedule_compute_trees(tables_ref.clone()); }; } - (_, _) => log::error!("Closed transport in session closing!"), + (_, _) => tracing::error!("Closed transport in session closing!"), } Ok(()) } @@ -513,7 +513,7 @@ fn get_peer(tables: &Tables, face: &Arc, nodeid: NodeId) 
-> Option match link.get_zid(&(nodeid as u64)) { Some(router) => Some(*router), None => { - log::error!( + tracing::error!( "Received peer declaration with unknown routing context id {}", nodeid ); @@ -521,7 +521,7 @@ fn get_peer(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { - log::error!( + tracing::error!( "Could not find corresponding link in peers network for {}", face ); diff --git a/zenoh/src/net/routing/hat/linkstate_peer/network.rs b/zenoh/src/net/routing/hat/linkstate_peer/network.rs index 4d3497c861..d5f37e3733 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/network.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/network.rs @@ -132,7 +132,7 @@ impl Network { autoconnect: WhatAmIMatcher, ) -> Self { let mut graph = petgraph::stable_graph::StableGraph::default(); - log::debug!("{} Add node (self) {}", name, zid); + tracing::debug!("{} Add node (self) {}", name, zid); let idx = graph.add_node(Node { zid, whatami: Some(runtime.whatami()), @@ -190,7 +190,7 @@ impl Network { Some(link) => match link.get_local_psid(&(context as u64)) { Some(psid) => (*psid).try_into().unwrap_or(0), None => { - log::error!( + tracing::error!( "Cannot find local psid for context {} on link {}", context, link_id @@ -199,7 +199,7 @@ impl Network { } }, None => { - log::error!("Cannot find link {}", link_id); + tracing::error!("Cannot find link {}", link_id); 0 } } @@ -225,7 +225,7 @@ impl Network { if let Some(idx2) = self.get_idx(zid) { Some(idx2.index().try_into().unwrap()) } else { - log::error!( + tracing::error!( "{} Internal error building link state: cannot get index of {}", self.name, zid @@ -278,12 +278,12 @@ impl Network { fn send_on_link(&self, idxs: Vec<(NodeIndex, Details)>, transport: &TransportUnicast) { if let Ok(msg) = self.make_msg(idxs) { - log::trace!("{} Send to {:?} {:?}", self.name, transport.get_zid(), msg); + tracing::trace!("{} Send to {:?} {:?}", self.name, transport.get_zid(), msg); if let Err(e) = transport.schedule(msg) { - 
log::debug!("{} Error sending LinkStateList: {}", self.name, e); + tracing::debug!("{} Error sending LinkStateList: {}", self.name, e); } } else { - log::error!("Failed to encode Linkstate message"); + tracing::error!("Failed to encode Linkstate message"); } } @@ -294,14 +294,14 @@ impl Network { if let Ok(msg) = self.make_msg(idxs) { for link in self.links.values() { if parameters(link) { - log::trace!("{} Send to {} {:?}", self.name, link.zid, msg); + tracing::trace!("{} Send to {} {:?}", self.name, link.zid, msg); if let Err(e) = link.transport.schedule(msg.clone()) { - log::debug!("{} Error sending LinkStateList: {}", self.name, e); + tracing::debug!("{} Error sending LinkStateList: {}", self.name, e); } } } } else { - log::error!("Failed to encode Linkstate message"); + tracing::error!("Failed to encode Linkstate message"); } } @@ -336,7 +336,7 @@ impl Network { } pub(super) fn link_states(&mut self, link_states: Vec, src: ZenohId) -> Changes { - log::trace!("{} Received from {} raw: {:?}", self.name, src, link_states); + tracing::trace!("{} Received from {} raw: {:?}", self.name, src, link_states); let strong_runtime = self.runtime.upgrade().unwrap(); let graph = &self.graph; @@ -345,7 +345,7 @@ impl Network { let src_link = match links.values_mut().find(|link| link.zid == src) { Some(link) => link, None => { - log::error!( + tracing::error!( "{} Received LinkStateList from unknown link {}", self.name, src @@ -383,7 +383,7 @@ impl Network { link_state.links, )), None => { - log::error!( + tracing::error!( "Received LinkState from {} with unknown node mapping {}", src, link_state.psid @@ -406,7 +406,7 @@ impl Network { if let Some(zid) = src_link.get_zid(l) { Some(*zid) } else { - log::error!( + tracing::error!( "{} Received LinkState from {} with unknown link mapping {}", self.name, src, @@ -420,14 +420,14 @@ impl Network { }) .collect::>(); - // log::trace!( + // tracing::trace!( // "{} Received from {} mapped: {:?}", // self.name, // src, // link_states // 
); for link_state in &link_states { - log::trace!( + tracing::trace!( "{} Received from {} mapped: {:?}", self.name, src, @@ -545,7 +545,7 @@ impl Network { sn, links: links.clone(), }; - log::debug!("{} Add node (state) {}", self.name, zid); + tracing::debug!("{} Add node (state) {}", self.name, zid); let idx = self.add_node(node); Some((links, idx, true)) } @@ -559,7 +559,7 @@ impl Network { for link in links { if let Some(idx2) = self.get_idx(link) { if self.graph[idx2].links.contains(&self.graph[*idx1].zid) { - log::trace!( + tracing::trace!( "{} Update edge (state) {} {}", self.name, self.graph[*idx1].zid, @@ -575,7 +575,7 @@ impl Network { sn: 0, links: vec![], }; - log::debug!("{} Add node (reintroduced) {}", self.name, link.clone()); + tracing::debug!("{} Add node (reintroduced) {}", self.name, link.clone()); let idx = self.add_node(node); reintroduced_nodes.push((vec![], idx, true)); } @@ -587,7 +587,7 @@ impl Network { } for (eidx, idx2) in edges { if !links.contains(&self.graph[idx2].zid) { - log::trace!( + tracing::trace!( "{} Remove edge (state) {} {}", self.name, self.graph[*idx1].zid, @@ -709,7 +709,7 @@ impl Network { let (idx, new) = match self.get_idx(&zid) { Some(idx) => (idx, false), None => { - log::debug!("{} Add node (link) {}", self.name, zid); + tracing::debug!("{} Add node (link) {}", self.name, zid); ( self.add_node(Node { zid, @@ -723,7 +723,7 @@ impl Network { } }; if self.full_linkstate && self.graph[idx].links.contains(&self.graph[self.idx].zid) { - log::trace!("Update edge (link) {} {}", self.graph[self.idx].zid, zid); + tracing::trace!("Update edge (link) {} {}", self.graph[self.idx].zid, zid); self.update_edge(self.idx, idx); } self.graph[self.idx].links.push(zid); @@ -805,7 +805,7 @@ impl Network { } pub(super) fn remove_link(&mut self, zid: &ZenohId) -> Vec<(NodeIndex, Node)> { - log::trace!("{} remove_link {}", self.name, zid); + tracing::trace!("{} remove_link {}", self.name, zid); self.links.retain(|_, link| link.zid != *zid); 
self.graph[self.idx].links.retain(|link| *link != *zid); @@ -876,7 +876,7 @@ impl Network { let mut removed = vec![]; for idx in self.graph.node_indices().collect::>() { if !visit_map.is_visited(&idx) { - log::debug!("Remove node {}", &self.graph[idx].zid); + tracing::debug!("Remove node {}", &self.graph[idx].zid); removed.push((idx, self.graph.remove_node(idx).unwrap())); } } @@ -903,7 +903,7 @@ impl Network { self.distances = paths.distances; } - if log::log_enabled!(log::Level::Debug) { + if tracing::enabled!(tracing::Level::DEBUG) { let ps: Vec> = paths .predecessors .iter() @@ -918,7 +918,7 @@ impl Network { }) }) .collect(); - log::debug!("Tree {} {:?}", self.graph[*tree_root_idx].zid, ps); + tracing::debug!("Tree {} {:?}", self.graph[*tree_root_idx].zid, ps); } self.trees[tree_root_idx.index()].parent = paths.predecessors[self.idx.index()]; diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index 0c05c39c7b..02b86de6b0 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -53,7 +53,7 @@ fn send_sourced_subscription_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let key_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send subscription {} on {}", res.expr(), someface); + tracing::debug!("Send subscription {} on {}", res.expr(), someface); someface.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -72,7 +72,7 @@ fn send_sourced_subscription_to_net_childs( )); } } - None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), + None => tracing::trace!("Unable to find face for zid {}", net.graph[*child].zid), } } } @@ -145,7 +145,7 @@ fn propagate_sourced_subscription( tree_sid.index() as NodeId, ); } else { - log::trace!( + tracing::trace!( "Propagating sub {}: tree for node {} sid:{} not yet ready", res.expr(), tree_sid.index(), @@ -153,7 +153,7 @@ fn 
propagate_sourced_subscription( ); } } - None => log::error!( + None => tracing::error!( "Error propagating sub {}: cannot get index of {}!", res.expr(), source @@ -171,7 +171,7 @@ fn register_peer_subscription( if !res_hat!(res).peer_subs.contains(&peer) { // Register peer subscription { - log::debug!("Register peer subscription {} (peer: {})", res.expr(), peer); + tracing::debug!("Register peer subscription {} (peer: {})", res.expr(), peer); res_hat_mut!(res).peer_subs.insert(peer); hat_mut!(tables).peer_subs.insert(res.clone()); } @@ -205,7 +205,7 @@ fn register_client_subscription( // Register subscription { let res = get_mut_unchecked(res); - log::debug!("Register subscription {} for {}", res.expr(), face); + tracing::debug!("Register subscription {} for {}", res.expr(), face); match res.session_ctxs.get_mut(&face.id) { Some(ctx) => match &ctx.subs { Some(info) => { @@ -289,7 +289,7 @@ fn send_forget_sourced_subscription_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let wire_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send forget subscription {} on {}", res.expr(), someface); + tracing::debug!("Send forget subscription {} on {}", res.expr(), someface); someface.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -307,7 +307,7 @@ fn send_forget_sourced_subscription_to_net_childs( )); } } - None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), + None => tracing::trace!("Unable to find face for zid {}", net.graph[*child].zid), } } } @@ -353,7 +353,7 @@ fn propagate_forget_sourced_subscription( Some(tree_sid.index() as NodeId), ); } else { - log::trace!( + tracing::trace!( "Propagating forget sub {}: tree for node {} sid:{} not yet ready", res.expr(), tree_sid.index(), @@ -361,7 +361,7 @@ fn propagate_forget_sourced_subscription( ); } } - None => log::error!( + None => tracing::error!( "Error propagating forget sub {}: cannot get index of {}!", res.expr(), source @@ -370,7 +370,7 
@@ fn propagate_forget_sourced_subscription( } fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { - log::debug!( + tracing::debug!( "Unregister peer subscription {} (peer: {})", res.expr(), peer @@ -414,7 +414,7 @@ pub(super) fn undeclare_client_subscription( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister client subscription {} for {}", res.expr(), face); + tracing::debug!("Unregister client subscription {} for {}", res.expr(), face); if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { get_mut_unchecked(ctx).subs = None; } @@ -567,7 +567,7 @@ fn insert_faces_for_subs( } } } else { - log::trace!("Tree for node sid:{} not yet ready", source); + tracing::trace!("Tree for node sid:{} not yet ready", source); } } @@ -621,7 +621,7 @@ impl HatPubSubTrait for HatCode { if key_expr.ends_with('/') { return Arc::new(route); } - log::trace!( + tracing::trace!( "compute_data_route({}, {:?}, {:?})", key_expr, source, @@ -630,7 +630,7 @@ impl HatPubSubTrait for HatCode { let key_expr = match OwnedKeyExpr::try_from(key_expr) { Ok(ke) => ke, Err(e) => { - log::warn!("Invalid KE reached the system: {}", e); + tracing::warn!("Invalid KE reached the system: {}", e); return Arc::new(route); } }; diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index b965a6f58b..ba9b7bc02d 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -133,7 +133,7 @@ fn send_sourced_queryable_to_net_childs( if src_face.is_none() || someface.id != src_face.as_ref().unwrap().id { let key_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send queryable {} on {}", res.expr(), someface); + tracing::debug!("Send queryable {} on {}", res.expr(), someface); someface.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -152,7 +152,7 @@ fn send_sourced_queryable_to_net_childs( )); } } - 
None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), + None => tracing::trace!("Unable to find face for zid {}", net.graph[*child].zid), } } } @@ -213,7 +213,7 @@ fn propagate_sourced_queryable( tree_sid.index() as NodeId, ); } else { - log::trace!( + tracing::trace!( "Propagating qabl {}: tree for node {} sid:{} not yet ready", res.expr(), tree_sid.index(), @@ -221,7 +221,7 @@ fn propagate_sourced_queryable( ); } } - None => log::error!( + None => tracing::error!( "Error propagating qabl {}: cannot get index of {}!", res.expr(), source @@ -240,7 +240,7 @@ fn register_peer_queryable( if current_info.is_none() || current_info.unwrap() != qabl_info { // Register peer queryable { - log::debug!("Register peer queryable {} (peer: {})", res.expr(), peer,); + tracing::debug!("Register peer queryable {} (peer: {})", res.expr(), peer,); res_hat_mut!(res).peer_qabls.insert(peer, *qabl_info); hat_mut!(tables).peer_qabls.insert(res.clone()); } @@ -275,7 +275,7 @@ fn register_client_queryable( // Register queryable { let res = get_mut_unchecked(res); - log::debug!("Register queryable {} (face: {})", res.expr(), face,); + tracing::debug!("Register queryable {} (face: {})", res.expr(), face,); get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { Arc::new(SessionContext { face: face.clone(), @@ -345,7 +345,7 @@ fn send_forget_sourced_queryable_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let wire_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send forget queryable {} on {}", res.expr(), someface); + tracing::debug!("Send forget queryable {} on {}", res.expr(), someface); someface.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -363,7 +363,7 @@ fn send_forget_sourced_queryable_to_net_childs( )); } } - None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), + None => tracing::trace!("Unable to find face for zid {}", net.graph[*child].zid), } } } @@ -410,7 
+410,7 @@ fn propagate_forget_sourced_queryable( tree_sid.index() as NodeId, ); } else { - log::trace!( + tracing::trace!( "Propagating forget qabl {}: tree for node {} sid:{} not yet ready", res.expr(), tree_sid.index(), @@ -418,7 +418,7 @@ fn propagate_forget_sourced_queryable( ); } } - None => log::error!( + None => tracing::error!( "Error propagating forget qabl {}: cannot get index of {}!", res.expr(), source @@ -427,7 +427,7 @@ fn propagate_forget_sourced_queryable( } fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { - log::debug!("Unregister peer queryable {} (peer: {})", res.expr(), peer,); + tracing::debug!("Unregister peer queryable {} (peer: {})", res.expr(), peer,); res_hat_mut!(res).peer_qabls.remove(peer); if res_hat!(res).peer_qabls.is_empty() { @@ -467,7 +467,7 @@ pub(super) fn undeclare_client_queryable( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister client queryable {} for {}", res.expr(), face); + tracing::debug!("Unregister client queryable {} for {}", res.expr(), face); if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { get_mut_unchecked(ctx).qabl = None; if ctx.qabl.is_none() { @@ -628,7 +628,7 @@ fn insert_target_for_qabls( } } } else { - log::trace!("Tree for node sid:{} not yet ready", source); + tracing::trace!("Tree for node sid:{} not yet ready", source); } } @@ -686,7 +686,7 @@ impl HatQueriesTrait for HatCode { if key_expr.ends_with('/') { return EMPTY_ROUTE.clone(); } - log::trace!( + tracing::trace!( "compute_query_route({}, {:?}, {:?})", key_expr, source, @@ -695,7 +695,7 @@ impl HatQueriesTrait for HatCode { let key_expr = match OwnedKeyExpr::try_from(key_expr) { Ok(ke) => ke, Err(e) => { - log::warn!("Invalid KE reached the system: {}", e); + tracing::warn!("Invalid KE reached the system: {}", e); return EMPTY_ROUTE.clone(); } }; @@ -769,7 +769,7 @@ impl HatQueriesTrait for HatCode { let key_expr = match OwnedKeyExpr::try_from(key_expr) { Ok(ke) => ke, Err(e) 
=> { - log::warn!("Invalid KE reached the system: {}", e); + tracing::warn!("Invalid KE reached the system: {}", e); return result; } }; diff --git a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs index bbe7bd9024..a5b72a73eb 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs @@ -108,7 +108,7 @@ impl Network { autoconnect: WhatAmIMatcher, ) -> Self { let mut graph = petgraph::stable_graph::StableGraph::default(); - log::debug!("{} Add node (self) {}", name, zid); + tracing::debug!("{} Add node (self) {}", name, zid); let idx = graph.add_node(Node { zid, whatami: Some(runtime.whatami()), @@ -169,7 +169,7 @@ impl Network { if let Some(idx2) = self.get_idx(zid) { Some(idx2.index().try_into().unwrap()) } else { - log::error!( + tracing::error!( "{} Internal error building link state: cannot get index of {}", self.name, zid @@ -222,12 +222,12 @@ impl Network { fn send_on_link(&self, idxs: Vec<(NodeIndex, Details)>, transport: &TransportUnicast) { if let Ok(msg) = self.make_msg(idxs) { - log::trace!("{} Send to {:?} {:?}", self.name, transport.get_zid(), msg); + tracing::trace!("{} Send to {:?} {:?}", self.name, transport.get_zid(), msg); if let Err(e) = transport.schedule(msg) { - log::debug!("{} Error sending LinkStateList: {}", self.name, e); + tracing::debug!("{} Error sending LinkStateList: {}", self.name, e); } } else { - log::error!("Failed to encode Linkstate message"); + tracing::error!("Failed to encode Linkstate message"); } } @@ -238,14 +238,14 @@ impl Network { if let Ok(msg) = self.make_msg(idxs) { for link in self.links.values() { if parameters(link) { - log::trace!("{} Send to {} {:?}", self.name, link.zid, msg); + tracing::trace!("{} Send to {} {:?}", self.name, link.zid, msg); if let Err(e) = link.transport.schedule(msg.clone()) { - log::debug!("{} Error sending LinkStateList: {}", self.name, e); + tracing::debug!("{} Error sending LinkStateList: {}", 
self.name, e); } } } } else { - log::error!("Failed to encode Linkstate message"); + tracing::error!("Failed to encode Linkstate message"); } } @@ -266,7 +266,7 @@ impl Network { } pub(super) fn link_states(&mut self, link_states: Vec, src: ZenohId) { - log::trace!("{} Received from {} raw: {:?}", self.name, src, link_states); + tracing::trace!("{} Received from {} raw: {:?}", self.name, src, link_states); let strong_runtime = self.runtime.upgrade().unwrap(); let graph = &self.graph; @@ -275,7 +275,7 @@ impl Network { let src_link = match links.values_mut().find(|link| link.zid == src) { Some(link) => link, None => { - log::error!( + tracing::error!( "{} Received LinkStateList from unknown link {}", self.name, src @@ -310,7 +310,7 @@ impl Network { link_state.links, )), None => { - log::error!( + tracing::error!( "Received LinkState from {} with unknown node mapping {}", src, link_state.psid @@ -333,7 +333,7 @@ impl Network { if let Some(zid) = src_link.get_zid(l) { Some(*zid) } else { - log::error!( + tracing::error!( "{} Received LinkState from {} with unknown link mapping {}", self.name, src, @@ -347,14 +347,14 @@ impl Network { }) .collect::>(); - // log::trace!( + // tracing::trace!( // "{} Received from {} mapped: {:?}", // self.name, // src, // link_states // ); for link_state in &link_states { - log::trace!( + tracing::trace!( "{} Received from {} mapped: {:?}", self.name, src, @@ -448,7 +448,7 @@ impl Network { let (idx, new) = match self.get_idx(&zid) { Some(idx) => (idx, false), None => { - log::debug!("{} Add node (link) {}", self.name, zid); + tracing::debug!("{} Add node (link) {}", self.name, zid); ( self.add_node(Node { zid, @@ -536,7 +536,7 @@ impl Network { } pub(super) fn remove_link(&mut self, zid: &ZenohId) -> Vec<(NodeIndex, Node)> { - log::trace!("{} remove_link {}", self.name, zid); + tracing::trace!("{} remove_link {}", self.name, zid); self.links.retain(|_, link| link.zid != *zid); self.graph[self.idx].links.retain(|link| *link != *zid); 
diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 8dc4f15ada..ea3e1e9550 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -320,7 +320,7 @@ impl HatBaseTrait for HatCode { hat_mut!(tables).gossip.as_mut().unwrap().remove_link(&zid); }; } - (_, _) => log::error!("Closed transport in session closing!"), + (_, _) => tracing::error!("Closed transport in session closing!"), } Ok(()) } diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index 8b670727dc..432b8e137e 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -89,7 +89,7 @@ fn register_client_subscription( // Register subscription { let res = get_mut_unchecked(res); - log::debug!("Register subscription {} for {}", res.expr(), face); + tracing::debug!("Register subscription {} for {}", res.expr(), face); match res.session_ctxs.get_mut(&face.id) { Some(ctx) => match &ctx.subs { Some(info) => { @@ -194,7 +194,7 @@ pub(super) fn undeclare_client_subscription( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister client subscription {} for {}", res.expr(), face); + tracing::debug!("Unregister client subscription {} for {}", res.expr(), face); if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { get_mut_unchecked(ctx).subs = None; } @@ -297,7 +297,7 @@ impl HatPubSubTrait for HatCode { if key_expr.ends_with('/') { return Arc::new(route); } - log::trace!( + tracing::trace!( "compute_data_route({}, {:?}, {:?})", key_expr, source, @@ -306,7 +306,7 @@ impl HatPubSubTrait for HatCode { let key_expr = match OwnedKeyExpr::try_from(key_expr) { Ok(ke) => ke, Err(e) => { - log::warn!("Invalid KE reached the system: {}", e); + tracing::warn!("Invalid KE reached the system: {}", e); return Arc::new(route); } }; diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs 
b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 95d357fe11..0937e22a65 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -120,7 +120,7 @@ fn register_client_queryable( // Register queryable { let res = get_mut_unchecked(res); - log::debug!("Register queryable {} (face: {})", res.expr(), face,); + tracing::debug!("Register queryable {} (face: {})", res.expr(), face,); get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { Arc::new(SessionContext { face: face.clone(), @@ -189,7 +189,7 @@ pub(super) fn undeclare_client_queryable( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister client queryable {} for {}", res.expr(), face); + tracing::debug!("Unregister client queryable {} for {}", res.expr(), face); if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { get_mut_unchecked(ctx).qabl = None; if ctx.qabl.is_none() { @@ -294,7 +294,7 @@ impl HatQueriesTrait for HatCode { if key_expr.ends_with('/') { return EMPTY_ROUTE.clone(); } - log::trace!( + tracing::trace!( "compute_query_route({}, {:?}, {:?})", key_expr, source, @@ -303,7 +303,7 @@ impl HatQueriesTrait for HatCode { let key_expr = match OwnedKeyExpr::try_from(key_expr) { Ok(ke) => ke, Err(e) => { - log::warn!("Invalid KE reached the system: {}", e); + tracing::warn!("Invalid KE reached the system: {}", e); return EMPTY_ROUTE.clone(); } }; @@ -361,7 +361,7 @@ impl HatQueriesTrait for HatCode { let key_expr = match OwnedKeyExpr::try_from(key_expr) { Ok(ke) => ke, Err(e) => { - log::warn!("Invalid KE reached the system: {}", e); + tracing::warn!("Invalid KE reached the system: {}", e); return result; } }; diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 030b8da4b4..4aaec7bf3b 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -246,14 +246,14 @@ impl HatTables { .as_ref() .map(|net| { let links = 
net.get_links(peer1); - log::debug!("failover_brokering {} {} ({:?})", peer1, peer2, links); + tracing::debug!("failover_brokering {} {} ({:?})", peer1, peer2, links); HatTables::failover_brokering_to(links, peer2) }) .unwrap_or(false) } fn schedule_compute_trees(&mut self, tables_ref: Arc, net_type: WhatAmI) { - log::trace!("Schedule computations"); + tracing::trace!("Schedule computations"); if (net_type == WhatAmI::Router && self.routers_trees_task.is_none()) || (net_type == WhatAmI::Peer && self.peers_trees_task.is_none()) { @@ -266,7 +266,7 @@ impl HatTables { .await; let mut tables = zwrite!(tables_ref.tables); - log::trace!("Compute trees"); + tracing::trace!("Compute trees"); let new_childs = match net_type { WhatAmI::Router => hat_mut!(tables) .routers_net @@ -276,11 +276,11 @@ impl HatTables { _ => hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(), }; - log::trace!("Compute routes"); + tracing::trace!("Compute routes"); pubsub::pubsub_tree_change(&mut tables, &new_childs, net_type); queries::queries_tree_change(&mut tables, &new_childs, net_type); - log::trace!("Computations completed"); + tracing::trace!("Computations completed"); match net_type { WhatAmI::Router => hat_mut!(tables).routers_trees_task = None, _ => hat_mut!(tables).peers_trees_task = None, @@ -701,7 +701,7 @@ impl HatBaseTrait for HatCode { _ => (), }; } - (_, _) => log::error!("Closed transport in session closing!"), + (_, _) => tracing::error!("Closed transport in session closing!"), } Ok(()) } @@ -819,7 +819,7 @@ fn get_router(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option< Some(link) => match link.get_zid(&(nodeid as u64)) { Some(router) => Some(*router), None => { - log::error!( + tracing::error!( "Received router declaration with unknown routing context id {}", nodeid ); @@ -827,7 +827,7 @@ fn get_router(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option< } }, None => { - log::error!( + tracing::error!( "Could not find corresponding link in routers network for 
{}", face ); @@ -846,7 +846,7 @@ fn get_peer(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option match link.get_zid(&(nodeid as u64)) { Some(router) => Some(*router), None => { - log::error!( + tracing::error!( "Received peer declaration with unknown routing context id {}", nodeid ); @@ -854,7 +854,7 @@ fn get_peer(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { - log::error!( + tracing::error!( "Could not find corresponding link in peers network for {}", face ); diff --git a/zenoh/src/net/routing/hat/router/network.rs b/zenoh/src/net/routing/hat/router/network.rs index 7ff42f1dc3..727eb6763e 100644 --- a/zenoh/src/net/routing/hat/router/network.rs +++ b/zenoh/src/net/routing/hat/router/network.rs @@ -131,7 +131,7 @@ impl Network { autoconnect: WhatAmIMatcher, ) -> Self { let mut graph = petgraph::stable_graph::StableGraph::default(); - log::debug!("{} Add node (self) {}", name, zid); + tracing::debug!("{} Add node (self) {}", name, zid); let idx = graph.add_node(Node { zid, whatami: Some(runtime.whatami()), @@ -194,7 +194,7 @@ impl Network { Some(link) => match link.get_local_psid(&(context as u64)) { Some(psid) => (*psid).try_into().unwrap_or(0), None => { - log::error!( + tracing::error!( "Cannot find local psid for context {} on link {}", context, link_id @@ -203,7 +203,7 @@ impl Network { } }, None => { - log::error!("Cannot find link {}", link_id); + tracing::error!("Cannot find link {}", link_id); 0 } } @@ -229,7 +229,7 @@ impl Network { if let Some(idx2) = self.get_idx(zid) { Some(idx2.index().try_into().unwrap()) } else { - log::error!( + tracing::error!( "{} Internal error building link state: cannot get index of {}", self.name, zid @@ -282,12 +282,12 @@ impl Network { fn send_on_link(&self, idxs: Vec<(NodeIndex, Details)>, transport: &TransportUnicast) { if let Ok(msg) = self.make_msg(idxs) { - log::trace!("{} Send to {:?} {:?}", self.name, transport.get_zid(), msg); + tracing::trace!("{} Send to {:?} {:?}", self.name, transport.get_zid(), 
msg); if let Err(e) = transport.schedule(msg) { - log::debug!("{} Error sending LinkStateList: {}", self.name, e); + tracing::debug!("{} Error sending LinkStateList: {}", self.name, e); } } else { - log::error!("Failed to encode Linkstate message"); + tracing::error!("Failed to encode Linkstate message"); } } @@ -298,14 +298,14 @@ impl Network { if let Ok(msg) = self.make_msg(idxs) { for link in self.links.values() { if parameters(link) { - log::trace!("{} Send to {} {:?}", self.name, link.zid, msg); + tracing::trace!("{} Send to {} {:?}", self.name, link.zid, msg); if let Err(e) = link.transport.schedule(msg.clone()) { - log::debug!("{} Error sending LinkStateList: {}", self.name, e); + tracing::debug!("{} Error sending LinkStateList: {}", self.name, e); } } } } else { - log::error!("Failed to encode Linkstate message"); + tracing::error!("Failed to encode Linkstate message"); } } @@ -340,7 +340,7 @@ impl Network { } pub(super) fn link_states(&mut self, link_states: Vec, src: ZenohId) -> Changes { - log::trace!("{} Received from {} raw: {:?}", self.name, src, link_states); + tracing::trace!("{} Received from {} raw: {:?}", self.name, src, link_states); let graph = &self.graph; let links = &mut self.links; @@ -348,7 +348,7 @@ impl Network { let src_link = match links.values_mut().find(|link| link.zid == src) { Some(link) => link, None => { - log::error!( + tracing::error!( "{} Received LinkStateList from unknown link {}", self.name, src @@ -386,7 +386,7 @@ impl Network { link_state.links, )), None => { - log::error!( + tracing::error!( "Received LinkState from {} with unknown node mapping {}", src, link_state.psid @@ -409,7 +409,7 @@ impl Network { if let Some(zid) = src_link.get_zid(l) { Some(*zid) } else { - log::error!( + tracing::error!( "{} Received LinkState from {} with unknown link mapping {}", self.name, src, @@ -423,14 +423,14 @@ impl Network { }) .collect::>(); - // log::trace!( + // tracing::trace!( // "{} Received from {} mapped: {:?}", // self.name, 
// src, // link_states // ); for link_state in &link_states { - log::trace!( + tracing::trace!( "{} Received from {} mapped: {:?}", self.name, src, @@ -546,7 +546,7 @@ impl Network { sn, links: links.clone(), }; - log::debug!("{} Add node (state) {}", self.name, zid); + tracing::debug!("{} Add node (state) {}", self.name, zid); let idx = self.add_node(node); Some((links, idx, true)) } @@ -560,7 +560,7 @@ impl Network { for link in links { if let Some(idx2) = self.get_idx(link) { if self.graph[idx2].links.contains(&self.graph[*idx1].zid) { - log::trace!( + tracing::trace!( "{} Update edge (state) {} {}", self.name, self.graph[*idx1].zid, @@ -576,7 +576,7 @@ impl Network { sn: 0, links: vec![], }; - log::debug!("{} Add node (reintroduced) {}", self.name, link.clone()); + tracing::debug!("{} Add node (reintroduced) {}", self.name, link.clone()); let idx = self.add_node(node); reintroduced_nodes.push((vec![], idx, true)); } @@ -588,7 +588,7 @@ impl Network { } for (eidx, idx2) in edges { if !links.contains(&self.graph[idx2].zid) { - log::trace!( + tracing::trace!( "{} Remove edge (state) {} {}", self.name, self.graph[*idx1].zid, @@ -710,7 +710,7 @@ impl Network { let (idx, new) = match self.get_idx(&zid) { Some(idx) => (idx, false), None => { - log::debug!("{} Add node (link) {}", self.name, zid); + tracing::debug!("{} Add node (link) {}", self.name, zid); ( self.add_node(Node { zid, @@ -724,7 +724,7 @@ impl Network { } }; if self.full_linkstate && self.graph[idx].links.contains(&self.graph[self.idx].zid) { - log::trace!("Update edge (link) {} {}", self.graph[self.idx].zid, zid); + tracing::trace!("Update edge (link) {} {}", self.graph[self.idx].zid, zid); self.update_edge(self.idx, idx); } self.graph[self.idx].links.push(zid); @@ -806,7 +806,7 @@ impl Network { } pub(super) fn remove_link(&mut self, zid: &ZenohId) -> Vec<(NodeIndex, Node)> { - log::trace!("{} remove_link {}", self.name, zid); + tracing::trace!("{} remove_link {}", self.name, zid); 
self.links.retain(|_, link| link.zid != *zid); self.graph[self.idx].links.retain(|link| *link != *zid); @@ -877,7 +877,7 @@ impl Network { let mut removed = vec![]; for idx in self.graph.node_indices().collect::>() { if !visit_map.is_visited(&idx) { - log::debug!("Remove node {}", &self.graph[idx].zid); + tracing::debug!("Remove node {}", &self.graph[idx].zid); removed.push((idx, self.graph.remove_node(idx).unwrap())); } } @@ -904,7 +904,7 @@ impl Network { self.distances = paths.distances; } - if log::log_enabled!(log::Level::Debug) { + if tracing::enabled!(tracing::Level::DEBUG) { let ps: Vec> = paths .predecessors .iter() @@ -919,7 +919,7 @@ impl Network { }) }) .collect(); - log::debug!("Tree {} {:?}", self.graph[*tree_root_idx].zid, ps); + tracing::debug!("Tree {} {:?}", self.graph[*tree_root_idx].zid, ps); } self.trees[tree_root_idx.index()].parent = paths.predecessors[self.idx.index()]; diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index d840d85665..6bf91a0605 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -53,7 +53,7 @@ fn send_sourced_subscription_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let key_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send subscription {} on {}", res.expr(), someface); + tracing::debug!("Send subscription {} on {}", res.expr(), someface); someface.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -72,7 +72,7 @@ fn send_sourced_subscription_to_net_childs( )); } } - None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), + None => tracing::trace!("Unable to find face for zid {}", net.graph[*child].zid), } } } @@ -163,7 +163,7 @@ fn propagate_sourced_subscription( tree_sid.index() as NodeId, ); } else { - log::trace!( + tracing::trace!( "Propagating sub {}: tree for node {} sid:{} not yet ready", res.expr(), tree_sid.index(), @@ -171,7 
+171,7 @@ fn propagate_sourced_subscription( ); } } - None => log::error!( + None => tracing::error!( "Error propagating sub {}: cannot get index of {}!", res.expr(), source @@ -189,7 +189,7 @@ fn register_router_subscription( if !res_hat!(res).router_subs.contains(&router) { // Register router subscription { - log::debug!( + tracing::debug!( "Register router subscription {} (router: {})", res.expr(), router @@ -230,7 +230,7 @@ fn register_peer_subscription( if !res_hat!(res).peer_subs.contains(&peer) { // Register peer subscription { - log::debug!("Register peer subscription {} (peer: {})", res.expr(), peer); + tracing::debug!("Register peer subscription {} (peer: {})", res.expr(), peer); res_hat_mut!(res).peer_subs.insert(peer); hat_mut!(tables).peer_subs.insert(res.clone()); } @@ -263,7 +263,7 @@ fn register_client_subscription( // Register subscription { let res = get_mut_unchecked(res); - log::debug!("Register subscription {} for {}", res.expr(), face); + tracing::debug!("Register subscription {} for {}", res.expr(), face); match res.session_ctxs.get_mut(&face.id) { Some(ctx) => match &ctx.subs { Some(info) => { @@ -356,7 +356,7 @@ fn send_forget_sourced_subscription_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let wire_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send forget subscription {} on {}", res.expr(), someface); + tracing::debug!("Send forget subscription {} on {}", res.expr(), someface); someface.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -374,7 +374,7 @@ fn send_forget_sourced_subscription_to_net_childs( )); } } - None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), + None => tracing::trace!("Unable to find face for zid {}", net.graph[*child].zid), } } } @@ -462,7 +462,7 @@ fn propagate_forget_sourced_subscription( Some(tree_sid.index() as NodeId), ); } else { - log::trace!( + tracing::trace!( "Propagating forget sub {}: tree for node {} sid:{} not yet 
ready", res.expr(), tree_sid.index(), @@ -470,7 +470,7 @@ fn propagate_forget_sourced_subscription( ); } } - None => log::error!( + None => tracing::error!( "Error propagating forget sub {}: cannot get index of {}!", res.expr(), source @@ -479,7 +479,7 @@ fn propagate_forget_sourced_subscription( } fn unregister_router_subscription(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { - log::debug!( + tracing::debug!( "Unregister router subscription {} (router: {})", res.expr(), router @@ -522,7 +522,7 @@ fn forget_router_subscription( } fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { - log::debug!( + tracing::debug!( "Unregister peer subscription {} (peer: {})", res.expr(), peer @@ -568,7 +568,7 @@ pub(super) fn undeclare_client_subscription( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister client subscription {} for {}", res.expr(), face); + tracing::debug!("Unregister client subscription {} for {}", res.expr(), face); if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { get_mut_unchecked(ctx).subs = None; } @@ -867,7 +867,7 @@ fn insert_faces_for_subs( } } } else { - log::trace!("Tree for node sid:{} not yet ready", source); + tracing::trace!("Tree for node sid:{} not yet ready", source); } } @@ -941,7 +941,7 @@ impl HatPubSubTrait for HatCode { if key_expr.ends_with('/') { return Arc::new(route); } - log::trace!( + tracing::trace!( "compute_data_route({}, {:?}, {:?})", key_expr, source, @@ -950,7 +950,7 @@ impl HatPubSubTrait for HatCode { let key_expr = match OwnedKeyExpr::try_from(key_expr) { Ok(ke) => ke, Err(e) => { - log::warn!("Invalid KE reached the system: {}", e); + tracing::warn!("Invalid KE reached the system: {}", e); return Arc::new(route); } }; diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 12338eb339..2451b8c2b6 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ 
b/zenoh/src/net/routing/hat/router/queries.rs @@ -204,7 +204,7 @@ fn send_sourced_queryable_to_net_childs( if src_face.is_none() || someface.id != src_face.as_ref().unwrap().id { let key_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send queryable {} on {}", res.expr(), someface); + tracing::debug!("Send queryable {} on {}", res.expr(), someface); someface.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -223,7 +223,7 @@ fn send_sourced_queryable_to_net_childs( )); } } - None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), + None => tracing::trace!("Unable to find face for zid {}", net.graph[*child].zid), } } } @@ -295,7 +295,7 @@ fn propagate_sourced_queryable( tree_sid.index() as NodeId, ); } else { - log::trace!( + tracing::trace!( "Propagating qabl {}: tree for node {} sid:{} not yet ready", res.expr(), tree_sid.index(), @@ -303,7 +303,7 @@ fn propagate_sourced_queryable( ); } } - None => log::error!( + None => tracing::error!( "Error propagating qabl {}: cannot get index of {}!", res.expr(), source @@ -322,7 +322,7 @@ fn register_router_queryable( if current_info.is_none() || current_info.unwrap() != qabl_info { // Register router queryable { - log::debug!( + tracing::debug!( "Register router queryable {} (router: {})", res.expr(), router, @@ -375,7 +375,7 @@ fn register_peer_queryable( if current_info.is_none() || current_info.unwrap() != qabl_info { // Register peer queryable { - log::debug!("Register peer queryable {} (peer: {})", res.expr(), peer,); + tracing::debug!("Register peer queryable {} (peer: {})", res.expr(), peer,); res_hat_mut!(res).peer_qabls.insert(peer, *qabl_info); hat_mut!(tables).peer_qabls.insert(res.clone()); } @@ -408,7 +408,7 @@ fn register_client_queryable( // Register queryable { let res = get_mut_unchecked(res); - log::debug!("Register queryable {} (face: {})", res.expr(), face,); + tracing::debug!("Register queryable {} (face: {})", res.expr(), face,); 
get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { Arc::new(SessionContext { face: face.clone(), @@ -486,7 +486,7 @@ fn send_forget_sourced_queryable_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let wire_expr = Resource::decl_key(res, &mut someface); - log::debug!("Send forget queryable {} on {}", res.expr(), someface); + tracing::debug!("Send forget queryable {} on {}", res.expr(), someface); someface.primitives.send_declare(RoutingContext::with_expr( Declare { @@ -504,7 +504,7 @@ fn send_forget_sourced_queryable_to_net_childs( )); } } - None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), + None => tracing::trace!("Unable to find face for zid {}", net.graph[*child].zid), } } } @@ -593,7 +593,7 @@ fn propagate_forget_sourced_queryable( tree_sid.index() as NodeId, ); } else { - log::trace!( + tracing::trace!( "Propagating forget qabl {}: tree for node {} sid:{} not yet ready", res.expr(), tree_sid.index(), @@ -601,7 +601,7 @@ fn propagate_forget_sourced_queryable( ); } } - None => log::error!( + None => tracing::error!( "Error propagating forget qabl {}: cannot get index of {}!", res.expr(), source @@ -610,7 +610,7 @@ fn propagate_forget_sourced_queryable( } fn unregister_router_queryable(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { - log::debug!( + tracing::debug!( "Unregister router queryable {} (router: {})", res.expr(), router, @@ -653,7 +653,7 @@ fn forget_router_queryable( } fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { - log::debug!("Unregister peer queryable {} (peer: {})", res.expr(), peer,); + tracing::debug!("Unregister peer queryable {} (peer: {})", res.expr(), peer,); res_hat_mut!(res).peer_qabls.remove(peer); if res_hat!(res).peer_qabls.is_empty() { @@ -699,7 +699,7 @@ pub(super) fn undeclare_client_queryable( face: &mut Arc, res: &mut Arc, ) { - log::debug!("Unregister client queryable {} for {}", res.expr(), face); + 
tracing::debug!("Unregister client queryable {} for {}", res.expr(), face); if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { get_mut_unchecked(ctx).qabl = None; if ctx.qabl.is_none() { @@ -1011,7 +1011,7 @@ fn insert_target_for_qabls( } } } else { - log::trace!("Tree for node sid:{} not yet ready", source); + tracing::trace!("Tree for node sid:{} not yet ready", source); } } @@ -1089,7 +1089,7 @@ impl HatQueriesTrait for HatCode { if key_expr.ends_with('/') { return EMPTY_ROUTE.clone(); } - log::trace!( + tracing::trace!( "compute_query_route({}, {:?}, {:?})", key_expr, source, @@ -1098,7 +1098,7 @@ impl HatQueriesTrait for HatCode { let key_expr = match OwnedKeyExpr::try_from(key_expr) { Ok(ke) => ke, Err(e) => { - log::warn!("Invalid KE reached the system: {}", e); + tracing::warn!("Invalid KE reached the system: {}", e); return EMPTY_ROUTE.clone(); } }; @@ -1193,7 +1193,7 @@ impl HatQueriesTrait for HatCode { let key_expr = match OwnedKeyExpr::try_from(key_expr) { Ok(ke) => ke, Err(e) => { - log::warn!("Invalid KE reached the system: {}", e); + tracing::warn!("Invalid KE reached the system: {}", e); return result; } }; diff --git a/zenoh/src/net/routing/interceptor/downsampling.rs b/zenoh/src/net/routing/interceptor/downsampling.rs index 8cb3b18785..09d0d5235f 100644 --- a/zenoh/src/net/routing/interceptor/downsampling.rs +++ b/zenoh/src/net/routing/interceptor/downsampling.rs @@ -62,15 +62,15 @@ impl InterceptorFactoryTrait for DownsamplingInterceptorFactory { &self, transport: &TransportUnicast, ) -> (Option, Option) { - log::debug!("New downsampler transport unicast {:?}", transport); + tracing::debug!("New downsampler transport unicast {:?}", transport); if let Some(interfaces) = &self.interfaces { - log::debug!( + tracing::debug!( "New downsampler transport unicast config interfaces: {:?}", interfaces ); if let Ok(links) = transport.get_links() { for link in links { - log::debug!( + tracing::debug!( "New downsampler transport 
unicast link interfaces: {:?}", link.interfaces ); @@ -149,11 +149,11 @@ impl InterceptorTrait for DownsamplingInterceptor { return None; } } else { - log::debug!("unxpected cache ID {}", id); + tracing::debug!("unxpected cache ID {}", id); } } } else { - log::debug!("unxpected cache type {:?}", ctx.full_expr()); + tracing::debug!("unxpected cache type {:?}", ctx.full_expr()); } } } diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index 9dfc03ac7e..01d1dc5b35 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -107,7 +107,7 @@ impl InterceptorTrait for InterceptorsChain { match interceptor.intercept(ctx, cache) { Some(newctx) => ctx = newctx, None => { - log::trace!("Msg intercepted!"); + tracing::trace!("Msg intercepted!"); return None; } } @@ -170,7 +170,7 @@ impl InterceptorTrait for IngressMsgLogger { .and_then(|i| i.downcast_ref::().map(|e| e.as_str())) .or_else(|| ctx.full_expr()); - log::debug!( + tracing::debug!( "{} Recv {} Expr:{:?}", ctx.inface() .map(|f| f.to_string()) @@ -196,7 +196,7 @@ impl InterceptorTrait for EgressMsgLogger { let expr = cache .and_then(|i| i.downcast_ref::().map(|e| e.as_str())) .or_else(|| ctx.full_expr()); - log::debug!( + tracing::debug!( "{} Send {} Expr:{:?}", ctx.outface() .map(|f| f.to_string()) @@ -215,7 +215,7 @@ impl InterceptorFactoryTrait for LoggerInterceptor { &self, transport: &TransportUnicast, ) -> (Option, Option) { - log::debug!("New transport unicast {:?}", transport); + tracing::debug!("New transport unicast {:?}", transport); ( Some(Box::new(IngressMsgLogger {})), Some(Box::new(EgressMsgLogger {})), @@ -223,12 +223,12 @@ impl InterceptorFactoryTrait for LoggerInterceptor { } fn new_transport_multicast(&self, transport: &TransportMulticast) -> Option { - log::debug!("New transport multicast {:?}", transport); + tracing::debug!("New transport multicast {:?}", transport); Some(Box::new(EgressMsgLogger {})) } fn 
new_peer_multicast(&self, transport: &TransportMulticast) -> Option { - log::debug!("New peer multicast {:?}", transport); + tracing::debug!("New peer multicast {:?}", transport); Some(Box::new(IngressMsgLogger {})) } } diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index c80d3bdc09..87766f021b 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -95,7 +95,7 @@ impl Router { ) }) .clone(); - log::debug!("New {}", newface); + tracing::debug!("New {}", newface); let mut face = Face { tables: self.tables.clone(), @@ -148,7 +148,7 @@ impl Router { ) }) .clone(); - log::debug!("New {}", newface); + tracing::debug!("New {}", newface); let mut face = Face { tables: self.tables.clone(), diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 16e44f072c..56aa3e43b1 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -19,13 +19,13 @@ use crate::prelude::sync::{Sample, SyncResolve}; use crate::queryable::Query; use crate::queryable::QueryInner; use crate::value::Value; -use log::{error, trace}; use serde_json::json; use std::collections::HashMap; use std::convert::TryFrom; use std::convert::TryInto; use std::sync::Arc; use std::sync::Mutex; +use tracing::{error, trace}; use zenoh_buffers::buffer::SplitBuffer; use zenoh_config::{ConfigValidator, ValidatedMap, WhatAmI}; use zenoh_plugin_trait::{PluginControl, PluginStatus}; @@ -91,7 +91,7 @@ impl AdminSpace { ) -> ZResult<()> { let name = &config.name; let declared = if let Some(declared) = plugin_mgr.plugin_mut(name) { - log::warn!("Plugin `{}` was already declared", declared.name()); + tracing::warn!("Plugin `{}` was already declared", declared.name()); declared } else if let Some(paths) = &config.paths { plugin_mgr.declare_dynamic_plugin_by_paths(name, paths)? 
@@ -100,7 +100,7 @@ impl AdminSpace { }; let loaded = if let Some(loaded) = declared.loaded_mut() { - log::warn!( + tracing::warn!( "Plugin `{}` was already loaded from {}", loaded.name(), loaded.path() @@ -111,10 +111,10 @@ impl AdminSpace { }; if let Some(started) = loaded.started_mut() { - log::warn!("Plugin `{}` was already started", started.name()); + tracing::warn!("Plugin `{}` was already started", started.name()); } else { let started = loaded.start(start_args)?; - log::info!( + tracing::info!( "Successfully started plugin `{}` from {}", started.name(), started.path() @@ -250,7 +250,7 @@ impl AdminSpace { if plugin.required { panic!("Failed to load plugin `{}`: {}", plugin.name, e) } else { - log::error!( + tracing::error!( "Failed to load plugin `{}`: {}", plugin.name, e @@ -261,7 +261,7 @@ impl AdminSpace { } } } - log::info!("Running plugins: {:?}", &active_plugins) + tracing::info!("Running plugins: {:?}", &active_plugins) } }); @@ -313,7 +313,7 @@ impl AdminSpace { impl Primitives for AdminSpace { fn send_declare(&self, msg: Declare) { - log::trace!("Recv declare {:?}", msg); + tracing::trace!("Recv declare {:?}", msg); if let DeclareBody::DeclareKeyExpr(m) = msg.body { match self.key_expr_to_string(&m.wire_expr) { Ok(s) => { @@ -329,7 +329,7 @@ impl Primitives for AdminSpace { { let conf = self.context.runtime.state.config.lock(); if !conf.adminspace.permissions().write { - log::error!( + tracing::error!( "Received PUT on '{}' but adminspace.permissions.write=false in configuration", msg.wire_expr ); @@ -345,7 +345,7 @@ impl Primitives for AdminSpace { match msg.payload { PushBody::Put(put) => match std::str::from_utf8(&put.payload.contiguous()) { Ok(json) => { - log::trace!( + tracing::trace!( "Insert conf value /@/router/{}/config/{} : {}", &self.context.zid_str, key, @@ -365,13 +365,13 @@ impl Primitives for AdminSpace { ), }, PushBody::Del(_) => { - log::trace!( + tracing::trace!( "Deleting conf value /@/router/{}/config/{}", 
&self.context.zid_str, key ); if let Err(e) = self.context.runtime.state.config.remove(key) { - log::error!("Error deleting conf value {} : {}", msg.wire_expr, e) + tracing::error!("Error deleting conf value {} : {}", msg.wire_expr, e) } } } @@ -385,7 +385,7 @@ impl Primitives for AdminSpace { { let conf = self.context.runtime.state.config.lock(); if !conf.adminspace.permissions().read { - log::error!( + tracing::error!( "Received GET on '{}' but adminspace.permissions.read=false in configuration", msg.wire_expr ); @@ -401,7 +401,7 @@ impl Primitives for AdminSpace { let key_expr = match self.key_expr_to_string(&msg.wire_expr) { Ok(key_expr) => key_expr.into_owned(), Err(e) => { - log::error!("Unknown KeyExpr: {}", e); + tracing::error!("Unknown KeyExpr: {}", e); primitives.send_response_final(ResponseFinal { rid: msg.id, ext_qos: ext::QoSType::response_final_default(), @@ -560,7 +560,7 @@ fn router_data(context: &AdminContext, query: Query) { } } - log::trace!("AdminSpace router_data: {:?}", json); + tracing::trace!("AdminSpace router_data: {:?}", json); if let Err(e) = query .reply(Ok(Sample::new( reply_key, @@ -569,7 +569,7 @@ fn router_data(context: &AdminContext, query: Query) { ))) .res() { - log::error!("Error sending AdminSpace reply: {:?}", e); + tracing::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -603,7 +603,7 @@ zenoh_build{{version="{}"}} 1 ))) .res() { - log::error!("Error sending AdminSpace reply: {:?}", e); + tracing::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -628,7 +628,7 @@ fn routers_linkstate_data(context: &AdminContext, query: Query) { ))) .res() { - log::error!("Error sending AdminSpace reply: {:?}", e); + tracing::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -653,7 +653,7 @@ fn peers_linkstate_data(context: &AdminContext, query: Query) { ))) .res() { - log::error!("Error sending AdminSpace reply: {:?}", e); + tracing::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -668,7 +668,7 @@ fn 
subscribers_data(context: &AdminContext, query: Query) { .unwrap(); if query.key_expr().intersects(&key) { if let Err(e) = query.reply(Ok(Sample::new(key, Value::empty()))).res() { - log::error!("Error sending AdminSpace reply: {:?}", e); + tracing::error!("Error sending AdminSpace reply: {:?}", e); } } } @@ -685,7 +685,7 @@ fn queryables_data(context: &AdminContext, query: Query) { .unwrap(); if query.key_expr().intersects(&key) { if let Err(e) = query.reply(Ok(Sample::new(key, Value::empty()))).res() { - log::error!("Error sending AdminSpace reply: {:?}", e); + tracing::error!("Error sending AdminSpace reply: {:?}", e); } } } @@ -695,15 +695,15 @@ fn plugins_data(context: &AdminContext, query: Query) { let guard = zlock!(context.plugins_mgr); let root_key = format!("@/router/{}/plugins", &context.zid_str); let root_key = unsafe { keyexpr::from_str_unchecked(&root_key) }; - log::debug!("requested plugins status {:?}", query.key_expr()); + tracing::debug!("requested plugins status {:?}", query.key_expr()); if let [names, ..] = query.key_expr().strip_prefix(root_key)[..] 
{ let statuses = guard.plugins_status(names); for status in statuses { - log::debug!("plugin status: {:?}", status); + tracing::debug!("plugin status: {:?}", status); let key = root_key.join(status.name()).unwrap(); let status = serde_json::to_value(status).unwrap(); if let Err(e) = query.reply(Ok(Sample::new(key, Value::from(status)))).res() { - log::error!("Error sending AdminSpace reply: {:?}", e); + tracing::error!("Error sending AdminSpace reply: {:?}", e); } } } @@ -727,11 +727,11 @@ fn plugins_status(context: &AdminContext, query: Query) { ))) .res() { - log::error!("Error sending AdminSpace reply: {:?}", e); + tracing::error!("Error sending AdminSpace reply: {:?}", e); } } } else { - log::error!("Error: invalid plugin path key {}", plugin_path_key); + tracing::error!("Error: invalid plugin path key {}", plugin_path_key); } }); let matches_plugin = |plugin_status_space: &mut String| { @@ -754,23 +754,23 @@ fn plugins_status(context: &AdminContext, query: Query) { ))) .res() { - log::error!("Error sending AdminSpace reply: {:?}", e); + tracing::error!("Error sending AdminSpace reply: {:?}", e); } } else { - log::error!("Error: plugin {} replied with an invalid key", plugin_key); + tracing::error!("Error: plugin {} replied with an invalid key", plugin_key); } } } Ok(Err(e)) => { - log::error!("Plugin {} bailed from responding to {}: {}", plugin.name(), query.key_expr(), e) + tracing::error!("Plugin {} bailed from responding to {}: {}", plugin.name(), query.key_expr(), e) } Err(e) => match e .downcast_ref::() .map(|s| s.as_str()) .or_else(|| e.downcast_ref::<&str>().copied()) { - Some(e) => log::error!("Plugin {} panicked while responding to {}: {}", plugin.name(), query.key_expr(), e), - None => log::error!("Plugin {} panicked while responding to {}. 
The panic message couldn't be recovered.", plugin.name(), query.key_expr()), + Some(e) => tracing::error!("Plugin {} panicked while responding to {}: {}", plugin.name(), query.key_expr(), e), + None => tracing::error!("Plugin {} panicked while responding to {}. The panic message couldn't be recovered.", plugin.name(), query.key_expr()), }, } }); diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 9314186b2e..0f11e7cdb4 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -95,11 +95,11 @@ impl Runtime { } pub(crate) async fn init(config: Config) -> ZResult { - log::debug!("Zenoh Rust API {}", GIT_VERSION); + tracing::debug!("Zenoh Rust API {}", GIT_VERSION); let zid = *config.id(); - log::info!("Using PID: {}", zid); + tracing::info!("Using PID: {}", zid); let whatami = unwrap_or_default!(config.mode()); let metadata = config.metadata().clone(); @@ -151,7 +151,7 @@ impl Runtime { Some(event) => { if &*event == "connect/endpoints" { if let Err(e) = runtime2.update_peers().await { - log::error!("Error updating peers: {}", e); + tracing::error!("Error updating peers: {}", e); } } }, @@ -177,7 +177,7 @@ impl Runtime { } pub async fn close(&self) -> ZResult<()> { - log::trace!("Runtime::close())"); + tracing::trace!("Runtime::close())"); // TODO: Check this whether is able to terminate all spawned task by Runtime::spawn self.state .task_controller diff --git a/zenoh/src/net/runtime/orchestrator.rs b/zenoh/src/net/runtime/orchestrator.rs index 3f1026268a..687fa90649 100644 --- a/zenoh/src/net/runtime/orchestrator.rs +++ b/zenoh/src/net/runtime/orchestrator.rs @@ -65,7 +65,7 @@ impl Runtime { match peers.len() { 0 => { if scouting { - log::info!("Scouting for router ..."); + tracing::info!("Scouting for router ..."); let ifaces = Runtime::get_interfaces(&ifaces); if ifaces.is_empty() { bail!("Unable to find multicast interface!") @@ -235,7 +235,7 @@ impl Runtime { self.manager().get_locators(), peers ); - 
log::error!("{}", &e); + tracing::error!("{}", &e); Err(e.into()) } } @@ -254,7 +254,7 @@ impl Runtime { for peer in peers { let endpoint = peer.clone(); let retry_config = self.get_connect_retry_config(&endpoint); - log::debug!( + tracing::debug!( "Try to connect: {:?}: global timeout: {:?}, retry: {:?}", endpoint, self.get_global_connect_timeout(), @@ -280,7 +280,7 @@ impl Runtime { self.manager().get_locators(), peers ); - log::error!("{}", &e); + tracing::error!("{}", &e); Err(e.into()) } @@ -288,7 +288,7 @@ impl Runtime { for peer in peers { let endpoint = peer.clone(); let retry_config = self.get_connect_retry_config(&endpoint); - log::debug!( + tracing::debug!( "Try to connect: {:?}: global timeout: {:?}, retry: {:?}", endpoint, self.get_global_connect_timeout(), @@ -318,11 +318,11 @@ impl Runtime { { Ok(Ok(_)) => Ok(()), Ok(Err(e)) => { - log::warn!("Unable to connect to {}! {}", peer, e); + tracing::warn!("Unable to connect to {}! {}", peer, e); Err(e) } Err(e) => { - log::warn!("Unable to connect to {}! {}", peer, e); + tracing::warn!("Unable to connect to {}! 
{}", peer, e); Err(e.into()) } } @@ -414,7 +414,7 @@ impl Runtime { match res { Ok(_) => Ok(()), Err(e) => { - log::error!("Unable to open listeners: {}", e); + tracing::error!("Unable to open listeners: {}", e); Err(Box::new(e)) } } @@ -425,7 +425,7 @@ impl Runtime { for listener in listeners { let endpoint = listener.clone(); let retry_config = self.get_listen_retry_config(&endpoint); - log::debug!("Try to add listener: {:?}: {:?}", endpoint, retry_config); + tracing::debug!("Try to add listener: {:?}: {:?}", endpoint, retry_config); if retry_config.timeout().is_zero() || self.get_global_listener_timeout().is_zero() { // try to add listener and exit immediately without retry if let Err(e) = self.add_listener(endpoint).await { @@ -474,9 +474,9 @@ impl Runtime { async fn add_listener(&self, listener: EndPoint) -> ZResult<()> { let endpoint = listener.clone(); match self.manager().add_listener(endpoint).await { - Ok(listener) => log::debug!("Listener added: {}", listener), + Ok(listener) => tracing::debug!("Listener added: {}", listener), Err(err) => { - log::warn!("Unable to open listener {}: {}", listener, err); + tracing::warn!("Unable to open listener {}: {}", listener, err); return Err(err); } } @@ -487,7 +487,7 @@ impl Runtime { let mut locators = self.state.locators.write().unwrap(); *locators = self.manager().get_locators(); for locator in &*locators { - log::info!("Zenoh can be reached at: {}", locator); + tracing::info!("Zenoh can be reached at: {}", locator); } } @@ -495,7 +495,7 @@ impl Runtime { if names == "auto" { let ifaces = zenoh_util::net::get_multicast_interfaces(); if ifaces.is_empty() { - log::warn!( + tracing::warn!( "Unable to find active, non-loopback multicast interface. Will use [::]." 
); vec![Ipv6Addr::UNSPECIFIED.into()] @@ -511,12 +511,12 @@ impl Runtime { Ok(opt_addr) => match opt_addr { Some(addr) => Some(addr), None => { - log::error!("Unable to find interface {}", name); + tracing::error!("Unable to find interface {}", name); None } }, Err(err) => { - log::error!("Unable to find interface {}: {}", name, err); + tracing::error!("Unable to find interface {}: {}", name, err); None } }, @@ -529,12 +529,12 @@ impl Runtime { let socket = match Socket::new(Domain::IPV4, Type::DGRAM, None) { Ok(socket) => socket, Err(err) => { - log::error!("Unable to create datagram socket: {}", err); + tracing::error!("Unable to create datagram socket: {}", err); bail!(err => "Unable to create datagram socket"); } }; if let Err(err) = socket.set_reuse_address(true) { - log::error!("Unable to set SO_REUSEADDR option: {}", err); + tracing::error!("Unable to set SO_REUSEADDR option: {}", err); bail!(err => "Unable to set SO_REUSEADDR option"); } let addr: IpAddr = { @@ -548,18 +548,20 @@ impl Runtime { } }; match socket.bind(&SocketAddr::new(addr, sockaddr.port()).into()) { - Ok(()) => log::debug!("UDP port bound to {}", sockaddr), + Ok(()) => tracing::debug!("UDP port bound to {}", sockaddr), Err(err) => { - log::error!("Unable to bind UDP port {}: {}", sockaddr, err); + tracing::error!("Unable to bind UDP port {}: {}", sockaddr, err); bail!(err => "Unable to bind UDP port {}", sockaddr); } } match sockaddr.ip() { IpAddr::V6(addr) => match socket.join_multicast_v6(&addr, 0) { - Ok(()) => log::debug!("Joined multicast group {} on interface 0", sockaddr.ip()), + Ok(()) => { + tracing::debug!("Joined multicast group {} on interface 0", sockaddr.ip()) + } Err(err) => { - log::error!( + tracing::error!( "Unable to join multicast group {} on interface 0: {}", sockaddr.ip(), err @@ -574,12 +576,12 @@ impl Runtime { for iface in ifaces { if let IpAddr::V4(iface_addr) = iface { match socket.join_multicast_v4(&addr, iface_addr) { - Ok(()) => log::debug!( + Ok(()) => 
tracing::debug!( "Joined multicast group {} on interface {}", sockaddr.ip(), iface_addr, ), - Err(err) => log::warn!( + Err(err) => tracing::warn!( "Unable to join multicast group {} on interface {}: {}", sockaddr.ip(), iface_addr, @@ -587,7 +589,7 @@ impl Runtime { ), } } else { - log::warn!( + tracing::warn!( "Cannot join IpV4 multicast group {} on IpV6 iface {}", sockaddr.ip(), iface @@ -596,7 +598,7 @@ impl Runtime { } } } - log::info!("zenohd listening scout messages on {}", sockaddr); + tracing::info!("zenohd listening scout messages on {}", sockaddr); // Must set to nonblocking according to the doc of tokio // https://docs.rs/tokio/latest/tokio/net/struct.UdpSocket.html#notes @@ -612,7 +614,7 @@ impl Runtime { let socket = match Socket::new(Domain::IPV4, Type::DGRAM, None) { Ok(socket) => socket, Err(err) => { - log::warn!("Unable to create datagram socket: {}", err); + tracing::warn!("Unable to create datagram socket: {}", err); bail!(err=> "Unable to create datagram socket"); } }; @@ -624,10 +626,10 @@ impl Runtime { .unwrap_or(SocketAddr::new(addr, 0).into()) .as_socket() .unwrap_or(SocketAddr::new(addr, 0)); - log::debug!("UDP port bound to {}", local_addr); + tracing::debug!("UDP port bound to {}", local_addr); } Err(err) => { - log::warn!("Unable to bind udp port {}:0: {}", addr, err); + tracing::warn!("Unable to bind udp port {}:0: {}", addr, err); bail!(err => "Unable to bind udp port {}:0", addr); } } @@ -660,13 +662,13 @@ impl Runtime { let mut period = retry_config.period(); let cancellation_token = self.get_cancellation_token(); loop { - log::trace!("Trying to connect to configured peer {}", peer); + tracing::trace!("Trying to connect to configured peer {}", peer); let endpoint = peer.clone(); tokio::select! 
{ res = tokio::time::timeout(retry_config.timeout(), self.manager().open_transport_unicast(endpoint)) => { match res { Ok(Ok(transport)) => { - log::debug!("Successfully connected to configured peer {}", peer); + tracing::debug!("Successfully connected to configured peer {}", peer); if let Ok(Some(orch_transport)) = transport.get_callback() { if let Some(orch_transport) = orch_transport .as_any() @@ -678,7 +680,7 @@ impl Runtime { break; } Ok(Err(e)) => { - log::debug!( + tracing::debug!( "Unable to connect to configured peer {}! {}. Retry in {:?}.", peer, e, @@ -686,7 +688,7 @@ impl Runtime { ); } Err(e) => { - log::debug!( + tracing::debug!( "Unable to connect to configured peer {}! {}. Retry in {:?}.", peer, e, @@ -727,7 +729,7 @@ impl Runtime { loop { for socket in sockets { - log::trace!( + tracing::trace!( "Send {:?} to {} on interface {}", scout.body, mcast_addr, @@ -739,7 +741,7 @@ impl Runtime { .send_to(wbuf.as_slice(), mcast_addr.to_string()) .await { - log::debug!( + tracing::debug!( "Unable to send {:?} to {} on interface {}: {}", scout.body, mcast_addr, @@ -767,25 +769,25 @@ impl Runtime { let codec = Zenoh080::new(); let res: Result = codec.read(&mut reader); if let Ok(msg) = res { - log::trace!("Received {:?} from {}", msg.body, peer); + tracing::trace!("Received {:?} from {}", msg.body, peer); if let ScoutingBody::Hello(hello) = &msg.body { if matcher.matches(hello.whatami) { if let Loop::Break = f(hello.clone()).await { break; } } else { - log::warn!("Received unexpected Hello: {:?}", msg.body); + tracing::warn!("Received unexpected Hello: {:?}", msg.body); } } } else { - log::trace!( + tracing::trace!( "Received unexpected UDP datagram from {}: {:?}", peer, &buf.as_slice()[..n] ); } } - Err(e) => log::debug!("Error receiving UDP datagram: {}", e), + Err(e) => tracing::debug!("Error receiving UDP datagram: {}", e), } } } @@ -806,7 +808,7 @@ impl Runtime { let is_multicast = match inspector.is_multicast(locator).await { Ok(im) => im, Err(e) => { - 
log::trace!("{} {} on {}: {}", ERR, zid, locator, e); + tracing::trace!("{} {} on {}: {}", ERR, zid, locator, e); continue; } }; @@ -822,14 +824,14 @@ impl Runtime { .await { Ok(Ok(transport)) => { - log::debug!( + tracing::debug!( "Successfully connected to newly scouted peer: {:?}", transport ); return true; } - Ok(Err(e)) => log::trace!("{} {} on {}: {}", ERR, zid, locator, e), - Err(e) => log::trace!("{} {} on {}: {}", ERR, zid, locator, e), + Ok(Err(e)) => tracing::trace!("{} {} on {}: {}", ERR, zid, locator, e), + Err(e) => tracing::trace!("{} {} on {}: {}", ERR, zid, locator, e), } } else { match tokio::time::timeout( @@ -839,19 +841,19 @@ impl Runtime { .await { Ok(Ok(transport)) => { - log::debug!( + tracing::debug!( "Successfully connected to newly scouted peer: {:?}", transport ); return true; } - Ok(Err(e)) => log::trace!("{} {} on {}: {}", ERR, zid, locator, e), - Err(e) => log::trace!("{} {} on {}: {}", ERR, zid, locator, e), + Ok(Err(e)) => tracing::trace!("{} {} on {}: {}", ERR, zid, locator, e), + Err(e) => tracing::trace!("{} {} on {}: {}", ERR, zid, locator, e), } } } - log::warn!( + tracing::warn!( "Unable to connect to any locator of scouted peer {}: {:?}", zid, locators @@ -876,10 +878,10 @@ impl Runtime { }; if !has_unicast && !has_multicast { - log::debug!("Try to connect to peer {} via any of {:?}", zid, locators); + tracing::debug!("Try to connect to peer {} via any of {:?}", zid, locators); let _ = self.connect(zid, locators).await; } else { - log::trace!("Already connected scouted peer: {}", zid); + tracing::trace!("Already connected scouted peer: {}", zid); } } } @@ -893,13 +895,13 @@ impl Runtime { ) -> ZResult<()> { let scout = async { Runtime::scout(sockets, what, addr, move |hello| async move { - log::info!("Found {:?}", hello); + tracing::info!("Found {:?}", hello); if !hello.locators.is_empty() { if self.connect(&hello.zid, &hello.locators).await { return Loop::Break; } } else { - log::warn!("Received Hello with no locators: 
{:?}", hello); + tracing::warn!("Received Hello with no locators: {:?}", hello); } Loop::Continue }) @@ -926,7 +928,7 @@ impl Runtime { if !hello.locators.is_empty() { self.connect_peer(&hello.zid, &hello.locators).await } else { - log::warn!("Received Hello with no locators: {:?}", hello); + tracing::warn!("Received Hello with no locators: {:?}", hello); } Loop::Continue }) @@ -962,11 +964,11 @@ impl Runtime { .iter() .filter_map(|sock| sock.local_addr().ok()) .collect(); - log::debug!("Waiting for UDP datagram..."); + tracing::debug!("Waiting for UDP datagram..."); loop { let (n, peer) = mcast_socket.recv_from(&mut buf).await.unwrap(); if local_addrs.iter().any(|addr| *addr == peer) { - log::trace!("Ignore UDP datagram from own socket"); + tracing::trace!("Ignore UDP datagram from own socket"); continue; } @@ -974,7 +976,7 @@ impl Runtime { let codec = Zenoh080::new(); let res: Result = codec.read(&mut reader); if let Ok(msg) = res { - log::trace!("Received {:?} from {}", msg.body, peer); + tracing::trace!("Received {:?} from {}", msg.body, peer); if let ScoutingBody::Scout(Scout { what, .. 
}) = &msg.body { if what.matches(self.whatami()) { let mut wbuf = vec![]; @@ -990,7 +992,7 @@ impl Runtime { } .into(); let socket = get_best_match(&peer.ip(), ucast_sockets).unwrap(); - log::trace!( + tracing::trace!( "Send {:?} to {} on interface {}", hello.body, peer, @@ -1001,12 +1003,12 @@ impl Runtime { codec.write(&mut writer, &hello).unwrap(); if let Err(err) = socket.send_to(wbuf.as_slice(), peer).await { - log::error!("Unable to send {:?} to {}: {}", hello.body, peer, err); + tracing::error!("Unable to send {:?} to {}: {}", hello.body, peer, err); } } } } else { - log::trace!( + tracing::trace!( "Received unexpected UDP datagram from {}: {:?}", peer, &buf.as_slice()[..n] diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index ef33115a6b..f5af22d0e7 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -810,7 +810,7 @@ impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { priority: self.priority, destination: self.destination, }; - log::trace!("publish({:?})", publisher.key_expr); + tracing::trace!("publish({:?})", publisher.key_expr); Ok(publisher) } } @@ -829,7 +829,7 @@ fn resolve_put( kind: SampleKind, #[cfg(feature = "unstable")] attachment: Option, ) -> ZResult<()> { - log::trace!("write({:?}, [...])", &publisher.key_expr); + tracing::trace!("write({:?}, [...])", &publisher.key_expr); let primitives = zread!(publisher.session.state) .primitives .as_ref() diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs index e94b1a9973..4bef4bca1d 100644 --- a/zenoh/src/sample.rs +++ b/zenoh/src/sample.rs @@ -534,7 +534,7 @@ impl QoS { match Priority::try_from(self.inner.get_priority()) { Ok(p) => p, Err(e) => { - log::trace!( + tracing::trace!( "Failed to convert priority: {}; replacing with default value", e.to_string() ); diff --git a/zenoh/src/scouting.rs b/zenoh/src/scouting.rs index f2c90123ce..82536be7d7 100644 --- a/zenoh/src/scouting.rs +++ b/zenoh/src/scouting.rs @@ -309,7 +309,7 @@ fn scout( config: 
zenoh_config::Config, callback: Callback<'static, Hello>, ) -> ZResult { - log::trace!("scout({}, {})", what, &config); + tracing::trace!("scout({}, {})", what, &config); let default_addr = SocketAddr::from(zenoh_config::defaults::scouting::multicast::address); let addr = config.scouting.multicast.address().unwrap_or(default_addr); let ifaces = config.scouting.multicast.interface().as_ref().map_or( @@ -337,7 +337,7 @@ fn scout( }); tokio::select! { _ = scout => {}, - _ = cancellation_token_clone.cancelled() => { log::trace!("stop scout({}, {})", what, &config); }, + _ = cancellation_token_clone.cancelled() => { tracing::trace!("stop scout({}, {})", what, &config); }, } }, cancellation_token.clone(), diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 88f3c2cb77..1791d39c77 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -40,7 +40,6 @@ use crate::Sample; use crate::SampleKind; use crate::Selector; use crate::Value; -use log::{error, trace, warn}; use std::collections::HashMap; use std::convert::TryFrom; use std::convert::TryInto; @@ -50,6 +49,7 @@ use std::sync::atomic::{AtomicU16, AtomicUsize, Ordering}; use std::sync::Arc; use std::sync::RwLock; use std::time::Duration; +use tracing::{error, trace, warn}; use uhlc::HLC; use zenoh_buffers::ZBuf; use zenoh_collections::SingleOrVec; @@ -821,7 +821,7 @@ impl Session { #[allow(clippy::new_ret_no_self)] pub(super) fn new(config: Config) -> impl Resolve> { ResolveFuture::new(async move { - log::debug!("Config: {:?}", &config); + tracing::debug!("Config: {:?}", &config); let aggregated_subscribers = config.aggregation().subscribers().clone(); let aggregated_publishers = config.aggregation().publishers().clone(); match Runtime::init(config).await { @@ -909,7 +909,7 @@ impl Session { _key_expr: KeyExpr<'a>, ) -> impl Resolve> + 'a { ResolveClosure::new(move || { - // log::trace!("declare_publication({:?})", key_expr); + // tracing::trace!("declare_publication({:?})", key_expr); // let mut state = 
zwrite!(self.state); // if !state.publications.iter().any(|p| **p == **key_expr) { // let declared_pub = if let Some(join_pub) = state @@ -985,7 +985,7 @@ impl Session { info: &SubscriberInfo, ) -> ZResult> { let mut state = zwrite!(self.state); - log::trace!("subscribe({:?})", key_expr); + tracing::trace!("subscribe({:?})", key_expr); let id = state.decl_id_counter.fetch_add(1, Ordering::SeqCst); let key_expr = match scope { Some(scope) => scope / key_expr, @@ -1203,7 +1203,7 @@ impl Session { callback: Callback<'static, Query>, ) -> ZResult> { let mut state = zwrite!(self.state); - log::trace!("queryable({:?})", key_expr); + tracing::trace!("queryable({:?})", key_expr); let id = state.decl_id_counter.fetch_add(1, Ordering::SeqCst); let qable_state = Arc::new(QueryableState { id, @@ -1377,7 +1377,7 @@ impl Session { key_expr: &KeyExpr, ) -> ZResult> { let mut state = zwrite!(self.state); - log::trace!("declare_liveliness({:?})", key_expr); + tracing::trace!("declare_liveliness({:?})", key_expr); let id = state.decl_id_counter.fetch_add(1, Ordering::SeqCst); let key_expr = KeyExpr::from(*crate::liveliness::KE_PREFIX_LIVELINESS / key_expr); let tok_state = Arc::new(LivelinessTokenState { @@ -1439,7 +1439,7 @@ impl Session { let mut state = zwrite!(self.state); let id = state.decl_id_counter.fetch_add(1, Ordering::SeqCst); - log::trace!("matches_listener({:?}) => {id}", publisher.key_expr); + tracing::trace!("matches_listener({:?}) => {id}", publisher.key_expr); let listener_state = Arc::new(MatchingListenerState { id, current: std::sync::Mutex::new(false), @@ -1460,7 +1460,7 @@ impl Session { (listener_state.callback)(MatchingStatus { matching: true }); } } - Err(e) => log::error!("Error trying to acquire MathginListener lock: {}", e), + Err(e) => tracing::error!("Error trying to acquire MathginListener lock: {}", e), } Ok(listener_state) } @@ -1532,7 +1532,7 @@ impl Session { } } Err(e) => { - log::error!( + tracing::error!( "Error trying to acquire MathginListener 
lock: {}", e ); @@ -1570,7 +1570,7 @@ impl Session { } } Err(e) => { - log::error!( + tracing::error!( "Error trying to acquire MathginListener lock: {}", e ); @@ -1613,7 +1613,7 @@ impl Session { match &sub.scope { Some(scope) => { if !res.key_expr.starts_with(&***scope) { - log::warn!( + tracing::warn!( "Received Data for `{}`, which didn't start with scope `{}`: don't deliver to scoped Subscriber.", res.key_expr, scope, @@ -1626,7 +1626,7 @@ impl Session { key_expr.into_owned(), )), Err(e) => { - log::warn!( + tracing::warn!( "Error unscoping received Data for `{}`: {}", res.key_expr, e, @@ -1642,14 +1642,14 @@ impl Session { } } Some(Resource::Prefix { prefix }) => { - log::error!( + tracing::error!( "Received Data for `{}`, which isn't a key expression", prefix ); return; } None => { - log::error!("Received Data for unknown expr_id: {}", key_expr.scope); + tracing::error!("Received Data for unknown expr_id: {}", key_expr.scope); return; } } @@ -1664,7 +1664,7 @@ impl Session { match &sub.scope { Some(scope) => { if !key_expr.starts_with(&***scope) { - log::warn!( + tracing::warn!( "Received Data for `{}`, which didn't start with scope `{}`: don't deliver to scoped Subscriber.", key_expr, scope, @@ -1676,7 +1676,7 @@ impl Session { key_expr.into_owned(), )), Err(e) => { - log::warn!( + tracing::warn!( "Error unscoping received Data for `{}`: {}", key_expr, e, @@ -1692,7 +1692,7 @@ impl Session { } } Err(err) => { - log::error!("Received Data for unkown key_expr: {}", err); + tracing::error!("Received Data for unkown key_expr: {}", err); return; } } @@ -1755,7 +1755,7 @@ impl Session { #[cfg(feature = "unstable")] attachment: Option, callback: Callback<'static, Reply>, ) -> ZResult<()> { - log::trace!("get({}, {:?}, {:?})", selector, target, consolidation); + tracing::trace!("get({}, {:?}, {:?})", selector, target, consolidation); let mut state = zwrite!(self.state); let consolidation = match consolidation.mode { Mode::Auto => { @@ -1784,7 +1784,7 @@ impl 
Session { let mut state = zwrite!(state); if let Some(query) = state.queries.remove(&qid) { std::mem::drop(state); - log::debug!("Timeout on query {}! Send error and close.", qid); + tracing::debug!("Timeout on query {}! Send error and close.", qid); if query.reception_mode == ConsolidationMode::Latest { for (_, reply) in query.replies.unwrap().into_iter() { (query.callback)(reply); @@ -1809,7 +1809,7 @@ impl Session { None => selector.clone(), }; - log::trace!("Register query {} (nb_final = {})", qid, nb_final); + tracing::trace!("Register query {} (nb_final = {})", qid, nb_final); let wexpr = selector.key_expr.to_wire(self).to_owned(); state.queries.insert( qid, @@ -2172,7 +2172,10 @@ impl Primitives for Session { } } Err(err) => { - log::error!("Received DeclareSubscriber for unkown wire_expr: {}", err) + tracing::error!( + "Received DeclareSubscriber for unkown wire_expr: {}", + err + ) } } } @@ -2206,7 +2209,10 @@ impl Primitives for Session { } } Err(err) => { - log::error!("Received Forget Subscriber for unkown key_expr: {}", err) + tracing::error!( + "Received Forget Subscriber for unkown key_expr: {}", + err + ) } } } @@ -2291,12 +2297,12 @@ impl Primitives for Session { trace!("recv Response {:?}", msg); match msg.payload { ResponseBody::Ack(_) => { - log::warn!( + tracing::warn!( "Received a ResponseBody::Ack, but this isn't supported yet. Dropping message." ) } ResponseBody::Put(_) => { - log::warn!( + tracing::warn!( "Received a ResponseBody::Put, but this isn't supported yet. Dropping message." 
) } @@ -2327,7 +2333,7 @@ impl Primitives for Session { callback(new_reply); } None => { - log::warn!("Received ReplyData for unkown Query: {}", msg.rid); + tracing::warn!("Received ReplyData for unkown Query: {}", msg.rid); } } } @@ -2350,7 +2356,7 @@ impl Primitives for Session { Ok([true]) ) && !query.selector.key_expr.intersects(&key_expr) { - log::warn!( + tracing::warn!( "Received Reply for `{}` from `{:?}, which didn't match query `{}`: dropping Reply.", key_expr, msg.ext_respid, @@ -2361,7 +2367,7 @@ impl Primitives for Session { let key_expr = match &query.scope { Some(scope) => { if !key_expr.starts_with(&***scope) { - log::warn!( + tracing::warn!( "Received Reply for `{}` from `{:?}, which didn't start with scope `{}`: dropping Reply.", key_expr, msg.ext_respid, @@ -2372,7 +2378,7 @@ impl Primitives for Session { match KeyExpr::try_from(&key_expr[(scope.len() + 1)..]) { Ok(key_expr) => key_expr, Err(e) => { - log::warn!( + tracing::warn!( "Error unscoping received Reply for `{}` from `{:?}: {}", key_expr, msg.ext_respid, @@ -2488,7 +2494,7 @@ impl Primitives for Session { } } None => { - log::warn!("Received ReplyData for unkown Query: {}", msg.rid); + tracing::warn!("Received ReplyData for unkown Query: {}", msg.rid); } } } diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index 073d85566b..e38a64d200 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -46,7 +46,7 @@ impl IntervalCounter { } fn downsampling_by_keyexpr_impl(egress: bool) { - let _ = env_logger::builder().is_test(true).try_init(); + zenoh_util::init_log_from_env(); use zenoh::prelude::sync::*; @@ -163,7 +163,7 @@ fn downsampling_by_keyexpr() { #[cfg(unix)] fn downsampling_by_interface_impl(egress: bool) { - let _ = env_logger::builder().is_test(true).try_init(); + zenoh_util::init_log_from_env(); use zenoh::prelude::sync::*; @@ -265,7 +265,7 @@ fn downsampling_by_interface() { #[test] #[should_panic(expected = "unknown variant `down`")] fn 
downsampling_config_error_wrong_strategy() { - let _ = env_logger::builder().is_test(true).try_init(); + zenoh_util::init_log_from_env(); use zenoh::prelude::sync::*; diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 6c5afe0673..bd112c0314 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -115,7 +115,7 @@ impl Task { } Err(err) => { - log::warn!( + tracing::warn!( "Sample got from {} failed to unwrap! Error: {}.", ke, err @@ -365,7 +365,7 @@ impl Recipe { // And the message transmission should work even if the common node disappears after a while. #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn gossip() -> Result<()> { - env_logger::try_init().unwrap_or_default(); + zenoh_util::init_log_from_env(); let locator = String::from("tcp/127.0.0.1:17446"); let ke = String::from("testKeyExprGossip"); @@ -433,7 +433,7 @@ async fn gossip() -> Result<()> { // Simulate two peers connecting to a router but not directly reachable to each other can exchange messages via the brokering by the router. 
#[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn static_failover_brokering() -> Result<()> { - env_logger::try_init().unwrap_or_default(); + zenoh_util::init_log_from_env(); let locator = String::from("tcp/127.0.0.1:17449"); let ke = String::from("testKeyExprStaticFailoverBrokering"); let msg_size = 8; @@ -494,7 +494,7 @@ async fn static_failover_brokering() -> Result<()> { // Total cases = 2 x 4 x 6 = 48 #[tokio::test(flavor = "multi_thread", worker_threads = 9)] async fn three_node_combination() -> Result<()> { - env_logger::try_init().unwrap_or_default(); + zenoh_util::init_log_from_env(); let modes = [WhatAmI::Peer, WhatAmI::Client]; let delay_in_secs = [ (0, 1, 2), @@ -625,7 +625,7 @@ async fn three_node_combination() -> Result<()> { // Total cases = 2 x 8 = 16 #[tokio::test(flavor = "multi_thread", worker_threads = 8)] async fn two_node_combination() -> Result<()> { - env_logger::try_init().unwrap_or_default(); + zenoh_util::init_log_from_env(); #[derive(Clone, Copy)] struct IsFirstListen(bool); diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 7e50f7a6bb..c70a10160b 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -184,7 +184,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_session_unicast() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let (peer01, peer02) = open_session_unicast(&["tcp/127.0.0.1:17447"]).await; test_session_pubsub(&peer01, &peer02, Reliability::Reliable).await; test_session_qryrep(&peer01, &peer02, Reliability::Reliable).await; @@ -193,7 +193,7 @@ async fn zenoh_session_unicast() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_session_multicast() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let (peer01, peer02) = open_session_multicast("udp/224.0.0.1:17448", "udp/224.0.0.1:17448").await; 
test_session_pubsub(&peer01, &peer02, Reliability::BestEffort).await; diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index 865121308a..03fca50170 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -251,7 +251,7 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_unicity_p2p() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let (s01, s02, s03) = open_p2p_sessions().await; test_unicity_pubsub(&s01, &s02, &s03).await; @@ -261,7 +261,7 @@ async fn zenoh_unicity_p2p() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_unicity_brokered() { - let _ = env_logger::try_init(); + zenoh_util::init_log_from_env(); let r = open_router_session().await; let (s01, s02, s03) = open_client_sessions().await; diff --git a/zenohd/Cargo.toml b/zenohd/Cargo.toml index 81975ae2e7..9f471046a6 100644 --- a/zenohd/Cargo.toml +++ b/zenohd/Cargo.toml @@ -28,16 +28,20 @@ readme = "README.md" [features] default = ["zenoh/default"] shared-memory = ["zenoh/shared-memory"] +loki = ["tracing-loki","url"] [dependencies] tokio = { workspace = true, features = ["rt-multi-thread"] } clap = { workspace = true, features = ["derive"] } -env_logger = { workspace = true } +zenoh-util = {workspace = true } futures = { workspace = true } git-version = { workspace = true } json5 = { workspace = true } lazy_static = { workspace = true } -log = { workspace = true } +tracing = {workspace = true} +tracing-subscriber = {workspace = true} +tracing-loki = {workspace = true, optional = true } +url = {workspace = true, optional = true } zenoh = { workspace = true, features = ["unstable"] } [dev-dependencies] diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index d7cb9a52a9..e602d7c8a1 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -15,12 +15,27 @@ use clap::Parser; use futures::future; use git_version::git_version; use 
std::collections::HashSet; +use tracing_subscriber::layer::SubscriberExt; +use tracing_subscriber::util::SubscriberInitExt; +use tracing_subscriber::EnvFilter; use zenoh::config::{Config, ModeDependentValue, PermissionsConf, PluginLoad, ValidatedMap}; use zenoh::plugins::PluginsManager; use zenoh::prelude::{EndPoint, WhatAmI}; use zenoh::runtime::{AdminSpace, Runtime}; use zenoh::Result; +#[cfg(feature = "loki")] +use url::Url; + +#[cfg(feature = "loki")] +const LOKI_ENDPOINT_VAR: &str = "LOKI_ENDPOINT"; + +#[cfg(feature = "loki")] +const LOKI_API_KEY_VAR: &str = "LOKI_API_KEY"; + +#[cfg(feature = "loki")] +const LOKI_API_KEY_HEADER_VAR: &str = "LOKI_API_KEY_HEADER"; + const GIT_VERSION: &str = git_version!(prefix = "v", cargo_prefix = "v"); lazy_static::lazy_static!( @@ -85,7 +100,7 @@ fn load_plugin( paths: &Option>, ) -> Result<()> { let declared = if let Some(declared) = plugin_mgr.plugin_mut(name) { - log::warn!("Plugin `{}` was already declared", declared.name()); + tracing::warn!("Plugin `{}` was already declared", declared.name()); declared } else if let Some(paths) = paths { plugin_mgr.declare_dynamic_plugin_by_paths(name, paths)? 
@@ -94,7 +109,7 @@ fn load_plugin( }; if let Some(loaded) = declared.loaded_mut() { - log::warn!( + tracing::warn!( "Plugin `{}` was already loaded from {}", loaded.name(), loaded.path() @@ -111,18 +126,13 @@ fn main() { .build() .unwrap() .block_on(async { - let mut log_builder = - env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("z=info")); - #[cfg(feature = "stats")] - log_builder.format_timestamp_millis().init(); - #[cfg(not(feature = "stats"))] - log_builder.init(); + init_logging().unwrap(); - log::info!("zenohd {}", *LONG_VERSION); + tracing::info!("zenohd {}", *LONG_VERSION); let args = Args::parse(); let config = config_from_args(&args); - log::info!("Initial conf: {}", &config); + tracing::info!("Initial conf: {}", &config); let mut plugin_mgr = PluginsManager::dynamic(config.libloader(), "zenoh_plugin_"); // Static plugins are to be added here, with `.add_static::()` @@ -133,7 +143,7 @@ fn main() { paths, required, } = plugin_load; - log::info!( + tracing::info!( "Loading {req} plugin \"{name}\"", req = if required { "required" } else { "" } ); @@ -141,7 +151,7 @@ fn main() { if required { panic!("Plugin load failure: {}", e) } else { - log::error!("Plugin load failure: {}", e) + tracing::error!("Plugin load failure: {}", e) } } if required { @@ -159,14 +169,14 @@ fn main() { for plugin in plugin_mgr.loaded_plugins_iter_mut() { let required = required_plugins.contains(plugin.name()); - log::info!( + tracing::info!( "Starting {req} plugin \"{name}\"", req = if required { "required" } else { "" }, name = plugin.name() ); match plugin.start(&runtime) { Ok(_) => { - log::info!( + tracing::info!( "Successfully started plugin {} from {:?}", plugin.name(), plugin.path() @@ -188,7 +198,7 @@ fn main() { } ); } else { - log::error!( + tracing::error!( "Required plugin \"{}\" failed to start: {}", plugin.name(), if report.is_empty() { @@ -201,7 +211,7 @@ fn main() { } } } - log::info!("Finished loading plugins"); + tracing::info!("Finished 
loading plugins"); AdminSpace::start(&runtime, plugin_mgr, LONG_VERSION.clone()).await; @@ -355,17 +365,70 @@ fn config_from_args(args: &Args) -> Config { if let Err(e) = config.insert(key.strip_prefix('/').unwrap_or(key), &mut deserializer) { - log::warn!("Couldn't perform configuration {}: {}", json, e); + tracing::warn!("Couldn't perform configuration {}: {}", json, e); } } - Err(e) => log::warn!("Couldn't perform configuration {}: {}", json, e), + Err(e) => tracing::warn!("Couldn't perform configuration {}: {}", json, e), } } } - log::debug!("Config: {:?}", &config); + tracing::debug!("Config: {:?}", &config); config } +fn init_logging() -> Result<()> { + let env_filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("z=info")); + + let fmt_layer = tracing_subscriber::fmt::Layer::new() + .with_thread_ids(true) + .with_thread_names(true) + .with_level(true) + .with_target(true); + + let tracing_sub = tracing_subscriber::registry() + .with(env_filter) + .with(fmt_layer); + + #[cfg(feature = "loki")] + match ( + get_loki_endpoint(), + get_loki_apikey(), + get_loki_apikey_header(), + ) { + (Some(loki_url), Some(header), Some(apikey)) => { + let (loki_layer, task) = tracing_loki::builder() + .label("service", "zenoh")? + .http_header(header, apikey)? + .build_url(Url::parse(&loki_url)?)?; + + tracing_sub.with(loki_layer).init(); + tokio::spawn(task); + return Ok(()); + } + _ => { + tracing::warn!("Missing one of the required header for Loki!") + } + }; + + tracing_sub.init(); + Ok(()) +} + +#[cfg(feature = "loki")] +pub fn get_loki_endpoint() -> Option { + std::env::var(LOKI_ENDPOINT_VAR).ok() +} + +#[cfg(feature = "loki")] +pub fn get_loki_apikey() -> Option { + std::env::var(LOKI_API_KEY_VAR).ok() +} + +#[cfg(feature = "loki")] +pub fn get_loki_apikey_header() -> Option { + std::env::var(LOKI_API_KEY_HEADER_VAR).ok() +} + #[test] #[cfg(feature = "default")] fn test_default_features() {