diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index f4c76a9cb312a..14e8ada57b38f 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,4 +1,5 @@ docs/ @vectordotdev/ux-team +lib/dnsmsg-parser/ @vectordotdev/integrations-team lib/file-source/ @spencergilbert @vectordotdev/integrations-team lib/k8s-e2e-tests/ @spencergilbert @vectordotdev/integrations-team lib/k8s-test-framework/ @spencergilbert @vectordotdev/integrations-team diff --git a/.github/actions/spelling/excludes.txt b/.github/actions/spelling/excludes.txt index f7674ed58ce6c..ffffd7e0f95cb 100644 --- a/.github/actions/spelling/excludes.txt +++ b/.github/actions/spelling/excludes.txt @@ -2,13 +2,13 @@ (?:^|/)(?i)COPYRIGHT (?:^|/)(?i)LICEN[CS]E (?:^|/)3rdparty/ +(?:^|/)amplify\.yml$ (?:^|/)go\.sum$ (?:^|/)package(?:-lock|)\.json$ (?:^|/)Pipfile$ (?:^|/)pyproject.toml (?:^|/)requirements(?:-dev|-doc|-test|)\.txt$ (?:^|/)vendor/ -(?:^|/)amplify\.yml$ \.a$ \.ai$ \.all-contributorsrc$ @@ -77,6 +77,9 @@ ^\Qbenches/transform/route.rs\E$ ^\Qlib/codecs/tests/data/decoding/protobuf/test_protobuf.desc\E$ ^\Qlib/codecs/tests/data/decoding/protobuf/test_protobuf3.desc\E$ +^\Qlib/codecs/tests/data/protobuf/test.desc\E$ +^\Qlib/codecs/tests/data/protobuf/test_protobuf.desc\E$ +^\Qlib/codecs/tests/data/protobuf/test_protobuf3.desc\E$ ^\Qlib/dnsmsg-parser/benches/benches.rs\E$ ^\Qlib/dnsmsg-parser/src/dns_message_parser.rs\E$ ^\Qlib/lookup/tests/fixtures/lookup/quoted\E$ @@ -104,4 +107,3 @@ ^\Qwebsite/layouts/shortcodes/config/unit-tests.html\E$ ^lib/codecs/tests/data/native_encoding/ ignore$ - diff --git a/.github/actions/spelling/expect.txt b/.github/actions/spelling/expect.txt index 5da73dc7b12cb..3bcfc6a064995 100644 --- a/.github/actions/spelling/expect.txt +++ b/.github/actions/spelling/expect.txt @@ -840,6 +840,7 @@ posttrunc prebuild precpu preds +prefs preinst preread prereqs @@ -1074,6 +1075,7 @@ Takeaways targetgroup tarpit tcmalloc +teconsent telecom templatable templateable @@ -1133,6 +1135,8 @@ tripwires Trivago trivy Troutwine +trustarc +truste TRUSTSTORE TSDB Tsvg diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 969b7552114c1..0280e44156c73 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -316,14 +316,9 @@ jobs: with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-gnu path: target/artifacts - - name: First install of DEB package. + - name: Verify install of DEB package. run: | - dpkg -i target/artifacts/vector_${{ env.VECTOR_VERSION }}-1_amd64.deb - ./scripts/verify-install.sh - - name: Second install of DEB package. - run: | - dpkg -i target/artifacts/vector_${{ env.VECTOR_VERSION }}-1_amd64.deb - ./scripts/verify-install.sh + ./scripts/verify-install.sh target/artifacts/vector_${{ env.VECTOR_VERSION }}-1_amd64.deb rpm-verify: name: Verify RPM Packages @@ -372,14 +367,9 @@ jobs: with: name: vector-${{ env.VECTOR_VERSION }}-x86_64-unknown-linux-gnu path: target/artifacts - - name: First install of RPM package. - run: | - rpm -i --replacepkgs target/artifacts/vector-${{ env.VECTOR_VERSION }}-1.x86_64.rpm - ./scripts/verify-install.sh - - name: Second install of RPM package. + - name: Verify install of RPM package. 
run: | - rpm -i --replacepkgs target/artifacts/vector-${{ env.VECTOR_VERSION }}-1.x86_64.rpm - ./scripts/verify-install.sh + ./scripts/verify-install.sh target/artifacts/vector-${{ env.VECTOR_VERSION }}-1.x86_64.rpm macos-verify: name: Verify macOS Package @@ -563,6 +553,7 @@ jobs: - deb-verify - rpm-verify - macos-verify + - generate-sha256sum env: VECTOR_VERSION: ${{ needs.generate-publish-metadata.outputs.vector_version }} steps: @@ -610,6 +601,11 @@ jobs: with: name: vector-${{ env.VECTOR_VERSION }}-armv7-unknown-linux-musleabihf path: target/artifacts + - name: Download artifact checksums + uses: actions/download-artifact@v3 + with: + name: vector-${{ env.VECTOR_VERSION }}-SHA256SUMS + path: target/artifacts - name: Publish release to GitHub env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/unit_mac.yml b/.github/workflows/unit_mac.yml index acf6b8c7b33e9..5752cca6e4c9c 100644 --- a/.github/workflows/unit_mac.yml +++ b/.github/workflows/unit_mac.yml @@ -5,7 +5,7 @@ on: jobs: unit-mac: - runs-on: macos-11 + runs-on: macos-13 env: CARGO_INCREMENTAL: 0 steps: diff --git a/Cargo.lock b/Cargo.lock index e74b6b2c2107c..c3e659723df50 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -655,7 +655,7 @@ dependencies = [ "once_cell", "rand 0.8.5", "regex", - "ring", + "ring 0.16.20", "rustls 0.21.7", "rustls-native-certs", "rustls-pemfile", @@ -808,7 +808,7 @@ dependencies = [ "hex", "http", "hyper", - "ring", + "ring 0.16.20", "time", "tokio", "tower", @@ -5863,7 +5863,7 @@ dependencies = [ "num-bigint", "oauth2", "rand 0.8.5", - "ring", + "ring 0.16.20", "serde", "serde-value", "serde_derive", @@ -7280,11 +7280,25 @@ dependencies = [ "libc", "once_cell", "spin 0.5.2", - "untrusted", + "untrusted 0.7.1", "web-sys", "winapi", ] +[[package]] +name = "ring" +version = "0.17.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9babe80d5c16becf6594aa32ad2be8fe08498e7ae60b77de8df700e67f191d7e" +dependencies = [ + "cc", + "getrandom 0.2.10", + "libc", + "spin 0.9.4", + "untrusted 0.9.0", + "windows-sys 0.48.0", +] + [[package]] name = "rkyv" version = "0.7.40" @@ -7500,7 +7514,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" dependencies = [ "log", - "ring", + "ring 0.16.20", "sct", "webpki", ] @@ -7512,7 +7526,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" dependencies = [ "log", - "ring", + "ring 0.16.20", "rustls-webpki", "sct", ] @@ -7544,8 +7558,8 @@ version = "0.101.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" dependencies = [ - "ring", - "untrusted", + "ring 0.16.20", + "untrusted 0.7.1", ] [[package]] @@ -7665,8 +7679,8 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ - "ring", - "untrusted", + "ring 0.16.20", + "untrusted 0.7.1", ] [[package]] @@ -8812,27 +8826,15 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54319c93411147bced34cb5609a80e0a8e44c5999c93903a81cd866630ec0bfd" -dependencies = [ - "futures-util", - "log", - "tokio", - "tungstenite 0.18.0", -] - -[[package]] -name = 
"tokio-tungstenite" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b2dbec703c26b00d74844519606ef15d09a7d6857860f84ad223dec002ddea2" +checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" dependencies = [ "futures-util", "log", "rustls 0.21.7", "tokio", - "tungstenite 0.20.0", + "tungstenite", ] [[package]] @@ -9287,28 +9289,9 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "tungstenite" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ee6ab729cd4cf0fd55218530c4522ed30b7b6081752839b68fcec8d0960788" -dependencies = [ - "base64 0.13.1", - "byteorder", - "bytes 1.5.0", - "http", - "httparse", - "log", - "rand 0.8.5", - "sha1", - "thiserror", - "url", - "utf-8", -] - -[[package]] -name = "tungstenite" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e862a1c4128df0112ab625f55cd5c934bcb4312ba80b39ae4b4835a3fd58e649" +checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" dependencies = [ "byteorder", "bytes 1.5.0", @@ -9496,6 +9479,12 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + [[package]] name = "uom" version = "0.31.1" @@ -9609,7 +9598,7 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "vector" -version = "0.33.0" +version = "0.33.1" dependencies = [ "apache-avro", "approx", @@ -9772,7 +9761,7 @@ dependencies = [ "tokio-postgres", "tokio-stream", "tokio-test", - "tokio-tungstenite 0.20.0", + "tokio-tungstenite", "tokio-util", "toml 0.8.0", "tonic 0.10.1", @@ -9822,7 +9811,7 @@ dependencies = [ "serde_json", "tokio", "tokio-stream", - "tokio-tungstenite 0.20.0", + "tokio-tungstenite", "url", "uuid", ] @@ -10256,9 +10245,9 @@ dependencies = [ [[package]] name = "warp" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba431ef570df1287f7f8b07e376491ad54f84d26ac473489427231e1718e1f69" +checksum = "c1e92e22e03ff1230c03a1a8ee37d2f89cd489e2e541b7550d6afad96faed169" dependencies = [ "bytes 1.5.0", "futures-channel", @@ -10278,7 +10267,7 @@ dependencies = [ "serde_urlencoded", "tokio", "tokio-stream", - "tokio-tungstenite 0.18.0", + "tokio-tungstenite", "tokio-util", "tower-service", "tracing 0.1.37", @@ -10404,12 +10393,12 @@ dependencies = [ [[package]] name = "webpki" -version = "0.22.1" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0e74f82d49d545ad128049b7e88f6576df2da6b02e9ce565c6f533be576957e" +checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53" dependencies = [ - "ring", - "untrusted", + "ring 0.17.3", + "untrusted 0.9.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 343b4d9c36b99..91dc7cc20824b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "vector" -version = "0.33.0" +version = "0.33.1" authors = ["Vector Contributors "] edition = "2021" description = "A lightweight and ultra-fast tool for building observability pipelines" @@ -48,6 +48,7 @@ 
debug = true name = "vector" section = "admin" maintainer-scripts = "distribution/debian/scripts/" +conf-files = ["/etc/vector/vector.yaml", "/etc/default/vector"] assets = [ ["target/release/vector", "/usr/bin/", "755"], ["config/vector.yaml", "/etc/vector/vector.yaml", "644"], @@ -320,14 +321,14 @@ strip-ansi-escapes = { version = "0.2.0", default-features = false } syslog = { version = "6.1.0", default-features = false, optional = true } tikv-jemallocator = { version = "0.5.4", default-features = false, optional = true } tokio-postgres = { version = "0.7.10", default-features = false, features = ["runtime", "with-chrono-0_4"], optional = true } -tokio-tungstenite = {version = "0.20.0", default-features = false, features = ["connect"], optional = true} +tokio-tungstenite = {version = "0.20.1", default-features = false, features = ["connect"], optional = true} toml = { version = "0.8.0", default-features = false, features = ["parse", "display"] } tonic = { version = "0.10", optional = true, default-features = false, features = ["transport", "codegen", "prost", "tls", "tls-roots", "gzip"] } trust-dns-proto = { version = "0.23.0", default-features = false, features = ["dnssec"], optional = true } typetag = { version = "0.2.13", default-features = false } url = { version = "2.4.1", default-features = false, features = ["serde"] } uuid = { version = "1", default-features = false, features = ["serde", "v4"] } -warp = { version = "0.3.5", default-features = false } +warp = { version = "0.3.6", default-features = false } zstd = { version = "0.12.4", default-features = false } arr_macro = { version = "0.2.1" } diff --git a/config/vector.yaml b/config/vector.yaml index 6e83ddf5332a4..fbc56f9de47f8 100644 --- a/config/vector.yaml +++ b/config/vector.yaml @@ -13,7 +13,7 @@ # ------------------------------------------------------------------------------ # Change this to use a non-default directory for Vector data storage: -# data_dir = "/var/lib/vector" +# data_dir: "/var/lib/vector" # Random Syslog-formatted logs sources: diff --git a/distribution/install.sh b/distribution/install.sh index c654c1826ba94..df07b61f7e90d 100755 --- a/distribution/install.sh +++ b/distribution/install.sh @@ -12,7 +12,7 @@ set -u # If PACKAGE_ROOT is unset or empty, default it. PACKAGE_ROOT="${PACKAGE_ROOT:-"https://packages.timber.io/vector"}" -VECTOR_VERSION="0.33.0" +VECTOR_VERSION="0.33.1" _divider="--------------------------------------------------------------------------------" _prompt=">>>" _indent=" " diff --git a/lib/dnsmsg-parser/src/dns_message_parser.rs b/lib/dnsmsg-parser/src/dns_message_parser.rs index 25845444fffdc..48f76c697706e 100644 --- a/lib/dnsmsg-parser/src/dns_message_parser.rs +++ b/lib/dnsmsg-parser/src/dns_message_parser.rs @@ -744,6 +744,26 @@ fn format_rdata(rdata: &RData) -> DnsParserResult<(Option, Option { + let sig_rdata = format!( + "{} {} {} {} {} {} {} {} {}", + match format_record_type(sig.type_covered()) { + Some(record_type) => record_type, + None => String::from("Unknown record type"), + }, + u8::from(sig.algorithm()), + sig.num_labels(), + sig.original_ttl(), + sig.sig_expiration(), // currently in epoch convert to human readable ? + sig.sig_inception(), // currently in epoch convert to human readable ? 
+ sig.key_tag(), + sig.signer_name(), + BASE64.encode(sig.sig()) + ); + Ok((Some(sig_rdata), None)) + } DNSSECRData::Unknown { code: _, rdata } => Ok((None, Some(rdata.anything().to_vec()))), _ => Err(DnsMessageParserError::SimpleError { cause: format!("Unsupported rdata {:?}", rdata), @@ -1117,7 +1137,7 @@ mod tests { dnssec::{ rdata::{ dnskey::DNSKEY, ds::DS, nsec::NSEC, nsec3::NSEC3, nsec3param::NSEC3PARAM, sig::SIG, - DNSSECRData, + DNSSECRData, RRSIG, }, Algorithm as DNSSEC_Algorithm, DigestType, Nsec3HashAlgorithm, }, @@ -1555,6 +1575,35 @@ mod tests { } } + // RRSIG is a derivation of the SIG record data, but the upstream crate does not handle that with a trait, + // so there isn't really a great way to reduce code duplication here. + #[test] + fn test_format_rdata_for_rsig_type() { + let rdata = RData::DNSSEC(DNSSECRData::RRSIG(RRSIG::new( + RecordType::NULL, + DNSSEC_Algorithm::RSASHA256, + 0, + 0, + 2, + 1, + 5, + Name::from_str("www.example.com").unwrap(), + vec![ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 29, 31, + ], + ))); + let rdata_text = format_rdata(&rdata); + assert!(rdata_text.is_ok()); + if let Ok((parsed, raw_rdata)) = rdata_text { + assert!(raw_rdata.is_none()); + assert_eq!( + "NULL 8 0 0 2 1 5 www.example.com AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHR8=", + parsed.unwrap() + ); + } + } + #[test] fn test_format_rdata_for_ds_type() { let rdata = RData::DNSSEC(DNSSECRData::DS(DS::new( diff --git a/lib/vector-api-client/Cargo.toml b/lib/vector-api-client/Cargo.toml index 4b6543f7648c0..bd1f8392eb34e 100644 --- a/lib/vector-api-client/Cargo.toml +++ b/lib/vector-api-client/Cargo.toml @@ -26,7 +26,7 @@ graphql_client = { version = "0.13.0", default-features = false, features = ["gr # HTTP / WebSockets reqwest = { version = "0.11.20", default-features = false, features = ["json"] } -tokio-tungstenite = { version = "0.20.0", default-features = false, features = ["connect", "rustls"] } +tokio-tungstenite = { version = "0.20.1", default-features = false, features = ["connect", "rustls"] } # External libs chrono = { version = "0.4.6", default-features = false, features = ["serde"] } diff --git a/license-tool.toml b/license-tool.toml index 03f02d26d7711..2e6902e69ba78 100644 --- a/license-tool.toml +++ b/license-tool.toml @@ -6,12 +6,13 @@ # `ring` has a custom license that is mostly "ISC-style" but parts of it also fall under OpenSSL licensing. "ring-0.16.20" = { license = "ISC AND Custom" } +"ring-0.17.3" = { license = "ISC AND Custom" } # `rustls-webpki` doesn't specify their license in the metadata, but the file contains the ISC terms. "rustls-webpki-0.100.1" = { license = "ISC" } # `webpki` doesn't specify their license in the metadata, but the file contains the ISC terms. -"webpki-0.22.1" = { license = "ISC" } +"webpki-0.22.4" = { license = "ISC" } # `zerocopy` et al don't specify their licenses in the metadata, but the file contains the 2-clause BSD terms.
"zerocopy-0.6.1" = { license = "BSD-2-Clause" } diff --git a/regression/cases/syslog_regex_logs2metric_ddmetrics/vector/vector.toml b/regression/cases/syslog_regex_logs2metric_ddmetrics/vector/vector.toml index b8a0cb4fbf558..b04171757296c 100644 --- a/regression/cases/syslog_regex_logs2metric_ddmetrics/vector/vector.toml +++ b/regression/cases/syslog_regex_logs2metric_ddmetrics/vector/vector.toml @@ -27,7 +27,7 @@ type = "log_to_metric" inputs = ["remap"] [[transforms.log2metric.metrics]] - type = "gauge" + type = "counter" field = "procid" tags.hostname = "{{ hostname }}" tags.facility = "{{ facility }}" diff --git a/scripts/verify-install.sh b/scripts/verify-install.sh index 35d8404bc92c5..48e80ee36ffa0 100755 --- a/scripts/verify-install.sh +++ b/scripts/verify-install.sh @@ -1,12 +1,40 @@ #!/usr/bin/env bash set -euo pipefail -# verify-install.sh +# verify-install.sh # # SUMMARY # # Verifies vector packages have been installed correctly +package="${1:?must pass package as argument}" + +install_package () { + case "$1" in + *.deb) + dpkg -i "$1" + ;; + *.rpm) + rpm -i --replacepkgs "$1" + ;; + esac +} + +install_package "$package" + +getent passwd vector || (echo "vector user missing" && exit 1) +getent group vector || (echo "vector group missing" && exit 1) +vector --version || (echo "vector --version failed" && exit 1) +test -f /etc/default/vector || (echo "/etc/default/vector doesn't exist" && exit 1) +test -f /etc/vector/vector.yaml || (echo "/etc/vector/vector.yaml doesn't exist" && exit 1) + +echo "FOO=bar" > /etc/default/vector +echo "foo: bar" > /etc/vector/vector.yaml + +install_package "$package" + getent passwd vector || (echo "vector user missing" && exit 1) getent group vector || (echo "vector group missing" && exit 1) vector --version || (echo "vector --version failed" && exit 1) +grep -q "FOO=bar" "/etc/default/vector" || (echo "/etc/default/vector has incorrect contents" && exit 1) +grep -q "foo: bar" "/etc/vector/vector.yaml" || (echo "/etc/vector/vector.yaml has incorrect contents" && exit 1) diff --git a/src/sinks/datadog/metrics/sink.rs b/src/sinks/datadog/metrics/sink.rs index 5ceefc3c487d2..fccfe040fdd95 100644 --- a/src/sinks/datadog/metrics/sink.rs +++ b/src/sinks/datadog/metrics/sink.rs @@ -8,7 +8,6 @@ use futures_util::{ StreamExt, }; use tower::Service; -use vector_common::finalization::EventFinalizers; use vector_core::{ event::{Event, Metric, MetricValue}, partition::Partitioner, @@ -23,8 +22,8 @@ use super::{ use crate::{ internal_events::DatadogMetricsEncodingError, sinks::util::{ - buffer::metrics::sort::sort_for_compression, buffer::metrics::{AggregatedSummarySplitter, MetricSplitter}, + request_builder::default_request_builder_concurrency_limit, SinkBuilderExt, }, }; @@ -103,15 +102,18 @@ where // Aggregate counters with identical timestamps, otherwise identical counters (same // series and same timestamp, when rounded to whole seconds) will be dropped in a // last-write-wins situation when they hit the DD metrics intake. - .map(|((api_key, endpoint), metrics)| { - let collapsed_metrics = collapse_counters_by_series_and_timestamp(metrics); - ((api_key, endpoint), collapsed_metrics) - }) - // Sort metrics by name, which significantly improves HTTP compression. - .map(|((api_key, endpoint), mut metrics)| { - sort_for_compression(&mut metrics); - ((api_key, endpoint), metrics) - }) + // + // This also sorts metrics by name, which significantly improves HTTP compression. 
+ .concurrent_map( + default_request_builder_concurrency_limit(), + |((api_key, endpoint), metrics)| { + Box::pin(async move { + let collapsed_metrics = + sort_and_collapse_counters_by_series_and_timestamp(metrics); + ((api_key, endpoint), collapsed_metrics) + }) + }, + ) // We build our requests "incrementally", which means that for a single batch of metrics, we might generate // N requests to send them all, as Datadog has API-level limits on payload size, so we keep adding metrics // to a request until we reach the limit, and then create a new request, and so on and so forth, until all @@ -159,142 +161,98 @@ where } } -fn collapse_counters_by_series_and_timestamp(mut metrics: Vec) -> Vec { - // NOTE: Astute observers may recognize that this behavior could also be achieved by using - // `Vec::dedup_by`, but the clincher is that `dedup_by` requires a sorted vector to begin with. - // - // This function is designed to collapse duplicate counters even if the metrics are unsorted, - // which leads to a measurable boost in performance, being nearly 35% faster than `dedup_by` - // when the inputs are sorted, and up to 50% faster when the inputs are unsorted. - // - // These numbers are based on sorting a newtype wrapper around the metric instead of the metric - // itself, which does involve allocating a string in our tests. _However_, sorting the `Metric` - // directly is not possible without a customized `PartialOrd` implementation, as some of the - // nested fields containing `f64` values makes it underivable, and I'm not 100% sure that we - // could/would want to have a narrowly-focused impl of `PartialOrd` on `Metric` to fit this use - // case (metric type -> metric name -> metric timestamp, nothing else) vs being able to sort - // metrics by name first, etc. Then there's the potential issue of the reordering of fields - // changing the ordering behavior of `Metric`... and it just felt easier to write this tailored - // algorithm for the use case at hand. - let mut idx = 0; +/// Collapses counters by series and timestamp, leaving all other metrics unmodified. +/// The return value is sorted by metric series, which is desirable for compression. A sorted vector +/// tends to compress better than a random ordering by 2-3x (JSON encoded, deflate algorithm). +/// +/// Note that the time complexity of this function is O(n log n) and the space complexity is O(1). +/// If needed, we can trade space for time by using a HashMap, which would be O(n) time and O(n) space. +fn sort_and_collapse_counters_by_series_and_timestamp(mut metrics: Vec) -> Vec { let now_ts = Utc::now().timestamp(); - // For each metric, see if it's a counter. If so, we check the rest of the metrics - // _after_ it to see if they share the same series _and_ timestamp, when converted - // to a Unix timestamp. If they match, we take that counter's value and merge it - // with our "current" counter metric, and then drop the secondary one from the - // vector. - // - // For any non-counter, we simply ignore it and leave it as-is. - while idx < metrics.len() { - let curr_idx = idx; - let counter_ts = match metrics[curr_idx].value() { - MetricValue::Counter { .. } => metrics[curr_idx] - .data() - .timestamp() - .map(|dt| dt.timestamp()) - .unwrap_or(now_ts), - // If it's not a counter, we can skip it. 
- _ => { - idx += 1; - continue; - } - }; - - let mut accumulated_value = 0.0; - let mut accumulated_finalizers = EventFinalizers::default(); - - // Now go through each metric _after_ the current one to see if it matches the - // current metric: is a counter, with the same name and timestamp. If it is, we - // accumulate its value and then remove it. - // - // Otherwise, we skip it. - let mut is_disjoint = false; - let mut had_match = false; - let mut inner_idx = curr_idx + 1; - while inner_idx < metrics.len() { - let mut should_advance = true; - if let MetricValue::Counter { value } = metrics[inner_idx].value() { - let other_counter_ts = metrics[inner_idx] - .data() - .timestamp() - .map(|dt| dt.timestamp()) - .unwrap_or(now_ts); - if metrics[curr_idx].series() == metrics[inner_idx].series() - && counter_ts == other_counter_ts - { - had_match = true; - - // Collapse this counter by accumulating its value, and its - // finalizers, and removing it from the original vector of metrics. - accumulated_value += *value; - - let mut old_metric = metrics.swap_remove(inner_idx); - accumulated_finalizers.merge(old_metric.metadata_mut().take_finalizers()); - should_advance = false; - } else { - // We hit a counter that _doesn't_ match, but we can't just skip - // it because we also need to evaluate it against all the - // counters that come after it, so we only increment the index - // for this inner loop. - // - // As well, we mark ourselves to stop incrementing the outer - // index if we find more counters to accumulate, because we've - // hit a disjoint counter here. While we may be continuing to - // shrink the count of remaining metrics from accumulating, - // we have to ensure this counter we just visited is visited by - // the outer loop. - is_disjoint = true; - } - } - - if should_advance { - inner_idx += 1; - - if !is_disjoint { - idx += 1; - } - } + // Sort by series and timestamp, which is required for the below dedupe to behave as desired. + // This also tends to compress better than a random ordering by 2-3x (JSON encoded, deflate algorithm). + // Note that `sort_unstable_by_key` would be simpler but results in lifetime errors without cloning. + metrics.sort_unstable_by(|a, b| { + ( + a.value().as_name(), + a.series(), + a.timestamp().map(|dt| dt.timestamp()).unwrap_or(now_ts), + ) + .cmp(&( + b.value().as_name(), + b.series(), + b.timestamp().map(|dt| dt.timestamp()).unwrap_or(now_ts), + )) + }); + + // Aggregate counters that share the same series and timestamp. + // While `coalesce` is semantically more fitting here than `dedup_by`, we opt for the latter because + // they share the same functionality and `dedup_by`'s implementation is more optimized, doing the + // operation in place. + metrics.dedup_by(|left, right| { + if left.series() != right.series() { + return false; } - // If we had matches during the accumulator phase, update our original counter. - if had_match { - let metric = metrics.get_mut(curr_idx).expect("current index must exist"); - match metric.value_mut() { - MetricValue::Counter { value } => { - *value += accumulated_value; - metric - .metadata_mut() - .merge_finalizers(accumulated_finalizers); - } - _ => unreachable!("current index must represent a counter"), - } + let left_ts = left.timestamp().map(|dt| dt.timestamp()).unwrap_or(now_ts); + let right_ts = right.timestamp().map(|dt| dt.timestamp()).unwrap_or(now_ts); + if left_ts != right_ts { + return false; } - idx += 1; - } + // Only aggregate counters. All other types can be skipped.
+ if let ( + MetricValue::Counter { value: left_value }, + MetricValue::Counter { value: right_value }, + ) = (left.value(), right.value_mut()) + { + // NOTE: The docs for `dedup_by` specify that if `left`/`right` are equal, then + // `left` is the element that gets removed. + *right_value += left_value; + right + .metadata_mut() + .merge_finalizers(left.metadata_mut().take_finalizers()); + + true + } else { + false + } + }); metrics } #[cfg(test)] mod tests { + use std::{collections::HashSet, time::Duration}; + use chrono::{DateTime, Utc}; use proptest::prelude::*; - use vector_core::event::{Metric, MetricKind, MetricValue}; + use vector_core::{ + event::{Metric, MetricKind, MetricValue}, + metric_tags, + }; - use super::collapse_counters_by_series_and_timestamp; + use super::sort_and_collapse_counters_by_series_and_timestamp; fn arb_collapsible_metrics() -> impl Strategy> { let ts = Utc::now(); any::>().prop_map(move |values| { + let mut unique_metrics = HashSet::new(); values .into_iter() .map(|(id, value)| { let name = format!("{}-{}", value.as_name(), id); Metric::new(name, MetricKind::Incremental, value).with_timestamp(Some(ts)) }) + // Filter out duplicates other than counters. We do this to prevent false positives. False positives would occur + // because we don't collapse other metric types and we can't sort metrics by their values. + .filter(|metric| { + matches!(metric.value(), MetricValue::Counter { .. }) + || unique_metrics.insert(metric.series().clone()) + }) .collect() }) } @@ -315,7 +273,7 @@ mod tests { fn collapse_no_metrics() { let input = Vec::new(); let expected = input.clone(); - let actual = collapse_counters_by_series_and_timestamp(input); + let actual = sort_and_collapse_counters_by_series_and_timestamp(input); assert_eq!(expected, actual); } @@ -324,7 +282,7 @@ mod tests { fn collapse_single_metric() { let input = vec![create_counter("basic", 42.0)]; let expected = input.clone(); - let actual = collapse_counters_by_series_and_timestamp(input); + let actual = sort_and_collapse_counters_by_series_and_timestamp(input); assert_eq!(expected, actual); } @@ -333,7 +291,7 @@ mod tests { fn collapse_identical_metrics_gauge() { let input = vec![create_gauge("basic", 42.0), create_gauge("basic", 42.0)]; let expected = input.clone(); - let actual = collapse_counters_by_series_and_timestamp(input); + let actual = sort_and_collapse_counters_by_series_and_timestamp(input); assert_eq!(expected, actual); @@ -348,7 +306,7 @@ mod tests { create_gauge("basic", gauge_value), ]; let expected = input.clone(); - let actual = collapse_counters_by_series_and_timestamp(input); + let actual = sort_and_collapse_counters_by_series_and_timestamp(input); assert_eq!(expected, actual); } @@ -368,7 +326,91 @@ mod tests { let expected_counter_value = input.len() as f64 * counter_value; let expected = vec![create_counter("basic", expected_counter_value)]; - let actual = collapse_counters_by_series_and_timestamp(input); + let actual = sort_and_collapse_counters_by_series_and_timestamp(input); + + assert_eq!(expected, actual); + } + + #[test] + fn collapse_identical_metrics_counter_unsorted() { + let gauge_value = 1.0; + let counter_value = 42.0; + let input = vec![ + create_gauge("gauge", gauge_value), + create_counter("basic", counter_value), + create_counter("basic", counter_value), + create_counter("basic", counter_value), + create_gauge("gauge", gauge_value), + create_counter("basic", counter_value), + create_counter("basic", counter_value), + create_counter("basic", counter_value), + 
create_counter("basic", counter_value), + ]; + + let expected_counter_value = (input.len() - 2) as f64 * counter_value; + let expected = vec![ + create_counter("basic", expected_counter_value), + create_gauge("gauge", gauge_value), + create_gauge("gauge", gauge_value), + ]; + let actual = sort_and_collapse_counters_by_series_and_timestamp(input); + + assert_eq!(expected, actual); + } + + #[test] + fn collapse_identical_metrics_multiple_timestamps() { + let ts_1 = Utc::now() - Duration::from_secs(5); + let ts_2 = ts_1 - Duration::from_secs(5); + let counter_value = 42.0; + let input = vec![ + create_counter("basic", counter_value), + create_counter("basic", counter_value).with_timestamp(Some(ts_1)), + create_counter("basic", counter_value).with_timestamp(Some(ts_2)), + create_counter("basic", counter_value), + create_counter("basic", counter_value).with_timestamp(Some(ts_2)), + create_counter("basic", counter_value).with_timestamp(Some(ts_1)), + create_counter("basic", counter_value), + ]; + + let expected = vec![ + create_counter("basic", counter_value * 2.).with_timestamp(Some(ts_2)), + create_counter("basic", counter_value * 2.).with_timestamp(Some(ts_1)), + create_counter("basic", counter_value * 3.), + ]; + let actual = sort_and_collapse_counters_by_series_and_timestamp(input); + + assert_eq!(expected, actual); + } + + #[test] + fn collapse_identical_metrics_with_tags() { + let counter_value = 42.0; + let input = vec![ + create_counter("basic", counter_value).with_tags(Some(metric_tags!("a" => "a"))), + create_counter("basic", counter_value).with_tags(Some(metric_tags!( + "a" => "a", + "b" => "b", + ))), + create_counter("basic", counter_value), + create_counter("basic", counter_value).with_tags(Some(metric_tags!( + "b" => "b", + "a" => "a", + ))), + create_counter("basic", counter_value), + create_counter("basic", counter_value), + create_counter("basic", counter_value).with_tags(Some(metric_tags!("a" => "a"))), + ]; + + let expected = vec![ + create_counter("basic", counter_value * 3.), + create_counter("basic", counter_value * 2.).with_tags(Some(metric_tags!("a" => "a"))), + create_counter("basic", counter_value * 2.).with_tags(Some(metric_tags!( + "a" => "a", + "b" => "b", + ))), + ]; + let actual = sort_and_collapse_counters_by_series_and_timestamp(input); assert_eq!(expected, actual); } @@ -419,8 +461,7 @@ mod tests { expected_output.sort_by_cached_key(MetricCollapseSort::from_metric); expected_output.dedup_by(collapse_dedup_fn); - let mut actual_output = collapse_counters_by_series_and_timestamp(input); - actual_output.sort_by_cached_key(MetricCollapseSort::from_metric); + let actual_output = sort_and_collapse_counters_by_series_and_timestamp(input); prop_assert_eq!(expected_output, actual_output); } diff --git a/src/sinks/kafka/service.rs b/src/sinks/kafka/service.rs index 0f1d122b7750c..607e7cd5fd4ea 100644 --- a/src/sinks/kafka/service.rs +++ b/src/sinks/kafka/service.rs @@ -1,11 +1,18 @@ -use std::task::{Context, Poll}; +use std::{ + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + task::{Context, Poll}, + time::Duration, +}; use bytes::Bytes; use rdkafka::{ error::KafkaError, message::OwnedHeaders, producer::{FutureProducer, FutureRecord}, - util::Timeout, + types::RDKafkaErrorCode, }; use crate::{kafka::KafkaStatisticsContext, sinks::prelude::*}; @@ -59,16 +66,38 @@ impl MetaDescriptive for KafkaRequest { } } +/// BlockedRecordState manages state for a record blocked from being enqueued on the producer. 
+struct BlockedRecordState { + records_blocked: Arc, +} + +impl BlockedRecordState { + fn new(records_blocked: Arc) -> Self { + records_blocked.fetch_add(1, Ordering::Relaxed); + Self { records_blocked } + } +} + +impl Drop for BlockedRecordState { + fn drop(&mut self) { + self.records_blocked.fetch_sub(1, Ordering::Relaxed); + } +} + #[derive(Clone)] pub struct KafkaService { kafka_producer: FutureProducer, + + /// The number of records blocked from being enqueued on the producer. + records_blocked: Arc, } impl KafkaService { - pub(crate) const fn new( - kafka_producer: FutureProducer, - ) -> KafkaService { - KafkaService { kafka_producer } + pub(crate) fn new(kafka_producer: FutureProducer) -> KafkaService { + KafkaService { + kafka_producer, + records_blocked: Arc::new(AtomicUsize::new(0)), + } } } @@ -78,13 +107,21 @@ impl Service for KafkaService { type Future = BoxFuture<'static, Result>; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) + // The Kafka service is at capacity if any records are currently blocked from being enqueued + // on the producer. + if self.records_blocked.load(Ordering::Relaxed) > 0 { + Poll::Pending + } else { + Poll::Ready(Ok(())) + } } fn call(&mut self, request: KafkaRequest) -> Self::Future { let this = self.clone(); Box::pin(async move { + let raw_byte_size = + request.body.len() + request.metadata.key.as_ref().map_or(0, |x| x.len()); let event_byte_size = request .request_metadata .into_events_estimated_json_encoded_byte_size(); @@ -101,17 +138,39 @@ impl Service for KafkaService { record = record.headers(headers); } - // rdkafka will internally retry forever if the queue is full - match this.kafka_producer.send(record, Timeout::Never).await { - Ok((_partition, _offset)) => { - let raw_byte_size = - request.body.len() + request.metadata.key.map_or(0, |x| x.len()); - Ok(KafkaResponse { - event_byte_size, - raw_byte_size, - }) - } - Err((kafka_err, _original_record)) => Err(kafka_err), + // Manually poll [FutureProducer::send_result] instead of [FutureProducer::send] to track + // records that fail to be enqueued on the producer. + let mut blocked_state: Option = None; + loop { + match this.kafka_producer.send_result(record) { + // Record was successfully enqueued on the producer. + Ok(fut) => { + // Drop the blocked state (if any), as the producer is no longer blocked. + drop(blocked_state.take()); + return fut + .await + .expect("producer unexpectedly dropped") + .map(|_| KafkaResponse { + event_byte_size, + raw_byte_size, + }) + .map_err(|(err, _)| err); + } + // Producer queue is full. + Err(( + KafkaError::MessageProduction(RDKafkaErrorCode::QueueFull), + original_record, + )) => { + if blocked_state.is_none() { + blocked_state = + Some(BlockedRecordState::new(Arc::clone(&this.records_blocked))); + } + record = original_record; + tokio::time::sleep(Duration::from_millis(100)).await; + } + // A different error occurred. 
+ Err((err, _)) => return Err(err), + }; } }) } diff --git a/src/sinks/kafka/sink.rs b/src/sinks/kafka/sink.rs index 141c32f7cb3b7..db4395db15799 100644 --- a/src/sinks/kafka/sink.rs +++ b/src/sinks/kafka/sink.rs @@ -6,7 +6,6 @@ use rdkafka::{ }; use snafu::{ResultExt, Snafu}; use tokio::time::Duration; -use tower::limit::ConcurrencyLimit; use vrl::path::OwnedTargetPath; use super::config::{KafkaRole, KafkaSinkConfig}; @@ -62,11 +61,6 @@ impl KafkaSink { } async fn run_inner(self: Box, input: BoxStream<'_, Event>) -> Result<(), ()> { - // rdkafka will internally retry forever, so we need some limit to prevent this from overflowing. - // 64 should be plenty concurrency here, as a rdkafka send operation does not block until its underlying - // buffer is full. - let service = ConcurrencyLimit::new(self.service.clone(), 64); - let request_builder = KafkaRequestBuilder { key_field: self.key_field, headers_key: self.headers_key, @@ -100,8 +94,7 @@ impl KafkaSink { Ok(req) => Some(req), } }) - .into_driver(service) - .protocol("kafka") + .into_driver(self.service) .protocol("kafka") .run() .await diff --git a/src/sinks/util/buffer/metrics/mod.rs b/src/sinks/util/buffer/metrics/mod.rs index 877cdc9c4bcc1..e66b3c2364140 100644 --- a/src/sinks/util/buffer/metrics/mod.rs +++ b/src/sinks/util/buffer/metrics/mod.rs @@ -1,5 +1,3 @@ -pub mod sort; - use std::cmp::Ordering; use vector_core::event::metric::{Metric, MetricValue, Sample}; diff --git a/src/sinks/util/buffer/metrics/sort.rs b/src/sinks/util/buffer/metrics/sort.rs deleted file mode 100644 index feaa563493789..0000000000000 --- a/src/sinks/util/buffer/metrics/sort.rs +++ /dev/null @@ -1,67 +0,0 @@ -use crate::event::Metric; - -/// Sorts metrics in an order that is likely to achieve good compression. -pub fn sort_for_compression(metrics: &mut [Metric]) { - // This just sorts by series today. This tends to compress better than a random ordering by - // 2-3x (JSON encoded, deflate algorithm) - metrics.sort_unstable_by(|a, b| a.series().cmp(b.series())) -} - -#[cfg(test)] -mod test { - use crate::event::MetricValue; - use rand::prelude::SliceRandom; - use rand::thread_rng; - use vector_core::event::{Metric, MetricKind}; - use vector_core::metric_tags; - - // This just ensures the sorting does not change. `sort_for_compression` relies on - // the default `PartialOrd` on `MetricSeries`. 
- #[test] - fn test_compression_order() { - let sorted_metrics = vec![ - Metric::new( - "metric_1", - MetricKind::Absolute, - MetricValue::Gauge { value: 0.0 }, - ), - Metric::new( - "metric_2", - MetricKind::Incremental, - MetricValue::Gauge { value: 0.0 }, - ), - Metric::new( - "metric_3", - MetricKind::Absolute, - MetricValue::Gauge { value: 0.0 }, - ) - .with_tags(Some(metric_tags!("z" => "z"))), - Metric::new( - "metric_4", - MetricKind::Absolute, - MetricValue::Gauge { value: 0.0 }, - ) - .with_tags(Some(metric_tags!("a" => "a"))), - Metric::new( - "metric_4", - MetricKind::Absolute, - MetricValue::Gauge { value: 0.0 }, - ) - .with_tags(Some(metric_tags!( - "a" => "a", - "b" => "b", - ))), - Metric::new( - "metric_4", - MetricKind::Absolute, - MetricValue::Gauge { value: 0.0 }, - ) - .with_tags(Some(metric_tags!("b" => "b"))), - ]; - - let mut rand_metrics = sorted_metrics.clone(); - rand_metrics.shuffle(&mut thread_rng()); - super::sort_for_compression(&mut rand_metrics); - assert_eq!(sorted_metrics, rand_metrics); - } -} diff --git a/src/sinks/util/builder.rs b/src/sinks/util/builder.rs index c51bf405dc4d1..617697ea84ec1 100644 --- a/src/sinks/util/builder.rs +++ b/src/sinks/util/builder.rs @@ -82,13 +82,13 @@ pub trait SinkBuilderExt: Stream { /// /// If the spawned future panics, the panic will be carried through and resumed on the task /// calling the stream. - fn concurrent_map(self, limit: Option, f: F) -> ConcurrentMap + fn concurrent_map(self, limit: NonZeroUsize, f: F) -> ConcurrentMap where Self: Sized, F: Fn(Self::Item) -> Pin + Send + 'static>> + Send + 'static, T: Send + 'static, { - ConcurrentMap::new(self, limit, f) + ConcurrentMap::new(self, Some(limit), f) } /// Constructs a [`Stream`] which transforms the input into a request suitable for sending to @@ -114,7 +114,7 @@ pub trait SinkBuilderExt: Stream { { let builder = Arc::new(builder); - self.concurrent_map(Some(limit), move |input| { + self.concurrent_map(limit, move |input| { let builder = Arc::clone(&builder); Box::pin(async move { diff --git a/website/assets/js/cookie-banner.js b/website/assets/js/cookie-banner.js index ee3044b545802..10afcfd6df806 100644 --- a/website/assets/js/cookie-banner.js +++ b/website/assets/js/cookie-banner.js @@ -16,7 +16,7 @@ window.addEventListener('load', function () { divB.style = "position:fixed; bottom:0px; right:0px; width:100%;"; document.body.appendChild(divA); document.body.appendChild(divB); - + // update Cookie link this.setTimeout(function () { const banner = document.getElementById('consent-banner'); @@ -25,7 +25,7 @@ window.addEventListener('load', function () { prefsElement.className = cookieLink.className; if (banner) { - // listen for click and remove banner to avoid interfering with + // listen for click and remove banner to avoid interfering with document.addEventListener('click', function (event) { const targetElement = event.target; if (targetElement.matches('#truste-consent-required') || targetElement.matches('#truste-consent-button')) { @@ -38,4 +38,4 @@ window.addEventListener('load', function () { return (cookieLink && document.getElementById('teconsent').innerHTML.length > 0) ? 
cookieLink.replaceWith(prefsElement) : false; }, 200); } -}); \ No newline at end of file +}); diff --git a/website/content/en/releases/0.33.1.md b/website/content/en/releases/0.33.1.md new file mode 100644 index 0000000000000..464ec0095aefa --- /dev/null +++ b/website/content/en/releases/0.33.1.md @@ -0,0 +1,4 @@ +--- +title: Vector v0.33.1 release notes +weight: 21 +--- diff --git a/website/cue/reference/releases/0.33.1.cue b/website/cue/reference/releases/0.33.1.cue new file mode 100644 index 0000000000000..d9809e1536fe4 --- /dev/null +++ b/website/cue/reference/releases/0.33.1.cue @@ -0,0 +1,70 @@ +package metadata + +releases: "0.33.1": { + date: "2023-10-30" + codename: "" + + whats_next: [] + + description: """ + This patch release contains fixes for regressions in 0.33.0 and fixes an issue with the Debian release artifacts. + + **Note:** Please see the release notes for [`v0.33.0`](/releases/0.33.0/) for additional changes if upgrading from + `v0.32.X`. In particular, see the upgrade guide for breaking changes. + """ + + changelog: [ + { + type: "fix" + scopes: ["releasing", "debian"] + description: """ + Debian packages again avoid overwriting existing configuration files when upgrading. + """ + pr_numbers: [18718] + }, + { + type: "fix" + scopes: ["datadog_metrics sink"] + description: """ + The performance of the Datadog Metrics sink was greatly improved when the incoming + metric stream contains mostly counters. + """ + pr_numbers: [18759] + }, + { + type: "fix" + scopes: ["dnstap source"] + description: """ + The `dnstap` source can again parse DNSSEC RRSIG records. + """ + pr_numbers: [18878] + }, + { + type: "fix" + scopes: ["kafka sink"] + description: """ + A performance regression in the `kafka` sink was corrected. + """ + pr_numbers: [18770] + }, + ] + + commits: [ + {sha: "eae7b827fb885af5af12419b3451c841df06abdf", date: "2023-09-30 03:07:13 UTC", description: "Add known issue for 0.33.0 debian packaging regression", pr_number: 18727, scopes: ["releasing"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 3, deletions_count: 1}, + {sha: "4b72f7e13c7607705fe16227259bd7b1429fc1f7", date: "2023-10-04 06:52:37 UTC", description: "Set download page dropdown to latest version", pr_number: 18758, scopes: ["website"], type: "chore", breaking_change: false, author: "Devin Ford", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "9c31f322df7114b70231b843fcd975087ede2a5d", date: "2023-09-27 10:13:36 UTC", description: "Bump tokio-tungstenite from 0.20.0 to 0.20.1", pr_number: 18661, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 9, deletions_count: 9}, + {sha: "45a9fcf60e51ac213f3cd018183b5890feb5a317", date: "2023-10-04 05:19:08 UTC", description: "Bump webpki from 0.22.1 to 0.22.2", pr_number: 18744, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "abe84489cc5cfa19490d83576f867073a30f62da", date: "2023-09-30 05:01:39 UTC", description: "Bump warp from 0.3.5 to 0.3.6", pr_number: 18704, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 7, deletions_count: 38}, + {sha: "1b9a012cd4590b3f5f40b3190b3577ea9eb53046", date: "2023-09-30 05:16:11 UTC", description: "Re-add `conf-files` directive for `cargo-deb`", pr_number: 18726, scopes: ["debian platform"], type: "fix",
breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 1, deletions_count: 0}, + {sha: "b8b6f9ed76141de290307b3d414c785dd4230ce1", date: "2023-10-06 05:45:13 UTC", description: "improve aggregation performance", pr_number: 18759, scopes: ["datadog_metrics sink"], type: "fix", breaking_change: false, author: "Doug Smith", files_count: 5, insertions_count: 173, deletions_count: 201}, + {sha: "05527172d1ab16d4c4481d392bdc26c89528beab", date: "2023-10-21 02:31:29 UTC", description: "Update example YAML config data_dir", pr_number: 18896, scopes: ["releasing"], type: "fix", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "1d4487c3b12033dba8dc58ae4199706552e3014b", date: "2023-10-21 01:33:10 UTC", description: "support DNSSEC RRSIG record data", pr_number: 18878, scopes: ["dnstap source"], type: "fix", breaking_change: false, author: "neuronull", files_count: 2, insertions_count: 52, deletions_count: 1}, + {sha: "b246610fd53f62ec07682a22f79d664f5ad031bc", date: "2023-10-25 07:21:51 UTC", description: "Make KafkaService return `Poll::Pending` when producer queue is full", pr_number: 18770, scopes: ["kafka sink"], type: "fix", breaking_change: false, author: "Doug Smith", files_count: 2, insertions_count: 78, deletions_count: 26}, + {sha: "391067761a210341f68ad3a4db8fcd0cfa42e578", date: "2023-09-28 06:28:14 UTC", description: "Remove or replace mentions of vector in functions doc", pr_number: 18679, scopes: ["external docs"], type: "chore", breaking_change: false, author: "May Lee", files_count: 9, insertions_count: 21, deletions_count: 21}, + {sha: "9ca6c7b186e73605359844db0bb20946bfdc6390", date: "2023-10-24 05:48:36 UTC", description: "add new dedicated page for TLS configuration", pr_number: 18844, scopes: ["tls"], type: "docs", breaking_change: false, author: "Hugo Hromic", files_count: 7, insertions_count: 164, deletions_count: 3}, + {sha: "0e0f6411608b8f37c311c3923c352e58f88d869b", date: "2023-09-28 11:44:20 UTC", description: "Add SHA256 checksums file to GH releases", pr_number: 18701, scopes: [], type: "chore", breaking_change: false, author: "Spencer Gilbert", files_count: 1, insertions_count: 6, deletions_count: 0}, + {sha: "5037fe756a2216cfa340b842387f745af2c29363", date: "2023-10-04 05:46:40 UTC", description: "Add a test to assert conf files aren't overwritten", pr_number: 18728, scopes: ["ci"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 2, insertions_count: 33, deletions_count: 15}, + {sha: "b4262335582c4466bbef5a9371433bdfbefaf587", date: "2023-10-12 06:27:08 UTC", description: "Bump MacOS unit test runners to 13", pr_number: 18823, scopes: ["ci"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "792a1b541aaa1b34bef605bb9be4f0787b35afab", date: "2023-10-03 06:52:13 UTC", description: "Fix cookie banner style issues", pr_number: 18745, scopes: ["ci"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 3, insertions_count: 11, deletions_count: 6}, + ] +} diff --git a/website/cue/reference/versions.cue b/website/cue/reference/versions.cue index 27f172e53bf9a..1de27dad1d867 100644 --- a/website/cue/reference/versions.cue +++ b/website/cue/reference/versions.cue @@ -2,6 +2,7 @@ package metadata // This has to be maintained manually because there's currently no way to sort versions programmatically versions: [string, ...string] & [ + "0.33.1", "0.33.0", 
"0.32.2", "0.32.1",