diff --git a/.circleci/README.md b/.circleci/README.md index d8bcf8453694e..78dfeebaa200f 100644 --- a/.circleci/README.md +++ b/.circleci/README.md @@ -4,28 +4,19 @@ CircleCI is used to run unit tests on Unix env. ## Upgrading Golang version -/!\ Disclaimer: the datadog-agent-runner-circle image should never be used for anything else than CircleCI tests /!\ +/!\ Disclaimer: the datadog/agent-buildimages-circleci-runner image should never be used for anything else than CircleCI tests /!\ -Change the Golang version in this file `images/runner/Dockerfile`. +This image is now built alongside other images in [agent-buildimages](https://github.com/DataDog/datadog-agent-buildimages) repository. Change of Golang version must occur in this repository. -Then locally build and push the new image using -`datadog/datadog-agent-runner-circle:go` for the image's -name. You will need write access to that repo on DockerHub (the Agent's team -should have it). +Once you have created a new image by building a new version of agent-buildimages, you can test your modification with the associated invoke task: -Example: ```bash -cd .circleci/images/runner -docker build --platform=linux/amd64 -t datadog/datadog-agent-runner-circle:go1205 . -docker login -docker push datadog/datadog-agent-runner-circle:go1205 +invoke -e pipeline.update-buildimages --image-tag v12345678-c0mm1t5 +``` +This will update the configuration of circleci and gitlab to use the __test version__ of these images. +Once your test is successful, you can either move the `_test_version` from files or invoke +```bash +invoke -e pipeline.update-buildimages --image-tag v12345678-c0mm1t5 --no-test-version ``` - -Once your image is pushed, update this file: -https://github.com/DataDog/datadog-agent/blob/main/.circleci/config.yml. -Change `image: datadog/datadog-agent-runner-circle:goXXXX` for the tag you -just pushed. - -Push your change as a new PR to see if CircleCI is still green. 
If everything is green, get a review and merge the PR. diff --git a/.circleci/config.yml b/.circleci/config.yml index b5e567b39d2e5..6efa73cc94755 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -15,7 +15,7 @@ experimental: templates: job_template: &job_template docker: - - image: datadog/datadog-agent-runner-circle:go1206 + - image: datadog/agent-buildimages-circleci-runner:v18021293-6b0809a environment: USE_SYSTEM_LIBS: "1" working_directory: /go/src/github.com/DataDog/datadog-agent @@ -29,11 +29,11 @@ templates: # If incremental dep fails, increase the cache gen number # in restore_deps AND save_deps # See https://github.com/DataDog/datadog-agent/pull/2384 - - gen19-godeps-{{ checksum "requirements.txt" }}-{{ checksum ".circleci/requirements.txt" }}-{{ checksum ".circleci/images/runner/Dockerfile" }}-{{ .Branch }}-{{ .Revision }} - - gen19-godeps-{{ checksum "requirements.txt" }}-{{ checksum ".circleci/requirements.txt" }}-{{ checksum ".circleci/images/runner/Dockerfile" }}-{{ .Branch }}- - - gen19-godeps-{{ checksum "requirements.txt" }}-{{ checksum ".circleci/requirements.txt" }}-{{ checksum ".circleci/images/runner/Dockerfile" }}-main- + - gen19-godeps-{{ checksum ".circleci/config.yml" }}-{{ .Branch }}-{{ .Revision }} + - gen19-godeps-{{ checksum ".circleci/config.yml" }}-{{ .Branch }}- + - gen19-godeps-{{ checksum ".circleci/config.yml" }}-main- - save_cache: &save_deps - key: gen19-godeps-{{ checksum "requirements.txt" }}-{{ checksum ".circleci/requirements.txt" }}-{{ checksum ".circleci/images/runner/Dockerfile" }}-{{ .Branch }}-{{ .Revision }} + key: gen19-godeps-{{ checksum ".circleci/config.yml" }}-{{ .Branch }}-{{ .Revision }} - restore_cache: &restore_source keys: # Cache retrieval is faster than full git checkout @@ -58,12 +58,6 @@ jobs: steps: - restore_cache: *restore_source - restore_cache: *restore_deps - - run: - name: setup python deps - command: | - python3 -m pip install wheel - python3 -m pip install -r 
requirements.txt - python3 -m pip install -r .circleci/requirements.txt - run: name: check go version command: | @@ -95,7 +89,6 @@ jobs: - /go/pkg/mod - /go/bin - /go/src/github.com/DataDog/datadog-agent/dev - - /usr/local/lib/python3.8/dist-packages - /usr/local/bin unit_tests: diff --git a/.circleci/images/builder/Dockerfile b/.circleci/images/builder/Dockerfile deleted file mode 100644 index bee5b34cb70f8..0000000000000 --- a/.circleci/images/builder/Dockerfile +++ /dev/null @@ -1,86 +0,0 @@ -FROM golang:1.20.6 - -RUN sed -i 's/^#\s*\(deb.*universe\)$/\1/g' /etc/apt/sources.list \ - && sed -i 's/^#\s*\(deb.*multiverse\)$/\1/g' /etc/apt/sources.list \ - && sed -i 's/main/main contrib non-free/' /etc/apt/sources.list - -RUN apt-get update && apt-get install -y python2.7-dev autoconf autogen intltool libssl1.0-dev -RUN apt-get install -y libpq-dev libsystemd-dev - -# Ruby,,, -RUN mkdir -p /usr/local/etc \ - && { \ - echo 'install: --no-document'; \ - echo 'update: --no-document'; \ - } >> /usr/local/etc/gemrc - -ENV RUBY_MAJOR 2.4 -ENV RUBY_VERSION 2.4.2 -ENV RUBY_DOWNLOAD_SHA256 748a8980d30141bd1a4124e11745bb105b436fb1890826e0d2b9ea31af27f735 -ENV RUBYGEMS_VERSION 2.6.12 - -# some of ruby's build scripts are written in ruby -# we purge system ruby later to make sure our final image uses what we just built -RUN set -ex \ - \ - && buildDeps=' \ - bison \ - dpkg-dev \ - libgdbm-dev \ - ruby \ - ' \ - && apt-get update \ - && apt-get install -y --no-install-recommends $buildDeps \ - && rm -rf /var/lib/apt/lists/* \ - \ - && wget -O ruby.tar.xz "https://cache.ruby-lang.org/pub/ruby/${RUBY_MAJOR%-rc}/ruby-$RUBY_VERSION.tar.xz" \ - && echo "$RUBY_DOWNLOAD_SHA256 *ruby.tar.xz" | sha256sum -c - \ - \ - && mkdir -p /usr/src/ruby \ - && tar -xJf ruby.tar.xz -C /usr/src/ruby --strip-components=1 \ - && rm ruby.tar.xz \ - \ - && cd /usr/src/ruby \ - \ -# hack in "ENABLE_PATH_CHECK" disabling to suppress: -# warning: Insecure world writable dir - && { \ - echo '#define 
ENABLE_PATH_CHECK 0'; \ - echo; \ - cat file.c; \ - } > file.c.new \ - && mv file.c.new file.c \ - \ - && autoconf \ - && gnuArch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)" \ - && ./configure \ - --build="$gnuArch" \ - --disable-install-doc \ - --enable-shared \ - --with-openssl=/usr/lib/ssl \ - && make -j "$(nproc)" \ - && make install \ - \ - && apt-get purge -y --auto-remove $buildDeps \ - && cd / \ - && gem update --system "$RUBYGEMS_VERSION" \ - && rm -r /usr/src/ruby - -ENV BUNDLER_VERSION 1.15.3 - -RUN gem install bundler --version "$BUNDLER_VERSION" - -RUN apt-get update && apt-get install -y python-pip \ - && pip install virtualenv==16.0.0 - -# install things globally, for great justice -# and don't create ".bundle" in all our apps -ENV GEM_HOME /usr/local/bundle -ENV BUNDLE_PATH="$GEM_HOME" \ - BUNDLE_BIN="$GEM_HOME/bin" \ - BUNDLE_SILENCE_ROOT_WARNING=1 \ - BUNDLE_APP_CONFIG="$GEM_HOME" -ENV PATH $BUNDLE_BIN:$PATH -RUN mkdir -p "$GEM_HOME" "$BUNDLE_BIN" \ - && chmod 777 "$GEM_HOME" "$BUNDLE_BIN" - diff --git a/.circleci/images/runner/Dockerfile b/.circleci/images/runner/Dockerfile deleted file mode 100644 index 4193153cf3331..0000000000000 --- a/.circleci/images/runner/Dockerfile +++ /dev/null @@ -1,71 +0,0 @@ -# We cannot use Ubuntu 22.04 because the E2E tests -# are currently using a Docker Compose v1 imaged based on Debian. 
-# The glibc version is too old to allow running CGO binaries built on Ubuntu 22.04 -# We'll be able to migrate when we get rid of Docker Compose or use Docker Compose v2 -FROM ubuntu:20.04 - -ENV DEBIAN_FRONTEND noninteractive - -# Pre-requisites -# Python 3 dev is required for rtloader -RUN set -ex \ - && apt-get update && apt-get upgrade -y \ - && apt-get install -y --no-install-recommends \ - curl \ - default-jre \ - doxygen \ - file \ - g++ \ - gcc \ - git \ - gnupg ca-certificates \ - graphviz \ - libpq-dev \ - libsnmp-base \ - libsnmp-dev \ - libssl-dev \ - libsystemd-dev \ - make \ - pkg-config \ - python3 \ - python3-dev \ - python3-distutils \ - python3-pip \ - python3-setuptools \ - python3-yaml \ - snmp-mibs-downloader \ - ssh \ - xz-utils - -# Golang -ENV GIMME_GO_VERSION 1.20.6 -ENV GOROOT /root/.gimme/versions/go$GIMME_GO_VERSION.linux.amd64 -ENV GOPATH /go -ENV PATH $GOROOT/bin:$GOPATH/bin:$PATH -RUN curl -sL -o /bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme -RUN chmod +x /bin/gimme -RUN gimme $GIMME_GO_VERSION -RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH" - -# CMake -ENV CMAKE_NAME cmake-3.13.3-Linux-x86_64 -ENV CMAKE_ARCHIVE $CMAKE_NAME.tar.gz -ENV CMAKE_DEST_DIR /cmake -ENV PATH $CMAKE_DEST_DIR/bin/:$PATH -RUN set -ex \ - && curl -sL -O https://github.com/Kitware/CMake/releases/download/v3.13.3/$CMAKE_ARCHIVE \ - && tar xzf $CMAKE_ARCHIVE \ - && mv $CMAKE_NAME $CMAKE_DEST_DIR \ - && rm $CMAKE_ARCHIVE - -# Other dependencies -RUN set -ex \ - # clang-format - && echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal main" >> /etc/apt/sources.list \ - && curl -sL https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - \ - && apt-get update \ - && apt-get -t llvm-toolchain-focal install -y --no-install-recommends \ - clang-format - -# Setup entrypoint -WORKDIR $GOPATH diff --git a/.circleci/requirements.txt b/.circleci/requirements.txt deleted file mode 100644 index 
79b813bcae831..0000000000000 --- a/.circleci/requirements.txt +++ /dev/null @@ -1,10 +0,0 @@ -black==22.10.0 -# The Codecov Python uploader was deprecated on Feb 1, 2022 and disappeared from PyPi on Apr 12, 2023. -# codecov==2.1.12 -flake8-bugbear==22.10.27 -flake8-comprehensions==3.10.1 -flake8-unused-arguments==0.0.12 -flake8-use-fstring==1.4 -flake8==5.0.4 -isort==5.10.1 -vulture==2.6 diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 689e24bb71d33..58072877db477 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -335,6 +335,9 @@ /pkg/network/driver/ @DataDog/windows-kernel-integrations /pkg/network/ebpf/c/prebuilt/usm* @DataDog/universal-service-monitoring /pkg/network/ebpf/c/runtime/usm* @DataDog/universal-service-monitoring +/pkg/network/ebpf/c/prebuilt/shared-libraries* @DataDog/universal-service-monitoring +/pkg/network/ebpf/c/runtime/shared-libraries* @DataDog/universal-service-monitoring +/pkg/network/ebpf/c/shared-libraries/ @DataDog/universal-service-monitoring /pkg/network/ebpf/c/prebuilt/http* @DataDog/universal-service-monitoring /pkg/network/ebpf/c/runtime/http* @DataDog/universal-service-monitoring /pkg/network/ebpf/c/protocols/ @DataDog/universal-service-monitoring diff --git a/.gitignore b/.gitignore index 6f4899e3fa63b..200f0f0ebc193 100644 --- a/.gitignore +++ b/.gitignore @@ -116,6 +116,7 @@ pkg/ebpf/bytecode/build/**/*.d pkg/ebpf/bytecode/runtime/conntrack.go pkg/ebpf/bytecode/runtime/http.go pkg/ebpf/bytecode/runtime/usm.go +pkg/ebpf/bytecode/runtime/shared-libraries.go pkg/ebpf/bytecode/runtime/offsetguess-test.go pkg/ebpf/bytecode/runtime/oom-kill.go pkg/ebpf/bytecode/runtime/runtime-security.go diff --git a/.gitlab/binary_build/system_probe.yml b/.gitlab/binary_build/system_probe.yml index 8dab5ce70b012..77069b011e143 100644 --- a/.gitlab/binary_build/system_probe.yml +++ b/.gitlab/binary_build/system_probe.yml @@ -19,6 +19,8 @@ - $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/offset-guess-debug.o 
$S3_ARTIFACTS_URI/offset-guess-debug.o.$ARCH - $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/usm.o $S3_ARTIFACTS_URI/usm.o.$ARCH - $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/usm-debug.o $S3_ARTIFACTS_URI/usm-debug.o.$ARCH + - $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/shared-libraries.o $S3_ARTIFACTS_URI/shared-libraries.o.$ARCH + - $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/shared-libraries-debug.o $S3_ARTIFACTS_URI/shared-libraries-debug.o.$ARCH - $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/dns.o $S3_ARTIFACTS_URI/dns.o.$ARCH - $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/dns-debug.o $S3_ARTIFACTS_URI/dns-debug.o.$ARCH - $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/conntrack.o $S3_ARTIFACTS_URI/conntrack.o.$ARCH @@ -28,6 +30,8 @@ - $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/runtime-security-offset-guesser.o $S3_ARTIFACTS_URI/runtime-security-offset-guesser.o.$ARCH - $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/co-re/usm.o $S3_ARTIFACTS_URI/usm-co-re.o.$ARCH - $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/co-re/usm-debug.o $S3_ARTIFACTS_URI/usm-debug-co-re.o.$ARCH + - $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/co-re/shared-libraries.o $S3_ARTIFACTS_URI/shared-libraries-co-re.o.$ARCH + - $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/co-re/shared-libraries-debug.o $S3_ARTIFACTS_URI/shared-libraries-debug-co-re.o.$ARCH - $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/co-re/oom-kill.o $S3_ARTIFACTS_URI/oom-kill-co-re.o.$ARCH - $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/co-re/tcp-queue-length.o $S3_ARTIFACTS_URI/tcp-queue-length-co-re.o.$ARCH - $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/co-re/ebpf.o $S3_ARTIFACTS_URI/ebpf-co-re.o.$ARCH @@ -38,6 +42,7 @@ - $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/co-re/tracer-debug.o $S3_ARTIFACTS_URI/tracer-debug-co-re.o.$ARCH - $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/runtime/tracer.c $S3_ARTIFACTS_URI/tracer.c.$ARCH 
- $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/runtime/usm.c $S3_ARTIFACTS_URI/usm.c.$ARCH + - $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/runtime/shared-libraries.c $S3_ARTIFACTS_URI/shared-libraries.c.$ARCH - $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/runtime/runtime-security.c $S3_ARTIFACTS_URI/runtime-security.c.$ARCH - $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/runtime/conntrack.c $S3_ARTIFACTS_URI/conntrack.c.$ARCH - $S3_CP_CMD $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/runtime/oom-kill.c $S3_ARTIFACTS_URI/oom-kill.c.$ARCH diff --git a/.gitlab/internal_image_deploy.yml b/.gitlab/internal_image_deploy.yml index 9868d54428a80..719891e20184e 100644 --- a/.gitlab/internal_image_deploy.yml +++ b/.gitlab/internal_image_deploy.yml @@ -27,7 +27,7 @@ docker_trigger_internal: - export GITLAB_TOKEN=$(aws ssm get-parameter --region us-east-1 --name ci.datadog-agent.gitlab_pipelines_scheduler_token --with-decryption --query "Parameter.Value" --out text) - if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi - if [ "$BUCKET_BRANCH" = "nightly" ]; then RELEASE_TAG="${RELEASE_TAG}-${CI_COMMIT_SHORT_SHA}"; fi - - inv pipeline.trigger-child-pipeline --project-name "DataDog/images" --git-ref "master" --variables "IMAGE_VERSION,IMAGE_NAME,RELEASE_TAG,BUILD_TAG,TMPL_SRC_IMAGE,TMPL_SRC_REPO,RELEASE_STAGING,RELEASE_PROD" + - inv pipeline.trigger-child-pipeline --project-name "DataDog/images" --git-ref "master" --variables "IMAGE_VERSION,IMAGE_NAME,RELEASE_TAG,BUILD_TAG,TMPL_SRC_IMAGE,TMPL_SRC_REPO,RELEASE_STAGING,RELEASE_PROD,DYNAMIC_BUILD_RENDER_RULES" docker_trigger_cluster_agent_internal: @@ -56,4 +56,4 @@ docker_trigger_cluster_agent_internal: - export GITLAB_TOKEN=$(aws ssm get-parameter --region us-east-1 --name ci.datadog-agent.gitlab_pipelines_scheduler_token --with-decryption --query "Parameter.Value" --out text) - if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; 
then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi - if [ "$BUCKET_BRANCH" = "nightly" ]; then RELEASE_TAG="${RELEASE_TAG}-${CI_COMMIT_SHORT_SHA}"; fi - - inv pipeline.trigger-child-pipeline --project-name "DataDog/images" --git-ref "master" --variables "IMAGE_VERSION,IMAGE_NAME,RELEASE_TAG,BUILD_TAG,TMPL_SRC_IMAGE,TMPL_SRC_REPO,RELEASE_STAGING,RELEASE_PROD" + - inv pipeline.trigger-child-pipeline --project-name "DataDog/images" --git-ref "master" --variables "IMAGE_VERSION,IMAGE_NAME,RELEASE_TAG,BUILD_TAG,TMPL_SRC_IMAGE,TMPL_SRC_REPO,RELEASE_STAGING,RELEASE_PROD,DYNAMIC_BUILD_RENDER_RULES" diff --git a/.gitlab/package_build/deb.yml b/.gitlab/package_build/deb.yml index cb322195e09cf..61f7396dc1f05 100644 --- a/.gitlab/package_build/deb.yml +++ b/.gitlab/package_build/deb.yml @@ -26,6 +26,8 @@ - $S3_CP_CMD $S3_ARTIFACTS_URI/offset-guess-debug.o.${PACKAGE_ARCH} /tmp/system-probe/offset-guess-debug.o - $S3_CP_CMD $S3_ARTIFACTS_URI/usm.o.${PACKAGE_ARCH} /tmp/system-probe/usm.o - $S3_CP_CMD $S3_ARTIFACTS_URI/usm-debug.o.${PACKAGE_ARCH} /tmp/system-probe/usm-debug.o + - $S3_CP_CMD $S3_ARTIFACTS_URI/shared-libraries.o.${PACKAGE_ARCH} /tmp/system-probe/shared-libraries.o + - $S3_CP_CMD $S3_ARTIFACTS_URI/shared-libraries-debug.o.${PACKAGE_ARCH} /tmp/system-probe/shared-libraries-debug.o - $S3_CP_CMD $S3_ARTIFACTS_URI/dns.o.${PACKAGE_ARCH} /tmp/system-probe/dns.o - $S3_CP_CMD $S3_ARTIFACTS_URI/dns-debug.o.${PACKAGE_ARCH} /tmp/system-probe/dns-debug.o - $S3_CP_CMD $S3_ARTIFACTS_URI/conntrack.o.${PACKAGE_ARCH} /tmp/system-probe/conntrack.o @@ -43,8 +45,11 @@ - $S3_CP_CMD $S3_ARTIFACTS_URI/tracer-fentry-debug-co-re.o.${PACKAGE_ARCH} /tmp/system-probe/tracer-fentry-debug-co-re.o - $S3_CP_CMD $S3_ARTIFACTS_URI/usm-co-re.o.${PACKAGE_ARCH} /tmp/system-probe/usm-co-re.o - $S3_CP_CMD $S3_ARTIFACTS_URI/usm-debug-co-re.o.${PACKAGE_ARCH} /tmp/system-probe/usm-debug-co-re.o + - $S3_CP_CMD $S3_ARTIFACTS_URI/shared-libraries-co-re.o.${PACKAGE_ARCH} 
/tmp/system-probe/shared-libraries-co-re.o + - $S3_CP_CMD $S3_ARTIFACTS_URI/shared-libraries-debug-co-re.o.${PACKAGE_ARCH} /tmp/system-probe/shared-libraries-debug-co-re.o - $S3_CP_CMD $S3_ARTIFACTS_URI/tracer.c.${PACKAGE_ARCH} /tmp/system-probe/tracer.c - $S3_CP_CMD $S3_ARTIFACTS_URI/usm.c.${PACKAGE_ARCH} /tmp/system-probe/usm.c + - $S3_CP_CMD $S3_ARTIFACTS_URI/shared-libraries.c.${PACKAGE_ARCH} /tmp/system-probe/shared-libraries.c - $S3_CP_CMD $S3_ARTIFACTS_URI/runtime-security.c.${PACKAGE_ARCH} /tmp/system-probe/runtime-security.c - $S3_CP_CMD $S3_ARTIFACTS_URI/conntrack.c.${PACKAGE_ARCH} /tmp/system-probe/conntrack.c - $S3_CP_CMD $S3_ARTIFACTS_URI/oom-kill.c.${PACKAGE_ARCH} /tmp/system-probe/oom-kill.c diff --git a/.gitlab/package_build/rpm.yml b/.gitlab/package_build/rpm.yml index 0f74e14099859..99c06edcc5f19 100644 --- a/.gitlab/package_build/rpm.yml +++ b/.gitlab/package_build/rpm.yml @@ -23,6 +23,8 @@ - $S3_CP_CMD $S3_ARTIFACTS_URI/offset-guess-debug.o.${PACKAGE_ARCH} /tmp/system-probe/offset-guess-debug.o - $S3_CP_CMD $S3_ARTIFACTS_URI/usm.o.${PACKAGE_ARCH} /tmp/system-probe/usm.o - $S3_CP_CMD $S3_ARTIFACTS_URI/usm-debug.o.${PACKAGE_ARCH} /tmp/system-probe/usm-debug.o + - $S3_CP_CMD $S3_ARTIFACTS_URI/shared-libraries.o.${PACKAGE_ARCH} /tmp/system-probe/shared-libraries.o + - $S3_CP_CMD $S3_ARTIFACTS_URI/shared-libraries-debug.o.${PACKAGE_ARCH} /tmp/system-probe/shared-libraries-debug.o - $S3_CP_CMD $S3_ARTIFACTS_URI/dns.o.${PACKAGE_ARCH} /tmp/system-probe/dns.o - $S3_CP_CMD $S3_ARTIFACTS_URI/dns-debug.o.${PACKAGE_ARCH} /tmp/system-probe/dns-debug.o - $S3_CP_CMD $S3_ARTIFACTS_URI/conntrack.o.${PACKAGE_ARCH} /tmp/system-probe/conntrack.o @@ -40,8 +42,11 @@ - $S3_CP_CMD $S3_ARTIFACTS_URI/tracer-fentry-debug-co-re.o.${PACKAGE_ARCH} /tmp/system-probe/tracer-fentry-debug-co-re.o - $S3_CP_CMD $S3_ARTIFACTS_URI/usm-co-re.o.${PACKAGE_ARCH} /tmp/system-probe/usm-co-re.o - $S3_CP_CMD $S3_ARTIFACTS_URI/usm-debug-co-re.o.${PACKAGE_ARCH} 
/tmp/system-probe/usm-debug-co-re.o + - $S3_CP_CMD $S3_ARTIFACTS_URI/shared-libraries-co-re.o.${PACKAGE_ARCH} /tmp/system-probe/shared-libraries-co-re.o + - $S3_CP_CMD $S3_ARTIFACTS_URI/shared-libraries-debug-co-re.o.${PACKAGE_ARCH} /tmp/system-probe/shared-libraries-debug-co-re.o - $S3_CP_CMD $S3_ARTIFACTS_URI/tracer.c.${PACKAGE_ARCH} /tmp/system-probe/tracer.c - $S3_CP_CMD $S3_ARTIFACTS_URI/usm.c.${PACKAGE_ARCH} /tmp/system-probe/usm.c + - $S3_CP_CMD $S3_ARTIFACTS_URI/shared-libraries.c.${PACKAGE_ARCH} /tmp/system-probe/shared-libraries.c - $S3_CP_CMD $S3_ARTIFACTS_URI/runtime-security.c.${PACKAGE_ARCH} /tmp/system-probe/runtime-security.c - $S3_CP_CMD $S3_ARTIFACTS_URI/conntrack.c.${PACKAGE_ARCH} /tmp/system-probe/conntrack.c - $S3_CP_CMD $S3_ARTIFACTS_URI/oom-kill.c.${PACKAGE_ARCH} /tmp/system-probe/oom-kill.c diff --git a/.gitlab/package_build/suse_rpm.yml b/.gitlab/package_build/suse_rpm.yml index 60bb5ed7d3ff5..c939dd287b720 100644 --- a/.gitlab/package_build/suse_rpm.yml +++ b/.gitlab/package_build/suse_rpm.yml @@ -22,6 +22,8 @@ - $S3_CP_CMD $S3_ARTIFACTS_URI/offset-guess-debug.o.${PACKAGE_ARCH} /tmp/system-probe/offset-guess-debug.o - $S3_CP_CMD $S3_ARTIFACTS_URI/usm.o.${PACKAGE_ARCH} /tmp/system-probe/usm.o - $S3_CP_CMD $S3_ARTIFACTS_URI/usm-debug.o.${PACKAGE_ARCH} /tmp/system-probe/usm-debug.o + - $S3_CP_CMD $S3_ARTIFACTS_URI/shared-libraries.o.${PACKAGE_ARCH} /tmp/system-probe/shared-libraries.o + - $S3_CP_CMD $S3_ARTIFACTS_URI/shared-libraries-debug.o.${PACKAGE_ARCH} /tmp/system-probe/shared-libraries-debug.o - $S3_CP_CMD $S3_ARTIFACTS_URI/dns.o.${PACKAGE_ARCH} /tmp/system-probe/dns.o - $S3_CP_CMD $S3_ARTIFACTS_URI/dns-debug.o.${PACKAGE_ARCH} /tmp/system-probe/dns-debug.o - $S3_CP_CMD $S3_ARTIFACTS_URI/conntrack.o.${PACKAGE_ARCH} /tmp/system-probe/conntrack.o @@ -39,8 +41,11 @@ - $S3_CP_CMD $S3_ARTIFACTS_URI/tracer-fentry-debug-co-re.o.${PACKAGE_ARCH} /tmp/system-probe/tracer-fentry-debug-co-re.o - $S3_CP_CMD 
$S3_ARTIFACTS_URI/usm-co-re.o.${PACKAGE_ARCH} /tmp/system-probe/usm-co-re.o - $S3_CP_CMD $S3_ARTIFACTS_URI/usm-debug-co-re.o.${PACKAGE_ARCH} /tmp/system-probe/usm-debug-co-re.o + - $S3_CP_CMD $S3_ARTIFACTS_URI/shared-libraries-co-re.o.${PACKAGE_ARCH} /tmp/system-probe/shared-libraries-co-re.o + - $S3_CP_CMD $S3_ARTIFACTS_URI/shared-libraries-debug-co-re.o.${PACKAGE_ARCH} /tmp/system-probe/shared-libraries-debug-co-re.o - $S3_CP_CMD $S3_ARTIFACTS_URI/tracer.c.${PACKAGE_ARCH} /tmp/system-probe/tracer.c - $S3_CP_CMD $S3_ARTIFACTS_URI/usm.c.${PACKAGE_ARCH} /tmp/system-probe/usm.c + - $S3_CP_CMD $S3_ARTIFACTS_URI/shared-libraries.c.${PACKAGE_ARCH} /tmp/system-probe/shared-libraries.c - $S3_CP_CMD $S3_ARTIFACTS_URI/runtime-security.c.${PACKAGE_ARCH} /tmp/system-probe/runtime-security.c - $S3_CP_CMD $S3_ARTIFACTS_URI/conntrack.c.${PACKAGE_ARCH} /tmp/system-probe/conntrack.c - $S3_CP_CMD $S3_ARTIFACTS_URI/oom-kill.c.${PACKAGE_ARCH} /tmp/system-probe/oom-kill.c diff --git a/.gitlab/package_deps_build.yml b/.gitlab/package_deps_build.yml index 51803910af10e..316ea293b08cd 100644 --- a/.gitlab/package_deps_build.yml +++ b/.gitlab/package_deps_build.yml @@ -20,7 +20,9 @@ - $S3_CP_CMD $S3_ARTIFACTS_URI/tracer-fentry-debug-co-re.o.$ARCH tracer-fentry-debug.o - $S3_CP_CMD $S3_ARTIFACTS_URI/usm-co-re.o.$ARCH usm.o - $S3_CP_CMD $S3_ARTIFACTS_URI/usm-debug-co-re.o.$ARCH usm-debug.o - - inv -e system-probe.generate-minimized-btfs --source-dir "$CI_PROJECT_DIR/btfs-$ARCH" --output-dir "$CI_PROJECT_DIR/minimized-btfs" --input-bpf-programs "$CI_PROJECT_DIR/oom-kill.o $CI_PROJECT_DIR/tcp-queue-length.o $CI_PROJECT_DIR/ebpf.o $CI_PROJECT_DIR/ebpf-debug.o $CI_PROJECT_DIR/usm.o $CI_PROJECT_DIR/usm-debug.o $CI_PROJECT_DIR/tracer.o $CI_PROJECT_DIR/tracer-fentry.o $CI_PROJECT_DIR/tracer-debug.o $CI_PROJECT_DIR/tracer-fentry-debug.o" + - $S3_CP_CMD $S3_ARTIFACTS_URI/shared-libraries-co-re.o.$ARCH shared-libraries.o + - $S3_CP_CMD $S3_ARTIFACTS_URI/shared-libraries-debug-co-re.o.$ARCH 
shared-libraries-debug.o + - inv -e system-probe.generate-minimized-btfs --source-dir "$CI_PROJECT_DIR/btfs-$ARCH" --output-dir "$CI_PROJECT_DIR/minimized-btfs" --input-bpf-programs "$CI_PROJECT_DIR/oom-kill.o $CI_PROJECT_DIR/tcp-queue-length.o $CI_PROJECT_DIR/ebpf.o $CI_PROJECT_DIR/ebpf-debug.o $CI_PROJECT_DIR/usm.o $CI_PROJECT_DIR/usm-debug.o $CI_PROJECT_DIR/shared-libraries.o $CI_PROJECT_DIR/shared-libraries-debug.o $CI_PROJECT_DIR/tracer.o $CI_PROJECT_DIR/tracer-fentry.o $CI_PROJECT_DIR/tracer-debug.o $CI_PROJECT_DIR/tracer-fentry-debug.o" - cd minimized-btfs - tar -cJf minimized-btfs.tar.xz * - $S3_CP_CMD minimized-btfs.tar.xz $S3_ARTIFACTS_URI/minimized-btfs-$ARCH.tar.xz diff --git a/.gitlab/source_test/ebpf.yml b/.gitlab/source_test/ebpf.yml index 3425d14179715..664e2d2281a46 100644 --- a/.gitlab/source_test/ebpf.yml +++ b/.gitlab/source_test/ebpf.yml @@ -14,12 +14,16 @@ - cp $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/offset-guess-debug.o $CI_PROJECT_DIR/.tmp/binary-ebpf/offset-guess-debug.o - cp $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/usm.o $CI_PROJECT_DIR/.tmp/binary-ebpf/usm.o - cp $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/usm-debug.o $CI_PROJECT_DIR/.tmp/binary-ebpf/usm-debug.o + - cp $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/shared-libraries.o $CI_PROJECT_DIR/.tmp/binary-ebpf/shared-libraries.o + - cp $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/shared-libraries-debug.o $CI_PROJECT_DIR/.tmp/binary-ebpf/shared-libraries-debug.o - cp $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/dns.o $CI_PROJECT_DIR/.tmp/binary-ebpf/dns.o - cp $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/dns-debug.o $CI_PROJECT_DIR/.tmp/binary-ebpf/dns-debug.o - cp $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/conntrack.o $CI_PROJECT_DIR/.tmp/binary-ebpf/conntrack.o - cp $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/conntrack-debug.o $CI_PROJECT_DIR/.tmp/binary-ebpf/conntrack-debug.o - cp $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/co-re/usm-debug.o $CI_PROJECT_DIR/.tmp/binary-ebpf/co-re/usm-debug.o - cp 
$CI_PROJECT_DIR/pkg/ebpf/bytecode/build/co-re/usm.o $CI_PROJECT_DIR/.tmp/binary-ebpf/co-re/usm.o + - cp $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/co-re/shared-libraries-debug.o $CI_PROJECT_DIR/.tmp/binary-ebpf/co-re/shared-libraries-debug.o + - cp $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/co-re/shared-libraries.o $CI_PROJECT_DIR/.tmp/binary-ebpf/co-re/shared-libraries.o - cp $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/co-re/oom-kill.o $CI_PROJECT_DIR/.tmp/binary-ebpf/co-re/oom-kill.o - cp $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/co-re/tcp-queue-length.o $CI_PROJECT_DIR/.tmp/binary-ebpf/co-re/tcp-queue-length.o - cp $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/co-re/tracer-fentry.o $CI_PROJECT_DIR/.tmp/binary-ebpf/co-re/tracer-fentry.o @@ -28,6 +32,7 @@ - cp $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/co-re/tracer-debug.o $CI_PROJECT_DIR/.tmp/binary-ebpf/co-re/tracer-debug.o - cp $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/runtime/tracer.c $CI_PROJECT_DIR/.tmp/binary-ebpf/tracer.c - cp $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/runtime/usm.c $CI_PROJECT_DIR/.tmp/binary-ebpf/usm.c + - cp $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/runtime/shared-libraries.c $CI_PROJECT_DIR/.tmp/binary-ebpf/shared-libraries.c - cp $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/runtime/runtime-security.c $CI_PROJECT_DIR/.tmp/binary-ebpf/runtime-security.c - cp $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/runtime/conntrack.c $CI_PROJECT_DIR/.tmp/binary-ebpf/conntrack.c - cp $CI_PROJECT_DIR/pkg/ebpf/bytecode/build/runtime/oom-kill.c $CI_PROJECT_DIR/.tmp/binary-ebpf/oom-kill.c diff --git a/.golangci.yml b/.golangci.yml index a3d3853903e2b..afc132e345cd3 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -3,6 +3,9 @@ run: - pkg/util/cloudproviders/cloudfoundry/bbscache_test.go # implements interface from imported package whose method names fail linting - pkg/util/intern/string.go # TODO: fix govet 'unsafeptr' error - pkg/serverless/trace/inferredspan/constants.go # TODO: fox revive exported const error + skip-dirs: + - 
pkg/proto/patches + issues: exclude-use-default: false diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index 04472dbbc2d4c..18b104e54f6b4 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -41,6 +41,7 @@ core,github.com/Azure/go-autorest/tracing,Apache-2.0,Copyright 2015 Microsoft Co core,github.com/BurntSushi/toml,MIT,Copyright (c) 2013 TOML authors core,github.com/BurntSushi/toml/internal,MIT,Copyright (c) 2013 TOML authors core,github.com/CycloneDX/cyclonedx-go,Apache-2.0,Copyright & License | Copyright (c) OWASP Foundation | Copyright (c) OWASP Foundation. All Rights Reserved | Copyright OWASP Foundation +core,github.com/DataDog/agent-payload/pb,BSD-3-Clause,"Copyright (c) 2017, Datadog, Inc" core,github.com/DataDog/agent-payload/v5/contimage,BSD-3-Clause,"Copyright (c) 2017, Datadog, Inc" core,github.com/DataDog/agent-payload/v5/contlcycle,BSD-3-Clause,"Copyright (c) 2017, Datadog, Inc" core,github.com/DataDog/agent-payload/v5/cws/dumpsv1,BSD-3-Clause,"Copyright (c) 2017, Datadog, Inc" @@ -881,6 +882,7 @@ core,github.com/golang/protobuf/protoc-gen-go/plugin,BSD-3-Clause,Copyright (c) core,github.com/golang/protobuf/ptypes,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. core,github.com/golang/protobuf/ptypes/any,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. core,github.com/golang/protobuf/ptypes/duration,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. +core,github.com/golang/protobuf/ptypes/empty,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. core,github.com/golang/protobuf/ptypes/struct,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. core,github.com/golang/protobuf/ptypes/timestamp,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. core,github.com/golang/protobuf/ptypes/wrappers,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. 
@@ -1727,6 +1729,7 @@ core,golang.org/x/crypto/ssh/knownhosts,BSD-3-Clause,Copyright (c) 2009 The Go A core,golang.org/x/crypto/ssh/terminal,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved core,golang.org/x/exp/constraints,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved core,golang.org/x/exp/maps,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved +core,golang.org/x/exp/mmap,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved core,golang.org/x/exp/slices,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved core,golang.org/x/exp/typeparams,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved core,golang.org/x/lint,BSD-3-Clause,Copyright (c) 2013 The Go Authors. All rights reserved diff --git a/README.md b/README.md index c73c799529040..c052bdf33fbf6 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ and development, is located under [the docs directory](docs) of the present repo ## Getting started To build the Agent you need: - * [Go](https://golang.org/doc/install) 1.18 or later. You'll also need to set your `$GOPATH` and have `$GOPATH/bin` in your path. + * [Go](https://golang.org/doc/install) 1.20 or later. You'll also need to set your `$GOPATH` and have `$GOPATH/bin` in your path. * Python 3.7+ along with development libraries for tooling. You will also need Python 2.7 if you are building the Agent with Python 2 support. * Python dependencies. You may install these with `pip install -r requirements.txt` This will also pull in [Invoke](http://www.pyinvoke.org) if not yet installed. 
diff --git a/cmd/agent/api/grpc.go b/cmd/agent/api/grpc.go index 9776f9d83c6bc..122b178c17d5c 100644 --- a/cmd/agent/api/grpc.go +++ b/cmd/agent/api/grpc.go @@ -24,7 +24,7 @@ import ( dsdReplay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay" dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server" remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" - pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/tagger" "github.com/DataDog/datadog-agent/pkg/tagger/replay" taggerserver "github.com/DataDog/datadog-agent/pkg/tagger/server" diff --git a/cmd/agent/api/server.go b/cmd/agent/api/server.go index 3c160c683c84c..ed2d8c60cd1d5 100644 --- a/cmd/agent/api/server.go +++ b/cmd/agent/api/server.go @@ -37,7 +37,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/api/util" "github.com/DataDog/datadog-agent/pkg/config" remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" - pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/tagger" taggerserver "github.com/DataDog/datadog-agent/pkg/tagger/server" grpcutil "github.com/DataDog/datadog-agent/pkg/util/grpc" diff --git a/cmd/agent/subcommands/dogstatsdcapture/command.go b/cmd/agent/subcommands/dogstatsdcapture/command.go index f16795accae4e..403c95a0749a5 100644 --- a/cmd/agent/subcommands/dogstatsdcapture/command.go +++ b/cmd/agent/subcommands/dogstatsdcapture/command.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/log" "github.com/DataDog/datadog-agent/pkg/api/security" pkgconfig "github.com/DataDog/datadog-agent/pkg/config" - pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" 
"github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/spf13/cobra" diff --git a/cmd/agent/subcommands/dogstatsdreplay/command.go b/cmd/agent/subcommands/dogstatsdreplay/command.go index b8032e1c6c2eb..3cc4dc6919dad 100644 --- a/cmd/agent/subcommands/dogstatsdreplay/command.go +++ b/cmd/agent/subcommands/dogstatsdreplay/command.go @@ -27,7 +27,7 @@ import ( "github.com/DataDog/datadog-agent/comp/dogstatsd/replay" "github.com/DataDog/datadog-agent/pkg/api/security" pkgconfig "github.com/DataDog/datadog-agent/pkg/config" - pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/spf13/cobra" diff --git a/cmd/cluster-agent/api/grpc.go b/cmd/cluster-agent/api/grpc.go index f1857afb0e033..baffa02cb6578 100644 --- a/cmd/cluster-agent/api/grpc.go +++ b/cmd/cluster-agent/api/grpc.go @@ -8,7 +8,7 @@ package api import ( "context" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" taggerserver "github.com/DataDog/datadog-agent/pkg/tagger/server" ) diff --git a/cmd/cluster-agent/api/server.go b/cmd/cluster-agent/api/server.go index 64f3253138b85..c68a33f57173b 100644 --- a/cmd/cluster-agent/api/server.go +++ b/cmd/cluster-agent/api/server.go @@ -32,7 +32,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/api/security" "github.com/DataDog/datadog-agent/pkg/api/util" "github.com/DataDog/datadog-agent/pkg/config" - pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/tagger" taggerserver "github.com/DataDog/datadog-agent/pkg/tagger/server" grpcutil "github.com/DataDog/datadog-agent/pkg/util/grpc" diff --git a/cmd/serverless/main.go b/cmd/serverless/main.go index b64afcfb6af90..6c55103e12b93 100644 --- a/cmd/serverless/main.go +++ 
b/cmd/serverless/main.go @@ -17,6 +17,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/config" logConfig "github.com/DataDog/datadog-agent/pkg/logs/config" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serverless" "github.com/DataDog/datadog-agent/pkg/serverless/appsec" "github.com/DataDog/datadog-agent/pkg/serverless/appsec/httpsec" @@ -31,7 +32,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/serverless/registration" "github.com/DataDog/datadog-agent/pkg/serverless/trace" "github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/log" ) diff --git a/cmd/serverless/startchecker.test b/cmd/serverless/startchecker.test deleted file mode 100755 index 644e48858dbf5..0000000000000 Binary files a/cmd/serverless/startchecker.test and /dev/null differ diff --git a/cmd/trace-agent/config/config.go b/cmd/trace-agent/config/config.go index bfffca1cfd59a..98f546a0c6693 100644 --- a/cmd/trace-agent/config/config.go +++ b/cmd/trace-agent/config/config.go @@ -26,7 +26,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/config/remote/data" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/otlp" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/tagger" "github.com/DataDog/datadog-agent/pkg/tagger/collectors" "github.com/DataDog/datadog-agent/pkg/trace/config" diff --git a/cmd/trace-agent/remote_config.go b/cmd/trace-agent/remote_config.go index 8d451c16433e5..c16bb33e1a1c5 100644 --- a/cmd/trace-agent/remote_config.go +++ b/cmd/trace-agent/remote_config.go @@ -15,7 +15,7 @@ import ( "time" "github.com/DataDog/datadog-agent/pkg/config/remote" - 
"github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/trace/api" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/metrics" diff --git a/cmd/trace-agent/remote_config_test.go b/cmd/trace-agent/remote_config_test.go index c39b5712dfae9..da125b6867741 100644 --- a/cmd/trace-agent/remote_config_test.go +++ b/cmd/trace-agent/remote_config_test.go @@ -14,7 +14,7 @@ import ( "strings" "testing" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/trace/api" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/sampler" diff --git a/cmd/trace-agent/test/agent.go b/cmd/trace-agent/test/agent.go index 9510c7a55608b..855ab680efc20 100644 --- a/cmd/trace-agent/test/agent.go +++ b/cmd/trace-agent/test/agent.go @@ -159,7 +159,7 @@ func (s *agentRunner) runAgentConfig(path string) <-chan error { go func() { ch <- cmd.Wait() if s.verbose { - log.Print("agent: killed") + log.Printf("agent: killed") } }() return ch diff --git a/cmd/trace-agent/test/backend.go b/cmd/trace-agent/test/backend.go index fb540dfb47fa0..266811e1ee4b3 100644 --- a/cmd/trace-agent/test/backend.go +++ b/cmd/trace-agent/test/backend.go @@ -17,7 +17,7 @@ import ( "go.uber.org/atomic" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/tinylib/msgp/msgp" "google.golang.org/protobuf/proto" diff --git a/cmd/trace-agent/test/example_test.go b/cmd/trace-agent/test/example_test.go index db64087e1da8a..0636ac701dc39 100644 --- a/cmd/trace-agent/test/example_test.go +++ b/cmd/trace-agent/test/example_test.go @@ -11,7 +11,7 @@ import ( "os" "time" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb 
"github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/testutil" ) diff --git a/cmd/trace-agent/test/runner.go b/cmd/trace-agent/test/runner.go index 02a2f9ed49894..a8b6ce4f891fb 100644 --- a/cmd/trace-agent/test/runner.go +++ b/cmd/trace-agent/test/runner.go @@ -17,7 +17,7 @@ import ( "github.com/tinylib/msgp/msgp" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" ) // ErrNotStarted is returned when attempting to operate an unstarted Runner. diff --git a/cmd/trace-agent/test/testsuite/cards_test.go b/cmd/trace-agent/test/testsuite/cards_test.go index f2332c3ea9728..931a4e22acaee 100644 --- a/cmd/trace-agent/test/testsuite/cards_test.go +++ b/cmd/trace-agent/test/testsuite/cards_test.go @@ -14,8 +14,8 @@ import ( vmsgp "github.com/vmihailenco/msgpack/v4" "github.com/DataDog/datadog-agent/cmd/trace-agent/test" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/api" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/testutil" ) diff --git a/cmd/trace-agent/test/testsuite/chunking_test.go b/cmd/trace-agent/test/testsuite/chunking_test.go index b81f6de0947c9..a56a954f8513c 100644 --- a/cmd/trace-agent/test/testsuite/chunking_test.go +++ b/cmd/trace-agent/test/testsuite/chunking_test.go @@ -10,7 +10,7 @@ import ( "time" "github.com/DataDog/datadog-agent/cmd/trace-agent/test" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/writer" "github.com/stretchr/testify/assert" diff --git a/cmd/trace-agent/test/testsuite/events_test.go b/cmd/trace-agent/test/testsuite/events_test.go index 69b2a12ef56df..76363a77711b8 100644 --- a/cmd/trace-agent/test/testsuite/events_test.go +++ b/cmd/trace-agent/test/testsuite/events_test.go @@ 
-12,7 +12,7 @@ import ( "time" "github.com/DataDog/datadog-agent/cmd/trace-agent/test" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/sampler" ) diff --git a/cmd/trace-agent/test/testsuite/hostname_test.go b/cmd/trace-agent/test/testsuite/hostname_test.go index a59f79145fcec..6620983cd0e0a 100644 --- a/cmd/trace-agent/test/testsuite/hostname_test.go +++ b/cmd/trace-agent/test/testsuite/hostname_test.go @@ -12,7 +12,7 @@ import ( "time" "github.com/DataDog/datadog-agent/cmd/trace-agent/test" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/testutil" ) diff --git a/cmd/trace-agent/test/testsuite/otlp_test.go b/cmd/trace-agent/test/testsuite/otlp_test.go index d3e1e4ecbc653..d4050cecc108c 100644 --- a/cmd/trace-agent/test/testsuite/otlp_test.go +++ b/cmd/trace-agent/test/testsuite/otlp_test.go @@ -14,7 +14,7 @@ import ( "time" "github.com/DataDog/datadog-agent/cmd/trace-agent/test" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/testutil" "github.com/stretchr/testify/assert" diff --git a/cmd/trace-agent/test/testsuite/proxy_test.go b/cmd/trace-agent/test/testsuite/proxy_test.go index 0cb97879463d9..d56685551bc0b 100644 --- a/cmd/trace-agent/test/testsuite/proxy_test.go +++ b/cmd/trace-agent/test/testsuite/proxy_test.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/cmd/trace-agent/test" "github.com/DataDog/datadog-agent/cmd/trace-agent/test/testsuite/testdata" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/testutil" ) diff --git a/cmd/trace-agent/test/testsuite/stats_test.go 
b/cmd/trace-agent/test/testsuite/stats_test.go index 5b938a60f9348..ba740fe8d5768 100644 --- a/cmd/trace-agent/test/testsuite/stats_test.go +++ b/cmd/trace-agent/test/testsuite/stats_test.go @@ -11,9 +11,10 @@ import ( "github.com/DataDog/datadog-agent/cmd/trace-agent/test" "github.com/DataDog/datadog-agent/cmd/trace-agent/test/testsuite/testdata" - "github.com/DataDog/datadog-agent/pkg/trace/pb" - + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + protoutil "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/runtime/protoiface" ) func TestClientStats(t *testing.T) { @@ -53,7 +54,15 @@ func TestClientStats(t *testing.T) { continue } assert.Equalf(t, len(res), len(tt.Out), "res had so many elements: %d\ntt has:%d", len(res), len(tt.Out)) - assert.ElementsMatch(t, res, tt.Out) + actual := []protoiface.MessageV1{} + expected := []protoiface.MessageV1{} + for _, msg := range res { + actual = append(actual, msg) + } + for _, msg := range tt.Out { + expected = append(expected, msg) + } + assert.ElementsMatch(t, protoutil.PbToStringSlice(actual), protoutil.PbToStringSlice(expected)) return case <-timeout: t.Fatalf("timed out, log was:\n%s", r.AgentLog()) diff --git a/cmd/trace-agent/test/testsuite/testdata/clientstats.go b/cmd/trace-agent/test/testsuite/testdata/clientstats.go index 4f63672b94fde..8b1d1d1e2c250 100644 --- a/cmd/trace-agent/test/testsuite/testdata/clientstats.go +++ b/cmd/trace-agent/test/testsuite/testdata/clientstats.go @@ -11,7 +11,7 @@ import ( "github.com/DataDog/sketches-go/ddsketch/store" "github.com/golang/protobuf/proto" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" ) func getEmptyDDSketch() []byte { @@ -34,11 +34,11 @@ var ClientStatsTests = []struct { RuntimeID: "1", Sequence: 2, Service: "test-service", - Stats: []pb.ClientStatsBucket{ + Stats: []*pb.ClientStatsBucket{ { Start: 
1, Duration: 2, - Stats: []pb.ClientGroupedStats{ + Stats: []*pb.ClientGroupedStats{ { Service: "", Name: "___noname00___", @@ -61,7 +61,7 @@ var ClientStatsTests = []struct { AgentEnv: "agent-env", AgentVersion: "6.0.0", ClientComputed: true, - Stats: []pb.ClientStatsPayload{{ + Stats: []*pb.ClientStatsPayload{{ Hostname: "testhost", Env: "testing", Version: "0.1-alpha", @@ -70,11 +70,11 @@ var ClientStatsTests = []struct { RuntimeID: "1", Sequence: 2, Service: "test-service", - Stats: []pb.ClientStatsBucket{ + Stats: []*pb.ClientStatsBucket{ { Start: 0, Duration: 2, - Stats: []pb.ClientGroupedStats{ + Stats: []*pb.ClientGroupedStats{ { Service: "unnamed-go-service", Name: "noname00", @@ -104,11 +104,11 @@ var ClientStatsTests = []struct { RuntimeID: "1", Sequence: 2, Service: "test-service", - Stats: []pb.ClientStatsBucket{ + Stats: []*pb.ClientStatsBucket{ { Start: 1, Duration: 2, - Stats: []pb.ClientGroupedStats{ + Stats: []*pb.ClientGroupedStats{ { Service: "svc", Name: "noname00", @@ -137,7 +137,7 @@ var ClientStatsTests = []struct { { Start: 3, Duration: 4, - Stats: []pb.ClientGroupedStats{ + Stats: []*pb.ClientGroupedStats{ { Service: "profiles-db", Name: "sql.query", @@ -159,7 +159,7 @@ var ClientStatsTests = []struct { AgentEnv: "agent-env", AgentVersion: "6.0.0", ClientComputed: true, - Stats: []pb.ClientStatsPayload{ + Stats: []*pb.ClientStatsPayload{ { Hostname: "testhost", Env: "testing", @@ -170,11 +170,11 @@ var ClientStatsTests = []struct { Sequence: 2, AgentAggregation: "distributions", Service: "test-service", - Stats: []pb.ClientStatsBucket{ + Stats: []*pb.ClientStatsBucket{ { Start: 0, Duration: 2, - Stats: []pb.ClientGroupedStats{ + Stats: []*pb.ClientGroupedStats{ { Service: "svc", Name: "noname00", @@ -212,11 +212,11 @@ var ClientStatsTests = []struct { Sequence: 2, AgentAggregation: "distributions", Service: "test-service", - Stats: []pb.ClientStatsBucket{ + Stats: []*pb.ClientStatsBucket{ { Start: 0, Duration: 4, - Stats: 
[]pb.ClientGroupedStats{ + Stats: []*pb.ClientGroupedStats{ { Service: "profiles-db", Name: "sql.query", diff --git a/cmd/trace-agent/test/testsuite/traces_test.go b/cmd/trace-agent/test/testsuite/traces_test.go index f6d2f6fa70989..a236af6c583bf 100644 --- a/cmd/trace-agent/test/testsuite/traces_test.go +++ b/cmd/trace-agent/test/testsuite/traces_test.go @@ -12,8 +12,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/DataDog/datadog-agent/cmd/trace-agent/test" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/testutil" ) diff --git a/comp/dogstatsd/replay/reader.go b/comp/dogstatsd/replay/reader.go index 4a9afda4acf9d..189448a1c8a73 100644 --- a/comp/dogstatsd/replay/reader.go +++ b/comp/dogstatsd/replay/reader.go @@ -14,7 +14,7 @@ import ( "github.com/DataDog/zstd" - pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util" "github.com/DataDog/datadog-agent/pkg/util/log" diff --git a/comp/dogstatsd/replay/writer.go b/comp/dogstatsd/replay/writer.go index 0799ce2c8be8e..f51af8c5af9b2 100644 --- a/comp/dogstatsd/replay/writer.go +++ b/comp/dogstatsd/replay/writer.go @@ -21,7 +21,7 @@ import ( "github.com/spf13/afero" "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" - pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/tagger" "github.com/DataDog/datadog-agent/pkg/util/log" protoutils "github.com/DataDog/datadog-agent/pkg/util/proto" diff --git a/comp/dogstatsd/replay/writer_test.go b/comp/dogstatsd/replay/writer_test.go index a827d5c9fac9e..3285a1b7f9319 100644 --- a/comp/dogstatsd/replay/writer_test.go +++ 
b/comp/dogstatsd/replay/writer_test.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" - pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) diff --git a/comp/forwarder/defaultforwarder/blocked_endpoints.go b/comp/forwarder/defaultforwarder/blocked_endpoints.go index cfcf1494ed7a8..2d06f05d3a3f3 100644 --- a/comp/forwarder/defaultforwarder/blocked_endpoints.go +++ b/comp/forwarder/defaultforwarder/blocked_endpoints.go @@ -55,7 +55,7 @@ func newBlockedEndpoints(config config.Component, log log.Component) *blockedEnd return &blockedEndpoints{ errorPerEndpoint: make(map[string]*block), - backoffPolicy: backoff.NewPolicy(backoffFactor, backoffBase, backoffMax, recInterval, recoveryReset), + backoffPolicy: backoff.NewExpBackoffPolicy(backoffFactor, backoffBase, backoffMax, recInterval, recoveryReset), } } diff --git a/comp/forwarder/defaultforwarder/blocked_endpoints_test.go b/comp/forwarder/defaultforwarder/blocked_endpoints_test.go index 22f8e530161e0..81bbd571d5372 100644 --- a/comp/forwarder/defaultforwarder/blocked_endpoints_test.go +++ b/comp/forwarder/defaultforwarder/blocked_endpoints_test.go @@ -16,6 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/log" "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/util/backoff" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -28,19 +29,25 @@ func TestMinBackoffFactorValid(t *testing.T) { log := fxutil.Test[log.Component](t, log.MockModule) e := newBlockedEndpoints(mockConfig, log) + policy, ok := e.backoffPolicy.(*backoff.ExpBackoffPolicy) + assert.True(t, ok) // Verify default - defaultValue := e.backoffPolicy.MinBackoffFactor + defaultValue := policy.MinBackoffFactor assert.Equal(t, float64(2), defaultValue) // Verify configuration updates 
global var mockConfig.Set("forwarder_backoff_factor", 4) e = newBlockedEndpoints(mockConfig, log) - assert.Equal(t, float64(4), e.backoffPolicy.MinBackoffFactor) + policy, ok = e.backoffPolicy.(*backoff.ExpBackoffPolicy) + assert.True(t, ok) + assert.Equal(t, float64(4), policy.MinBackoffFactor) // Verify invalid values recover gracefully mockConfig.Set("forwarder_backoff_factor", 1.5) e = newBlockedEndpoints(mockConfig, log) - assert.Equal(t, defaultValue, e.backoffPolicy.MinBackoffFactor) + policy, ok = e.backoffPolicy.(*backoff.ExpBackoffPolicy) + assert.True(t, ok) + assert.Equal(t, defaultValue, policy.MinBackoffFactor) } func TestBaseBackoffTimeValid(t *testing.T) { @@ -48,19 +55,26 @@ func TestBaseBackoffTimeValid(t *testing.T) { log := fxutil.Test[log.Component](t, log.MockModule) e := newBlockedEndpoints(mockConfig, log) + policy, ok := e.backoffPolicy.(*backoff.ExpBackoffPolicy) + assert.True(t, ok) + // Verify default - defaultValue := e.backoffPolicy.BaseBackoffTime + defaultValue := policy.BaseBackoffTime assert.Equal(t, float64(2), defaultValue) // Verify configuration updates global var mockConfig.Set("forwarder_backoff_base", 4) e = newBlockedEndpoints(mockConfig, log) - assert.Equal(t, float64(4), e.backoffPolicy.BaseBackoffTime) + policy, ok = e.backoffPolicy.(*backoff.ExpBackoffPolicy) + assert.True(t, ok) + assert.Equal(t, float64(4), policy.BaseBackoffTime) // Verify invalid values recover gracefully mockConfig.Set("forwarder_backoff_base", 0) e = newBlockedEndpoints(mockConfig, log) - assert.Equal(t, defaultValue, e.backoffPolicy.BaseBackoffTime) + policy, ok = e.backoffPolicy.(*backoff.ExpBackoffPolicy) + assert.True(t, ok) + assert.Equal(t, defaultValue, policy.BaseBackoffTime) } func TestMaxBackoffTimeValid(t *testing.T) { @@ -68,19 +82,26 @@ func TestMaxBackoffTimeValid(t *testing.T) { log := fxutil.Test[log.Component](t, log.MockModule) e := newBlockedEndpoints(mockConfig, log) + policy, ok := e.backoffPolicy.(*backoff.ExpBackoffPolicy) + 
assert.True(t, ok) + // Verify default - defaultValue := e.backoffPolicy.MaxBackoffTime + defaultValue := policy.MaxBackoffTime assert.Equal(t, float64(64), defaultValue) // Verify configuration updates global var mockConfig.Set("forwarder_backoff_max", 128) e = newBlockedEndpoints(mockConfig, log) - assert.Equal(t, float64(128), e.backoffPolicy.MaxBackoffTime) + policy, ok = e.backoffPolicy.(*backoff.ExpBackoffPolicy) + assert.True(t, ok) + assert.Equal(t, float64(128), policy.MaxBackoffTime) // Verify invalid values recover gracefully mockConfig.Set("forwarder_backoff_max", 0) e = newBlockedEndpoints(mockConfig, log) - assert.Equal(t, defaultValue, e.backoffPolicy.MaxBackoffTime) + policy, ok = e.backoffPolicy.(*backoff.ExpBackoffPolicy) + assert.True(t, ok) + assert.Equal(t, defaultValue, policy.MaxBackoffTime) } func TestRecoveryIntervalValid(t *testing.T) { @@ -88,8 +109,11 @@ func TestRecoveryIntervalValid(t *testing.T) { log := fxutil.Test[log.Component](t, log.MockModule) e := newBlockedEndpoints(mockConfig, log) + policy, ok := e.backoffPolicy.(*backoff.ExpBackoffPolicy) + assert.True(t, ok) + // Verify default - defaultValue := e.backoffPolicy.RecoveryInterval + defaultValue := policy.RecoveryInterval recoveryReset := config.Datadog.GetBool("forwarder_recovery_reset") assert.Equal(t, 2, defaultValue) assert.Equal(t, false, recoveryReset) @@ -97,17 +121,23 @@ func TestRecoveryIntervalValid(t *testing.T) { // Verify configuration updates global var mockConfig.Set("forwarder_recovery_interval", 1) e = newBlockedEndpoints(mockConfig, log) - assert.Equal(t, 1, e.backoffPolicy.RecoveryInterval) + policy, ok = e.backoffPolicy.(*backoff.ExpBackoffPolicy) + assert.True(t, ok) + assert.Equal(t, 1, policy.RecoveryInterval) // Verify invalid values recover gracefully mockConfig.Set("forwarder_recovery_interval", 0) e = newBlockedEndpoints(mockConfig, log) - assert.Equal(t, defaultValue, e.backoffPolicy.RecoveryInterval) + policy, ok = 
e.backoffPolicy.(*backoff.ExpBackoffPolicy) + assert.True(t, ok) + assert.Equal(t, defaultValue, policy.RecoveryInterval) // Verify reset error count mockConfig.Set("forwarder_recovery_reset", true) e = newBlockedEndpoints(mockConfig, log) - assert.Equal(t, e.backoffPolicy.MaxErrors, e.backoffPolicy.RecoveryInterval) + policy, ok = e.backoffPolicy.(*backoff.ExpBackoffPolicy) + assert.True(t, ok) + assert.Equal(t, policy.MaxErrors, policy.RecoveryInterval) } // Test we increase delay on average @@ -145,7 +175,10 @@ func TestMaxGetBackoffDuration(t *testing.T) { e := newBlockedEndpoints(mockConfig, log) backoffDuration := e.getBackoffDuration(100) - assert.Equal(t, time.Duration(e.backoffPolicy.MaxBackoffTime)*time.Second, backoffDuration) + policy, ok := e.backoffPolicy.(*backoff.ExpBackoffPolicy) + assert.True(t, ok) + + assert.Equal(t, time.Duration(policy.MaxBackoffTime)*time.Second, backoffDuration) } func TestMaxErrors(t *testing.T) { @@ -168,7 +201,10 @@ func TestMaxErrors(t *testing.T) { previousBackoffDuration = backoffDuration } - assert.Equal(t, e.backoffPolicy.MaxErrors, attempts) + policy, ok := e.backoffPolicy.(*backoff.ExpBackoffPolicy) + assert.True(t, ok) + + assert.Equal(t, policy.MaxErrors, attempts) } func TestBlock(t *testing.T) { @@ -193,10 +229,13 @@ func TestMaxBlock(t *testing.T) { e.close("test") now := time.Now() - maxBackoffDuration := time.Duration(e.backoffPolicy.MaxBackoffTime) * time.Second + policy, ok := e.backoffPolicy.(*backoff.ExpBackoffPolicy) + assert.True(t, ok) + + maxBackoffDuration := time.Duration(policy.MaxBackoffTime) * time.Second assert.Contains(t, e.errorPerEndpoint, "test") - assert.Equal(t, e.backoffPolicy.MaxErrors, e.errorPerEndpoint["test"].nbError) + assert.Equal(t, policy.MaxErrors, e.errorPerEndpoint["test"].nbError) assert.True(t, now.Add(maxBackoffDuration).After(e.errorPerEndpoint["test"].until) || now.Add(maxBackoffDuration).Equal(e.errorPerEndpoint["test"].until)) } @@ -214,7 +253,11 @@ func TestUnblock(t 
*testing.T) { e.close("test") e.recover("test") - assert.True(t, e.errorPerEndpoint["test"].nbError == int(math.Max(0, float64(5-e.backoffPolicy.RecoveryInterval)))) + + policy, ok := e.backoffPolicy.(*backoff.ExpBackoffPolicy) + assert.True(t, ok) + + assert.True(t, e.errorPerEndpoint["test"].nbError == int(math.Max(0, float64(5-policy.RecoveryInterval)))) } func TestMaxUnblock(t *testing.T) { diff --git a/go.mod b/go.mod index c3a2cb6d4635d..196ae05945e7b 100644 --- a/go.mod +++ b/go.mod @@ -61,7 +61,7 @@ require ( github.com/DataDog/gopsutil v1.2.2 github.com/DataDog/nikos v1.12.0 github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.5.2 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.5.2 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.5.3-0.20230720121352-2db90e0b9570 github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.5.2 github.com/DataDog/sketches-go v1.4.2 github.com/DataDog/viper v1.12.0 @@ -567,7 +567,8 @@ require ( require github.com/lorenzosaino/go-sysctl v0.3.1 require ( - github.com/DataDog/datadog-agent/pkg/proto v0.47.0-rc.3 + github.com/DataDog/agent-payload v4.89.0+incompatible + github.com/DataDog/datadog-agent/pkg/proto v0.47.0-rc.3.0.20230717151521-271965684571 github.com/aquasecurity/trivy v0.0.0-00010101000000-000000000000 github.com/cloudfoundry-community/go-cfclient/v2 v2.0.1-0.20230503155151-3d15366c5820 github.com/gocomply/scap v0.1.2-0.20230531064509-55a00f73e8d6 @@ -652,9 +653,6 @@ replace ( // Fixes CVE-2023-1732, imported by nikos replace github.com/cloudflare/circl v1.1.0 => github.com/cloudflare/circl v1.3.3 -// Fixes CVE-2023-30551, imported by trivy -replace github.com/sigstore/rekor v1.0.1 => github.com/sigstore/rekor v1.1.1 - // Fixes CVE-2023-26054, imported by trivy replace github.com/moby/buildkit v0.11.0 => github.com/moby/buildkit v0.11.4 diff --git a/go.sum b/go.sum index 4497cc4fbe875..c69d9fc971dd4 
100644 --- a/go.sum +++ b/go.sum @@ -123,6 +123,8 @@ github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbi github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/CycloneDX/cyclonedx-go v0.7.1 h1:5w1SxjGm9MTMNTuRbEPyw21ObdbaagTWF/KfF0qHTRE= github.com/CycloneDX/cyclonedx-go v0.7.1/go.mod h1:N/nrdWQI2SIjaACyyDs/u7+ddCkyl/zkNs8xFsHF2Ps= +github.com/DataDog/agent-payload v4.89.0+incompatible h1:0C7DwBSz1iqkgwCp2Lwzzguu5dgcxNxLo9QeE4OCDJQ= +github.com/DataDog/agent-payload v4.89.0+incompatible/go.mod h1:/2RW4IC/2z54jtB6RLgq5UtVI1TsX0joDRjKbkLT+mk= github.com/DataDog/agent-payload/v5 v5.0.90-0.20230717070528-bfb5d051f2ab h1:Y0SYoy3CuSVUSKJ3U9a6zevj3AGUGqRctmCBVaSgGrs= github.com/DataDog/agent-payload/v5 v5.0.90-0.20230717070528-bfb5d051f2ab/go.mod h1:oQZi1VZp1e3QvlSUX4iphZCpJaFepUxWq0hNXxihKBM= github.com/DataDog/appsec-internal-go v0.0.0-20230215162203-5149228be86a h1:7ZiVdU4j19IYuy8rR0uUzC7I7HjWul61ZEyUgvLkZBM= @@ -163,8 +165,8 @@ github.com/DataDog/nikos v1.12.0/go.mod h1:vboQtY04KmE+Ua8m7gVheZJcnStQY+fIiSPY/ github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.5.2 h1:W47xIROVye+D6WxkZcy8ETomfZlTNWoVZODwAh4LdeE= github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.5.2 h1:JRVQga0KlFCMyuKF/ghrZtRpmYL3XWRGXpSB5Qdk5Ko= github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.5.2/go.mod h1:6x6OujLzkt7Wwlu/6kYO5+8FNRBi1HEw8Qm6/qvTOQA= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.5.2 h1:FbQSZ6uXhuHzgwC73MUxqvHwV0uxKiGAeAAZIMrfUAc= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.5.2/go.mod h1:oPpGMNpwga8zTGUJfLy3Z/u4l6bvEYuRatJkgSUazr4= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.5.3-0.20230720121352-2db90e0b9570 h1:KcLkS0KO3hyPBfu3HD7W809hj77HVTrnmXxyHgiTeXI= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics 
v0.5.3-0.20230720121352-2db90e0b9570/go.mod h1:6OccRRoSKgE+FHOs5i0Zfa4QiXgVg+jiQdHmMQBMEKk= github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.5.2 h1:C0uzQwHCKubfmbvaZF/Qi6ernigbcoWt9A+U+s0iQGg= github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.5.2/go.mod h1:RT78x34OmVb0wuZLtmzzRRy43+7pCCA6ZEOGQ9mA5w0= github.com/DataDog/sketches-go v1.4.2 h1:gppNudE9d19cQ98RYABOetxIhpTCl4m7CnbRZjvVA/o= diff --git a/internal/tools/proto/go.mod b/internal/tools/proto/go.mod index 1785111b4814f..09a8f079024ec 100644 --- a/internal/tools/proto/go.mod +++ b/internal/tools/proto/go.mod @@ -7,6 +7,7 @@ require ( github.com/golang/mock v1.5.0 github.com/golang/protobuf v1.5.2 github.com/grpc-ecosystem/grpc-gateway v1.12.2 + github.com/planetscale/vtprotobuf v0.4.0 github.com/tinylib/msgp v1.1.6 google.golang.org/grpc v1.24.0 ) @@ -14,13 +15,12 @@ require ( require ( github.com/ghodss/yaml v1.0.0 // indirect github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect - github.com/philhofer/fwd v1.1.1 // indirect - golang.org/x/mod v0.3.0 // indirect - golang.org/x/net v0.0.0-20201021035429-f5854403a974 // indirect - golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f // indirect - golang.org/x/text v0.3.3 // indirect - golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + github.com/philhofer/fwd v1.1.2 // indirect + golang.org/x/mod v0.7.0 // indirect + golang.org/x/net v0.3.0 // indirect + golang.org/x/sys v0.3.0 // indirect + golang.org/x/text v0.5.0 // indirect + golang.org/x/tools v0.4.0 // indirect google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/internal/tools/proto/go.sum b/internal/tools/proto/go.sum index 439cd0cdc9d30..93ea4f8632304 100644 --- a/internal/tools/proto/go.sum +++ b/internal/tools/proto/go.sum @@ -2,6 
+2,7 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/favadi/protoc-go-inject-tag v1.4.0 h1:K3KXxbgRw5WT4f43LbglARGz/8jVsDOS7uMjG4oNvXY= github.com/favadi/protoc-go-inject-tag v1.4.0/go.mod h1:AZ+PK+QDKUOLlBRG0rYiKkUX5Hw7+7GTFzlU99GFSbQ= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= @@ -21,9 +22,15 @@ github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/grpc-ecosystem/grpc-gateway v1.12.2 h1:D0EVSTwQoQOyfY35QNSuPJA4jpZRtkoGYWQMB7XNg5o= github.com/grpc-ecosystem/grpc-gateway v1.12.2/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= +github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= +github.com/planetscale/vtprotobuf v0.4.0 h1:NEI+g4woRaAZgeZ3sAvbtyvMBRjIv5kE7EWYQ8m4JwY= +github.com/planetscale/vtprotobuf v0.4.0/go.mod h1:wm1N3qk9G/4+VM1WhpkLbvY/d8+0PbwYYpP5P5VhTks= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/tinylib/msgp v1.1.6 h1:i+SbKraHhnrf9M5MYmvQhFnbLhAXSDWF8WWsuyRdocw= github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -34,8 +41,9 @@ golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -43,8 +51,9 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.3.0 h1:VWL6FNY2bEEmsGVKabSlHu5Irp34xmMRoqb/9lF9lxk= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -53,19 +62,22 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9 h1:sEvmEcJVKBNUvgCUClbUQeHOAa9U0I2Ce1BooMvVCY4= golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4= +golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -88,5 +100,6 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/internal/tools/proto/tools.go b/internal/tools/proto/tools.go index 985266391df97..743e27cca5c92 100644 --- a/internal/tools/proto/tools.go +++ b/internal/tools/proto/tools.go @@ -20,6 +20,7 @@ import ( _ "github.com/golang/mock/mockgen" _ 
"github.com/golang/protobuf/protoc-gen-go" _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway" + _ "github.com/planetscale/vtprotobuf/cmd/protoc-gen-go-vtproto" _ "github.com/tinylib/msgp" _ "google.golang.org/grpc" ) diff --git a/omnibus/config/patches/openscap/0005-Fix-leak-of-filename-in-oval_agent_new_session.patch b/omnibus/config/patches/openscap/0005-Fix-leak-of-filename-in-oval_agent_new_session.patch new file mode 100644 index 0000000000000..1c0cef22cf6ed --- /dev/null +++ b/omnibus/config/patches/openscap/0005-Fix-leak-of-filename-in-oval_agent_new_session.patch @@ -0,0 +1,24 @@ +From e07bee9290f5dde4456ecef98eed2ac7d2092d9c Mon Sep 17 00:00:00 2001 +From: David du Colombier +Date: Wed, 19 Jul 2023 08:53:23 +0200 +Subject: [PATCH 5/6] Fix leak of filename in oval_agent_new_session + +--- + src/OVAL/oval_agent.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/src/OVAL/oval_agent.c b/src/OVAL/oval_agent.c +index 1fbed8ae6..a4ee34d12 100644 +--- a/src/OVAL/oval_agent.c ++++ b/src/OVAL/oval_agent.c +@@ -112,6 +112,7 @@ oval_agent_session_t * oval_agent_new_session(struct oval_definition_model *mode + /* probe sysinfo */ + ret = oval_probe_query_sysinfo(ag_sess->psess, &sysinfo); + if (ret != 0) { ++ free(ag_sess->filename); + oval_probe_session_destroy(ag_sess->psess); + oval_syschar_model_free(ag_sess->sys_model); + free(ag_sess); +-- +2.34.1 + diff --git a/omnibus/config/patches/openscap/0006-Fix-leak-of-item-in-probe_item_collect.patch b/omnibus/config/patches/openscap/0006-Fix-leak-of-item-in-probe_item_collect.patch new file mode 100644 index 0000000000000..3d66a8e6c3918 --- /dev/null +++ b/omnibus/config/patches/openscap/0006-Fix-leak-of-item-in-probe_item_collect.patch @@ -0,0 +1,28 @@ +From 83592087efc58d72892d784cc709664ed91e81b3 Mon Sep 17 00:00:00 2001 +From: David du Colombier +Date: Wed, 19 Jul 2023 08:46:55 +0200 +Subject: [PATCH 6/6] Fix leak of item in probe_item_collect + +--- + src/OVAL/probes/probe/icache.c 
| 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/src/OVAL/probes/probe/icache.c b/src/OVAL/probes/probe/icache.c +index a397d35ec..04ddbfb9e 100644 +--- a/src/OVAL/probes/probe/icache.c ++++ b/src/OVAL/probes/probe/icache.c +@@ -552,9 +552,11 @@ int probe_item_collect(struct probe_ctx *ctx, SEXP_t *item) + memcheck_ret = probe_cobj_memcheck(cobj_itemcnt, ctx->max_mem_ratio); + if (memcheck_ret == -1) { + dE("Failed to check available memory"); ++ SEXP_free(item); + return -1; + } + if (memcheck_ret == 1) { ++ SEXP_free(item); + + /* + * Don't set the message again if the collected object is +-- +2.34.1 + diff --git a/omnibus/config/software/datadog-agent-finalize.rb b/omnibus/config/software/datadog-agent-finalize.rb index 768329e6d359b..e3305dab65633 100644 --- a/omnibus/config/software/datadog-agent-finalize.rb +++ b/omnibus/config/software/datadog-agent-finalize.rb @@ -194,6 +194,7 @@ strip_exclude("*tracer*") strip_exclude("*offset-guess*") strip_exclude("*usm*") + strip_exclude("*shared-libraries*") strip_exclude("*runtime-security*") strip_exclude("*dns*") strip_exclude("*conntrack*") diff --git a/omnibus/config/software/openscap.rb b/omnibus/config/software/openscap.rb index 8fadb573d70e6..833cf0587327c 100644 --- a/omnibus/config/software/openscap.rb +++ b/omnibus/config/software/openscap.rb @@ -39,6 +39,10 @@ build do env = with_standard_compiler_flags(with_embedded_path) + # Fixes since release 1.3.8 + patch source: "0005-Fix-leak-of-filename-in-oval_agent_new_session.patch", env: env + patch source: "0006-Fix-leak-of-item-in-probe_item_collect.patch", env: env + patch source: "get_results_from_session.patch", env: env # add a function to retrieve results from session patch source: "session_result_reset.patch", env: env # add a function to reset results from session patch source: "session_reset_syschar.patch", env: env # also reset system characteristics diff --git a/omnibus/config/software/system-probe.rb b/omnibus/config/software/system-probe.rb 
index 4dc013659f951..4bc7eda1719d6 100644 --- a/omnibus/config/software/system-probe.rb +++ b/omnibus/config/software/system-probe.rb @@ -23,6 +23,8 @@ copy "#{ENV['SYSTEM_PROBE_BIN']}/system-probe", "#{install_dir}/embedded/bin/system-probe" copy "#{ENV['SYSTEM_PROBE_BIN']}/usm.o", "#{install_dir}/embedded/share/system-probe/ebpf/" copy "#{ENV['SYSTEM_PROBE_BIN']}/usm-debug.o", "#{install_dir}/embedded/share/system-probe/ebpf/" + copy "#{ENV['SYSTEM_PROBE_BIN']}/shared-libraries.o", "#{install_dir}/embedded/share/system-probe/ebpf/" + copy "#{ENV['SYSTEM_PROBE_BIN']}/shared-libraries-debug.o", "#{install_dir}/embedded/share/system-probe/ebpf/" copy "#{ENV['SYSTEM_PROBE_BIN']}/dns.o", "#{install_dir}/embedded/share/system-probe/ebpf/" copy "#{ENV['SYSTEM_PROBE_BIN']}/dns-debug.o", "#{install_dir}/embedded/share/system-probe/ebpf/" copy "#{ENV['SYSTEM_PROBE_BIN']}/tracer.o", "#{install_dir}/embedded/share/system-probe/ebpf/" @@ -44,8 +46,11 @@ copy "#{ENV['SYSTEM_PROBE_BIN']}/tracer-fentry-debug-co-re.o", "#{install_dir}/embedded/share/system-probe/ebpf/co-re/tracer-fentry-debug.o" copy "#{ENV['SYSTEM_PROBE_BIN']}/usm-co-re.o", "#{install_dir}/embedded/share/system-probe/ebpf/co-re/usm.o" copy "#{ENV['SYSTEM_PROBE_BIN']}/usm-debug-co-re.o", "#{install_dir}/embedded/share/system-probe/ebpf/co-re/usm-debug.o" + copy "#{ENV['SYSTEM_PROBE_BIN']}/shared-libraries-co-re.o", "#{install_dir}/embedded/share/system-probe/ebpf/co-re/shared-libraries.o" + copy "#{ENV['SYSTEM_PROBE_BIN']}/shared-libraries-debug-co-re.o", "#{install_dir}/embedded/share/system-probe/ebpf/co-re/shared-libraries-debug.o" copy "#{ENV['SYSTEM_PROBE_BIN']}/tracer.c", "#{install_dir}/embedded/share/system-probe/ebpf/runtime/" copy "#{ENV['SYSTEM_PROBE_BIN']}/usm.c", "#{install_dir}/embedded/share/system-probe/ebpf/runtime/" + copy "#{ENV['SYSTEM_PROBE_BIN']}/shared-libraries.c", "#{install_dir}/embedded/share/system-probe/ebpf/runtime/" copy "#{ENV['SYSTEM_PROBE_BIN']}/runtime-security.c", 
"#{install_dir}/embedded/share/system-probe/ebpf/runtime/" copy "#{ENV['SYSTEM_PROBE_BIN']}/conntrack.c", "#{install_dir}/embedded/share/system-probe/ebpf/runtime/" copy "#{ENV['SYSTEM_PROBE_BIN']}/oom-kill.c", "#{install_dir}/embedded/share/system-probe/ebpf/runtime/" diff --git a/pkg/clusteragent/externalmetrics/metrics_retriever.go b/pkg/clusteragent/externalmetrics/metrics_retriever.go index 599c8038be78c..761afcb433f62 100644 --- a/pkg/clusteragent/externalmetrics/metrics_retriever.go +++ b/pkg/clusteragent/externalmetrics/metrics_retriever.go @@ -30,7 +30,7 @@ const ( // Backoff range for number of retries R: // For R < 6 random(2^(R-1) * 30, 2^R * 30) // Otherwise 1800sec -var backoffPolicy backoff.Policy = backoff.NewPolicy(2, 30, 1800, 2, false) +var backoffPolicy backoff.Policy = backoff.NewExpBackoffPolicy(2, 30, 1800, 2, false) // MetricsRetriever is responsible for querying and storing external metrics type MetricsRetriever struct { diff --git a/pkg/config/config.go b/pkg/config/config.go index 50cd1c4078ae9..4467bdef0e368 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -1207,6 +1207,7 @@ func InitConfig(config Config) { config.BindEnvAndSetDefault("serverless.trace_enabled", false, "DD_TRACE_ENABLED") config.BindEnvAndSetDefault("serverless.trace_managed_services", true, "DD_TRACE_MANAGED_SERVICES") config.BindEnvAndSetDefault("serverless.service_mapping", nil, "DD_SERVICE_MAPPING") + config.BindEnvAndSetDefault("serverless.constant_backoff_interval", 100*time.Millisecond) // trace-agent's evp_proxy config.BindEnv("evp_proxy_config.enabled") diff --git a/pkg/config/remote/api/http.go b/pkg/config/remote/api/http.go index fa8c0eba396aa..8219a9f78f97e 100644 --- a/pkg/config/remote/api/http.go +++ b/pkg/config/remote/api/http.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/utils" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo 
"github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" httputils "github.com/DataDog/datadog-agent/pkg/util/http" "github.com/DataDog/datadog-agent/pkg/util/log" ) diff --git a/pkg/config/remote/client.go b/pkg/config/remote/client.go index 13ac98de586ed..98c2710dcb126 100644 --- a/pkg/config/remote/client.go +++ b/pkg/config/remote/client.go @@ -22,7 +22,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/api/security" "github.com/DataDog/datadog-agent/pkg/config/remote/data" "github.com/DataDog/datadog-agent/pkg/config/remote/meta" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" "github.com/DataDog/datadog-agent/pkg/util/backoff" "github.com/DataDog/datadog-agent/pkg/util/flavor" @@ -160,7 +160,7 @@ func newClient(agentName string, updater ConfigUpdater, doTufVerification bool, // // The following values mean each range will always be [pollInterval*2^, min(maxBackoffTime, pollInterval*2^)]. // Every success will cause numErrors to shrink by 2. 
- backoffPolicy := backoff.NewPolicy(minBackoffFactor, pollInterval.Seconds(), + backoffPolicy := backoff.NewExpBackoffPolicy(minBackoffFactor, pollInterval.Seconds(), maximalMaxBackoffTime.Seconds(), recoveryInterval, false) // If we're the cluster agent, we want to report our cluster name and cluster ID in order to allow products diff --git a/pkg/config/remote/service/clients.go b/pkg/config/remote/service/clients.go index d53bbffe62ebf..82a0522bebe3a 100644 --- a/pkg/config/remote/service/clients.go +++ b/pkg/config/remote/service/clients.go @@ -11,7 +11,7 @@ import ( "github.com/benbjohnson/clock" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" ) type client struct { diff --git a/pkg/config/remote/service/clients_test.go b/pkg/config/remote/service/clients_test.go index 375d17b7b09ec..6248e3926c8f0 100644 --- a/pkg/config/remote/service/clients_test.go +++ b/pkg/config/remote/service/clients_test.go @@ -12,7 +12,7 @@ import ( "github.com/benbjohnson/clock" "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" ) func TestClients(t *testing.T) { diff --git a/pkg/config/remote/service/service.go b/pkg/config/remote/service/service.go index 26e3422e9592b..f762cdf04901c 100644 --- a/pkg/config/remote/service/service.go +++ b/pkg/config/remote/service/service.go @@ -33,7 +33,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/config/remote/meta" "github.com/DataDog/datadog-agent/pkg/config/remote/telemetry" "github.com/DataDog/datadog-agent/pkg/config/remote/uptane" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/backoff" "github.com/DataDog/datadog-agent/pkg/util/hostname" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -174,7 +174,7 @@ func 
NewService() (*Service, error) { recoveryInterval := 2 recoveryReset := false - backoffPolicy := backoff.NewPolicy(minBackoffFactor, baseBackoffTime, + backoffPolicy := backoff.NewExpBackoffPolicy(minBackoffFactor, baseBackoffTime, maxBackoffTime.Seconds(), recoveryInterval, recoveryReset) apiKey := config.Datadog.GetString("api_key") diff --git a/pkg/config/remote/service/service_test.go b/pkg/config/remote/service/service_test.go index a357c9d2d7baf..99ec4bf531043 100644 --- a/pkg/config/remote/service/service_test.go +++ b/pkg/config/remote/service/service_test.go @@ -25,7 +25,7 @@ import ( rdata "github.com/DataDog/datadog-agent/pkg/config/remote/data" "github.com/DataDog/datadog-agent/pkg/config/remote/uptane" "github.com/DataDog/datadog-agent/pkg/proto/msgpgo" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/version" ) diff --git a/pkg/config/remote/service/tracer_predicates.go b/pkg/config/remote/service/tracer_predicates.go index 453e37cdfa275..5173b5926847d 100644 --- a/pkg/config/remote/service/tracer_predicates.go +++ b/pkg/config/remote/service/tracer_predicates.go @@ -13,7 +13,7 @@ import ( "github.com/Masterminds/semver" rdata "github.com/DataDog/datadog-agent/pkg/config/remote/data" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" ) // ConfigFileMetaCustom is the custom metadata of a config diff --git a/pkg/config/remote/service/tracer_predicates_test.go b/pkg/config/remote/service/tracer_predicates_test.go index 0fab077b3354a..0412f79d10b64 100644 --- a/pkg/config/remote/service/tracer_predicates_test.go +++ b/pkg/config/remote/service/tracer_predicates_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" ) func 
TestTracerPredicateBadTracerVersion(t *testing.T) { diff --git a/pkg/config/remote/service/util.go b/pkg/config/remote/service/util.go index 8aa465c109b28..37452735b9a0b 100644 --- a/pkg/config/remote/service/util.go +++ b/pkg/config/remote/service/util.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/config/remote/data" "github.com/DataDog/datadog-agent/pkg/config/remote/uptane" "github.com/DataDog/datadog-agent/pkg/proto/msgpgo" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/version" ) diff --git a/pkg/config/remote/uptane/client.go b/pkg/config/remote/uptane/client.go index e28740906d596..03c2fe7a332f8 100644 --- a/pkg/config/remote/uptane/client.go +++ b/pkg/config/remote/uptane/client.go @@ -18,7 +18,7 @@ import ( "go.etcd.io/bbolt" rdata "github.com/DataDog/datadog-agent/pkg/config/remote/data" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" ) // Client is an uptane client diff --git a/pkg/config/remote/uptane/client_test.go b/pkg/config/remote/uptane/client_test.go index 80a2511a59b73..3c3370c214d45 100644 --- a/pkg/config/remote/uptane/client_test.go +++ b/pkg/config/remote/uptane/client_test.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/remote/meta" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" ) func getTestOrgUUIDProvider(orgID int) OrgUUIDProvider { diff --git a/pkg/config/remote/uptane/remote_store.go b/pkg/config/remote/uptane/remote_store.go index 47413a4bfbb50..0286057ce1474 100644 --- a/pkg/config/remote/uptane/remote_store.go +++ b/pkg/config/remote/uptane/remote_store.go @@ -11,7 +11,7 @@ import ( 
"github.com/DataDog/go-tuf/client" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" ) type role string diff --git a/pkg/config/remote/uptane/remote_store_test.go b/pkg/config/remote/uptane/remote_store_test.go index 0f403b25eb9bd..ed34fd4dd1e0f 100644 --- a/pkg/config/remote/uptane/remote_store_test.go +++ b/pkg/config/remote/uptane/remote_store_test.go @@ -13,7 +13,7 @@ import ( "github.com/DataDog/go-tuf/client" "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" ) func generateUpdate(baseVersion uint64) *pbgo.LatestConfigsResponse { diff --git a/pkg/config/remote/uptane/target_store.go b/pkg/config/remote/uptane/target_store.go index d6ac66bc2f1bd..d8caebc0552ed 100644 --- a/pkg/config/remote/uptane/target_store.go +++ b/pkg/config/remote/uptane/target_store.go @@ -8,7 +8,7 @@ package uptane import ( "fmt" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" ) // targetStore persists all the target files present in the current director targets.json diff --git a/pkg/config/remote/uptane/target_store_test.go b/pkg/config/remote/uptane/target_store_test.go index 59e1147dceb6c..d1b3d7ea29b90 100644 --- a/pkg/config/remote/uptane/target_store_test.go +++ b/pkg/config/remote/uptane/target_store_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" ) func TestTargetStore(t *testing.T) { diff --git a/pkg/flare/remote_config.go b/pkg/flare/remote_config.go index a16019767e68f..cdf67a41116b9 100644 --- a/pkg/flare/remote_config.go +++ b/pkg/flare/remote_config.go @@ -19,7 +19,7 @@ import ( flarehelpers "github.com/DataDog/datadog-agent/comp/core/flare/helpers" 
"github.com/DataDog/datadog-agent/pkg/api/security" "github.com/DataDog/datadog-agent/pkg/config" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util" agentgrpc "github.com/DataDog/datadog-agent/pkg/util/grpc" "github.com/fatih/color" diff --git a/pkg/logs/client/http/destination.go b/pkg/logs/client/http/destination.go index f951abab25252..f443f99fb865e 100644 --- a/pkg/logs/client/http/destination.go +++ b/pkg/logs/client/http/destination.go @@ -18,6 +18,7 @@ import ( "sync" "time" + coreConfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/logs/client" "github.com/DataDog/datadog-agent/pkg/logs/config" "github.com/DataDog/datadog-agent/pkg/logs/internal/metrics" @@ -111,9 +112,19 @@ func newDestination(endpoint config.Endpoint, if maxConcurrentBackgroundSends <= 0 { maxConcurrentBackgroundSends = 1 } - + var policy backoff.Policy if endpoint.Origin == config.ServerlessIntakeOrigin { - shouldRetry = false + policy = backoff.NewConstantBackoffPolicy( + coreConfig.Datadog.GetDuration("serverless.constant_backoff_interval"), + ) + } else { + policy = backoff.NewExpBackoffPolicy( + endpoint.BackoffFactor, + endpoint.BackoffBase, + endpoint.BackoffMax, + endpoint.RecoveryInterval, + endpoint.RecoveryReset, + ) } expVars := &expvar.Map{} @@ -123,14 +134,6 @@ func newDestination(endpoint config.Endpoint, metrics.DestinationExpVars.Set(telemetryName, expVars) } - policy := backoff.NewPolicy( - endpoint.BackoffFactor, - endpoint.BackoffBase, - endpoint.BackoffMax, - endpoint.RecoveryInterval, - endpoint.RecoveryReset, - ) - return &Destination{ host: endpoint.Host, url: buildURL(endpoint), diff --git a/pkg/logs/client/http/destination_test.go b/pkg/logs/client/http/destination_test.go index e85e85ccb6a29..598039ec867b0 100644 --- a/pkg/logs/client/http/destination_test.go +++ 
b/pkg/logs/client/http/destination_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" + coreConfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/logs/client" "github.com/DataDog/datadog-agent/pkg/logs/message" @@ -324,10 +325,10 @@ func TestBackoffDelayDisabled(t *testing.T) { server.Stop() } -func TestBackoffDelayDisabledServerless(t *testing.T) { +func TestBackoffShouldBeConstantServerless(t *testing.T) { dest := NewDestination(config.Endpoint{ Origin: "lambda-extension", }, "", nil, 0, true, "") - assert.False(t, dest.shouldRetry) + assert.Equal(t, dest.backoff.GetBackoffDuration(0), coreConfig.Datadog.GetDuration("serverless.constant_backoff_interval")) } diff --git a/pkg/logs/internal/pb/agent_logs_payload.pb.go b/pkg/logs/internal/pb/agent_logs_payload.pb.go deleted file mode 100644 index 8d2e9bdd1ec6a..0000000000000 --- a/pkg/logs/internal/pb/agent_logs_payload.pb.go +++ /dev/null @@ -1,595 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: agent_logs_payload.proto - -/* - Package pb is a generated protocol buffer package. - - It is generated from these files: - agent_logs_payload.proto - - It has these top-level messages: - Log -*/ -package pb - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type Log struct { - Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` - Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` - Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // from host - Hostname string `protobuf:"bytes,4,opt,name=hostname,proto3" json:"hostname,omitempty"` - // from config - Service string `protobuf:"bytes,5,opt,name=service,proto3" json:"service,omitempty"` - Source string `protobuf:"bytes,6,opt,name=source,proto3" json:"source,omitempty"` - // from config, container tags, ... - Tags []string `protobuf:"bytes,7,rep,name=tags" json:"tags,omitempty"` -} - -func (m *Log) Reset() { *m = Log{} } -func (m *Log) String() string { return proto.CompactTextString(m) } -func (*Log) ProtoMessage() {} -func (*Log) Descriptor() ([]byte, []int) { return fileDescriptorAgentLogsPayload, []int{0} } - -func (m *Log) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -func (m *Log) GetStatus() string { - if m != nil { - return m.Status - } - return "" -} - -func (m *Log) GetTimestamp() int64 { - if m != nil { - return m.Timestamp - } - return 0 -} - -func (m *Log) GetHostname() string { - if m != nil { - return m.Hostname - } - return "" -} - -func (m *Log) GetService() string { - if m != nil { - return m.Service - } - return "" -} - -func (m *Log) GetSource() string { - if m != nil { - return m.Source - } - return "" -} - -func (m *Log) GetTags() []string { - if m != nil { - return m.Tags - } - return nil -} - -func init() { - proto.RegisterType((*Log)(nil), "pb.Log") -} -func (m *Log) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Log) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int 
- _ = l - if len(m.Message) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintAgentLogsPayload(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - } - if len(m.Status) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintAgentLogsPayload(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - } - if m.Timestamp != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintAgentLogsPayload(dAtA, i, uint64(m.Timestamp)) - } - if len(m.Hostname) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintAgentLogsPayload(dAtA, i, uint64(len(m.Hostname))) - i += copy(dAtA[i:], m.Hostname) - } - if len(m.Service) > 0 { - dAtA[i] = 0x2a - i++ - i = encodeVarintAgentLogsPayload(dAtA, i, uint64(len(m.Service))) - i += copy(dAtA[i:], m.Service) - } - if len(m.Source) > 0 { - dAtA[i] = 0x32 - i++ - i = encodeVarintAgentLogsPayload(dAtA, i, uint64(len(m.Source))) - i += copy(dAtA[i:], m.Source) - } - if len(m.Tags) > 0 { - for _, s := range m.Tags { - dAtA[i] = 0x3a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func encodeVarintAgentLogsPayload(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Log) Size() (n int) { - var l int - _ = l - l = len(m.Message) - if l > 0 { - n += 1 + l + sovAgentLogsPayload(uint64(l)) - } - l = len(m.Status) - if l > 0 { - n += 1 + l + sovAgentLogsPayload(uint64(l)) - } - if m.Timestamp != 0 { - n += 1 + sovAgentLogsPayload(uint64(m.Timestamp)) - } - l = len(m.Hostname) - if l > 0 { - n += 1 + l + sovAgentLogsPayload(uint64(l)) - } - l = len(m.Service) - if l > 0 { - n += 1 + l + sovAgentLogsPayload(uint64(l)) - } - l = len(m.Source) - if l > 0 { - n += 1 + l + sovAgentLogsPayload(uint64(l)) - } - if len(m.Tags) > 0 { - for _, s := range m.Tags { - l = len(s) - n += 1 + l + 
sovAgentLogsPayload(uint64(l)) - } - } - return n -} - -func sovAgentLogsPayload(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozAgentLogsPayload(x uint64) (n int) { - return sovAgentLogsPayload(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Log) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAgentLogsPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Log: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Log: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAgentLogsPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAgentLogsPayload - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAgentLogsPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - 
intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAgentLogsPayload - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - m.Timestamp = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAgentLogsPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Timestamp |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAgentLogsPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAgentLogsPayload - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hostname = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAgentLogsPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAgentLogsPayload - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Service = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAgentLogsPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAgentLogsPayload - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Source = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAgentLogsPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAgentLogsPayload - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tags = append(m.Tags, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAgentLogsPayload(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAgentLogsPayload - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipAgentLogsPayload(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAgentLogsPayload - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType 
:= int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAgentLogsPayload - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAgentLogsPayload - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthAgentLogsPayload - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAgentLogsPayload - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipAgentLogsPayload(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthAgentLogsPayload = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowAgentLogsPayload = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("agent_logs_payload.proto", fileDescriptorAgentLogsPayload) } - -var fileDescriptorAgentLogsPayload = []byte{ - // 249 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x90, 0xcd, 0x4a, 0xc3, 0x40, - 0x14, 0x85, 0x99, 0xa6, 0xa6, 0x66, 0x10, 0x84, 0x59, 0xc8, 0x20, 0x12, 0x82, 0x6e, 
0xb2, 0x31, - 0x59, 0xf8, 0x04, 0x96, 0x2e, 0x5d, 0x94, 0x2c, 0xdd, 0x94, 0x3b, 0xc9, 0x30, 0x0d, 0x74, 0x7a, - 0x87, 0xdc, 0x1b, 0xc1, 0x27, 0xf3, 0x15, 0x5c, 0xfa, 0x08, 0x92, 0x27, 0x91, 0x4e, 0xa3, 0xee, - 0xce, 0x37, 0x3f, 0x7c, 0x9c, 0x23, 0x35, 0x38, 0x7b, 0xe4, 0xdd, 0x01, 0x1d, 0xed, 0x02, 0xbc, - 0x1f, 0x10, 0xba, 0x2a, 0x0c, 0xc8, 0xa8, 0x16, 0xc1, 0xdc, 0x7f, 0x08, 0x99, 0xbc, 0xa0, 0x53, - 0x5a, 0xae, 0xbc, 0x25, 0x02, 0x67, 0xb5, 0x28, 0x44, 0x99, 0x35, 0xbf, 0xa8, 0x6e, 0x64, 0x4a, - 0x0c, 0x3c, 0x92, 0x5e, 0xc4, 0x8b, 0x99, 0xd4, 0x9d, 0xcc, 0xb8, 0xf7, 0x96, 0x18, 0x7c, 0xd0, - 0x49, 0x21, 0xca, 0xa4, 0xf9, 0x3f, 0x50, 0xb7, 0xf2, 0x72, 0x8f, 0xc4, 0x47, 0xf0, 0x56, 0x2f, - 0xe3, 0xbf, 0x3f, 0x3e, 0xb9, 0xc8, 0x0e, 0x6f, 0x7d, 0x6b, 0xf5, 0xc5, 0xd9, 0x35, 0x63, 0x74, - 0xe1, 0x38, 0xb4, 0x56, 0xa7, 0xb3, 0x2b, 0x92, 0x52, 0x72, 0xc9, 0xe0, 0x48, 0xaf, 0x8a, 0xa4, - 0xcc, 0x9a, 0x98, 0xd7, 0xcd, 0xe7, 0x94, 0x8b, 0xaf, 0x29, 0x17, 0xdf, 0x53, 0x2e, 0xe4, 0x75, - 0x8b, 0xbe, 0xea, 0xba, 0x2a, 0x96, 0xad, 0x82, 0x59, 0x5f, 0x3d, 0x9f, 0xd2, 0xf6, 0x5c, 0x78, - 0x2b, 0x5e, 0x1f, 0x5c, 0xcf, 0xfb, 0xd1, 0x54, 0x2d, 0xfa, 0x7a, 0x03, 0x0c, 0x1b, 0x74, 0x75, - 0x7c, 0xfc, 0x38, 0x8f, 0x52, 0x07, 0x63, 0xd2, 0x38, 0xcc, 0xd3, 0x4f, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x11, 0x4f, 0x88, 0xb8, 0x34, 0x01, 0x00, 0x00, -} diff --git a/pkg/logs/internal/processor/encoder_test.go b/pkg/logs/internal/processor/encoder_test.go index 439276e766664..dc64892f915b6 100644 --- a/pkg/logs/internal/processor/encoder_test.go +++ b/pkg/logs/internal/processor/encoder_test.go @@ -15,8 +15,9 @@ import ( "github.com/stretchr/testify/assert" + "github.com/DataDog/agent-payload/pb" + "github.com/DataDog/datadog-agent/pkg/logs/config" - "github.com/DataDog/datadog-agent/pkg/logs/internal/pb" "github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/DataDog/datadog-agent/pkg/logs/sources" ) diff --git a/pkg/logs/internal/processor/proto.go 
b/pkg/logs/internal/processor/proto.go index 806c987361f6e..e27b40caae143 100644 --- a/pkg/logs/internal/processor/proto.go +++ b/pkg/logs/internal/processor/proto.go @@ -8,7 +8,7 @@ package processor import ( "time" - "github.com/DataDog/datadog-agent/pkg/logs/internal/pb" + "github.com/DataDog/agent-payload/pb" "github.com/DataDog/datadog-agent/pkg/logs/message" ) diff --git a/pkg/network/ebpf/bpf_module.go b/pkg/network/ebpf/bpf_module.go index ebb76e36f90e2..9a49a8ac9d008 100644 --- a/pkg/network/ebpf/bpf_module.go +++ b/pkg/network/ebpf/bpf_module.go @@ -51,6 +51,11 @@ func ReadHTTPModule(bpfDir string, debug bool) (bytecode.AssetReader, error) { return readModule(bpfDir, "usm", debug) } +// ReadSharedLibrariesModule from the asset file +func ReadSharedLibrariesModule(bpfDir string, debug bool) (bytecode.AssetReader, error) { + return readModule(bpfDir, "shared-libraries", debug) +} + // ReadDNSModule from the asset file func ReadDNSModule(bpfDir string, debug bool) (bytecode.AssetReader, error) { return readModule(bpfDir, "dns", debug) diff --git a/pkg/network/ebpf/c/prebuilt/shared-libraries.c b/pkg/network/ebpf/c/prebuilt/shared-libraries.c new file mode 100644 index 0000000000000..355e048ebb6f9 --- /dev/null +++ b/pkg/network/ebpf/c/prebuilt/shared-libraries.c @@ -0,0 +1,13 @@ +#include "kconfig.h" +#include "bpf_tracing.h" +#include "bpf_telemetry.h" +#include "bpf_builtins.h" + +#include + +#include "shared-libraries/types.h" +#include "shared-libraries/maps.h" +// all probes are shared among prebuilt and runtime, and can be found here +#include "shared-libraries/probes.h" + +char _license[] SEC("license") = "GPL"; diff --git a/pkg/network/ebpf/c/prebuilt/usm.c b/pkg/network/ebpf/c/prebuilt/usm.c index f16b18c45abd1..7be8da168fed4 100644 --- a/pkg/network/ebpf/c/prebuilt/usm.c +++ b/pkg/network/ebpf/c/prebuilt/usm.c @@ -13,7 +13,6 @@ #include "protocols/tls/java/erpc_dispatcher.h" #include "protocols/tls/java/erpc_handlers.h" #include 
"protocols/tls/https.h" -#include "protocols/tls/sowatcher.h" #include "protocols/tls/tags-types.h" SEC("socket/protocol_dispatcher") diff --git a/pkg/network/ebpf/c/protocols/http/maps.h b/pkg/network/ebpf/c/protocols/http/maps.h index 21f87ba11b3a4..d914199aae142 100644 --- a/pkg/network/ebpf/c/protocols/http/maps.h +++ b/pkg/network/ebpf/c/protocols/http/maps.h @@ -6,7 +6,6 @@ #include "protocols/http/types.h" #include "protocols/tls/go-tls-types.h" -#include "protocols/tls/sowatcher-types.h" /* This map is used to keep track of in-flight HTTP transactions for each TCP connection */ BPF_HASH_MAP(http_in_flight, conn_tuple_t, http_transaction_t, 0) @@ -27,8 +26,6 @@ BPF_LRU_MAP(fd_by_ssl_bio, __u32, void *, 1024) BPF_LRU_MAP(ssl_ctx_by_pid_tgid, __u64, void *, 1024) -BPF_LRU_MAP(open_at_args, __u64, lib_path_t, 1024) - // offsets_data map contains the information about the locations of structs in the inspected binary, mapped by the binary's inode number. BPF_HASH_MAP(offsets_data, go_tls_offsets_data_key_t, tls_offsets_data_t, 1024) @@ -46,7 +43,4 @@ BPF_LRU_MAP(go_tls_write_args, go_tls_function_args_key_t, go_tls_write_args_dat if goTLS is enabled. 
*/ BPF_HASH_MAP(conn_tup_by_go_tls_conn, __u32, conn_tuple_t, 1) -/* This map used for notifying userspace of a shared library being loaded */ -BPF_PERF_EVENT_ARRAY_MAP(shared_libraries, __u32) - #endif diff --git a/pkg/network/ebpf/c/runtime/shared-libraries.c b/pkg/network/ebpf/c/runtime/shared-libraries.c new file mode 100644 index 0000000000000..982fa6169ed17 --- /dev/null +++ b/pkg/network/ebpf/c/runtime/shared-libraries.c @@ -0,0 +1,15 @@ +#include "bpf_tracing.h" +#include "bpf_builtins.h" + +#include "ktypes.h" +#ifdef COMPILE_RUNTIME +#include "kconfig.h" +#include +#endif + +#include "shared-libraries/types.h" +#include "shared-libraries/maps.h" +// all probes are shared among prebuilt and runtime, and can be found here +#include "shared-libraries/probes.h" + +char _license[] SEC("license") = "GPL"; diff --git a/pkg/network/ebpf/c/runtime/usm.c b/pkg/network/ebpf/c/runtime/usm.c index cde36a88e90c9..a4e5f6cb80696 100644 --- a/pkg/network/ebpf/c/runtime/usm.c +++ b/pkg/network/ebpf/c/runtime/usm.c @@ -24,7 +24,6 @@ #include "protocols/tls/go-tls-location.h" #include "protocols/tls/go-tls-conn.h" #include "protocols/tls/https.h" -#include "protocols/tls/sowatcher.h" #include "protocols/tls/tags-types.h" // The entrypoint for all packets classification & decoding in universal service monitoring. 
diff --git a/pkg/network/ebpf/c/shared-libraries/maps.h b/pkg/network/ebpf/c/shared-libraries/maps.h new file mode 100644 index 0000000000000..edd53a0c2c532 --- /dev/null +++ b/pkg/network/ebpf/c/shared-libraries/maps.h @@ -0,0 +1,12 @@ +#ifndef __SHARED_LIBRARIES_MAPS_H +#define __SHARED_LIBRARIES_MAPS_H + +#include "shared-libraries/types.h" +#include "map-defs.h" + +BPF_LRU_MAP(open_at_args, __u64, lib_path_t, 1024) + +/* This map used for notifying userspace of a shared library being loaded */ +BPF_PERF_EVENT_ARRAY_MAP(shared_libraries, __u32) + +#endif diff --git a/pkg/network/ebpf/c/protocols/tls/sowatcher.h b/pkg/network/ebpf/c/shared-libraries/probes.h similarity index 90% rename from pkg/network/ebpf/c/protocols/tls/sowatcher.h rename to pkg/network/ebpf/c/shared-libraries/probes.h index 56f4e84c99484..e8e55f947acfc 100644 --- a/pkg/network/ebpf/c/protocols/tls/sowatcher.h +++ b/pkg/network/ebpf/c/shared-libraries/probes.h @@ -1,7 +1,7 @@ -#ifndef __SOWATCHER_H -#define __SOWATCHER_H +#ifndef __SHARED_LIBRARIES_PROBES_H +#define __SHARED_LIBRARIES_PROBES_H -#include "protocols/tls/sowatcher-types.h" +#include "shared-libraries/types.h" static __always_inline void fill_path_safe(lib_path_t *path, const char *path_argument) { #pragma unroll @@ -16,7 +16,7 @@ static __always_inline void fill_path_safe(lib_path_t *path, const char *path_ar static __always_inline void do_sys_open_helper_enter(const char *filename) { lib_path_t path = {0}; - if (bpf_probe_read_user_with_telemetry(path.buf, sizeof(path.buf), filename) >= 0) { + if (bpf_probe_read_user(path.buf, sizeof(path.buf), filename) >= 0) { // Find the null character and clean up the garbage following it #pragma unroll for (int i = 0; i < LIB_PATH_MAX_SIZE; i++) { @@ -37,7 +37,7 @@ static __always_inline void do_sys_open_helper_enter(const char *filename) { u64 pid_tgid = bpf_get_current_pid_tgid(); path.pid = pid_tgid >> 32; - bpf_map_update_with_telemetry(open_at_args, &pid_tgid, &path, BPF_ANY); + 
bpf_map_update_elem(&open_at_args, &pid_tgid, &path, BPF_ANY); return; } @@ -84,7 +84,7 @@ static __always_inline void do_sys_open_helper_exit(exit_sys_openat_ctx *args) { } u32 cpu = bpf_get_smp_processor_id(); - bpf_perf_event_output_with_telemetry((void*)args, &shared_libraries, cpu, path, sizeof(lib_path_t)); + bpf_perf_event_output((void*)args, &shared_libraries, cpu, path, sizeof(lib_path_t)); cleanup: bpf_map_delete_elem(&open_at_args, &pid_tgid); return; diff --git a/pkg/network/ebpf/c/protocols/tls/sowatcher-types.h b/pkg/network/ebpf/c/shared-libraries/types.h similarity index 92% rename from pkg/network/ebpf/c/protocols/tls/sowatcher-types.h rename to pkg/network/ebpf/c/shared-libraries/types.h index 54057ae6a9ebe..ef234b0af9181 100644 --- a/pkg/network/ebpf/c/protocols/tls/sowatcher-types.h +++ b/pkg/network/ebpf/c/shared-libraries/types.h @@ -1,5 +1,5 @@ -#ifndef __SOWATCHER_TYPES_H -#define __SOWATCHER_TYPES_H +#ifndef __SHARED_LIBRARIES_TYPES_H +#define __SHARED_LIBRARIES_TYPES_H #include "ktypes.h" diff --git a/pkg/network/protocols/http/types.go b/pkg/network/protocols/http/types.go index fbe9d89705158..85badcfdb43e4 100644 --- a/pkg/network/protocols/http/types.go +++ b/pkg/network/protocols/http/types.go @@ -9,7 +9,6 @@ package http /* #include "../../ebpf/c/protocols/tls/tags-types.h" -#include "../../ebpf/c/protocols/tls/sowatcher-types.h" #include "../../ebpf/c/protocols/http/types.h" #include "../../ebpf/c/protocols/classification/defs.h" */ @@ -21,12 +20,8 @@ type SslReadArgs C.ssl_read_args_t type EbpfTx C.http_transaction_t -type LibPath C.lib_path_t - const ( BufferSize = C.HTTP_BUFFER_SIZE - - libPathMaxSize = C.LIB_PATH_MAX_SIZE ) type ConnTag = uint64 diff --git a/pkg/network/protocols/http/types_linux.go b/pkg/network/protocols/http/types_linux.go index e27665f9fab1a..cd42c76e9a980 100644 --- a/pkg/network/protocols/http/types_linux.go +++ b/pkg/network/protocols/http/types_linux.go @@ -35,16 +35,8 @@ type EbpfTx struct { Tags uint64 
} -type LibPath struct { - Pid uint32 - Len uint32 - Buf [120]byte -} - const ( BufferSize = 0xa0 - - libPathMaxSize = 0x78 ) type ConnTag = uint64 diff --git a/pkg/network/protocols/kafka/statkeeper.go b/pkg/network/protocols/kafka/statkeeper.go index bf0d3213e53e6..30ff827ec8037 100644 --- a/pkg/network/protocols/kafka/statkeeper.go +++ b/pkg/network/protocols/kafka/statkeeper.go @@ -11,6 +11,7 @@ import ( "sync" "github.com/DataDog/datadog-agent/pkg/network/config" + "github.com/DataDog/datadog-agent/pkg/util/log" ) type StatKeeper struct { @@ -65,6 +66,11 @@ func (statKeeper *StatKeeper) GetAndResetAllStats() map[Key]*RequestStat { } func (statKeeper *StatKeeper) extractTopicName(tx *EbpfTx) string { + // Limit tx.Topic_name_size to not exceed the actual length of tx.Topic_name + if tx.Topic_name_size > uint16(len(tx.Topic_name)) { + log.Debugf("Topic name size was changed from %d, to size: %d", tx.Topic_name_size, len(tx.Topic_name)) + tx.Topic_name_size = uint16(len(tx.Topic_name)) + } b := tx.Topic_name[:tx.Topic_name_size] // the trick here is that the Go runtime doesn't allocate the string used in diff --git a/pkg/network/protocols/kafka/statkeeper_test.go b/pkg/network/protocols/kafka/statkeeper_test.go index 347a6fd314fc7..8df24a5f2ad47 100644 --- a/pkg/network/protocols/kafka/statkeeper_test.go +++ b/pkg/network/protocols/kafka/statkeeper_test.go @@ -8,6 +8,7 @@ package kafka import ( + "strings" "testing" "github.com/DataDog/datadog-agent/pkg/network/config" @@ -31,3 +32,39 @@ func BenchmarkStatKeeperSameTX(b *testing.B) { sk.Process(tx) } } + +func TestStatKeeper_extractTopicName(t *testing.T) { + tests := []struct { + name string + tx *EbpfTx + want string + }{ + { + name: "slice bigger then Topic_name", + tx: &EbpfTx{ + Topic_name: [80]byte{}, + Topic_name_size: 85, + }, + want: strings.Repeat("*", 80), + }, + { + name: "slice smaller then Topic_name", + tx: &EbpfTx{ + Topic_name: [80]byte{}, + Topic_name_size: 60, + }, + want: 
strings.Repeat("*", 60), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + statKeeper := &StatKeeper{ + topicNames: map[string]string{}, + } + copy(tt.tx.Topic_name[:], strings.Repeat("*", len(tt.tx.Topic_name))) + if got := statKeeper.extractTopicName(tt.tx); len(got) != len(tt.want) { + t.Errorf("extractTopicName() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/network/usm/ebpf_gotls.go b/pkg/network/usm/ebpf_gotls.go index 73a1b611fe29f..b0db9c701a9f2 100644 --- a/pkg/network/usm/ebpf_gotls.go +++ b/pkg/network/usm/ebpf_gotls.go @@ -34,6 +34,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/network/protocols/http/gotls/lookup" libtelemetry "github.com/DataDog/datadog-agent/pkg/network/protocols/telemetry" errtelemetry "github.com/DataDog/datadog-agent/pkg/network/telemetry" + "github.com/DataDog/datadog-agent/pkg/network/usm/sharedlibraries" "github.com/DataDog/datadog-agent/pkg/process/monitor" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -43,6 +44,10 @@ const ( goTLSReadArgsMap = "go_tls_read_args" goTLSWriteArgsMap = "go_tls_write_args" connectionTupleByGoTLSMap = "conn_tup_by_go_tls_conn" + + // The interval of the periodic scan for terminated processes. Increasing the interval, might cause larger spikes in cpu + // and lowering it might cause constant cpu usage. 
+ scanTerminatedProcessesInterval = 30 * time.Second ) type uprobeInfo struct { @@ -496,7 +501,7 @@ func (p *GoTLSProgram) removeInspectionResultFromMap(binID binaryID) { } func (p *GoTLSProgram) attachHooks(result *bininspect.Result, binPath string) (probeIDs []manager.ProbeIdentificationPair, err error) { - pathID, err := newPathIdentifier(binPath) + pathID, err := sharedlibraries.NewPathIdentifier(binPath) if err != nil { return probeIDs, fmt.Errorf("can't create path identifier for path %s : %s", binPath, err) } diff --git a/pkg/network/usm/ebpf_main.go b/pkg/network/usm/ebpf_main.go index f1d29674cb80d..9e1aefb39cf9e 100644 --- a/pkg/network/usm/ebpf_main.go +++ b/pkg/network/usm/ebpf_main.go @@ -145,7 +145,7 @@ func newEBPFProgram(c *config.Config, connectionProtocolMap, sockFD *ebpf.Map, b if javaTLSProg != nil { subprograms = append(subprograms, javaTLSProg) } - openSSLProg := newSSLProgram(c, sockFD) + openSSLProg := newSSLProgram(c, mgr, sockFD, bpfTelemetry) subprogramProbesResolvers = append(subprogramProbesResolvers, openSSLProg) if openSSLProg != nil { subprograms = append(subprograms, openSSLProg) diff --git a/pkg/network/usm/ebpf_ssl.go b/pkg/network/usm/ebpf_ssl.go index 2fad5b8bcaae7..fce0d33a931a3 100644 --- a/pkg/network/usm/ebpf_ssl.go +++ b/pkg/network/usm/ebpf_ssl.go @@ -10,7 +10,6 @@ package usm import ( "debug/elf" "fmt" - "os" "regexp" "strings" @@ -18,14 +17,13 @@ import ( "github.com/cilium/ebpf" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/ebpf/probe/ebpfcheck" - ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/ebpf/probes" "github.com/DataDog/datadog-agent/pkg/network/go/bininspect" "github.com/DataDog/datadog-agent/pkg/network/protocols/http" errtelemetry "github.com/DataDog/datadog-agent/pkg/network/telemetry" + "github.com/DataDog/datadog-agent/pkg/network/usm/sharedlibraries" 
"github.com/DataDog/datadog-agent/pkg/util/common" - "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -197,38 +195,48 @@ var gnuTLSProbes = []manager.ProbesSelector{ const ( sslSockByCtxMap = "ssl_sock_by_ctx" sharedLibrariesPerfMap = "shared_libraries" - - // probe used for streaming shared library events - openatSysCall = "openat" - openat2SysCall = "openat2" -) - -var ( - traceTypes = []string{"enter", "exit"} ) type sslProgram struct { - cfg *config.Config - sockFDMap *ebpf.Map - perfHandler *ddebpf.PerfHandler - perfMap *manager.PerfMap - watcher *soWatcher - manager *errtelemetry.Manager - sysOpenHooksIdentifiers []manager.ProbeIdentificationPair + cfg *config.Config + sockFDMap *ebpf.Map + manager *errtelemetry.Manager + watcher *sharedlibraries.Watcher } var _ subprogram = &sslProgram{} -func newSSLProgram(c *config.Config, sockFDMap *ebpf.Map) *sslProgram { +func newSSLProgram(c *config.Config, m *manager.Manager, sockFDMap *ebpf.Map, bpfTelemetry *errtelemetry.EBPFTelemetry) *sslProgram { if !c.EnableHTTPSMonitoring || !http.HTTPSSupported(c) { return nil } + watcher, err := sharedlibraries.NewWatcher(c, + sharedlibraries.Rule{ + Re: regexp.MustCompile(`libssl.so`), + RegisterCB: addHooks(m, openSSLProbes), + UnregisterCB: removeHooks(m, openSSLProbes), + }, + sharedlibraries.Rule{ + Re: regexp.MustCompile(`libcrypto.so`), + RegisterCB: addHooks(m, cryptoProbes), + UnregisterCB: removeHooks(m, cryptoProbes), + }, + sharedlibraries.Rule{ + Re: regexp.MustCompile(`libgnutls.so`), + RegisterCB: addHooks(m, gnuTLSProbes), + UnregisterCB: removeHooks(m, gnuTLSProbes), + }, + ) + if err != nil { + log.Errorf("error initializating shared library watcher: %s", err) + return nil + } + return &sslProgram{ - cfg: c, - sockFDMap: sockFDMap, - perfHandler: ddebpf.NewPerfHandler(100), - sysOpenHooksIdentifiers: getSysOpenHooksIdentifiers(), + cfg: c, + sockFDMap: sockFDMap, + watcher: watcher, } } @@ 
-242,28 +250,6 @@ func (o *sslProgram) IsBuildModeSupported(_ buildMode) bool { func (o *sslProgram) ConfigureManager(m *errtelemetry.Manager) { o.manager = m - - o.perfMap = &manager.PerfMap{ - Map: manager.Map{Name: sharedLibrariesPerfMap}, - PerfMapOptions: manager.PerfMapOptions{ - PerfRingBufferSize: 8 * os.Getpagesize(), - Watermark: 1, - RecordHandler: o.perfHandler.RecordHandler, - LostHandler: o.perfHandler.LostHandler, - RecordGetter: o.perfHandler.RecordGetter, - }, - } - - m.PerfMaps = append(m.PerfMaps, o.perfMap) - - for _, identifier := range o.sysOpenHooksIdentifiers { - m.Probes = append(m.Probes, - &manager.Probe{ - ProbeIdentificationPair: identifier, - KProbeMaxActive: maxActive, - }, - ) - } } func (o *sslProgram) ConfigureOptions(options *manager.Options) { @@ -273,14 +259,6 @@ func (o *sslProgram) ConfigureOptions(options *manager.Options) { EditorFlag: manager.EditMaxEntries, } - for _, identifier := range o.sysOpenHooksIdentifiers { - options.ActivatedProbes = append(options.ActivatedProbes, - &manager.ProbeSelector{ - ProbeIdentificationPair: identifier, - }, - ) - } - if options.MapEditors == nil { options.MapEditors = make(map[string]*ebpf.Map) } @@ -289,54 +267,15 @@ func (o *sslProgram) ConfigureOptions(options *manager.Options) { } func (o *sslProgram) Start() { - // Setup shared library watcher and configure the appropriate callbacks - o.watcher = newSOWatcher(o.perfHandler, - soRule{ - re: regexp.MustCompile(`libssl.so`), - registerCB: addHooks(o.manager, openSSLProbes), - unregisterCB: removeHooks(o.manager, openSSLProbes), - }, - soRule{ - re: regexp.MustCompile(`libcrypto.so`), - registerCB: addHooks(o.manager, cryptoProbes), - unregisterCB: removeHooks(o.manager, cryptoProbes), - }, - soRule{ - re: regexp.MustCompile(`libgnutls.so`), - registerCB: addHooks(o.manager, gnuTLSProbes), - unregisterCB: removeHooks(o.manager, gnuTLSProbes), - }, - ) - o.watcher.Start() } func (o *sslProgram) Stop() { - // Detaching the sys-open hooks, 
as they are feeding the perf map we're going to close next. - for _, identifier := range o.sysOpenHooksIdentifiers { - probe, found := o.manager.GetProbe(identifier) - if !found { - continue - } - if err := probe.Stop(); err != nil { - log.Errorf("Failed to stop hook %q. Error: %s", identifier.EBPFFuncName, err) - } - } - - if o.perfMap != nil { - if err := o.perfMap.Stop(manager.CleanAll); err != nil { - log.Errorf("Failed to stop perf map. Error: %s", err) - } - } - - // We must stop the watcher first, as we can read from the perfHandler, before terminating the perfHandler, otherwise - // we might try to send events over the perfHandler. o.watcher.Stop() - o.perfHandler.Stop() } -func addHooks(m *errtelemetry.Manager, probes []manager.ProbesSelector) func(pathIdentifier, string, string) error { - return func(id pathIdentifier, root string, path string) error { +func addHooks(m *manager.Manager, probes []manager.ProbesSelector) func(sharedlibraries.PathIdentifier, string, string) error { + return func(id sharedlibraries.PathIdentifier, root string, path string) error { uid := getUID(id) elfFile, err := elf.Open(root + path) @@ -416,7 +355,7 @@ func addHooks(m *errtelemetry.Manager, probes []manager.ProbesSelector) func(pat ebpfcheck.AddProgramNameMapping(newProbe.ID(), fmt.Sprintf("%s_%s", newProbe.EBPFFuncName, identifier.UID), "usm_tls") } } - if err := singleProbe.RunValidator(m.Manager); err != nil { + if err := singleProbe.RunValidator(m); err != nil { return err } } @@ -425,8 +364,8 @@ func addHooks(m *errtelemetry.Manager, probes []manager.ProbesSelector) func(pat } } -func removeHooks(m *errtelemetry.Manager, probes []manager.ProbesSelector) func(pathIdentifier) error { - return func(lib pathIdentifier) error { +func removeHooks(m *manager.Manager, probes []manager.ProbesSelector) func(sharedlibraries.PathIdentifier) error { + return func(lib sharedlibraries.PathIdentifier) error { uid := getUID(lib) for _, singleProbe := range probes { for _, selector := 
range singleProbe.GetProbesIdentificationPairList() { @@ -463,7 +402,7 @@ func removeHooks(m *errtelemetry.Manager, probes []manager.ProbesSelector) func( // fmt.Sprintf("%s_%.*s_%s_%s", probeType, maxFuncNameLen, functionName, UID, attachPIDstr) // // functionName is variable but with a minimum guarantee of 10 chars -func getUID(lib pathIdentifier) string { +func getUID(lib sharedlibraries.PathIdentifier) string { return lib.Key()[:5] } @@ -480,47 +419,5 @@ func (*sslProgram) GetAllUndefinedProbes() []manager.ProbeIdentificationPair { } } - for _, hook := range []string{openatSysCall, openat2SysCall} { - for _, traceType := range traceTypes { - probeList = append(probeList, manager.ProbeIdentificationPair{ - EBPFFuncName: fmt.Sprintf("tracepoint__syscalls__sys_%s_%s", traceType, hook), - }) - } - } - return probeList } - -func sysOpenAt2Supported() bool { - missing, err := ddebpf.VerifyKernelFuncs("do_sys_openat2") - if err == nil && len(missing) == 0 { - return true - } - kversion, err := kernel.HostVersion() - if err != nil { - log.Error("could not determine the current kernel version. fallback to do_sys_open") - return false - } - - return kversion >= kernel.VersionCode(5, 6, 0) -} - -// getSysOpenHooksIdentifiers returns the enter and exit tracepoints for openat and openat2 (if supported). 
-func getSysOpenHooksIdentifiers() []manager.ProbeIdentificationPair { - openatProbes := []string{openatSysCall} - if sysOpenAt2Supported() { - openatProbes = append(openatProbes, openat2SysCall) - } - - res := make([]manager.ProbeIdentificationPair, 0, len(traceTypes)*len(openatProbes)) - for _, probe := range openatProbes { - for _, traceType := range traceTypes { - res = append(res, manager.ProbeIdentificationPair{ - EBPFFuncName: fmt.Sprintf("tracepoint__syscalls__sys_%s_%s", traceType, probe), - UID: probeUID, - }) - } - } - - return res -} diff --git a/pkg/network/usm/sharedlibraries/compile.go b/pkg/network/usm/sharedlibraries/compile.go new file mode 100644 index 0000000000000..373ce7bc4b380 --- /dev/null +++ b/pkg/network/usm/sharedlibraries/compile.go @@ -0,0 +1,30 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux_bpf + +package sharedlibraries + +import ( + "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode/runtime" + "github.com/DataDog/datadog-agent/pkg/network/config" + "github.com/DataDog/datadog-agent/pkg/process/statsd" +) + +//go:generate $GOPATH/bin/include_headers pkg/network/ebpf/c/runtime/shared-libraries.c pkg/ebpf/bytecode/build/runtime/shared-libraries.c pkg/ebpf/c pkg/network/ebpf/c/runtime pkg/network/ebpf/c +//go:generate $GOPATH/bin/integrity pkg/ebpf/bytecode/build/runtime/shared-libraries.c pkg/ebpf/bytecode/runtime/shared-libraries.go runtime + +func getRuntimeCompiledSharedLibraries(config *config.Config) (runtime.CompiledOutput, error) { + return runtime.SharedLibraries.Compile(&config.Config, getCFlags(config), statsd.Client) +} + +func getCFlags(config *config.Config) []string { + cflags := []string{"-g"} + + if config.BPFDebug { + cflags = append(cflags, "-DDEBUG=1") + } + return cflags +} diff --git a/pkg/network/usm/sharedlibraries/ebpf.go b/pkg/network/usm/sharedlibraries/ebpf.go new file mode 100644 index 0000000000000..bb2c6aee40f9d --- /dev/null +++ b/pkg/network/usm/sharedlibraries/ebpf.go @@ -0,0 +1,200 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux_bpf + +package sharedlibraries + +import ( + "fmt" + "math" + "os" + + manager "github.com/DataDog/ebpf-manager" + "golang.org/x/sys/unix" + + ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" + "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" + "github.com/DataDog/datadog-agent/pkg/network/config" + netebpf "github.com/DataDog/datadog-agent/pkg/network/ebpf" + "github.com/DataDog/datadog-agent/pkg/util/kernel" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +const ( + maxActive = 1024 + sharedLibrariesPerfMap = "shared_libraries" + probeUID = "so" + + // probe used for streaming shared library events + openatSysCall = "openat" + openat2SysCall = "openat2" +) + +var traceTypes = []string{"enter", "exit"} + +type ebpfProgram struct { + cfg *config.Config + perfHandler *ddebpf.PerfHandler + *manager.Manager +} + +func newEBPFProgram(c *config.Config) *ebpfProgram { + perfHandler := ddebpf.NewPerfHandler(100) + mgr := &manager.Manager{ + PerfMaps: []*manager.PerfMap{ + { + Map: manager.Map{ + Name: sharedLibrariesPerfMap, + }, + PerfMapOptions: manager.PerfMapOptions{ + PerfRingBufferSize: 8 * os.Getpagesize(), + Watermark: 1, + RecordHandler: perfHandler.RecordHandler, + LostHandler: perfHandler.LostHandler, + RecordGetter: perfHandler.RecordGetter, + }, + }, + }, + } + + probeIDs := getSysOpenHooksIdentifiers() + for _, identifier := range probeIDs { + mgr.Probes = append(mgr.Probes, + &manager.Probe{ + ProbeIdentificationPair: identifier, + KProbeMaxActive: maxActive, + }, + ) + } + + return &ebpfProgram{ + cfg: c, + Manager: mgr, + perfHandler: perfHandler, + } +} + +func (e *ebpfProgram) Init() error { + var err error + if e.cfg.EnableCORE { + err = e.initCORE() + if err == nil { + return nil + } + + if !e.cfg.AllowRuntimeCompiledFallback && !e.cfg.AllowPrecompiledFallback { + return fmt.Errorf("co-re load failed: %w", err) + } + log.Warnf("co-re load failed. 
attempting fallback: %s", err) + } + + if e.cfg.EnableRuntimeCompiler || (err != nil && e.cfg.AllowRuntimeCompiledFallback) { + err = e.initRuntimeCompiler() + if err == nil { + return nil + } + + if !e.cfg.AllowPrecompiledFallback { + return fmt.Errorf("runtime compilation failed: %w", err) + } + log.Warnf("runtime compilation failed: attempting fallback: %s", err) + } + + return e.initPrebuilt() +} + +func (e *ebpfProgram) GetPerfHandler() *ddebpf.PerfHandler { + return e.perfHandler +} + +func (e *ebpfProgram) Stop() { + e.Manager.Stop(manager.CleanAll) //nolint:errcheck + e.perfHandler.Stop() +} + +func (e *ebpfProgram) init(buf bytecode.AssetReader, options manager.Options) error { + options.RLimit = &unix.Rlimit{ + Cur: math.MaxUint64, + Max: math.MaxUint64, + } + + for _, probe := range e.Probes { + options.ActivatedProbes = append(options.ActivatedProbes, + &manager.ProbeSelector{ + ProbeIdentificationPair: probe.ProbeIdentificationPair, + }, + ) + } + + options.VerifierOptions.Programs.LogSize = 2 * 1024 * 1024 + return e.InitWithOptions(buf, options) +} + +func (e *ebpfProgram) initCORE() error { + assetName := getAssetName("shared-libraries", e.cfg.BPFDebug) + return ddebpf.LoadCOREAsset(&e.cfg.Config, assetName, e.init) +} + +func (e *ebpfProgram) initRuntimeCompiler() error { + bc, err := getRuntimeCompiledSharedLibraries(e.cfg) + if err != nil { + return err + } + defer bc.Close() + return e.init(bc, manager.Options{}) +} + +func (e *ebpfProgram) initPrebuilt() error { + bc, err := netebpf.ReadSharedLibrariesModule(e.cfg.BPFDir, e.cfg.BPFDebug) + if err != nil { + return err + } + defer bc.Close() + + return e.init(bc, manager.Options{}) +} + +func sysOpenAt2Supported() bool { + missing, err := ddebpf.VerifyKernelFuncs("do_sys_openat2") + if err == nil && len(missing) == 0 { + return true + } + kversion, err := kernel.HostVersion() + if err != nil { + log.Error("could not determine the current kernel version. 
fallback to do_sys_open") + return false + } + + return kversion >= kernel.VersionCode(5, 6, 0) +} + +// getSysOpenHooksIdentifiers returns the enter and exit tracepoints for openat and openat2 (if supported). +func getSysOpenHooksIdentifiers() []manager.ProbeIdentificationPair { + openatProbes := []string{openatSysCall} + if sysOpenAt2Supported() { + openatProbes = append(openatProbes, openat2SysCall) + } + + res := make([]manager.ProbeIdentificationPair, 0, len(traceTypes)*len(openatProbes)) + for _, probe := range openatProbes { + for _, traceType := range traceTypes { + res = append(res, manager.ProbeIdentificationPair{ + EBPFFuncName: fmt.Sprintf("tracepoint__syscalls__sys_%s_%s", traceType, probe), + UID: probeUID, + }) + } + } + + return res +} + +func getAssetName(module string, debug bool) string { + if debug { + return fmt.Sprintf("%s-debug.o", module) + } + + return fmt.Sprintf("%s.o", module) +} diff --git a/pkg/network/usm/sharedlibraries/path_identifier.go b/pkg/network/usm/sharedlibraries/path_identifier.go new file mode 100644 index 0000000000000..6377ffad963b5 --- /dev/null +++ b/pkg/network/usm/sharedlibraries/path_identifier.go @@ -0,0 +1,71 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux_bpf + +package sharedlibraries + +import ( + "encoding/base64" + "encoding/binary" + "fmt" + "os" + "syscall" + + "github.com/twmb/murmur3" + "golang.org/x/sys/unix" +) + +// PathIdentifier is the unique key (system wide) of a file based on dev/inode +type PathIdentifier struct { + dev uint64 + inode uint64 +} + +type pathIdentifierSet = map[PathIdentifier]struct{} + +func (p *PathIdentifier) String() string { + return fmt.Sprintf("dev/inode %d.%d/%d", unix.Major(p.dev), unix.Minor(p.dev), p.inode) +} + +// Key is a unique (system wide) TLDR Base64(murmur3.Sum64(device, inode)) +// It composes based the device (minor, major) and inode of a file +// murmur is a non-crypto hashing +// +// As multiple containers overlayfs (same inode but could be overwritten with different binary) +// device would be different +// +// a Base64 string representation is returned and could be used in a file path +func (p *PathIdentifier) Key() string { + buffer := make([]byte, 16) + binary.LittleEndian.PutUint64(buffer, p.dev) + binary.LittleEndian.PutUint64(buffer[8:], p.inode) + m := murmur3.Sum64(buffer) + bufferSum := make([]byte, 8) + binary.LittleEndian.PutUint64(bufferSum, m) + return base64.StdEncoding.EncodeToString(bufferSum) +} + +// NewPathIdentifier returns a new PathIdentifier instance +// Note that `path` must be an absolute path +func NewPathIdentifier(path string) (pi PathIdentifier, err error) { + if len(path) < 1 || path[0] != '/' { + return pi, fmt.Errorf("invalid path %q", path) + } + info, err := os.Stat(path) + if err != nil { + return pi, err + } + + stat, ok := info.Sys().(*syscall.Stat_t) + if !ok { + return pi, fmt.Errorf("invalid file %q stat %T", path, info.Sys()) + } + + return PathIdentifier{ + dev: stat.Dev, + inode: stat.Ino, + }, nil +} diff --git a/pkg/network/usm/sharedlibraries/registry.go b/pkg/network/usm/sharedlibraries/registry.go new file mode 100644 index 0000000000000..a10ac5e38cfa8 --- /dev/null +++ 
b/pkg/network/usm/sharedlibraries/registry.go @@ -0,0 +1,134 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package sharedlibraries + +import ( + "sync" + + "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/cihub/seelog" + "go.uber.org/atomic" +) + +type soRegistry struct { + m sync.RWMutex + byID map[PathIdentifier]*soRegistration + byPID map[uint32]pathIdentifierSet + + // if we can't register a uprobe we don't try more than once + blocklistByID pathIdentifierSet + + telemetry soRegistryTelemetry +} + +func (r *soRegistry) newRegistration(unregister func(PathIdentifier) error) *soRegistration { + uniqueCounter := atomic.Int32{} + uniqueCounter.Store(int32(1)) + return &soRegistration{ + unregisterCB: unregister, + uniqueProcessesCount: uniqueCounter, + telemetry: &r.telemetry, + } +} + +// cleanup removes all registrations. +// This function should be called in the termination, and after we're stopping all other goroutines. 
+func (r *soRegistry) cleanup() { + for pathID, reg := range r.byID { + reg.unregisterPath(pathID) + } +} + +// unregister a pid if exists, unregisterCB will be called if his uniqueProcessesCount == 0 +func (r *soRegistry) unregister(pid int) { + pidU32 := uint32(pid) + r.m.RLock() + _, found := r.byPID[pidU32] + r.m.RUnlock() + if !found { + return + } + + r.m.Lock() + defer r.m.Unlock() + paths, found := r.byPID[pidU32] + if !found { + return + } + for pathID := range paths { + reg, found := r.byID[pathID] + if !found { + r.telemetry.libUnregisterPathIDNotFound.Add(1) + continue + } + if reg.unregisterPath(pathID) { + // we need to clean up our entries as there are no more processes using this ELF + delete(r.byID, pathID) + } + } + delete(r.byPID, pidU32) +} + +// register a ELF library root/libPath as be used by the pid +// Only one registration will be done per ELF (system wide) +func (r *soRegistry) register(root, libPath string, pid uint32, rule Rule) { + hostLibPath := root + libPath + pathID, err := NewPathIdentifier(hostLibPath) + if err != nil { + // short living process can hit here + // as we receive the openat() syscall info after receiving the EXIT netlink process + if log.ShouldLog(seelog.TraceLvl) { + log.Tracef("can't create path identifier %s", err) + } + return + } + + r.m.Lock() + defer r.m.Unlock() + if _, found := r.blocklistByID[pathID]; found { + r.telemetry.libBlocked.Add(1) + return + } + + if reg, found := r.byID[pathID]; found { + if _, found := r.byPID[pid][pathID]; !found { + reg.uniqueProcessesCount.Inc() + // Can happen if a new process opens the same so. 
+ if len(r.byPID[pid]) == 0 { + r.byPID[pid] = pathIdentifierSet{} + } + r.byPID[pid][pathID] = struct{}{} + } + r.telemetry.libAlreadyRegistered.Add(1) + return + } + + if err := rule.RegisterCB(pathID, root, libPath); err != nil { + log.Debugf("error registering library (adding to blocklist) %s path %s by pid %d : %s", pathID.String(), hostLibPath, pid, err) + // we are calling UnregisterCB here as some uprobes could be already attached, UnregisterCB cleanup those entries + if rule.UnregisterCB != nil { + if err := rule.UnregisterCB(pathID); err != nil { + log.Debugf("UnregisterCB library %s path %s : %s", pathID.String(), hostLibPath, err) + } + } + // save sentinel value, so we don't attempt to re-register shared + // libraries that are problematic for some reason + r.blocklistByID[pathID] = struct{}{} + r.telemetry.libHookFailed.Add(1) + return + } + + reg := r.newRegistration(rule.UnregisterCB) + r.byID[pathID] = reg + if len(r.byPID[pid]) == 0 { + r.byPID[pid] = pathIdentifierSet{} + } + r.byPID[pid][pathID] = struct{}{} + log.Debugf("registering library %s path %s by pid %d", pathID.String(), hostLibPath, pid) + r.telemetry.libRegistered.Add(1) +} diff --git a/pkg/network/usm/testutil/sowatcher_client/.gitignore b/pkg/network/usm/sharedlibraries/testutil/sowatcher_client/.gitignore similarity index 100% rename from pkg/network/usm/testutil/sowatcher_client/.gitignore rename to pkg/network/usm/sharedlibraries/testutil/sowatcher_client/.gitignore diff --git a/pkg/network/usm/testutil/sowatcher_client/sowatcher_client.go b/pkg/network/usm/sharedlibraries/testutil/sowatcher_client/sowatcher_client.go similarity index 78% rename from pkg/network/usm/testutil/sowatcher_client/sowatcher_client.go rename to pkg/network/usm/sharedlibraries/testutil/sowatcher_client/sowatcher_client.go index 3a7772339113d..f19f49a351f12 100644 --- a/pkg/network/usm/testutil/sowatcher_client/sowatcher_client.go +++ 
b/pkg/network/usm/sharedlibraries/testutil/sowatcher_client/sowatcher_client.go @@ -10,6 +10,8 @@ import ( "os" "os/signal" "syscall" + + "golang.org/x/exp/mmap" ) func main() { @@ -17,18 +19,18 @@ func main() { signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) done := make(chan bool, 1) - fds := make([]*os.File, len(os.Args)-1) + readers := make([]*mmap.ReaderAt, len(os.Args)-1) defer func() { - for _, fd := range fds { - _ = fd.Close() + for _, r := range readers { + _ = r.Close() } }() for _, path := range os.Args[1:] { - fd, err := os.Open(path) + r, err := mmap.Open(path) if err != nil { panic(err) } - fds = append(fds, fd) + readers = append(readers, r) } go func() { diff --git a/pkg/network/usm/sharedlibraries/types.go b/pkg/network/usm/sharedlibraries/types.go new file mode 100644 index 0000000000000..906bcb7188967 --- /dev/null +++ b/pkg/network/usm/sharedlibraries/types.go @@ -0,0 +1,19 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build ignore + +package sharedlibraries + +/* +#include "../../ebpf/c/shared-libraries/types.h" +*/ +import "C" + +type libPath C.lib_path_t + +const ( + libPathMaxSize = C.LIB_PATH_MAX_SIZE +) diff --git a/pkg/network/usm/sharedlibraries/types_linux.go b/pkg/network/usm/sharedlibraries/types_linux.go new file mode 100644 index 0000000000000..c857c249155e7 --- /dev/null +++ b/pkg/network/usm/sharedlibraries/types_linux.go @@ -0,0 +1,14 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs -- -I ../../ebpf/c -I ../../../ebpf/c -fsigned-char types.go + +package sharedlibraries + +type libPath struct { + Pid uint32 + Len uint32 + Buf [120]byte +} + +const ( + libPathMaxSize = 0x78 +) diff --git a/pkg/network/usm/shared_libraries.go b/pkg/network/usm/sharedlibraries/watcher.go similarity index 60% rename from pkg/network/usm/shared_libraries.go rename to pkg/network/usm/sharedlibraries/watcher.go index b931464a288f8..11d77ad9da13c 100644 --- a/pkg/network/usm/shared_libraries.go +++ b/pkg/network/usm/sharedlibraries/watcher.go @@ -5,29 +5,22 @@ //go:build linux_bpf -package usm +package sharedlibraries import ( "bufio" - "encoding/base64" - "encoding/binary" "fmt" "os" "regexp" "strings" "sync" - "syscall" "time" "unsafe" "go.uber.org/atomic" - "github.com/cihub/seelog" - "github.com/twmb/murmur3" - "golang.org/x/sys/unix" - ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" - "github.com/DataDog/datadog-agent/pkg/network/protocols/http" + "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/protocols/telemetry" "github.com/DataDog/datadog-agent/pkg/process/monitor" "github.com/DataDog/datadog-agent/pkg/process/util" @@ -40,94 +33,53 @@ const ( scanTerminatedProcessesInterval = 30 * time.Second ) -func toLibPath(data []byte) http.LibPath { - return *(*http.LibPath)(unsafe.Pointer(&data[0])) +func toLibPath(data []byte) libPath { + return *(*libPath)(unsafe.Pointer(&data[0])) } -func toBytes(l *http.LibPath) []byte { +func toBytes(l *libPath) []byte { return l.Buf[:l.Len] } -// pathIdentifier is the unique key (system wide) of a file based on dev/inode -type pathIdentifier struct { - dev uint64 - inode uint64 -} - -func (p *pathIdentifier) String() string { - return fmt.Sprintf("dev/inode %d.%d/%d", unix.Major(p.dev), unix.Minor(p.dev), p.inode) -} - -// Key is a unique (system wide) TLDR Base64(murmur3.Sum64(device, inode)) -// It composes based the device (minor, 
major) and inode of a file -// murmur is a non-crypto hashing -// -// As multiple containers overlayfs (same inode but could be overwritten with different binary) -// device would be different -// -// a Base64 string representation is returned and could be used in a file path -func (p *pathIdentifier) Key() string { - buffer := make([]byte, 16) - binary.LittleEndian.PutUint64(buffer, p.dev) - binary.LittleEndian.PutUint64(buffer[8:], p.inode) - m := murmur3.Sum64(buffer) - bufferSum := make([]byte, 8) - binary.LittleEndian.PutUint64(bufferSum, m) - return base64.StdEncoding.EncodeToString(bufferSum) -} - -// path must be an absolute path -func newPathIdentifier(path string) (pi pathIdentifier, err error) { - if len(path) < 1 || path[0] != '/' { - return pi, fmt.Errorf("invalid path %q", path) - } - info, err := os.Stat(path) - if err != nil { - return pi, err - } - - stat, ok := info.Sys().(*syscall.Stat_t) - if !ok { - return pi, fmt.Errorf("invalid file %q stat %T", path, info.Sys()) - } - - return pathIdentifier{ - dev: stat.Dev, - inode: stat.Ino, - }, nil -} - -type soRule struct { - re *regexp.Regexp - registerCB func(id pathIdentifier, root string, path string) error - unregisterCB func(id pathIdentifier) error +type Rule struct { + Re *regexp.Regexp + RegisterCB func(id PathIdentifier, root string, path string) error + UnregisterCB func(id PathIdentifier) error } -// soWatcher provides a way to tie callback functions to the lifecycle of shared libraries -type soWatcher struct { +// Watcher provides a way to tie callback functions to the lifecycle of shared libraries +type Watcher struct { wg sync.WaitGroup done chan struct{} procRoot string - rules []soRule + rules []Rule loadEvents *ddebpf.PerfHandler processMonitor *monitor.ProcessMonitor registry *soRegistry + ebpfProgram *ebpfProgram } -func newSOWatcher(perfHandler *ddebpf.PerfHandler, rules ...soRule) *soWatcher { +func NewWatcher(cfg *config.Config, rules ...Rule) (*Watcher, error) { + ebpfProgram := 
newEBPFProgram(cfg) + err := ebpfProgram.Init() + if err != nil { + return nil, fmt.Errorf("error initializing shared library program: %w", err) + } + metricGroup := telemetry.NewMetricGroup( "usm.so_watcher", telemetry.OptPayloadTelemetry, ) - return &soWatcher{ + return &Watcher{ wg: sync.WaitGroup{}, done: make(chan struct{}), procRoot: util.GetProcRoot(), rules: rules, - loadEvents: perfHandler, + loadEvents: ebpfProgram.GetPerfHandler(), processMonitor: monitor.GetProcessMonitor(), + ebpfProgram: ebpfProgram, registry: &soRegistry{ - byID: make(map[pathIdentifier]*soRegistration), + byID: make(map[PathIdentifier]*soRegistration), byPID: make(map[uint32]pathIdentifierSet), blocklistByID: make(pathIdentifierSet), @@ -145,11 +97,9 @@ func newSOWatcher(perfHandler *ddebpf.PerfHandler, rules ...soRule) *soWatcher { libMatches: metricGroup.NewCounter("matches"), }, }, - } + }, nil } -type pathIdentifierSet = map[pathIdentifier]struct{} - type soRegistryTelemetry struct { // a library can be : // o Registered : it's a new library @@ -176,37 +126,16 @@ type soRegistryTelemetry struct { libMatches *telemetry.Counter } -type soRegistry struct { - m sync.RWMutex - byID map[pathIdentifier]*soRegistration - byPID map[uint32]pathIdentifierSet - - // if we can't register a uprobe we don't try more than once - blocklistByID pathIdentifierSet - - telemetry soRegistryTelemetry -} - type soRegistration struct { uniqueProcessesCount atomic.Int32 - unregisterCB func(pathIdentifier) error + unregisterCB func(PathIdentifier) error // we are sharing the telemetry from soRegistry telemetry *soRegistryTelemetry } -func (r *soRegistry) newRegistration(unregister func(pathIdentifier) error) *soRegistration { - uniqueCounter := atomic.Int32{} - uniqueCounter.Store(int32(1)) - return &soRegistration{ - unregisterCB: unregister, - uniqueProcessesCount: uniqueCounter, - telemetry: &r.telemetry, - } -} - // unregister return true if there are no more reference to this registration -func (r 
*soRegistration) unregisterPath(pathID pathIdentifier) bool { +func (r *soRegistration) unregisterPath(pathID PathIdentifier) bool { currentUniqueProcessesCount := r.uniqueProcessesCount.Dec() if currentUniqueProcessesCount > 0 { return false @@ -232,7 +161,12 @@ func (r *soRegistration) unregisterPath(pathID pathIdentifier) bool { return true } -func (w *soWatcher) Stop() { +func (w *Watcher) Stop() { + if w == nil { + return + } + + w.ebpfProgram.Stop() close(w.done) w.wg.Wait() } @@ -272,10 +206,14 @@ func parseMapsFile(scanner *bufio.Scanner, callback parseMapsFileCB) { } // Start consuming shared-library events -func (w *soWatcher) Start() { +func (w *Watcher) Start() { + if w == nil { + return + } + thisPID, err := util.GetRootNSPID() if err != nil { - log.Warnf("soWatcher Start can't get root namespace pid %s", err) + log.Warnf("Watcher Start can't get root namespace pid %s", err) } _ = util.WithAllProcs(w.procRoot, func(pid int) error { @@ -297,7 +235,7 @@ func (w *soWatcher) Start() { root := fmt.Sprintf("%s/%d/root", w.procRoot, pid) // Iterate over the rule, and look for a match. for _, r := range w.rules { - if r.re.MatchString(path) { + if r.Re.MatchString(path) { w.registry.register(root, path, uint32(pid), r) break } @@ -366,7 +304,7 @@ func (w *soWatcher) Start() { } for _, r := range w.rules { - if r.re.Match(path) { + if r.Re.Match(path) { w.registry.telemetry.libMatches.Add(1) w.registry.register(root, libPath, lib.Pid, r) break @@ -379,101 +317,9 @@ func (w *soWatcher) Start() { } } }() -} - -// cleanup removes all registrations. -// This function should be called in the termination, and after we're stopping all other goroutines. 
-func (r *soRegistry) cleanup() { - for pathID, reg := range r.byID { - reg.unregisterPath(pathID) - } -} - -// unregister a pid if exists, unregisterCB will be called if his uniqueProcessesCount == 0 -func (r *soRegistry) unregister(pid int) { - pidU32 := uint32(pid) - r.m.RLock() - _, found := r.byPID[pidU32] - r.m.RUnlock() - if !found { - return - } - - r.m.Lock() - defer r.m.Unlock() - paths, found := r.byPID[pidU32] - if !found { - return - } - for pathID := range paths { - reg, found := r.byID[pathID] - if !found { - r.telemetry.libUnregisterPathIDNotFound.Add(1) - continue - } - if reg.unregisterPath(pathID) { - // we need to clean up our entries as there are no more processes using this ELF - delete(r.byID, pathID) - } - } - delete(r.byPID, pidU32) -} -// register a ELF library root/libPath as be used by the pid -// Only one registration will be done per ELF (system wide) -func (r *soRegistry) register(root, libPath string, pid uint32, rule soRule) { - hostLibPath := root + libPath - pathID, err := newPathIdentifier(hostLibPath) + err = w.ebpfProgram.Start() if err != nil { - // short living process can hit here - // as we receive the openat() syscall info after receiving the EXIT netlink process - if log.ShouldLog(seelog.TraceLvl) { - log.Tracef("can't create path identifier %s", err) - } - return - } - - r.m.Lock() - defer r.m.Unlock() - if _, found := r.blocklistByID[pathID]; found { - r.telemetry.libBlocked.Add(1) - return - } - - if reg, found := r.byID[pathID]; found { - if _, found := r.byPID[pid][pathID]; !found { - reg.uniqueProcessesCount.Inc() - // Can happen if a new process opens the same so. 
- if len(r.byPID[pid]) == 0 { - r.byPID[pid] = pathIdentifierSet{} - } - r.byPID[pid][pathID] = struct{}{} - } - r.telemetry.libAlreadyRegistered.Add(1) - return - } - - if err := rule.registerCB(pathID, root, libPath); err != nil { - log.Debugf("error registering library (adding to blocklist) %s path %s by pid %d : %s", pathID.String(), hostLibPath, pid, err) - // we are calling unregisterCB here as some uprobes could be already attached, unregisterCB cleanup those entries - if rule.unregisterCB != nil { - if err := rule.unregisterCB(pathID); err != nil { - log.Debugf("unregisterCB library %s path %s : %s", pathID.String(), hostLibPath, err) - } - } - // save sentinel value, so we don't attempt to re-register shared - // libraries that are problematic for some reason - r.blocklistByID[pathID] = struct{}{} - r.telemetry.libHookFailed.Add(1) - return - } - - reg := r.newRegistration(rule.unregisterCB) - r.byID[pathID] = reg - if len(r.byPID[pid]) == 0 { - r.byPID[pid] = pathIdentifierSet{} + log.Errorf("error starting shared library detection eBPF program: %s", err) } - r.byPID[pid][pathID] = struct{}{} - log.Debugf("registering library %s path %s by pid %d", pathID.String(), hostLibPath, pid) - r.telemetry.libRegistered.Add(1) } diff --git a/pkg/network/usm/shared_libraries_test.go b/pkg/network/usm/sharedlibraries/watcher_test.go similarity index 68% rename from pkg/network/usm/shared_libraries_test.go rename to pkg/network/usm/sharedlibraries/watcher_test.go index b9d9b56b201b1..7dbe2c42f355a 100644 --- a/pkg/network/usm/shared_libraries_test.go +++ b/pkg/network/usm/sharedlibraries/watcher_test.go @@ -5,13 +5,12 @@ //go:build linux_bpf -package usm +package sharedlibraries import ( "bufio" "errors" "fmt" - "math" "os" "os/exec" "path/filepath" @@ -21,24 +20,16 @@ import ( "testing" "time" - manager "github.com/DataDog/ebpf-manager" - "github.com/DataDog/gopsutil/process" - "github.com/cihub/seelog" - "github.com/cilium/ebpf" - 
"github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "go.uber.org/atomic" - "golang.org/x/sys/unix" - ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" + "github.com/stretchr/testify/require" + "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" "github.com/DataDog/datadog-agent/pkg/network/config" - netebpf "github.com/DataDog/datadog-agent/pkg/network/ebpf" - "github.com/DataDog/datadog-agent/pkg/network/ebpf/probes" "github.com/DataDog/datadog-agent/pkg/network/protocols/http" "github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil" "github.com/DataDog/datadog-agent/pkg/network/protocols/telemetry" - errtelemetry "github.com/DataDog/datadog-agent/pkg/network/telemetry" "github.com/DataDog/datadog-agent/pkg/process/monitor" "github.com/DataDog/datadog-agent/pkg/process/util" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -64,6 +55,10 @@ type SharedLibrarySuite struct { } func TestSharedLibrary(t *testing.T) { + if !http.HTTPSSupported(config.New()) { + t.Skip("shared library tracing not supported for this platform") + } + ebpftest.TestBuildModes(t, []ebpftest.BuildMode{ebpftest.Prebuilt, ebpftest.RuntimeCompiled, ebpftest.CORE}, "", func(t *testing.T) { suite.Run(t, new(SharedLibrarySuite)) }) @@ -71,7 +66,6 @@ func TestSharedLibrary(t *testing.T) { func (s *SharedLibrarySuite) TestSharedLibraryDetection() { t := s.T() - perfHandler := initEBPFProgram(t) fooPath1, fooPathID1 := createTempTestFile(t, "foo-libssl.so") @@ -80,19 +74,20 @@ func (s *SharedLibrarySuite) TestSharedLibraryDetection() { pathDetected string ) - callback := func(id pathIdentifier, root string, path string) error { + callback := func(id PathIdentifier, root string, path string) error { mux.Lock() defer mux.Unlock() pathDetected = path return nil } - watcher := newSOWatcher(perfHandler, - soRule{ - re: regexp.MustCompile(`foo-libssl.so`), - registerCB: callback, + watcher, err := NewWatcher(config.New(), + 
Rule{ + Re: regexp.MustCompile(`foo-libssl.so`), + RegisterCB: callback, }, ) + require.NoError(t, err) watcher.Start() t.Cleanup(watcher.Stop) launchProcessMonitor(t) @@ -155,26 +150,25 @@ func (s *SharedLibrarySuite) TestSharedLibraryDetectionWithPIDandRootNameSpace() err = exec.Command("cp", "/usr/bin/busybox", root+"/sleep").Run() require.NoError(t, err) - perfHandler := initEBPFProgram(t) - var ( mux sync.Mutex pathDetected string ) - callback := func(id pathIdentifier, root string, path string) error { + callback := func(id PathIdentifier, root string, path string) error { mux.Lock() defer mux.Unlock() pathDetected = path return nil } - watcher := newSOWatcher(perfHandler, - soRule{ - re: regexp.MustCompile(`fooroot-crypto.so`), - registerCB: callback, + watcher, err := NewWatcher(config.New(), + Rule{ + Re: regexp.MustCompile(`fooroot-crypto.so`), + RegisterCB: callback, }, ) + require.NoError(t, err) watcher.Start() t.Cleanup(watcher.Stop) launchProcessMonitor(t) @@ -215,28 +209,28 @@ func (s *SharedLibrarySuite) TestSharedLibraryDetectionWithPIDandRootNameSpace() func (s *SharedLibrarySuite) TestSameInodeRegression() { t := s.T() - perfHandler := initEBPFProgram(t) fooPath1, fooPathID1 := createTempTestFile(t, "a-foo-libssl.so") fooPath2 := filepath.Join(t.TempDir(), "b-foo-libssl.so") // create a hard-link (a-foo-libssl.so and b-foo-libssl.so will share the same inode) require.NoError(t, os.Link(fooPath1, fooPath2)) - fooPathID2, err := newPathIdentifier(fooPath2) + fooPathID2, err := NewPathIdentifier(fooPath2) require.NoError(t, err) registers := atomic.NewInt64(0) - callback := func(id pathIdentifier, root string, path string) error { + callback := func(id PathIdentifier, root string, path string) error { registers.Add(1) return nil } - watcher := newSOWatcher(perfHandler, - soRule{ - re: regexp.MustCompile(`foo-libssl.so`), - registerCB: callback, + watcher, err := NewWatcher(config.New(), + Rule{ + Re: regexp.MustCompile(`foo-libssl.so`), + 
RegisterCB: callback, }, ) + require.NoError(t, err) watcher.Start() t.Cleanup(watcher.Stop) launchProcessMonitor(t) @@ -285,26 +279,26 @@ func (s *SharedLibrarySuite) TestSameInodeRegression() { func (s *SharedLibrarySuite) TestSoWatcherLeaks() { t := s.T() - perfHandler := initEBPFProgram(t) fooPath1, fooPathID1 := createTempTestFile(t, "foo-libssl.so") fooPath2, fooPathID2 := createTempTestFile(t, "foo2-gnutls.so") - registerCB := func(id pathIdentifier, root string, path string) error { return nil } - unregisterCB := func(id pathIdentifier) error { return errors.New("fake unregisterCB error") } + registerCB := func(id PathIdentifier, root string, path string) error { return nil } + unregisterCB := func(id PathIdentifier) error { return errors.New("fake unregisterCB error") } - watcher := newSOWatcher(perfHandler, - soRule{ - re: regexp.MustCompile(`foo-libssl.so`), - registerCB: registerCB, - unregisterCB: unregisterCB, + watcher, err := NewWatcher(config.New(), + Rule{ + Re: regexp.MustCompile(`foo-libssl.so`), + RegisterCB: registerCB, + UnregisterCB: unregisterCB, }, - soRule{ - re: regexp.MustCompile(`foo2-gnutls.so`), - registerCB: registerCB, - unregisterCB: unregisterCB, + Rule{ + Re: regexp.MustCompile(`foo2-gnutls.so`), + RegisterCB: registerCB, + UnregisterCB: unregisterCB, }, ) + require.NoError(t, err) watcher.Start() t.Cleanup(watcher.Stop) launchProcessMonitor(t) @@ -383,26 +377,26 @@ func (s *SharedLibrarySuite) TestSoWatcherLeaks() { func (s *SharedLibrarySuite) TestSoWatcherProcessAlreadyHoldingReferences() { t := s.T() - perfHandler := initEBPFProgram(t) fooPath1, fooPathID1 := createTempTestFile(t, "foo-libssl.so") fooPath2, fooPathID2 := createTempTestFile(t, "foo2-gnutls.so") - registerCB := func(id pathIdentifier, root string, path string) error { return nil } - unregisterCB := func(id pathIdentifier) error { return nil } + registerCB := func(id PathIdentifier, root string, path string) error { return nil } + unregisterCB := func(id 
PathIdentifier) error { return nil } - watcher := newSOWatcher(perfHandler, - soRule{ - re: regexp.MustCompile(`foo-libssl.so`), - registerCB: registerCB, - unregisterCB: unregisterCB, + watcher, err := NewWatcher(config.New(), + Rule{ + Re: regexp.MustCompile(`foo-libssl.so`), + RegisterCB: registerCB, + UnregisterCB: unregisterCB, }, - soRule{ - re: regexp.MustCompile(`foo2-gnutls.so`), - registerCB: registerCB, - unregisterCB: unregisterCB, + Rule{ + Re: regexp.MustCompile(`foo2-gnutls.so`), + RegisterCB: registerCB, + UnregisterCB: unregisterCB, }, ) + require.NoError(t, err) // create files clientBin := buildSOWatcherClientBin(t) @@ -458,7 +452,7 @@ func (s *SharedLibrarySuite) TestSoWatcherProcessAlreadyHoldingReferences() { require.GreaterOrEqual(t, tel["usm.so_watcher.hits"], tel["usm.so_watcher.matches"], "usm.so_watcher.hits") telEqual(t, 1, "usm.so_watcher.already_registered") telEqual(t, 0, "usm.so_watcher.blocked") - telEqual(t, 3, "usm.so_watcher.matches") // command1 access to 2 files, command2 access to 1 file + telEqual(t, 0, "usm.so_watcher.matches") telEqual(t, 2, "usm.so_watcher.registered") telEqual(t, 0, "usm.so_watcher.unregister_errors") telEqual(t, 0, "usm.so_watcher.unregister_no_callback") @@ -501,16 +495,16 @@ func buildSOWatcherClientBin(t *testing.T) string { return clientBinPath } -func checkPathIDExists(watcher *soWatcher, pathID pathIdentifier) bool { +func checkPathIDExists(watcher *Watcher, pathID PathIdentifier) bool { _, ok := watcher.registry.byID[pathID] return ok } -func checkPathIDDoesNotExist(watcher *soWatcher, pathID pathIdentifier) bool { +func checkPathIDDoesNotExist(watcher *Watcher, pathID PathIdentifier) bool { return !checkPathIDExists(watcher, pathID) } -func checkPIDAssociatedWithPathID(watcher *soWatcher, pathID pathIdentifier, pid uint32) bool { +func checkPIDAssociatedWithPathID(watcher *Watcher, pathID PathIdentifier, pid uint32) bool { value, ok := watcher.registry.byPID[pid] if !ok { return false @@ -519,243 
+513,47 @@ func checkPIDAssociatedWithPathID(watcher *soWatcher, pathID pathIdentifier, pid return ok } -func checkPIDNotAssociatedWithPathID(watcher *soWatcher, pathID pathIdentifier, pid uint32) bool { +func checkPIDNotAssociatedWithPathID(watcher *Watcher, pathID PathIdentifier, pid uint32) bool { return !checkPIDAssociatedWithPathID(watcher, pathID, pid) } -func createTempTestFile(t *testing.T, name string) (string, pathIdentifier) { +func createTempTestFile(t *testing.T, name string) (string, PathIdentifier) { fullPath := filepath.Join(t.TempDir(), name) f, err := os.Create(fullPath) require.NoError(t, err) + f.WriteString("foobar") f.Close() t.Cleanup(func() { os.RemoveAll(fullPath) }) - pathID, err := newPathIdentifier(fullPath) + pathID, err := NewPathIdentifier(fullPath) require.NoError(t, err) return fullPath, pathID } -func checkWatcherStateIsClean(t *testing.T, watcher *soWatcher) { +func checkWatcherStateIsClean(t *testing.T, watcher *Watcher) { require.True(t, len(watcher.registry.byPID) == 0 && len(watcher.registry.byID) == 0, "watcher state is not clean") } -func getTracepointFuncName(tracepointType, name string) string { - return fmt.Sprintf("tracepoint__syscalls__sys_%s_%s", tracepointType, name) -} - -const ( - enterTracepoint = "enter" - exitTracepoint = "exit" -) - -func initEBPFProgram(t *testing.T) *ddebpf.PerfHandler { - c := config.New() - if !http.HTTPSSupported(c) { - t.Skip("https not supported for this setup") - } - - includeOpenat2 := sysOpenAt2Supported() - openat2Probes := []manager.ProbeIdentificationPair{ - { - EBPFFuncName: getTracepointFuncName(enterTracepoint, openat2SysCall), - UID: probeUID, - }, - { - EBPFFuncName: getTracepointFuncName(exitTracepoint, openat2SysCall), - UID: probeUID, - }, - } - - perfHandler := ddebpf.NewPerfHandler(10) - mgr := &manager.Manager{ - PerfMaps: []*manager.PerfMap{ - { - Map: manager.Map{Name: sharedLibrariesPerfMap}, - PerfMapOptions: manager.PerfMapOptions{ - PerfRingBufferSize: 8 * 
os.Getpagesize(), - Watermark: 1, - RecordHandler: perfHandler.RecordHandler, - LostHandler: perfHandler.LostHandler, - RecordGetter: perfHandler.RecordGetter, - }, - }, - }, - Probes: []*manager.Probe{ - { - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: getTracepointFuncName(enterTracepoint, openatSysCall), - UID: probeUID, - }, - KProbeMaxActive: maxActive, - }, - { - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: getTracepointFuncName(exitTracepoint, openatSysCall), - UID: probeUID, - }, - KProbeMaxActive: maxActive, - }, - }, - } - - options := manager.Options{ - RLimit: &unix.Rlimit{ - Cur: math.MaxUint64, - Max: math.MaxUint64, - }, - MapSpecEditors: map[string]manager.MapSpecEditor{ - // TODO: move shared library probes to their own compilation artifact - "http_batches": { - Type: ebpf.Hash, - MaxEntries: 1, - EditorFlag: manager.EditMaxEntries, - }, - "http2_batches": { - Type: ebpf.Hash, - MaxEntries: 1, - EditorFlag: manager.EditMaxEntries, - }, - "http_in_flight": { - Type: ebpf.Hash, - MaxEntries: 1, - EditorFlag: manager.EditMaxEntries, - }, - "kafka_batches": { - Type: ebpf.Hash, - MaxEntries: 1, - EditorFlag: manager.EditMaxEntries, - }, - "kafka_last_tcp_seq_per_connection": { - Type: ebpf.Hash, - MaxEntries: 1, - EditorFlag: manager.EditMaxEntries, - }, - "http2_in_flight": { - Type: ebpf.LRUHash, - MaxEntries: 1, - EditorFlag: manager.EditMaxEntries, - }, - connectionStatesMap: { - Type: ebpf.Hash, - MaxEntries: 1, - EditorFlag: manager.EditMaxEntries, - }, - probes.ConnectionProtocolMap: { - Type: ebpf.Hash, - MaxEntries: 1, - EditorFlag: manager.EditMaxEntries, - }, - }, - ActivatedProbes: []manager.ProbesSelector{ - &manager.ProbeSelector{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: getTracepointFuncName(enterTracepoint, openatSysCall), - UID: probeUID, - }, - }, - &manager.ProbeSelector{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - 
EBPFFuncName: getTracepointFuncName(exitTracepoint, openatSysCall), - UID: probeUID, - }, - }, - }, - } - - if includeOpenat2 { - for _, probe := range openat2Probes { - mgr.Probes = append(mgr.Probes, &manager.Probe{ - ProbeIdentificationPair: probe, - KProbeMaxActive: maxActive, - }) - - options.ActivatedProbes = append(options.ActivatedProbes, &manager.ProbeSelector{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: probe.EBPFFuncName, - UID: probeUID, - }, - }) - } - } - - exclude := []string{ - "socket__http_filter", - "socket__http2_filter", - "socket__kafka_filter", - "socket__protocol_dispatcher", - "socket__protocol_dispatcher_kafka", - "kprobe__tcp_sendmsg", - "kretprobe__security_sock_rcv_skb", - "tracepoint__net__netif_receive_skb", - "kprobe__do_vfs_ioctl", - "kprobe_handle_sync_payload", - "kprobe_handle_close_connection", - "kprobe_handle_connection_by_peer", - "kprobe_handle_async_payload", - } - - if !includeOpenat2 { - exclude = append(exclude, getTracepointFuncName(enterTracepoint, openat2SysCall), - getTracepointFuncName(exitTracepoint, openat2SysCall)) - } - - for _, sslProbeList := range [][]manager.ProbesSelector{openSSLProbes, cryptoProbes, gnuTLSProbes} { - for _, singleProbe := range sslProbeList { - for _, identifier := range singleProbe.GetProbesIdentificationPairList() { - options.ExcludedFunctions = append(options.ExcludedFunctions, identifier.EBPFFuncName) - } - } - } - for _, probeInfo := range functionToProbes { - if probeInfo.functionInfo != nil { - options.ExcludedFunctions = append(options.ExcludedFunctions, probeInfo.functionInfo.ebpfFunctionName) - } - if probeInfo.returnInfo != nil { - options.ExcludedFunctions = append(options.ExcludedFunctions, probeInfo.returnInfo.ebpfFunctionName) - } - - } - options.ExcludedFunctions = append(options.ExcludedFunctions, exclude...) 
- - mgr.InstructionPatcher = func(m *manager.Manager) error { - return errtelemetry.PatchEBPFTelemetry(m, false, nil) - } - - bc, err := netebpf.ReadHTTPModule(c.BPFDir, c.BPFDebug) - require.NoError(t, err) - err = mgr.InitWithOptions(bc, options) - require.NoError(t, err) - err = mgr.Start() - require.NoError(t, err) - - t.Cleanup(func() { - mgr.Stop(manager.CleanAll) - perfHandler.Stop() - }) - - return perfHandler -} - func BenchmarkScanSOWatcherNew(b *testing.B) { - w := newSOWatcher(nil, - soRule{ - re: regexp.MustCompile(`libssl.so`), + w, _ := NewWatcher(config.New(), + Rule{ + Re: regexp.MustCompile(`libssl.so`), }, - soRule{ - re: regexp.MustCompile(`libcrypto.so`), + Rule{ + Re: regexp.MustCompile(`libcrypto.so`), }, - soRule{ - re: regexp.MustCompile(`libgnutls.so`), + Rule{ + Re: regexp.MustCompile(`libgnutls.so`), }, ) callback := func(path string) { for _, r := range w.rules { - if r.re.MatchString(path) { + if r.Re.MatchString(path) { break } } @@ -783,54 +581,6 @@ func BenchmarkScanSOWatcherNew(b *testing.B) { } } -func BenchmarkScanSOWatcherOld(b *testing.B) { - w := newSOWatcher(nil, - soRule{ - re: regexp.MustCompile(`libssl.so`), - }, - soRule{ - re: regexp.MustCompile(`libcrypto.so`), - }, - soRule{ - re: regexp.MustCompile(`libgnutls.so`), - }, - ) - - f := func(testPid int) error { - // report silently parsing /proc error as this could happen - // just exit processes - proc, err := process.NewProcess(int32(testPid)) - if err != nil { - log.Debugf("process %d parsing failed %s", testPid, err) - return nil - } - - mmaps, err := proc.MemoryMaps(true) - if err != nil { - if log.ShouldLog(seelog.TraceLvl) { - log.Tracef("process %d maps parsing failed %s", testPid, err) - } - return nil - } - - for _, m := range *mmaps { - for _, r := range w.rules { - if r.re.MatchString(m.Path) { - break - } - } - } - - return nil - } - - b.ResetTimer() - b.ReportAllocs() - for i := 0; i < b.N; i++ { - util.WithAllProcs(w.procRoot, f) - } -} - var mapsFile = ` 
7f178d0a6000-7f178d0cb000 r--p 00000000 fd:00 268741 /usr/lib/x86_64-linux-gnu/libc-2.31.so 7f178d0cb000-7f178d243000 r-xp 00025000 fd:00 268741 /usr/lib/x86_64-linux-gnu/libc-2.31.so diff --git a/pkg/otlp/internal/serializerexporter/consumer.go b/pkg/otlp/internal/serializerexporter/consumer.go index 32b060378ee8a..2ef6ab5c2b5db 100644 --- a/pkg/otlp/internal/serializerexporter/consumer.go +++ b/pkg/otlp/internal/serializerexporter/consumer.go @@ -17,11 +17,11 @@ import ( "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/metrics" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/tagger" "github.com/DataDog/datadog-agent/pkg/tagger/collectors" "github.com/DataDog/datadog-agent/pkg/tagset" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/util/log" otlpmetrics "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics" "github.com/DataDog/opentelemetry-mapping-go/pkg/quantile" @@ -62,11 +62,11 @@ func (c *serializerConsumer) enrichedTags(dimensions *otlpmetrics.Dimensions) [] return enrichedTags } -func (c *serializerConsumer) ConsumeAPMStats(ss pb.ClientStatsPayload) { +func (c *serializerConsumer) ConsumeAPMStats(ss *pb.ClientStatsPayload) { log.Tracef("Serializing %d client stats buckets.", len(ss.Stats)) ss.Tags = append(ss.Tags, c.extraTags...) 
body := new(bytes.Buffer) - if err := msgp.Encode(body, &ss); err != nil { + if err := msgp.Encode(body, ss); err != nil { log.Errorf("Error encoding ClientStatsPayload: %v", err) return } diff --git a/pkg/otlp/internal/serializerexporter/consumer_test.go b/pkg/otlp/internal/serializerexporter/consumer_test.go index 9ccf8f03499fd..2378197dcf37b 100644 --- a/pkg/otlp/internal/serializerexporter/consumer_test.go +++ b/pkg/otlp/internal/serializerexporter/consumer_test.go @@ -18,14 +18,16 @@ import ( "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/metrics/event" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serializer/marshaler" "github.com/DataDog/datadog-agent/pkg/serializer/types" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tinylib/msgp/msgp" ) -var statsPayloads = []pb.ClientStatsPayload{ +var statsPayloads = []*pb.ClientStatsPayload{ { Hostname: "host", Env: "prod", @@ -38,11 +40,11 @@ var statsPayloads = []pb.ClientStatsPayload{ Service: "mysql", ContainerID: "abcdef123456", Tags: []string{"a:b", "c:d"}, - Stats: []pb.ClientStatsBucket{ + Stats: []*pb.ClientStatsBucket{ { Start: 10, Duration: 1, - Stats: []pb.ClientGroupedStats{ + Stats: []*pb.ClientGroupedStats{ { Service: "kafka", Name: "queue.add", @@ -72,11 +74,11 @@ var statsPayloads = []pb.ClientStatsPayload{ Service: "mysql2", ContainerID: "abcdef1234562", Tags: []string{"a:b2", "c:d2"}, - Stats: []pb.ClientStatsBucket{ + Stats: []*pb.ClientStatsBucket{ { Start: 102, Duration: 12, - Stats: []pb.ClientGroupedStats{ + Stats: []*pb.ClientGroupedStats{ { Service: "kafka2", Name: "queue.add2", @@ -103,17 +105,14 @@ func TestConsumeAPMStats(t *testing.T) { sc.ConsumeAPMStats(statsPayloads[1]) require.Len(t, sc.apmstats, 2) - var one, two 
pb.ClientStatsPayload - err := msgp.Decode(sc.apmstats[0], &one) + one := &pb.ClientStatsPayload{} + two := &pb.ClientStatsPayload{} + err := msgp.Decode(sc.apmstats[0], one) require.NoError(t, err) - err = msgp.Decode(sc.apmstats[1], &two) + err = msgp.Decode(sc.apmstats[1], two) require.NoError(t, err) - // We add back the tags to the originally added statsPayloads because that's - // what we need to compare against: ConsumeAPMStats adds the extraTags - statsPayloads[0].Tags = append(statsPayloads[0].Tags, sc.extraTags...) - statsPayloads[1].Tags = append(statsPayloads[1].Tags, sc.extraTags...) - require.Equal(t, one, statsPayloads[0]) - require.Equal(t, two, statsPayloads[1]) + assert.Equal(t, one.String(), statsPayloads[0].String()) + assert.Equal(t, two.String(), statsPayloads[1].String()) } func TestSendAPMStats(t *testing.T) { @@ -131,11 +130,13 @@ func TestSendAPMStats(t *testing.T) { var called int srv := withHandler(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { require.Equal(t, req.URL.Path, "/v0.6/stats") - var in pb.ClientStatsPayload - err := msgp.Decode(req.Body, &in) + in := &pb.ClientStatsPayload{} + in.Reset() + err := msgp.Decode(req.Body, in) defer req.Body.Close() require.NoError(t, err) - require.Equal(t, statsPayloads[called], in) + // compare string representations of messages + assert.Equal(t, statsPayloads[called].String(), in.String()) called++ })) defer srv.Close() diff --git a/pkg/process/checks/host_info.go b/pkg/process/checks/host_info.go index a60e51d341862..49f541ea1ddee 100644 --- a/pkg/process/checks/host_info.go +++ b/pkg/process/checks/host_info.go @@ -18,7 +18,7 @@ import ( "google.golang.org/grpc" "github.com/DataDog/datadog-agent/pkg/config" - pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/fargate" ddgrpc "github.com/DataDog/datadog-agent/pkg/util/grpc" 
"github.com/DataDog/datadog-agent/pkg/util/hostname/validate" diff --git a/pkg/process/checks/host_info_test.go b/pkg/process/checks/host_info_test.go index 73f67468de1b0..3df44c67bee13 100644 --- a/pkg/process/checks/host_info_test.go +++ b/pkg/process/checks/host_info_test.go @@ -19,8 +19,8 @@ import ( "google.golang.org/grpc" "github.com/DataDog/datadog-agent/pkg/config" - pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" - mocks "github.com/DataDog/datadog-agent/pkg/proto/pbgo/mocks" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" + pbmocks "github.com/DataDog/datadog-agent/pkg/proto/pbgo/mocks/core" ) func TestGetHostname(t *testing.T) { @@ -38,7 +38,7 @@ func TestGetHostnameFromGRPC(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockClient := mocks.NewMockAgentClient(ctrl) + mockClient := pbmocks.NewMockAgentClient(ctrl) mockClient.EXPECT().GetHostname( gomock.Any(), diff --git a/pkg/process/encoding/request/encoding.go b/pkg/process/encoding/request/encoding.go index 5ce3b12fe3c14..ecb1ee82f4678 100644 --- a/pkg/process/encoding/request/encoding.go +++ b/pkg/process/encoding/request/encoding.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protojson" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" ) var ( diff --git a/pkg/process/encoding/request/encoding_test.go b/pkg/process/encoding/request/encoding_test.go index d53d299d27455..94b637b277ef0 100644 --- a/pkg/process/encoding/request/encoding_test.go +++ b/pkg/process/encoding/request/encoding_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" ) func TestSerialization(t *testing.T) { diff --git a/pkg/process/encoding/request/json.go b/pkg/process/encoding/request/json.go index 
e9e46ddc74e2a..897766beb99d4 100644 --- a/pkg/process/encoding/request/json.go +++ b/pkg/process/encoding/request/json.go @@ -8,7 +8,7 @@ package request import ( "google.golang.org/protobuf/encoding/protojson" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" ) // ContentTypeJSON holds the HTML content-type of a JSON payload diff --git a/pkg/process/encoding/request/protobuf.go b/pkg/process/encoding/request/protobuf.go index e19690b2d6402..f34fbffa2c502 100644 --- a/pkg/process/encoding/request/protobuf.go +++ b/pkg/process/encoding/request/protobuf.go @@ -8,7 +8,7 @@ package request import ( "google.golang.org/protobuf/proto" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" ) // ContentTypeProtobuf holds the HTML content-type of a Protobuf payload diff --git a/pkg/process/metadata/workloadmeta/grpc.go b/pkg/process/metadata/workloadmeta/grpc.go index d372d96d4b7e8..f80bad7ed92ef 100644 --- a/pkg/process/metadata/workloadmeta/grpc.go +++ b/pkg/process/metadata/workloadmeta/grpc.go @@ -16,7 +16,7 @@ import ( "google.golang.org/grpc" "github.com/DataDog/datadog-agent/pkg/config" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" "github.com/DataDog/datadog-agent/pkg/telemetry" "github.com/DataDog/datadog-agent/pkg/util/log" ) diff --git a/pkg/process/metadata/workloadmeta/grpc_test.go b/pkg/process/metadata/workloadmeta/grpc_test.go index aaabb4143f00b..b58c23f5d805f 100644 --- a/pkg/process/metadata/workloadmeta/grpc_test.go +++ b/pkg/process/metadata/workloadmeta/grpc_test.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" "github.com/DataDog/datadog-agent/pkg/process/procutil" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + 
pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" "github.com/DataDog/datadog-agent/pkg/trace/testutil" ) diff --git a/pkg/process/net/common.go b/pkg/process/net/common.go index 3c1ae0ae1b55c..94314bc53b79b 100644 --- a/pkg/process/net/common.go +++ b/pkg/process/net/common.go @@ -23,7 +23,7 @@ import ( netEncoding "github.com/DataDog/datadog-agent/pkg/network/encoding" procEncoding "github.com/DataDog/datadog-agent/pkg/process/encoding" reqEncoding "github.com/DataDog/datadog-agent/pkg/process/encoding/request" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/retry" ) diff --git a/pkg/proto/datadog/api/v1/api.proto b/pkg/proto/datadog/api/v1/api.proto index 953a772d82da9..fbd57f2edfc94 100644 --- a/pkg/proto/datadog/api/v1/api.proto +++ b/pkg/proto/datadog/api/v1/api.proto @@ -8,7 +8,7 @@ import "datadog/workloadmeta/workloadmeta.proto"; import "google/api/annotations.proto"; import "google/protobuf/empty.proto"; -option go_package = "pkg/proto/pbgo"; // golang +option go_package = "pkg/proto/pbgo/core"; // golang // The greeting service definition. 
diff --git a/pkg/proto/datadog/model/v1/model.proto b/pkg/proto/datadog/model/v1/model.proto index f2736de5b6cd3..aae1416c47c8c 100644 --- a/pkg/proto/datadog/model/v1/model.proto +++ b/pkg/proto/datadog/model/v1/model.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package datadog.model.v1; -option go_package = "pkg/proto/pbgo"; // golang +option go_package = "pkg/proto/pbgo/core"; // golang // Hostname types diff --git a/pkg/proto/datadog/process/process.proto b/pkg/proto/datadog/process/process.proto index b351e705ce130..c3db1e3ee1252 100644 --- a/pkg/proto/datadog/process/process.proto +++ b/pkg/proto/datadog/process/process.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package datadog.process; -option go_package = "pkg/proto/pbgo"; // golang +option go_package = "pkg/proto/pbgo/process"; // golang // ProcessStatRequest is the request to get process stats. message ProcessStatRequest { diff --git a/pkg/proto/datadog/process/workloadmeta_process.proto b/pkg/proto/datadog/process/workloadmeta_process.proto index f4223fbe648fb..0c34b19a63c9f 100644 --- a/pkg/proto/datadog/process/workloadmeta_process.proto +++ b/pkg/proto/datadog/process/workloadmeta_process.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package datadog.process; -option go_package = "pkg/proto/pbgo"; // golang +option go_package = "pkg/proto/pbgo/process"; // golang message ProcessStreamResponse { int32 eventID = 1; diff --git a/pkg/proto/datadog/remoteconfig/remoteconfig.proto b/pkg/proto/datadog/remoteconfig/remoteconfig.proto index cde8d20bcd024..20a4984f37404 100644 --- a/pkg/proto/datadog/remoteconfig/remoteconfig.proto +++ b/pkg/proto/datadog/remoteconfig/remoteconfig.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package datadog.config; -option go_package = "pkg/proto/pbgo"; // golang +option go_package = "pkg/proto/pbgo/core"; // golang // Backend definitions diff --git a/pkg/trace/pb/agent_payload.proto b/pkg/proto/datadog/trace/agent_payload.proto similarity index 89% rename from pkg/trace/pb/agent_payload.proto 
rename to pkg/proto/datadog/trace/agent_payload.proto index 3d0f4f2471f94..e6a63eb47ab15 100644 --- a/pkg/trace/pb/agent_payload.proto +++ b/pkg/proto/datadog/trace/agent_payload.proto @@ -2,10 +2,10 @@ syntax = "proto3"; -package pb; -option go_package = "github.com/DataDog/datadog-agent/pkg/trace/pb"; +package datadog.trace; +option go_package = "pkg/proto/pbgo/trace"; // golang -import "tracer_payload.proto"; +import "datadog/trace/tracer_payload.proto"; // AgentPayload represents payload the agent sends to the intake. message AgentPayload { diff --git a/pkg/trace/pb/span.proto b/pkg/proto/datadog/trace/span.proto similarity index 87% rename from pkg/trace/pb/span.proto rename to pkg/proto/datadog/trace/span.proto index de10f5fa83d59..58458cdf06532 100644 --- a/pkg/trace/pb/span.proto +++ b/pkg/proto/datadog/trace/span.proto @@ -1,7 +1,8 @@ syntax = "proto3"; -package pb; -option go_package="github.com/DataDog/datadog-agent/pkg/trace/pb"; +package datadog.trace; + +option go_package="pkg/proto/pbgo/trace"; message Span { // service is the name of the service with which this span is associated. @@ -32,15 +33,15 @@ message Span { // @gotags: json:"error" msg:"error" int32 error = 9; // meta is a mapping from tag name to tag value for string-valued tags. - // @gotags: json:"meta" msg:"meta" + // @gotags: json:"meta,omitempty" msg:"meta,omitempty" map meta = 10; // metrics is a mapping from tag name to tag value for numeric-valued tags. - // @gotags: json:"metrics" msg:"metrics" + // @gotags: json:"metrics,omitempty" msg:"metrics,omitempty" map metrics = 11; // type is the type of the service with which this span is associated. Example values: web, db, lambda. // @gotags: json:"type" msg:"type" string type = 12; // meta_struct is a registry of structured "other" data used by, e.g., AppSec. 
- // @gotags: json:"meta_struct,omitempty" msg:"meta_struct" + // @gotags: json:"meta_struct,omitempty" msg:"meta_struct,omitempty" map meta_struct = 13; } diff --git a/pkg/trace/pb/stats.proto b/pkg/proto/datadog/trace/stats.proto similarity index 90% rename from pkg/trace/pb/stats.proto rename to pkg/proto/datadog/trace/stats.proto index 3fccb50584895..ac868e87b7a5f 100644 --- a/pkg/trace/pb/stats.proto +++ b/pkg/proto/datadog/trace/stats.proto @@ -1,8 +1,8 @@ syntax = "proto3"; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +package datadog.trace; -package pb; +option go_package = "pkg/proto/pbgo/trace"; // golang // protoc --gogofaster_out=. -I $GOPATH/src -I . stats.proto @@ -10,7 +10,8 @@ package pb; message StatsPayload { string agentHostname = 1; string agentEnv = 2; - repeated ClientStatsPayload stats = 3 [(gogoproto.nullable) = false]; + // @gotags: json:"stats,omitempty" msg:"stats,omitempty" + repeated ClientStatsPayload stats = 3; string agentVersion = 4; bool clientComputed = 5; } @@ -23,7 +24,8 @@ message ClientStatsPayload { string hostname = 1; string env = 2; // env tag set on spans or in the tracers, used for aggregation string version = 3; // version tag set on spans or in the tracers, used for aggregation - repeated ClientStatsBucket stats = 4 [(gogoproto.nullable) = false]; + // @gotags: json:"stats,omitempty" msg:"stats,omitempty" + repeated ClientStatsBucket stats = 4; string lang = 5; // informative field not used for aggregation string tracerVersion = 6; // informative field not used for aggregation string runtimeID = 7; // used on stats payloads sent by the tracer to identify uniquely a message @@ -46,7 +48,8 @@ message ClientStatsPayload { message ClientStatsBucket { uint64 start = 1; // bucket start in nanoseconds uint64 duration = 2; // bucket duration in nanoseconds - repeated ClientGroupedStats stats = 3 [(gogoproto.nullable) = false]; + // @gotags: json:"stats,omitempty" msg:"stats,omitempty" + repeated 
ClientGroupedStats stats = 3; // AgentTimeShift is the shift applied by the agent stats aggregator on bucket start // when the received bucket start is outside of the agent aggregation window int64 agentTimeShift = 4; diff --git a/pkg/trace/pb/tracer_payload.proto b/pkg/proto/datadog/trace/tracer_payload.proto similarity index 95% rename from pkg/trace/pb/tracer_payload.proto rename to pkg/proto/datadog/trace/tracer_payload.proto index 6b391b6c19959..3ad61c3ae6997 100644 --- a/pkg/trace/pb/tracer_payload.proto +++ b/pkg/proto/datadog/trace/tracer_payload.proto @@ -1,8 +1,10 @@ syntax = "proto3"; -package pb; -option go_package="github.com/DataDog/datadog-agent/pkg/trace/pb"; -import "span.proto"; +package datadog.trace; + +option go_package="pkg/proto/pbgo/trace"; + +import "datadog/trace/span.proto"; // TraceChunk represents a list of spans with the same trace ID. In other words, a chunk of a trace. message TraceChunk { diff --git a/pkg/proto/datadog/workloadmeta/workloadmeta.proto b/pkg/proto/datadog/workloadmeta/workloadmeta.proto index 054142e8bd714..a785613528372 100644 --- a/pkg/proto/datadog/workloadmeta/workloadmeta.proto +++ b/pkg/proto/datadog/workloadmeta/workloadmeta.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package datadog.workloadmeta; -option go_package = "pkg/proto/pbgo"; // golang +option go_package = "pkg/proto/pbgo/core"; // golang enum WorkloadmetaKind { CONTAINER = 0; diff --git a/pkg/proto/go.mod b/pkg/proto/go.mod index 57ec39d788df3..3bfff53f88c02 100644 --- a/pkg/proto/go.mod +++ b/pkg/proto/go.mod @@ -7,16 +7,24 @@ retract v0.46.0-devel require ( github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.3 + github.com/google/gofuzz v1.2.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 + github.com/stretchr/testify v1.8.4 github.com/tinylib/msgp v1.1.8 + github.com/vmihailenco/msgpack/v4 v4.3.12 google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 google.golang.org/grpc v1.55.0 google.golang.org/protobuf 
v1.30.0 ) require ( + github.com/davecgh/go-spew v1.1.1 // indirect github.com/philhofer/fwd v1.1.2 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/vmihailenco/tagparser v0.1.1 // indirect golang.org/x/net v0.8.0 // indirect golang.org/x/sys v0.6.0 // indirect golang.org/x/text v0.8.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/proto/go.sum b/pkg/proto/go.sum index 39254afeb9e8e..b2085d8b4038e 100644 --- a/pkg/proto/go.sum +++ b/pkg/proto/go.sum @@ -5,6 +5,8 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -15,8 +17,10 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf 
v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -24,15 +28,30 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= +github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0= github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= +github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= +github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= +github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= +github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -52,7 +71,9 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= @@ -86,6 +107,7 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= @@ -106,6 +128,9 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto 
v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -123,6 +148,10 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/pkg/proto/google/api/annotations.proto b/pkg/proto/google/api/annotations.proto deleted file mode 100644 index 85c361b47fed2..0000000000000 --- a/pkg/proto/google/api/annotations.proto +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api; - -import "google/api/http.proto"; -import "google/protobuf/descriptor.proto"; - -option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; -option java_multiple_files = true; -option java_outer_classname = "AnnotationsProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -extend google.protobuf.MethodOptions { - // See `HttpRule`. - HttpRule http = 72295728; -} diff --git a/pkg/proto/google/api/http.proto b/pkg/proto/google/api/http.proto deleted file mode 100644 index b2977f5147412..0000000000000 --- a/pkg/proto/google/api/http.proto +++ /dev/null @@ -1,376 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; -option java_multiple_files = true; -option java_outer_classname = "HttpProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// Defines the HTTP configuration for an API service. It contains a list of -// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method -// to one or more HTTP REST API methods. 
-message Http { - // A list of HTTP configuration rules that apply to individual API methods. - // - // **NOTE:** All service configuration rules follow "last one wins" order. - repeated HttpRule rules = 1; - - // When set to true, URL path parameters will be fully URI-decoded except in - // cases of single segment matches in reserved expansion, where "%2F" will be - // left encoded. - // - // The default behavior is to not decode RFC 6570 reserved characters in multi - // segment matches. - bool fully_decode_reserved_expansion = 2; -} - -// # gRPC Transcoding -// -// gRPC Transcoding is a feature for mapping between a gRPC method and one or -// more HTTP REST endpoints. It allows developers to build a single API service -// that supports both gRPC APIs and REST APIs. Many systems, including [Google -// APIs](https://github.com/googleapis/googleapis), -// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC -// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), -// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature -// and use it for large scale production services. -// -// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies -// how different portions of the gRPC request message are mapped to the URL -// path, URL query parameters, and HTTP request body. It also controls how the -// gRPC response message is mapped to the HTTP response body. `HttpRule` is -// typically specified as an `google.api.http` annotation on the gRPC method. -// -// Each mapping specifies a URL path template and an HTTP method. The path -// template may refer to one or more fields in the gRPC request message, as long -// as each field is a non-repeated field with a primitive (non-message) type. -// The path template controls how fields of the request message are mapped to -// the URL path. 
-// -// Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/{name=messages/*}" -// }; -// } -// } -// message GetMessageRequest { -// string name = 1; // Mapped to URL path. -// } -// message Message { -// string text = 1; // The resource content. -// } -// -// This enables an HTTP REST to gRPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` -// -// Any fields in the request message which are not bound by the path template -// automatically become HTTP query parameters if there is no HTTP request body. -// For example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get:"/v1/messages/{message_id}" -// }; -// } -// } -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // Mapped to URL path. -// int64 revision = 2; // Mapped to URL query parameter `revision`. -// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. -// } -// -// This enables a HTTP JSON to RPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | -// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: -// "foo"))` -// -// Note that fields which are mapped to URL query parameters must have a -// primitive type or a repeated primitive type or a non-repeated message type. -// In the case of a repeated type, the parameter can be repeated in the URL -// as `...?param=A¶m=B`. In the case of a message type, each field of the -// message is mapped to a separate parameter, such as -// `...?foo.a=A&foo.b=B&foo.c=C`. -// -// For HTTP methods that allow a request body, the `body` field -// specifies the mapping. 
Consider a REST update method on the -// message resource collection: -// -// service Messaging { -// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "message" -// }; -// } -// } -// message UpdateMessageRequest { -// string message_id = 1; // mapped to the URL -// Message message = 2; // mapped to the body -// } -// -// The following HTTP JSON to RPC mapping is enabled, where the -// representation of the JSON in the request body is determined by -// protos JSON encoding: -// -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" message { text: "Hi!" })` -// -// The special name `*` can be used in the body mapping to define that -// every field not bound by the path template should be mapped to the -// request body. This enables the following alternative definition of -// the update method: -// -// service Messaging { -// rpc UpdateMessage(Message) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "*" -// }; -// } -// } -// message Message { -// string message_id = 1; -// string text = 2; -// } -// -// -// The following HTTP JSON to RPC mapping is enabled: -// -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" text: "Hi!")` -// -// Note that when using `*` in the body mapping, it is not possible to -// have HTTP parameters, as all fields not bound by the path end in -// the body. This makes this option more rarely used in practice when -// defining REST APIs. The common usage of `*` is in custom methods -// which don't use the URL at all for transferring data. -// -// It is possible to define multiple HTTP methods for one RPC by using -// the `additional_bindings` option. 
Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/messages/{message_id}" -// additional_bindings { -// get: "/v1/users/{user_id}/messages/{message_id}" -// } -// }; -// } -// } -// message GetMessageRequest { -// string message_id = 1; -// string user_id = 2; -// } -// -// This enables the following two alternative HTTP JSON to RPC mappings: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: -// "123456")` -// -// ## Rules for HTTP mapping -// -// 1. Leaf request fields (recursive expansion nested messages in the request -// message) are classified into three categories: -// - Fields referred by the path template. They are passed via the URL path. -// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP -// request body. -// - All other fields are passed via the URL query parameters, and the -// parameter name is the field path in the request message. A repeated -// field can be represented as multiple query parameters under the same -// name. -// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields -// are passed via URL path and HTTP request body. -// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all -// fields are passed via URL path and URL query parameters. -// -// ### Path template syntax -// -// Template = "/" Segments [ Verb ] ; -// Segments = Segment { "/" Segment } ; -// Segment = "*" | "**" | LITERAL | Variable ; -// Variable = "{" FieldPath [ "=" Segments ] "}" ; -// FieldPath = IDENT { "." IDENT } ; -// Verb = ":" LITERAL ; -// -// The syntax `*` matches a single URL path segment. 
The syntax `**` matches -// zero or more URL path segments, which must be the last part of the URL path -// except the `Verb`. -// -// The syntax `Variable` matches part of the URL path as specified by its -// template. A variable template must not contain other variables. If a variable -// matches a single path segment, its template may be omitted, e.g. `{var}` -// is equivalent to `{var=*}`. -// -// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` -// contains any reserved character, such characters should be percent-encoded -// before the matching. -// -// If a variable contains exactly one path segment, such as `"{var}"` or -// `"{var=*}"`, when such a variable is expanded into a URL path on the client -// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The -// server side does the reverse decoding. Such variables show up in the -// [Discovery -// Document](https://developers.google.com/discovery/v1/reference/apis) as -// `{var}`. -// -// If a variable contains multiple path segments, such as `"{var=foo/*}"` -// or `"{var=**}"`, when such a variable is expanded into a URL path on the -// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. -// The server side does the reverse decoding, except "%2F" and "%2f" are left -// unchanged. Such variables show up in the -// [Discovery -// Document](https://developers.google.com/discovery/v1/reference/apis) as -// `{+var}`. -// -// ## Using gRPC API Service Configuration -// -// gRPC API Service Configuration (service config) is a configuration language -// for configuring a gRPC service to become a user-facing product. The -// service config is simply the YAML representation of the `google.api.Service` -// proto message. -// -// As an alternative to annotating your proto file, you can configure gRPC -// transcoding in your service config YAML files. 
You do this by specifying a -// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same -// effect as the proto annotation. This can be particularly useful if you -// have a proto that is reused in multiple services. Note that any transcoding -// specified in the service config will override any matching transcoding -// configuration in the proto. -// -// Example: -// -// http: -// rules: -// # Selects a gRPC method and applies HttpRule to it. -// - selector: example.v1.Messaging.GetMessage -// get: /v1/messages/{message_id}/{sub.subfield} -// -// ## Special notes -// -// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the -// proto to JSON conversion must follow the [proto3 -// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). -// -// While the single segment variable follows the semantics of -// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String -// Expansion, the multi segment variable **does not** follow RFC 6570 Section -// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion -// does not expand special characters like `?` and `#`, which would lead -// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding -// for multi segment variables. -// -// The path variables **must not** refer to any repeated or mapped field, -// because client libraries are not capable of handling such variable expansion. -// -// The path variables **must not** capture the leading "/" character. The reason -// is that the most common use case "{var}" does not capture the leading "/" -// character. For consistency, all path variables must share the same behavior. -// -// Repeated message fields must not be mapped to URL query parameters, because -// no client library can support such complicated mapping. -// -// If an API needs to use a JSON array for request or response body, it can map -// the request or response body to a repeated field. 
However, some gRPC -// Transcoding implementations may not support this feature. -message HttpRule { - // Selects a method to which this rule applies. - // - // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. - string selector = 1; - - // Determines the URL pattern is matched by this rules. This pattern can be - // used with any of the {get|put|post|delete|patch} methods. A custom method - // can be defined using the 'custom' field. - oneof pattern { - // Maps to HTTP GET. Used for listing and getting information about - // resources. - string get = 2; - - // Maps to HTTP PUT. Used for replacing a resource. - string put = 3; - - // Maps to HTTP POST. Used for creating a resource or performing an action. - string post = 4; - - // Maps to HTTP DELETE. Used for deleting a resource. - string delete = 5; - - // Maps to HTTP PATCH. Used for updating a resource. - string patch = 6; - - // The custom pattern is used for specifying an HTTP method that is not - // included in the `pattern` field, such as HEAD, or "*" to leave the - // HTTP method unspecified for this rule. The wild-card rule is useful - // for services that provide content to Web (HTML) clients. - CustomHttpPattern custom = 8; - } - - // The name of the request field whose value is mapped to the HTTP request - // body, or `*` for mapping all request fields not captured by the path - // pattern to the HTTP body, or omitted for not having any HTTP request body. - // - // NOTE: the referred field must be present at the top-level of the request - // message type. - string body = 7; - - // Optional. The name of the response field whose value is mapped to the HTTP - // response body. When omitted, the entire response message will be used - // as the HTTP response body. - // - // NOTE: The referred field must be present at the top-level of the response - // message type. - string response_body = 12; - - // Additional HTTP bindings for the selector. 
Nested bindings must - // not contain an `additional_bindings` field themselves (that is, - // the nesting may only be one level deep). - repeated HttpRule additional_bindings = 11; -} - -// A custom pattern is used for defining custom HTTP verb. -message CustomHttpPattern { - // The name of this custom HTTP verb. - string kind = 1; - - // The path matched by this custom verb. - string path = 2; -} diff --git a/pkg/proto/patches/0001-Customize-msgpack-parsing.patch b/pkg/proto/patches/0001-Customize-msgpack-parsing.patch new file mode 100644 index 0000000000000..21ad8f9647ec3 --- /dev/null +++ b/pkg/proto/patches/0001-Customize-msgpack-parsing.patch @@ -0,0 +1,194 @@ +From 001ef5097dbfcec313736e1dc93fd92b942f8e71 Mon Sep 17 00:00:00 2001 +From: Bertrand Mermet +Date: Fri, 13 Nov 2020 11:08:55 +0100 +Subject: [PATCH] Customize msgpack parsing + +--- + pkg/trace/pb/span_gen.go | 92 +++++++++++++++++++++++++++++++++------- + 1 file changed, 76 insertions(+), 16 deletions(-) + +diff --git a/pkg/trace/pb/span_gen.go b/pkg/trace/pb/span_gen.go +index d834873065..9e6cbc401c 100644 +--- a/pkg/trace/pb/span_gen.go ++++ b/pkg/trace/pb/span_gen.go +@@ -77,60 +77,110 @@ func (z *Span) UnmarshalMsg(bts []byte) (o []byte, err error) { + } + switch msgp.UnsafeString(field) { + case "service": +- z.Service, bts, err = msgp.ReadStringBytes(bts) ++ if msgp.IsNil(bts) { ++ bts, err = msgp.ReadNilBytes(bts) ++ z.Service = "" ++ break ++ } ++ z.Service, bts, err = parseStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Service") + return + } + case "name": +- z.Name, bts, err = msgp.ReadStringBytes(bts) ++ if msgp.IsNil(bts) { ++ bts, err = msgp.ReadNilBytes(bts) ++ z.Name = "" ++ break ++ } ++ z.Name, bts, err = parseStringBytes(bts) + if err != nil { +- err = msgp.WrapError(err, "Name") ++ err = msgp.WrapError(err, "Service") + return + } + case "resource": +- z.Resource, bts, err = msgp.ReadStringBytes(bts) ++ if msgp.IsNil(bts) { ++ bts, err = msgp.ReadNilBytes(bts) ++ 
z.Resource = "" ++ break ++ } ++ z.Resource, bts, err = parseStringBytes(bts) + if err != nil { +- err = msgp.WrapError(err, "Resource") ++ err = msgp.WrapError(err, "Service") + return + } + case "trace_id": +- z.TraceID, bts, err = msgp.ReadUint64Bytes(bts) ++ if msgp.IsNil(bts) { ++ bts, err = msgp.ReadNilBytes(bts) ++ z.TraceID = 0 ++ break ++ } ++ z.TraceID, bts, err = parseUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TraceID") + return + } + case "span_id": +- z.SpanID, bts, err = msgp.ReadUint64Bytes(bts) ++ if msgp.IsNil(bts) { ++ bts, err = msgp.ReadNilBytes(bts) ++ z.SpanID = 0 ++ break ++ } ++ z.SpanID, bts, err = parseUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "SpanID") + return + } + case "parent_id": +- z.ParentID, bts, err = msgp.ReadUint64Bytes(bts) ++ if msgp.IsNil(bts) { ++ bts, err = msgp.ReadNilBytes(bts) ++ z.ParentID = 0 ++ break ++ } ++ z.ParentID, bts, err = parseUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ParentID") + return + } + case "start": +- z.Start, bts, err = msgp.ReadInt64Bytes(bts) ++ if msgp.IsNil(bts) { ++ bts, err = msgp.ReadNilBytes(bts) ++ z.Start = 0 ++ break ++ } ++ z.Start, bts, err = parseInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Start") + return + } + case "duration": +- z.Duration, bts, err = msgp.ReadInt64Bytes(bts) ++ if msgp.IsNil(bts) { ++ bts, err = msgp.ReadNilBytes(bts) ++ z.Duration = 0 ++ break ++ } ++ z.Duration, bts, err = parseInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Duration") + return + } + case "error": +- z.Error, bts, err = msgp.ReadInt32Bytes(bts) ++ if msgp.IsNil(bts) { ++ bts, err = msgp.ReadNilBytes(bts) ++ z.Error = 0 ++ break ++ } ++ z.Error, bts, err = parseInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Error") + return + } + case "meta": ++ if msgp.IsNil(bts) { ++ bts, err = msgp.ReadNilBytes(bts) ++ z.Meta = nil ++ break ++ } + var zb0002 uint32 + zb0002, bts, err = 
msgp.ReadMapHeaderBytes(bts) + if err != nil { +@@ -148,12 +198,12 @@ func (z *Span) UnmarshalMsg(bts []byte) (o []byte, err error) { + var za0001 string + var za0002 string + zb0002-- +- za0001, bts, err = msgp.ReadStringBytes(bts) ++ za0001, bts, err = parseStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Meta") + return + } +- za0002, bts, err = msgp.ReadStringBytes(bts) ++ za0002, bts, err = parseStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Meta", za0001) + return +@@ -161,6 +211,11 @@ func (z *Span) UnmarshalMsg(bts []byte) (o []byte, err error) { + z.Meta[za0001] = za0002 + } + case "metrics": ++ if msgp.IsNil(bts) { ++ bts, err = msgp.ReadNilBytes(bts) ++ z.Metrics = nil ++ break ++ } + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { +@@ -178,12 +233,12 @@ func (z *Span) UnmarshalMsg(bts []byte) (o []byte, err error) { + var za0003 string + var za0004 float64 + zb0003-- +- za0003, bts, err = msgp.ReadStringBytes(bts) ++ za0003, bts, err = parseStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Metrics") + return + } +- za0004, bts, err = msgp.ReadFloat64Bytes(bts) ++ za0004, bts, err = parseFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Metrics", za0003) + return +@@ -191,7 +246,12 @@ func (z *Span) UnmarshalMsg(bts []byte) (o []byte, err error) { + z.Metrics[za0003] = za0004 + } + case "type": +- z.Type, bts, err = msgp.ReadStringBytes(bts) ++ if msgp.IsNil(bts) { ++ bts, err = msgp.ReadNilBytes(bts) ++ z.Type = "" ++ break ++ } ++ z.Type, bts, err = parseStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Type") + return +-- +2.41.0 + diff --git a/pkg/proto/patches/0002-Make-nil-map-deserialization-retrocompatible.patch b/pkg/proto/patches/0002-Make-nil-map-deserialization-retrocompatible.patch new file mode 100644 index 0000000000000..d8dcb755e4069 --- /dev/null +++ b/pkg/proto/patches/0002-Make-nil-map-deserialization-retrocompatible.patch 
@@ -0,0 +1,34 @@ +From 5bbd9c854c7d6fa334938892da12df3f7537c37f Mon Sep 17 00:00:00 2001 +From: Bertrand Mermet +Date: Fri, 13 Nov 2020 11:32:55 +0100 +Subject: [PATCH] Make nil map deserialization retrocompatible + +--- + pkg/trace/pb/span_gen.go | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/pkg/trace/pb/span_gen.go b/pkg/trace/pb/span_gen.go +index 9e6cbc401c..c4199906ac 100644 +--- a/pkg/trace/pb/span_gen.go ++++ b/pkg/trace/pb/span_gen.go +@@ -187,7 +187,7 @@ func (z *Span) UnmarshalMsg(bts []byte) (o []byte, err error) { + err = msgp.WrapError(err, "Meta") + return + } +- if z.Meta == nil { ++ if z.Meta == nil && zb0002 > 0 { + z.Meta = make(map[string]string, zb0002) + } else if len(z.Meta) > 0 { + for key := range z.Meta { +@@ -222,7 +222,7 @@ func (z *Span) UnmarshalMsg(bts []byte) (o []byte, err error) { + err = msgp.WrapError(err, "Metrics") + return + } +- if z.Metrics == nil { ++ if z.Metrics == nil && zb0003 > 0{ + z.Metrics = make(map[string]float64, zb0003) + } else if len(z.Metrics) > 0 { + for key := range z.Metrics { +-- +2.41.0 + diff --git a/pkg/proto/patches/0003-pkg-trace-traceutil-credit-card-obfuscation-9213.patch b/pkg/proto/patches/0003-pkg-trace-traceutil-credit-card-obfuscation-9213.patch new file mode 100644 index 0000000000000..55d7ddf2c836f --- /dev/null +++ b/pkg/proto/patches/0003-pkg-trace-traceutil-credit-card-obfuscation-9213.patch @@ -0,0 +1,50 @@ +From e8ce85ce0ee230aac96594b11ffea6cabd2d89c7 Mon Sep 17 00:00:00 2001 +From: Gabriel Aszalos +Date: Tue, 2 Nov 2021 14:34:03 +0200 +Subject: [PATCH] pkg/trace/traceutil: credit card obfuscation (#9213) + +The PR adds support for credit card number obfuscation in span tags by means of configuration: +```yaml +apm_config: + obfuscation: + credit_cards: + enabled: true # enables obfuscation in span tags + luhn: true # enables Luhn check +``` +It is also possible to apply these settings via `DD_APM_OBFUSCATION_CREDIT_CARDS_ENABLED` and 
`DD_APM_OBFUSCATION_CREDIT_CARDS_LUHN`. + +The feature is off by default. Applying the Luhn algorithm has a performance impact but eliminates any potential false positives. Without it, the algorithm simply checks for valid IIN credit card prefixes (and lengths) in numeric tags, which should be sufficient for most use cases. + +The check and obfuscation is applied at decode time to avoid iterating and reading the map again. This is possible only for Msgpack. For JSON, the iteration happens since we don't own the decoding code. +--- + pkg/trace/pb/span_gen.go | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/pkg/trace/pb/span_gen.go b/pkg/trace/pb/span_gen.go +index 7a06e04853..589abbfee8 100644 +--- a/pkg/trace/pb/span_gen.go ++++ b/pkg/trace/pb/span_gen.go +@@ -74,6 +74,7 @@ func (z *Span) UnmarshalMsg(bts []byte) (o []byte, err error) { + err = msgp.WrapError(err) + return + } ++ hook, hookok := MetaHook() + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) +@@ -169,7 +170,11 @@ func (z *Span) UnmarshalMsg(bts []byte) (o []byte, err error) { + err = msgp.WrapError(err, "Meta", za0001) + return + } +- z.Meta[za0001] = za0002 ++ if hookok { ++ z.Meta[za0001] = hook(za0001, za0002) ++ } else { ++ z.Meta[za0001] = za0002 ++ } + } + case "metrics": + if msgp.IsNil(bts) { +-- +2.41.0 + diff --git a/pkg/proto/pbgo/api.pb.go b/pkg/proto/pbgo/core/api.pb.go similarity index 98% rename from pkg/proto/pbgo/api.pb.go rename to pkg/proto/pbgo/core/api.pb.go index a878d4af11817..d8c628cb02224 100644 --- a/pkg/proto/pbgo/api.pb.go +++ b/pkg/proto/pbgo/core/api.pb.go @@ -1,20 +1,20 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc v4.23.2 // source: datadog/api/v1/api.proto -package pbgo +package core import ( context "context" + empty "github.com/golang/protobuf/ptypes/empty" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" reflect "reflect" ) @@ -114,8 +114,9 @@ var file_datadog_api_v1_api_proto_rawDesc = []byte{ 0x65, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x22, 0x25, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x6d, 0x65, 0x74, 0x61, 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, - 0x3a, 0x01, 0x2a, 0x30, 0x01, 0x42, 0x10, 0x5a, 0x0e, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x3a, 0x01, 0x2a, 0x30, 0x01, 0x42, 0x15, 0x5a, 0x13, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var file_datadog_api_v1_api_proto_goTypes = []interface{}{ @@ -125,7 +126,7 @@ var file_datadog_api_v1_api_proto_goTypes = []interface{}{ (*CaptureTriggerRequest)(nil), // 3: datadog.model.v1.CaptureTriggerRequest (*TaggerState)(nil), // 4: datadog.model.v1.TaggerState (*ClientGetConfigsRequest)(nil), // 5: datadog.config.ClientGetConfigsRequest - (*emptypb.Empty)(nil), // 6: google.protobuf.Empty + (*empty.Empty)(nil), // 6: google.protobuf.Empty (*WorkloadmetaStreamRequest)(nil), // 7: datadog.workloadmeta.WorkloadmetaStreamRequest (*HostnameReply)(nil), // 8: datadog.model.v1.HostnameReply (*StreamTagsResponse)(nil), // 9: 
datadog.model.v1.StreamTagsResponse @@ -326,7 +327,7 @@ type AgentSecureClient interface { // TODO: add the curl code here DogstatsdSetTaggerState(ctx context.Context, in *TaggerState, opts ...grpc.CallOption) (*TaggerStateResponse, error) ClientGetConfigs(ctx context.Context, in *ClientGetConfigsRequest, opts ...grpc.CallOption) (*ClientGetConfigsResponse, error) - GetConfigState(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetStateConfigResponse, error) + GetConfigState(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetStateConfigResponse, error) // Subscribes to added, removed, or changed entities in the Workloadmeta and // streams them to clients as events. // Can be called through the HTTP gateway, and events will be streamed as JSON. @@ -427,7 +428,7 @@ func (c *agentSecureClient) ClientGetConfigs(ctx context.Context, in *ClientGetC return out, nil } -func (c *agentSecureClient) GetConfigState(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetStateConfigResponse, error) { +func (c *agentSecureClient) GetConfigState(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetStateConfigResponse, error) { out := new(GetStateConfigResponse) err := c.cc.Invoke(ctx, "/datadog.api.v1.AgentSecure/GetConfigState", in, out, opts...) if err != nil { @@ -523,7 +524,7 @@ type AgentSecureServer interface { // TODO: add the curl code here DogstatsdSetTaggerState(context.Context, *TaggerState) (*TaggerStateResponse, error) ClientGetConfigs(context.Context, *ClientGetConfigsRequest) (*ClientGetConfigsResponse, error) - GetConfigState(context.Context, *emptypb.Empty) (*GetStateConfigResponse, error) + GetConfigState(context.Context, *empty.Empty) (*GetStateConfigResponse, error) // Subscribes to added, removed, or changed entities in the Workloadmeta and // streams them to clients as events. // Can be called through the HTTP gateway, and events will be streamed as JSON. 
@@ -567,7 +568,7 @@ func (*UnimplementedAgentSecureServer) DogstatsdSetTaggerState(context.Context, func (*UnimplementedAgentSecureServer) ClientGetConfigs(context.Context, *ClientGetConfigsRequest) (*ClientGetConfigsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ClientGetConfigs not implemented") } -func (*UnimplementedAgentSecureServer) GetConfigState(context.Context, *emptypb.Empty) (*GetStateConfigResponse, error) { +func (*UnimplementedAgentSecureServer) GetConfigState(context.Context, *empty.Empty) (*GetStateConfigResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetConfigState not implemented") } func (*UnimplementedAgentSecureServer) WorkloadmetaStreamEntities(*WorkloadmetaStreamRequest, AgentSecure_WorkloadmetaStreamEntitiesServer) error { @@ -672,7 +673,7 @@ func _AgentSecure_ClientGetConfigs_Handler(srv interface{}, ctx context.Context, } func _AgentSecure_GetConfigState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) + in := new(empty.Empty) if err := dec(in); err != nil { return nil, err } @@ -684,7 +685,7 @@ func _AgentSecure_GetConfigState_Handler(srv interface{}, ctx context.Context, d FullMethod: "/datadog.api.v1.AgentSecure/GetConfigState", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AgentSecureServer).GetConfigState(ctx, req.(*emptypb.Empty)) + return srv.(AgentSecureServer).GetConfigState(ctx, req.(*empty.Empty)) } return interceptor(ctx, in, info, handler) } diff --git a/pkg/proto/pbgo/api.pb.gw.go b/pkg/proto/pbgo/core/api.pb.gw.go similarity index 99% rename from pkg/proto/pbgo/api.pb.gw.go rename to pkg/proto/pbgo/core/api.pb.gw.go index 5c2c2dbd071f1..05b0c60e32775 100644 --- a/pkg/proto/pbgo/api.pb.gw.go +++ b/pkg/proto/pbgo/core/api.pb.gw.go @@ -2,11 +2,11 @@ // source: datadog/api/v1/api.proto /* -Package pbgo is a reverse 
proxy. +Package core is a reverse proxy. It translates gRPC into RESTful JSON APIs. */ -package pbgo +package core import ( "context" @@ -15,13 +15,13 @@ import ( "github.com/golang/protobuf/descriptor" "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/empty" "github.com/grpc-ecosystem/grpc-gateway/runtime" "github.com/grpc-ecosystem/grpc-gateway/utilities" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/emptypb" ) // Suppress "imported and not used" errors @@ -212,7 +212,7 @@ func local_request_AgentSecure_ClientGetConfigs_0(ctx context.Context, marshaler } func request_AgentSecure_GetConfigState_0(ctx context.Context, marshaler runtime.Marshaler, client AgentSecureClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq emptypb.Empty + var protoReq empty.Empty var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) @@ -229,7 +229,7 @@ func request_AgentSecure_GetConfigState_0(ctx context.Context, marshaler runtime } func local_request_AgentSecure_GetConfigState_0(ctx context.Context, marshaler runtime.Marshaler, server AgentSecureServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq emptypb.Empty + var protoReq empty.Empty var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) diff --git a/pkg/proto/pbgo/model.pb.go b/pkg/proto/pbgo/core/model.pb.go similarity index 99% rename from pkg/proto/pbgo/model.pb.go rename to pkg/proto/pbgo/core/model.pb.go index 0cfc31560ba1d..b637a5383aca2 100644 --- a/pkg/proto/pbgo/model.pb.go +++ b/pkg/proto/pbgo/core/model.pb.go @@ -1,10 +1,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc v4.23.2 // source: datadog/model/v1/model.proto -package pbgo +package core import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -1125,8 +1125,8 @@ var file_datadog_model_v1_model_proto_rawDesc = []byte{ 0x43, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x07, 0x0a, 0x03, 0x4c, 0x4f, 0x57, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x52, 0x43, 0x48, 0x45, 0x53, 0x54, 0x52, 0x41, 0x54, 0x4f, 0x52, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x49, 0x47, 0x48, 0x10, 0x02, - 0x42, 0x10, 0x5a, 0x0e, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, - 0x67, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x42, 0x15, 0x5a, 0x13, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, + 0x67, 0x6f, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/pkg/proto/pbgo/remoteconfig.pb.go b/pkg/proto/pbgo/core/remoteconfig.pb.go similarity index 99% rename from pkg/proto/pbgo/remoteconfig.pb.go rename to pkg/proto/pbgo/core/remoteconfig.pb.go index 8468ed918d586..c3fb0d03b22ee 100644 --- a/pkg/proto/pbgo/remoteconfig.pb.go +++ b/pkg/proto/pbgo/core/remoteconfig.pb.go @@ -1,10 +1,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 -// protoc v4.23.3 +// protoc-gen-go v1.28.1 +// protoc v4.23.2 // source: datadog/remoteconfig/remoteconfig.proto -package pbgo +package core import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -1904,8 +1904,9 @@ var file_datadog_remoteconfig_remoteconfig_proto_rawDesc = []byte{ 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x31, 0x52, 0x12, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x73, 0x56, 0x31, 0x42, 0x10, 0x5a, 0x0e, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x56, 0x31, 0x42, 0x15, 0x5a, 0x13, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( diff --git a/pkg/proto/pbgo/core/remoteconfig_gen.go b/pkg/proto/pbgo/core/remoteconfig_gen.go new file mode 100644 index 0000000000000..f5a7ab831c068 --- /dev/null +++ b/pkg/proto/pbgo/core/remoteconfig_gen.go @@ -0,0 +1,3309 @@ +package core + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+ +import ( + "github.com/tinylib/msgp/msgp" +) + +// MarshalMsg implements msgp.Marshaler +func (z *Client) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 9 + // string "State" + o = append(o, 0x89, 0xa5, 0x53, 0x74, 0x61, 0x74, 0x65) + if z.State == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.State.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "State") + return + } + } + // string "Id" + o = append(o, 0xa2, 0x49, 0x64) + o = msgp.AppendString(o, z.Id) + // string "Products" + o = append(o, 0xa8, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Products))) + for za0001 := range z.Products { + o = msgp.AppendString(o, z.Products[za0001]) + } + // string "IsTracer" + o = append(o, 0xa8, 0x49, 0x73, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72) + o = msgp.AppendBool(o, z.IsTracer) + // string "ClientTracer" + o = append(o, 0xac, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72) + if z.ClientTracer == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.ClientTracer.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "ClientTracer") + return + } + } + // string "IsAgent" + o = append(o, 0xa7, 0x49, 0x73, 0x41, 0x67, 0x65, 0x6e, 0x74) + o = msgp.AppendBool(o, z.IsAgent) + // string "ClientAgent" + o = append(o, 0xab, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74) + if z.ClientAgent == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.ClientAgent.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "ClientAgent") + return + } + } + // string "LastSeen" + o = append(o, 0xa8, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e) + o = msgp.AppendUint64(o, z.LastSeen) + // string "Capabilities" + o = append(o, 0xac, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73) + o = msgp.AppendBytes(o, z.Capabilities) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Client) 
UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "State": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.State = nil + } else { + if z.State == nil { + z.State = new(ClientState) + } + bts, err = z.State.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "State") + return + } + } + case "Id": + z.Id, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Id") + return + } + case "Products": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Products") + return + } + if cap(z.Products) >= int(zb0002) { + z.Products = (z.Products)[:zb0002] + } else { + z.Products = make([]string, zb0002) + } + for za0001 := range z.Products { + z.Products[za0001], bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Products", za0001) + return + } + } + case "IsTracer": + z.IsTracer, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "IsTracer") + return + } + case "ClientTracer": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.ClientTracer = nil + } else { + if z.ClientTracer == nil { + z.ClientTracer = new(ClientTracer) + } + bts, err = z.ClientTracer.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "ClientTracer") + return + } + } + case "IsAgent": + z.IsAgent, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "IsAgent") + return + } + case "ClientAgent": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + 
z.ClientAgent = nil + } else { + if z.ClientAgent == nil { + z.ClientAgent = new(ClientAgent) + } + bts, err = z.ClientAgent.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "ClientAgent") + return + } + } + case "LastSeen": + z.LastSeen, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "LastSeen") + return + } + case "Capabilities": + z.Capabilities, bts, err = msgp.ReadBytesBytes(bts, z.Capabilities) + if err != nil { + err = msgp.WrapError(err, "Capabilities") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Client) Msgsize() (s int) { + s = 1 + 6 + if z.State == nil { + s += msgp.NilSize + } else { + s += z.State.Msgsize() + } + s += 3 + msgp.StringPrefixSize + len(z.Id) + 9 + msgp.ArrayHeaderSize + for za0001 := range z.Products { + s += msgp.StringPrefixSize + len(z.Products[za0001]) + } + s += 9 + msgp.BoolSize + 13 + if z.ClientTracer == nil { + s += msgp.NilSize + } else { + s += z.ClientTracer.Msgsize() + } + s += 8 + msgp.BoolSize + 12 + if z.ClientAgent == nil { + s += msgp.NilSize + } else { + s += z.ClientAgent.Msgsize() + } + s += 9 + msgp.Uint64Size + 13 + msgp.BytesPrefixSize + len(z.Capabilities) + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *ClientAgent) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 5 + // string "Name" + o = append(o, 0x85, 0xa4, 0x4e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.Name) + // string "Version" + o = append(o, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.Version) + // string "ClusterName" + o = append(o, 0xab, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.ClusterName) + // string "ClusterId" + o = append(o, 
0xa9, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64) + o = msgp.AppendString(o, z.ClusterId) + // string "CwsWorkloads" + o = append(o, 0xac, 0x43, 0x77, 0x73, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.CwsWorkloads))) + for za0001 := range z.CwsWorkloads { + o = msgp.AppendString(o, z.CwsWorkloads[za0001]) + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ClientAgent) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Name": + z.Name, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + case "Version": + z.Version, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Version") + return + } + case "ClusterName": + z.ClusterName, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ClusterName") + return + } + case "ClusterId": + z.ClusterId, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ClusterId") + return + } + case "CwsWorkloads": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "CwsWorkloads") + return + } + if cap(z.CwsWorkloads) >= int(zb0002) { + z.CwsWorkloads = (z.CwsWorkloads)[:zb0002] + } else { + z.CwsWorkloads = make([]string, zb0002) + } + for za0001 := range z.CwsWorkloads { + z.CwsWorkloads[za0001], bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "CwsWorkloads", za0001) + return + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + 
} + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ClientAgent) Msgsize() (s int) { + s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.StringPrefixSize + len(z.Version) + 12 + msgp.StringPrefixSize + len(z.ClusterName) + 10 + msgp.StringPrefixSize + len(z.ClusterId) + 13 + msgp.ArrayHeaderSize + for za0001 := range z.CwsWorkloads { + s += msgp.StringPrefixSize + len(z.CwsWorkloads[za0001]) + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *ClientGetConfigsRequest) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 2 + // string "Client" + o = append(o, 0x82, 0xa6, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74) + if z.Client == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.Client.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Client") + return + } + } + // string "CachedTargetFiles" + o = append(o, 0xb1, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.CachedTargetFiles))) + for za0001 := range z.CachedTargetFiles { + if z.CachedTargetFiles[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.CachedTargetFiles[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "CachedTargetFiles", za0001) + return + } + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ClientGetConfigsRequest) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Client": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { 
+ return + } + z.Client = nil + } else { + if z.Client == nil { + z.Client = new(Client) + } + bts, err = z.Client.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Client") + return + } + } + case "CachedTargetFiles": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "CachedTargetFiles") + return + } + if cap(z.CachedTargetFiles) >= int(zb0002) { + z.CachedTargetFiles = (z.CachedTargetFiles)[:zb0002] + } else { + z.CachedTargetFiles = make([]*TargetFileMeta, zb0002) + } + for za0001 := range z.CachedTargetFiles { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.CachedTargetFiles[za0001] = nil + } else { + if z.CachedTargetFiles[za0001] == nil { + z.CachedTargetFiles[za0001] = new(TargetFileMeta) + } + bts, err = z.CachedTargetFiles[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "CachedTargetFiles", za0001) + return + } + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ClientGetConfigsRequest) Msgsize() (s int) { + s = 1 + 7 + if z.Client == nil { + s += msgp.NilSize + } else { + s += z.Client.Msgsize() + } + s += 18 + msgp.ArrayHeaderSize + for za0001 := range z.CachedTargetFiles { + if z.CachedTargetFiles[za0001] == nil { + s += msgp.NilSize + } else { + s += z.CachedTargetFiles[za0001].Msgsize() + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *ClientGetConfigsResponse) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 4 + // string "Roots" + o = append(o, 0x84, 0xa5, 0x52, 0x6f, 0x6f, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Roots))) + for za0001 := range z.Roots { + o = msgp.AppendBytes(o, z.Roots[za0001]) + } + // string 
"Targets" + o = append(o, 0xa7, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73) + o = msgp.AppendBytes(o, z.Targets) + // string "TargetFiles" + o = append(o, 0xab, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.TargetFiles))) + for za0002 := range z.TargetFiles { + if z.TargetFiles[za0002] == nil { + o = msgp.AppendNil(o) + } else { + // map header, size 2 + // string "Path" + o = append(o, 0x82, 0xa4, 0x50, 0x61, 0x74, 0x68) + o = msgp.AppendString(o, z.TargetFiles[za0002].Path) + // string "Raw" + o = append(o, 0xa3, 0x52, 0x61, 0x77) + o = msgp.AppendBytes(o, z.TargetFiles[za0002].Raw) + } + } + // string "ClientConfigs" + o = append(o, 0xad, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.ClientConfigs))) + for za0003 := range z.ClientConfigs { + o = msgp.AppendString(o, z.ClientConfigs[za0003]) + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ClientGetConfigsResponse) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Roots": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Roots") + return + } + if cap(z.Roots) >= int(zb0002) { + z.Roots = (z.Roots)[:zb0002] + } else { + z.Roots = make([][]byte, zb0002) + } + for za0001 := range z.Roots { + z.Roots[za0001], bts, err = msgp.ReadBytesBytes(bts, z.Roots[za0001]) + if err != nil { + err = msgp.WrapError(err, "Roots", za0001) + return + } + } + case "Targets": + z.Targets, bts, err = msgp.ReadBytesBytes(bts, z.Targets) + if err != nil { + err = 
msgp.WrapError(err, "Targets") + return + } + case "TargetFiles": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TargetFiles") + return + } + if cap(z.TargetFiles) >= int(zb0003) { + z.TargetFiles = (z.TargetFiles)[:zb0003] + } else { + z.TargetFiles = make([]*File, zb0003) + } + for za0002 := range z.TargetFiles { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.TargetFiles[za0002] = nil + } else { + if z.TargetFiles[za0002] == nil { + z.TargetFiles[za0002] = new(File) + } + var zb0004 uint32 + zb0004, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TargetFiles", za0002) + return + } + for zb0004 > 0 { + zb0004-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "TargetFiles", za0002) + return + } + switch msgp.UnsafeString(field) { + case "Path": + z.TargetFiles[za0002].Path, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TargetFiles", za0002, "Path") + return + } + case "Raw": + z.TargetFiles[za0002].Raw, bts, err = msgp.ReadBytesBytes(bts, z.TargetFiles[za0002].Raw) + if err != nil { + err = msgp.WrapError(err, "TargetFiles", za0002, "Raw") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "TargetFiles", za0002) + return + } + } + } + } + } + case "ClientConfigs": + var zb0005 uint32 + zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ClientConfigs") + return + } + if cap(z.ClientConfigs) >= int(zb0005) { + z.ClientConfigs = (z.ClientConfigs)[:zb0005] + } else { + z.ClientConfigs = make([]string, zb0005) + } + for za0003 := range z.ClientConfigs { + z.ClientConfigs[za0003], bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ClientConfigs", za0003) + return + } + } + default: + bts, err = msgp.Skip(bts) + if 
err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ClientGetConfigsResponse) Msgsize() (s int) { + s = 1 + 6 + msgp.ArrayHeaderSize + for za0001 := range z.Roots { + s += msgp.BytesPrefixSize + len(z.Roots[za0001]) + } + s += 8 + msgp.BytesPrefixSize + len(z.Targets) + 12 + msgp.ArrayHeaderSize + for za0002 := range z.TargetFiles { + if z.TargetFiles[za0002] == nil { + s += msgp.NilSize + } else { + s += 1 + 5 + msgp.StringPrefixSize + len(z.TargetFiles[za0002].Path) + 4 + msgp.BytesPrefixSize + len(z.TargetFiles[za0002].Raw) + } + } + s += 14 + msgp.ArrayHeaderSize + for za0003 := range z.ClientConfigs { + s += msgp.StringPrefixSize + len(z.ClientConfigs[za0003]) + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *ClientState) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 6 + // string "RootVersion" + o = append(o, 0x86, 0xab, 0x52, 0x6f, 0x6f, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, z.RootVersion) + // string "TargetsVersion" + o = append(o, 0xae, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, z.TargetsVersion) + // string "ConfigStates" + o = append(o, 0xac, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.ConfigStates))) + for za0001 := range z.ConfigStates { + if z.ConfigStates[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.ConfigStates[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "ConfigStates", za0001) + return + } + } + } + // string "HasError" + o = append(o, 0xa8, 0x48, 0x61, 0x73, 0x45, 0x72, 0x72, 0x6f, 0x72) + o = msgp.AppendBool(o, z.HasError) + // string "Error" + o = append(o, 0xa5, 0x45, 0x72, 0x72, 0x6f, 0x72) + o = 
msgp.AppendString(o, z.Error) + // string "BackendClientState" + o = append(o, 0xb2, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65) + o = msgp.AppendBytes(o, z.BackendClientState) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ClientState) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "RootVersion": + z.RootVersion, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "RootVersion") + return + } + case "TargetsVersion": + z.TargetsVersion, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TargetsVersion") + return + } + case "ConfigStates": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ConfigStates") + return + } + if cap(z.ConfigStates) >= int(zb0002) { + z.ConfigStates = (z.ConfigStates)[:zb0002] + } else { + z.ConfigStates = make([]*ConfigState, zb0002) + } + for za0001 := range z.ConfigStates { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.ConfigStates[za0001] = nil + } else { + if z.ConfigStates[za0001] == nil { + z.ConfigStates[za0001] = new(ConfigState) + } + bts, err = z.ConfigStates[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "ConfigStates", za0001) + return + } + } + } + case "HasError": + z.HasError, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "HasError") + return + } + case "Error": + z.Error, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Error") + return + 
} + case "BackendClientState": + z.BackendClientState, bts, err = msgp.ReadBytesBytes(bts, z.BackendClientState) + if err != nil { + err = msgp.WrapError(err, "BackendClientState") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ClientState) Msgsize() (s int) { + s = 1 + 12 + msgp.Uint64Size + 15 + msgp.Uint64Size + 13 + msgp.ArrayHeaderSize + for za0001 := range z.ConfigStates { + if z.ConfigStates[za0001] == nil { + s += msgp.NilSize + } else { + s += z.ConfigStates[za0001].Msgsize() + } + } + s += 9 + msgp.BoolSize + 6 + msgp.StringPrefixSize + len(z.Error) + 19 + msgp.BytesPrefixSize + len(z.BackendClientState) + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *ClientTracer) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 8 + // string "RuntimeId" + o = append(o, 0x88, 0xa9, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x64) + o = msgp.AppendString(o, z.RuntimeId) + // string "Language" + o = append(o, 0xa8, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65) + o = msgp.AppendString(o, z.Language) + // string "TracerVersion" + o = append(o, 0xad, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.TracerVersion) + // string "Service" + o = append(o, 0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) + o = msgp.AppendString(o, z.Service) + // string "ExtraServices" + o = append(o, 0xad, 0x45, 0x78, 0x74, 0x72, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.ExtraServices))) + for za0001 := range z.ExtraServices { + o = msgp.AppendString(o, z.ExtraServices[za0001]) + } + // string "Env" + o = append(o, 0xa3, 0x45, 0x6e, 0x76) + o = msgp.AppendString(o, z.Env) + // string "AppVersion" + o = 
append(o, 0xaa, 0x41, 0x70, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.AppVersion) + // string "Tags" + o = append(o, 0xa4, 0x54, 0x61, 0x67, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Tags))) + for za0002 := range z.Tags { + o = msgp.AppendString(o, z.Tags[za0002]) + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ClientTracer) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "RuntimeId": + z.RuntimeId, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "RuntimeId") + return + } + case "Language": + z.Language, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Language") + return + } + case "TracerVersion": + z.TracerVersion, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TracerVersion") + return + } + case "Service": + z.Service, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Service") + return + } + case "ExtraServices": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ExtraServices") + return + } + if cap(z.ExtraServices) >= int(zb0002) { + z.ExtraServices = (z.ExtraServices)[:zb0002] + } else { + z.ExtraServices = make([]string, zb0002) + } + for za0001 := range z.ExtraServices { + z.ExtraServices[za0001], bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ExtraServices", za0001) + return + } + } + case "Env": + z.Env, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Env") + return + } + case 
"AppVersion": + z.AppVersion, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AppVersion") + return + } + case "Tags": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + if cap(z.Tags) >= int(zb0003) { + z.Tags = (z.Tags)[:zb0003] + } else { + z.Tags = make([]string, zb0003) + } + for za0002 := range z.Tags { + z.Tags[za0002], bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags", za0002) + return + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ClientTracer) Msgsize() (s int) { + s = 1 + 10 + msgp.StringPrefixSize + len(z.RuntimeId) + 9 + msgp.StringPrefixSize + len(z.Language) + 14 + msgp.StringPrefixSize + len(z.TracerVersion) + 8 + msgp.StringPrefixSize + len(z.Service) + 14 + msgp.ArrayHeaderSize + for za0001 := range z.ExtraServices { + s += msgp.StringPrefixSize + len(z.ExtraServices[za0001]) + } + s += 4 + msgp.StringPrefixSize + len(z.Env) + 11 + msgp.StringPrefixSize + len(z.AppVersion) + 5 + msgp.ArrayHeaderSize + for za0002 := range z.Tags { + s += msgp.StringPrefixSize + len(z.Tags[za0002]) + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *ConfigMetas) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 5 + // string "Roots" + o = append(o, 0x85, 0xa5, 0x52, 0x6f, 0x6f, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Roots))) + for za0001 := range z.Roots { + if z.Roots[za0001] == nil { + o = msgp.AppendNil(o) + } else { + // map header, size 2 + // string "Version" + o = append(o, 0x82, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, z.Roots[za0001].Version) + // string "Raw" + o = append(o, 
0xa3, 0x52, 0x61, 0x77) + o = msgp.AppendBytes(o, z.Roots[za0001].Raw) + } + } + // string "Timestamp" + o = append(o, 0xa9, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70) + if z.Timestamp == nil { + o = msgp.AppendNil(o) + } else { + // map header, size 2 + // string "Version" + o = append(o, 0x82, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, z.Timestamp.Version) + // string "Raw" + o = append(o, 0xa3, 0x52, 0x61, 0x77) + o = msgp.AppendBytes(o, z.Timestamp.Raw) + } + // string "Snapshot" + o = append(o, 0xa8, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74) + if z.Snapshot == nil { + o = msgp.AppendNil(o) + } else { + // map header, size 2 + // string "Version" + o = append(o, 0x82, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, z.Snapshot.Version) + // string "Raw" + o = append(o, 0xa3, 0x52, 0x61, 0x77) + o = msgp.AppendBytes(o, z.Snapshot.Raw) + } + // string "TopTargets" + o = append(o, 0xaa, 0x54, 0x6f, 0x70, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73) + if z.TopTargets == nil { + o = msgp.AppendNil(o) + } else { + // map header, size 2 + // string "Version" + o = append(o, 0x82, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, z.TopTargets.Version) + // string "Raw" + o = append(o, 0xa3, 0x52, 0x61, 0x77) + o = msgp.AppendBytes(o, z.TopTargets.Raw) + } + // string "DelegatedTargets" + o = append(o, 0xb0, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.DelegatedTargets))) + for za0002 := range z.DelegatedTargets { + if z.DelegatedTargets[za0002] == nil { + o = msgp.AppendNil(o) + } else { + // map header, size 3 + // string "Version" + o = append(o, 0x83, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, z.DelegatedTargets[za0002].Version) + // string "Role" + o = append(o, 0xa4, 0x52, 0x6f, 0x6c, 0x65) + o = msgp.AppendString(o, 
z.DelegatedTargets[za0002].Role) + // string "Raw" + o = append(o, 0xa3, 0x52, 0x61, 0x77) + o = msgp.AppendBytes(o, z.DelegatedTargets[za0002].Raw) + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ConfigMetas) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Roots": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Roots") + return + } + if cap(z.Roots) >= int(zb0002) { + z.Roots = (z.Roots)[:zb0002] + } else { + z.Roots = make([]*TopMeta, zb0002) + } + for za0001 := range z.Roots { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Roots[za0001] = nil + } else { + if z.Roots[za0001] == nil { + z.Roots[za0001] = new(TopMeta) + } + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Roots", za0001) + return + } + for zb0003 > 0 { + zb0003-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "Roots", za0001) + return + } + switch msgp.UnsafeString(field) { + case "Version": + z.Roots[za0001].Version, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Roots", za0001, "Version") + return + } + case "Raw": + z.Roots[za0001].Raw, bts, err = msgp.ReadBytesBytes(bts, z.Roots[za0001].Raw) + if err != nil { + err = msgp.WrapError(err, "Roots", za0001, "Raw") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "Roots", za0001) + return + } + } + } + } + } + case "Timestamp": + if msgp.IsNil(bts) { + bts, err = 
msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Timestamp = nil + } else { + if z.Timestamp == nil { + z.Timestamp = new(TopMeta) + } + var zb0004 uint32 + zb0004, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Timestamp") + return + } + for zb0004 > 0 { + zb0004-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "Timestamp") + return + } + switch msgp.UnsafeString(field) { + case "Version": + z.Timestamp.Version, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Timestamp", "Version") + return + } + case "Raw": + z.Timestamp.Raw, bts, err = msgp.ReadBytesBytes(bts, z.Timestamp.Raw) + if err != nil { + err = msgp.WrapError(err, "Timestamp", "Raw") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "Timestamp") + return + } + } + } + } + case "Snapshot": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Snapshot = nil + } else { + if z.Snapshot == nil { + z.Snapshot = new(TopMeta) + } + var zb0005 uint32 + zb0005, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Snapshot") + return + } + for zb0005 > 0 { + zb0005-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "Snapshot") + return + } + switch msgp.UnsafeString(field) { + case "Version": + z.Snapshot.Version, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Snapshot", "Version") + return + } + case "Raw": + z.Snapshot.Raw, bts, err = msgp.ReadBytesBytes(bts, z.Snapshot.Raw) + if err != nil { + err = msgp.WrapError(err, "Snapshot", "Raw") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "Snapshot") + return + } + } + } + } + case "TopTargets": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + 
z.TopTargets = nil + } else { + if z.TopTargets == nil { + z.TopTargets = new(TopMeta) + } + var zb0006 uint32 + zb0006, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TopTargets") + return + } + for zb0006 > 0 { + zb0006-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "TopTargets") + return + } + switch msgp.UnsafeString(field) { + case "Version": + z.TopTargets.Version, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TopTargets", "Version") + return + } + case "Raw": + z.TopTargets.Raw, bts, err = msgp.ReadBytesBytes(bts, z.TopTargets.Raw) + if err != nil { + err = msgp.WrapError(err, "TopTargets", "Raw") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "TopTargets") + return + } + } + } + } + case "DelegatedTargets": + var zb0007 uint32 + zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DelegatedTargets") + return + } + if cap(z.DelegatedTargets) >= int(zb0007) { + z.DelegatedTargets = (z.DelegatedTargets)[:zb0007] + } else { + z.DelegatedTargets = make([]*DelegatedMeta, zb0007) + } + for za0002 := range z.DelegatedTargets { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.DelegatedTargets[za0002] = nil + } else { + if z.DelegatedTargets[za0002] == nil { + z.DelegatedTargets[za0002] = new(DelegatedMeta) + } + var zb0008 uint32 + zb0008, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DelegatedTargets", za0002) + return + } + for zb0008 > 0 { + zb0008-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "DelegatedTargets", za0002) + return + } + switch msgp.UnsafeString(field) { + case "Version": + z.DelegatedTargets[za0002].Version, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, 
"DelegatedTargets", za0002, "Version") + return + } + case "Role": + z.DelegatedTargets[za0002].Role, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DelegatedTargets", za0002, "Role") + return + } + case "Raw": + z.DelegatedTargets[za0002].Raw, bts, err = msgp.ReadBytesBytes(bts, z.DelegatedTargets[za0002].Raw) + if err != nil { + err = msgp.WrapError(err, "DelegatedTargets", za0002, "Raw") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "DelegatedTargets", za0002) + return + } + } + } + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ConfigMetas) Msgsize() (s int) { + s = 1 + 6 + msgp.ArrayHeaderSize + for za0001 := range z.Roots { + if z.Roots[za0001] == nil { + s += msgp.NilSize + } else { + s += 1 + 8 + msgp.Uint64Size + 4 + msgp.BytesPrefixSize + len(z.Roots[za0001].Raw) + } + } + s += 10 + if z.Timestamp == nil { + s += msgp.NilSize + } else { + s += 1 + 8 + msgp.Uint64Size + 4 + msgp.BytesPrefixSize + len(z.Timestamp.Raw) + } + s += 9 + if z.Snapshot == nil { + s += msgp.NilSize + } else { + s += 1 + 8 + msgp.Uint64Size + 4 + msgp.BytesPrefixSize + len(z.Snapshot.Raw) + } + s += 11 + if z.TopTargets == nil { + s += msgp.NilSize + } else { + s += 1 + 8 + msgp.Uint64Size + 4 + msgp.BytesPrefixSize + len(z.TopTargets.Raw) + } + s += 17 + msgp.ArrayHeaderSize + for za0002 := range z.DelegatedTargets { + if z.DelegatedTargets[za0002] == nil { + s += msgp.NilSize + } else { + s += 1 + 8 + msgp.Uint64Size + 5 + msgp.StringPrefixSize + len(z.DelegatedTargets[za0002].Role) + 4 + msgp.BytesPrefixSize + len(z.DelegatedTargets[za0002].Raw) + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *ConfigState) MarshalMsg(b []byte) (o []byte, err error) { + o = 
msgp.Require(b, z.Msgsize()) + // map header, size 5 + // string "Id" + o = append(o, 0x85, 0xa2, 0x49, 0x64) + o = msgp.AppendString(o, z.Id) + // string "Version" + o = append(o, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, z.Version) + // string "Product" + o = append(o, 0xa7, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74) + o = msgp.AppendString(o, z.Product) + // string "ApplyState" + o = append(o, 0xaa, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65) + o = msgp.AppendUint64(o, z.ApplyState) + // string "ApplyError" + o = append(o, 0xaa, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x45, 0x72, 0x72, 0x6f, 0x72) + o = msgp.AppendString(o, z.ApplyError) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *ConfigState) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Id": + z.Id, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Id") + return + } + case "Version": + z.Version, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Version") + return + } + case "Product": + z.Product, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Product") + return + } + case "ApplyState": + z.ApplyState, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ApplyState") + return + } + case "ApplyError": + z.ApplyError, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ApplyError") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper 
bound estimate of the number of bytes occupied by the serialized message +func (z *ConfigState) Msgsize() (s int) { + s = 1 + 3 + msgp.StringPrefixSize + len(z.Id) + 8 + msgp.Uint64Size + 8 + msgp.StringPrefixSize + len(z.Product) + 11 + msgp.Uint64Size + 11 + msgp.StringPrefixSize + len(z.ApplyError) + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *DelegatedMeta) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 3 + // string "Version" + o = append(o, 0x83, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, z.Version) + // string "Role" + o = append(o, 0xa4, 0x52, 0x6f, 0x6c, 0x65) + o = msgp.AppendString(o, z.Role) + // string "Raw" + o = append(o, 0xa3, 0x52, 0x61, 0x77) + o = msgp.AppendBytes(o, z.Raw) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *DelegatedMeta) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Version": + z.Version, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Version") + return + } + case "Role": + z.Role, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Role") + return + } + case "Raw": + z.Raw, bts, err = msgp.ReadBytesBytes(bts, z.Raw) + if err != nil { + err = msgp.WrapError(err, "Raw") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *DelegatedMeta) Msgsize() (s int) { + s = 1 + 8 + msgp.Uint64Size + 5 + 
msgp.StringPrefixSize + len(z.Role) + 4 + msgp.BytesPrefixSize + len(z.Raw) + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *DirectorMetas) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 4 + // string "Roots" + o = append(o, 0x84, 0xa5, 0x52, 0x6f, 0x6f, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Roots))) + for za0001 := range z.Roots { + if z.Roots[za0001] == nil { + o = msgp.AppendNil(o) + } else { + // map header, size 2 + // string "Version" + o = append(o, 0x82, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, z.Roots[za0001].Version) + // string "Raw" + o = append(o, 0xa3, 0x52, 0x61, 0x77) + o = msgp.AppendBytes(o, z.Roots[za0001].Raw) + } + } + // string "Timestamp" + o = append(o, 0xa9, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70) + if z.Timestamp == nil { + o = msgp.AppendNil(o) + } else { + // map header, size 2 + // string "Version" + o = append(o, 0x82, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, z.Timestamp.Version) + // string "Raw" + o = append(o, 0xa3, 0x52, 0x61, 0x77) + o = msgp.AppendBytes(o, z.Timestamp.Raw) + } + // string "Snapshot" + o = append(o, 0xa8, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74) + if z.Snapshot == nil { + o = msgp.AppendNil(o) + } else { + // map header, size 2 + // string "Version" + o = append(o, 0x82, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, z.Snapshot.Version) + // string "Raw" + o = append(o, 0xa3, 0x52, 0x61, 0x77) + o = msgp.AppendBytes(o, z.Snapshot.Raw) + } + // string "Targets" + o = append(o, 0xa7, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73) + if z.Targets == nil { + o = msgp.AppendNil(o) + } else { + // map header, size 2 + // string "Version" + o = append(o, 0x82, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, z.Targets.Version) + // string "Raw" + o = append(o, 0xa3, 0x52, 0x61, 0x77) + o = 
msgp.AppendBytes(o, z.Targets.Raw) + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *DirectorMetas) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Roots": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Roots") + return + } + if cap(z.Roots) >= int(zb0002) { + z.Roots = (z.Roots)[:zb0002] + } else { + z.Roots = make([]*TopMeta, zb0002) + } + for za0001 := range z.Roots { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Roots[za0001] = nil + } else { + if z.Roots[za0001] == nil { + z.Roots[za0001] = new(TopMeta) + } + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Roots", za0001) + return + } + for zb0003 > 0 { + zb0003-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "Roots", za0001) + return + } + switch msgp.UnsafeString(field) { + case "Version": + z.Roots[za0001].Version, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Roots", za0001, "Version") + return + } + case "Raw": + z.Roots[za0001].Raw, bts, err = msgp.ReadBytesBytes(bts, z.Roots[za0001].Raw) + if err != nil { + err = msgp.WrapError(err, "Roots", za0001, "Raw") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "Roots", za0001) + return + } + } + } + } + } + case "Timestamp": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Timestamp = nil + } else { + if z.Timestamp == nil { + z.Timestamp = 
new(TopMeta) + } + var zb0004 uint32 + zb0004, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Timestamp") + return + } + for zb0004 > 0 { + zb0004-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "Timestamp") + return + } + switch msgp.UnsafeString(field) { + case "Version": + z.Timestamp.Version, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Timestamp", "Version") + return + } + case "Raw": + z.Timestamp.Raw, bts, err = msgp.ReadBytesBytes(bts, z.Timestamp.Raw) + if err != nil { + err = msgp.WrapError(err, "Timestamp", "Raw") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "Timestamp") + return + } + } + } + } + case "Snapshot": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Snapshot = nil + } else { + if z.Snapshot == nil { + z.Snapshot = new(TopMeta) + } + var zb0005 uint32 + zb0005, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Snapshot") + return + } + for zb0005 > 0 { + zb0005-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "Snapshot") + return + } + switch msgp.UnsafeString(field) { + case "Version": + z.Snapshot.Version, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Snapshot", "Version") + return + } + case "Raw": + z.Snapshot.Raw, bts, err = msgp.ReadBytesBytes(bts, z.Snapshot.Raw) + if err != nil { + err = msgp.WrapError(err, "Snapshot", "Raw") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "Snapshot") + return + } + } + } + } + case "Targets": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Targets = nil + } else { + if z.Targets == nil { + z.Targets = new(TopMeta) + } + var zb0006 uint32 + zb0006, bts, err = 
msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Targets") + return + } + for zb0006 > 0 { + zb0006-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "Targets") + return + } + switch msgp.UnsafeString(field) { + case "Version": + z.Targets.Version, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Targets", "Version") + return + } + case "Raw": + z.Targets.Raw, bts, err = msgp.ReadBytesBytes(bts, z.Targets.Raw) + if err != nil { + err = msgp.WrapError(err, "Targets", "Raw") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "Targets") + return + } + } + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *DirectorMetas) Msgsize() (s int) { + s = 1 + 6 + msgp.ArrayHeaderSize + for za0001 := range z.Roots { + if z.Roots[za0001] == nil { + s += msgp.NilSize + } else { + s += 1 + 8 + msgp.Uint64Size + 4 + msgp.BytesPrefixSize + len(z.Roots[za0001].Raw) + } + } + s += 10 + if z.Timestamp == nil { + s += msgp.NilSize + } else { + s += 1 + 8 + msgp.Uint64Size + 4 + msgp.BytesPrefixSize + len(z.Timestamp.Raw) + } + s += 9 + if z.Snapshot == nil { + s += msgp.NilSize + } else { + s += 1 + 8 + msgp.Uint64Size + 4 + msgp.BytesPrefixSize + len(z.Snapshot.Raw) + } + s += 8 + if z.Targets == nil { + s += msgp.NilSize + } else { + s += 1 + 8 + msgp.Uint64Size + 4 + msgp.BytesPrefixSize + len(z.Targets.Raw) + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *File) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 2 + // string "Path" + o = append(o, 0x82, 0xa4, 0x50, 0x61, 0x74, 0x68) + o = msgp.AppendString(o, z.Path) + // string "Raw" + o = append(o, 0xa3, 0x52, 0x61, 0x77) 
+ o = msgp.AppendBytes(o, z.Raw) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *File) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Path": + z.Path, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Path") + return + } + case "Raw": + z.Raw, bts, err = msgp.ReadBytesBytes(bts, z.Raw) + if err != nil { + err = msgp.WrapError(err, "Raw") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *File) Msgsize() (s int) { + s = 1 + 5 + msgp.StringPrefixSize + len(z.Path) + 4 + msgp.BytesPrefixSize + len(z.Raw) + return +} + +// MarshalMsg implements msgp.Marshaler +func (z FileMetaState) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 2 + // string "Version" + o = append(o, 0x82, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, z.Version) + // string "Hash" + o = append(o, 0xa4, 0x48, 0x61, 0x73, 0x68) + o = msgp.AppendString(o, z.Hash) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *FileMetaState) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Version": + 
z.Version, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Version") + return + } + case "Hash": + z.Hash, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Hash") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z FileMetaState) Msgsize() (s int) { + s = 1 + 8 + msgp.Uint64Size + 5 + msgp.StringPrefixSize + len(z.Hash) + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *GetStateConfigResponse) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 4 + // string "ConfigState" + o = append(o, 0x84, 0xab, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65) + o = msgp.AppendMapHeader(o, uint32(len(z.ConfigState))) + for za0001, za0002 := range z.ConfigState { + o = msgp.AppendString(o, za0001) + if za0002 == nil { + o = msgp.AppendNil(o) + } else { + // map header, size 2 + // string "Version" + o = append(o, 0x82, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, za0002.Version) + // string "Hash" + o = append(o, 0xa4, 0x48, 0x61, 0x73, 0x68) + o = msgp.AppendString(o, za0002.Hash) + } + } + // string "DirectorState" + o = append(o, 0xad, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65) + o = msgp.AppendMapHeader(o, uint32(len(z.DirectorState))) + for za0003, za0004 := range z.DirectorState { + o = msgp.AppendString(o, za0003) + if za0004 == nil { + o = msgp.AppendNil(o) + } else { + // map header, size 2 + // string "Version" + o = append(o, 0x82, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, za0004.Version) + // string "Hash" + o = append(o, 0xa4, 0x48, 0x61, 0x73, 0x68) + o = msgp.AppendString(o, za0004.Hash) + } + } + // string 
"TargetFilenames" + o = append(o, 0xaf, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.TargetFilenames))) + for za0005, za0006 := range z.TargetFilenames { + o = msgp.AppendString(o, za0005) + o = msgp.AppendString(o, za0006) + } + // string "ActiveClients" + o = append(o, 0xad, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.ActiveClients))) + for za0007 := range z.ActiveClients { + if z.ActiveClients[za0007] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.ActiveClients[za0007].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "ActiveClients", za0007) + return + } + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *GetStateConfigResponse) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "ConfigState": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ConfigState") + return + } + if z.ConfigState == nil { + z.ConfigState = make(map[string]*FileMetaState, zb0002) + } else if len(z.ConfigState) > 0 { + for key := range z.ConfigState { + delete(z.ConfigState, key) + } + } + for zb0002 > 0 { + var za0001 string + var za0002 *FileMetaState + zb0002-- + za0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ConfigState") + return + } + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + za0002 = nil + } else { + if za0002 == nil { + za0002 = new(FileMetaState) + } + var zb0003 uint32 + zb0003, bts, err 
= msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ConfigState", za0001) + return + } + for zb0003 > 0 { + zb0003-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "ConfigState", za0001) + return + } + switch msgp.UnsafeString(field) { + case "Version": + za0002.Version, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ConfigState", za0001, "Version") + return + } + case "Hash": + za0002.Hash, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ConfigState", za0001, "Hash") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "ConfigState", za0001) + return + } + } + } + } + z.ConfigState[za0001] = za0002 + } + case "DirectorState": + var zb0004 uint32 + zb0004, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DirectorState") + return + } + if z.DirectorState == nil { + z.DirectorState = make(map[string]*FileMetaState, zb0004) + } else if len(z.DirectorState) > 0 { + for key := range z.DirectorState { + delete(z.DirectorState, key) + } + } + for zb0004 > 0 { + var za0003 string + var za0004 *FileMetaState + zb0004-- + za0003, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DirectorState") + return + } + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + za0004 = nil + } else { + if za0004 == nil { + za0004 = new(FileMetaState) + } + var zb0005 uint32 + zb0005, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DirectorState", za0003) + return + } + for zb0005 > 0 { + zb0005-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "DirectorState", za0003) + return + } + switch msgp.UnsafeString(field) { + case "Version": + za0004.Version, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = 
msgp.WrapError(err, "DirectorState", za0003, "Version") + return + } + case "Hash": + za0004.Hash, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DirectorState", za0003, "Hash") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "DirectorState", za0003) + return + } + } + } + } + z.DirectorState[za0003] = za0004 + } + case "TargetFilenames": + var zb0006 uint32 + zb0006, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TargetFilenames") + return + } + if z.TargetFilenames == nil { + z.TargetFilenames = make(map[string]string, zb0006) + } else if len(z.TargetFilenames) > 0 { + for key := range z.TargetFilenames { + delete(z.TargetFilenames, key) + } + } + for zb0006 > 0 { + var za0005 string + var za0006 string + zb0006-- + za0005, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TargetFilenames") + return + } + za0006, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TargetFilenames", za0005) + return + } + z.TargetFilenames[za0005] = za0006 + } + case "ActiveClients": + var zb0007 uint32 + zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ActiveClients") + return + } + if cap(z.ActiveClients) >= int(zb0007) { + z.ActiveClients = (z.ActiveClients)[:zb0007] + } else { + z.ActiveClients = make([]*Client, zb0007) + } + for za0007 := range z.ActiveClients { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.ActiveClients[za0007] = nil + } else { + if z.ActiveClients[za0007] == nil { + z.ActiveClients[za0007] = new(Client) + } + bts, err = z.ActiveClients[za0007].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "ActiveClients", za0007) + return + } + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return 
+} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *GetStateConfigResponse) Msgsize() (s int) { + s = 1 + 12 + msgp.MapHeaderSize + if z.ConfigState != nil { + for za0001, za0002 := range z.ConfigState { + _ = za0002 + s += msgp.StringPrefixSize + len(za0001) + if za0002 == nil { + s += msgp.NilSize + } else { + s += 1 + 8 + msgp.Uint64Size + 5 + msgp.StringPrefixSize + len(za0002.Hash) + } + } + } + s += 14 + msgp.MapHeaderSize + if z.DirectorState != nil { + for za0003, za0004 := range z.DirectorState { + _ = za0004 + s += msgp.StringPrefixSize + len(za0003) + if za0004 == nil { + s += msgp.NilSize + } else { + s += 1 + 8 + msgp.Uint64Size + 5 + msgp.StringPrefixSize + len(za0004.Hash) + } + } + } + s += 16 + msgp.MapHeaderSize + if z.TargetFilenames != nil { + for za0005, za0006 := range z.TargetFilenames { + _ = za0006 + s += msgp.StringPrefixSize + len(za0005) + msgp.StringPrefixSize + len(za0006) + } + } + s += 14 + msgp.ArrayHeaderSize + for za0007 := range z.ActiveClients { + if z.ActiveClients[za0007] == nil { + s += msgp.NilSize + } else { + s += z.ActiveClients[za0007].Msgsize() + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *LatestConfigsRequest) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 13 + // string "Hostname" + o = append(o, 0x8d, 0xa8, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.Hostname) + // string "AgentVersion" + o = append(o, 0xac, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.AgentVersion) + // string "CurrentConfigSnapshotVersion" + o = append(o, 0xbc, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, z.CurrentConfigSnapshotVersion) + // string 
"CurrentConfigRootVersion" + o = append(o, 0xb8, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x6f, 0x6f, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, z.CurrentConfigRootVersion) + // string "CurrentDirectorRootVersion" + o = append(o, 0xba, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x6f, 0x6f, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendUint64(o, z.CurrentDirectorRootVersion) + // string "Products" + o = append(o, 0xa8, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Products))) + for za0001 := range z.Products { + o = msgp.AppendString(o, z.Products[za0001]) + } + // string "NewProducts" + o = append(o, 0xab, 0x4e, 0x65, 0x77, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.NewProducts))) + for za0002 := range z.NewProducts { + o = msgp.AppendString(o, z.NewProducts[za0002]) + } + // string "ActiveClients" + o = append(o, 0xad, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.ActiveClients))) + for za0003 := range z.ActiveClients { + if z.ActiveClients[za0003] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.ActiveClients[za0003].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "ActiveClients", za0003) + return + } + } + } + // string "BackendClientState" + o = append(o, 0xb2, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65) + o = msgp.AppendBytes(o, z.BackendClientState) + // string "HasError" + o = append(o, 0xa8, 0x48, 0x61, 0x73, 0x45, 0x72, 0x72, 0x6f, 0x72) + o = msgp.AppendBool(o, z.HasError) + // string "Error" + o = append(o, 0xa5, 0x45, 0x72, 0x72, 0x6f, 0x72) + o = msgp.AppendString(o, z.Error) + // string "TraceAgentEnv" + o = append(o, 0xad, 0x54, 0x72, 0x61, 0x63, 0x65, 
0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x76) + o = msgp.AppendString(o, z.TraceAgentEnv) + // string "OrgUuid" + o = append(o, 0xa7, 0x4f, 0x72, 0x67, 0x55, 0x75, 0x69, 0x64) + o = msgp.AppendString(o, z.OrgUuid) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *LatestConfigsRequest) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Hostname": + z.Hostname, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Hostname") + return + } + case "AgentVersion": + z.AgentVersion, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AgentVersion") + return + } + case "CurrentConfigSnapshotVersion": + z.CurrentConfigSnapshotVersion, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "CurrentConfigSnapshotVersion") + return + } + case "CurrentConfigRootVersion": + z.CurrentConfigRootVersion, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "CurrentConfigRootVersion") + return + } + case "CurrentDirectorRootVersion": + z.CurrentDirectorRootVersion, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "CurrentDirectorRootVersion") + return + } + case "Products": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Products") + return + } + if cap(z.Products) >= int(zb0002) { + z.Products = (z.Products)[:zb0002] + } else { + z.Products = make([]string, zb0002) + } + for za0001 := range z.Products { + z.Products[za0001], bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, 
"Products", za0001) + return + } + } + case "NewProducts": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "NewProducts") + return + } + if cap(z.NewProducts) >= int(zb0003) { + z.NewProducts = (z.NewProducts)[:zb0003] + } else { + z.NewProducts = make([]string, zb0003) + } + for za0002 := range z.NewProducts { + z.NewProducts[za0002], bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "NewProducts", za0002) + return + } + } + case "ActiveClients": + var zb0004 uint32 + zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ActiveClients") + return + } + if cap(z.ActiveClients) >= int(zb0004) { + z.ActiveClients = (z.ActiveClients)[:zb0004] + } else { + z.ActiveClients = make([]*Client, zb0004) + } + for za0003 := range z.ActiveClients { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.ActiveClients[za0003] = nil + } else { + if z.ActiveClients[za0003] == nil { + z.ActiveClients[za0003] = new(Client) + } + bts, err = z.ActiveClients[za0003].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "ActiveClients", za0003) + return + } + } + } + case "BackendClientState": + z.BackendClientState, bts, err = msgp.ReadBytesBytes(bts, z.BackendClientState) + if err != nil { + err = msgp.WrapError(err, "BackendClientState") + return + } + case "HasError": + z.HasError, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "HasError") + return + } + case "Error": + z.Error, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Error") + return + } + case "TraceAgentEnv": + z.TraceAgentEnv, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TraceAgentEnv") + return + } + case "OrgUuid": + z.OrgUuid, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "OrgUuid") + 
return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *LatestConfigsRequest) Msgsize() (s int) { + s = 1 + 9 + msgp.StringPrefixSize + len(z.Hostname) + 13 + msgp.StringPrefixSize + len(z.AgentVersion) + 29 + msgp.Uint64Size + 25 + msgp.Uint64Size + 27 + msgp.Uint64Size + 9 + msgp.ArrayHeaderSize + for za0001 := range z.Products { + s += msgp.StringPrefixSize + len(z.Products[za0001]) + } + s += 12 + msgp.ArrayHeaderSize + for za0002 := range z.NewProducts { + s += msgp.StringPrefixSize + len(z.NewProducts[za0002]) + } + s += 14 + msgp.ArrayHeaderSize + for za0003 := range z.ActiveClients { + if z.ActiveClients[za0003] == nil { + s += msgp.NilSize + } else { + s += z.ActiveClients[za0003].Msgsize() + } + } + s += 19 + msgp.BytesPrefixSize + len(z.BackendClientState) + 9 + msgp.BoolSize + 6 + msgp.StringPrefixSize + len(z.Error) + 14 + msgp.StringPrefixSize + len(z.TraceAgentEnv) + 8 + msgp.StringPrefixSize + len(z.OrgUuid) + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *LatestConfigsResponse) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 3 + // string "ConfigMetas" + o = append(o, 0x83, 0xab, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x73) + if z.ConfigMetas == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.ConfigMetas.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "ConfigMetas") + return + } + } + // string "DirectorMetas" + o = append(o, 0xad, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x73) + if z.DirectorMetas == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.DirectorMetas.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "DirectorMetas") + return + } + } + // string "TargetFiles" + o = append(o, 0xab, 0x54, 
0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.TargetFiles))) + for za0001 := range z.TargetFiles { + if z.TargetFiles[za0001] == nil { + o = msgp.AppendNil(o) + } else { + // map header, size 2 + // string "Path" + o = append(o, 0x82, 0xa4, 0x50, 0x61, 0x74, 0x68) + o = msgp.AppendString(o, z.TargetFiles[za0001].Path) + // string "Raw" + o = append(o, 0xa3, 0x52, 0x61, 0x77) + o = msgp.AppendBytes(o, z.TargetFiles[za0001].Raw) + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *LatestConfigsResponse) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "ConfigMetas": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.ConfigMetas = nil + } else { + if z.ConfigMetas == nil { + z.ConfigMetas = new(ConfigMetas) + } + bts, err = z.ConfigMetas.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "ConfigMetas") + return + } + } + case "DirectorMetas": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.DirectorMetas = nil + } else { + if z.DirectorMetas == nil { + z.DirectorMetas = new(DirectorMetas) + } + bts, err = z.DirectorMetas.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "DirectorMetas") + return + } + } + case "TargetFiles": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TargetFiles") + return + } + if cap(z.TargetFiles) >= int(zb0002) { + z.TargetFiles = (z.TargetFiles)[:zb0002] + } else { + z.TargetFiles = make([]*File, zb0002) + } + for za0001 := range z.TargetFiles { + 
if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.TargetFiles[za0001] = nil + } else { + if z.TargetFiles[za0001] == nil { + z.TargetFiles[za0001] = new(File) + } + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TargetFiles", za0001) + return + } + for zb0003 > 0 { + zb0003-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "TargetFiles", za0001) + return + } + switch msgp.UnsafeString(field) { + case "Path": + z.TargetFiles[za0001].Path, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TargetFiles", za0001, "Path") + return + } + case "Raw": + z.TargetFiles[za0001].Raw, bts, err = msgp.ReadBytesBytes(bts, z.TargetFiles[za0001].Raw) + if err != nil { + err = msgp.WrapError(err, "TargetFiles", za0001, "Raw") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "TargetFiles", za0001) + return + } + } + } + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *LatestConfigsResponse) Msgsize() (s int) { + s = 1 + 12 + if z.ConfigMetas == nil { + s += msgp.NilSize + } else { + s += z.ConfigMetas.Msgsize() + } + s += 14 + if z.DirectorMetas == nil { + s += msgp.NilSize + } else { + s += z.DirectorMetas.Msgsize() + } + s += 12 + msgp.ArrayHeaderSize + for za0001 := range z.TargetFiles { + if z.TargetFiles[za0001] == nil { + s += msgp.NilSize + } else { + s += 1 + 5 + msgp.StringPrefixSize + len(z.TargetFiles[za0001].Path) + 4 + msgp.BytesPrefixSize + len(z.TargetFiles[za0001].Raw) + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z OrgDataResponse) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map 
header, size 1 + // string "Uuid" + o = append(o, 0x81, 0xa4, 0x55, 0x75, 0x69, 0x64) + o = msgp.AppendString(o, z.Uuid) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *OrgDataResponse) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Uuid": + z.Uuid, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Uuid") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z OrgDataResponse) Msgsize() (s int) { + s = 1 + 5 + msgp.StringPrefixSize + len(z.Uuid) + return +} + +// MarshalMsg implements msgp.Marshaler +func (z OrgStatusResponse) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 2 + // string "Enabled" + o = append(o, 0x82, 0xa7, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64) + o = msgp.AppendBool(o, z.Enabled) + // string "Authorized" + o = append(o, 0xaa, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64) + o = msgp.AppendBool(o, z.Authorized) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *OrgStatusResponse) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Enabled": + z.Enabled, 
bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Enabled") + return + } + case "Authorized": + z.Authorized, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Authorized") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z OrgStatusResponse) Msgsize() (s int) { + s = 1 + 8 + msgp.BoolSize + 11 + msgp.BoolSize + return +} + +// MarshalMsg implements msgp.Marshaler +func (z TargetFileHash) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 2 + // string "Algorithm" + o = append(o, 0x82, 0xa9, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d) + o = msgp.AppendString(o, z.Algorithm) + // string "Hash" + o = append(o, 0xa4, 0x48, 0x61, 0x73, 0x68) + o = msgp.AppendString(o, z.Hash) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *TargetFileHash) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Algorithm": + z.Algorithm, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Algorithm") + return + } + case "Hash": + z.Hash, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Hash") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z 
TargetFileHash) Msgsize() (s int) { + s = 1 + 10 + msgp.StringPrefixSize + len(z.Algorithm) + 5 + msgp.StringPrefixSize + len(z.Hash) + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *TargetFileMeta) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 3 + // string "Path" + o = append(o, 0x83, 0xa4, 0x50, 0x61, 0x74, 0x68) + o = msgp.AppendString(o, z.Path) + // string "Length" + o = append(o, 0xa6, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68) + o = msgp.AppendInt64(o, z.Length) + // string "Hashes" + o = append(o, 0xa6, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Hashes))) + for za0001 := range z.Hashes { + if z.Hashes[za0001] == nil { + o = msgp.AppendNil(o) + } else { + // map header, size 2 + // string "Algorithm" + o = append(o, 0x82, 0xa9, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d) + o = msgp.AppendString(o, z.Hashes[za0001].Algorithm) + // string "Hash" + o = append(o, 0xa4, 0x48, 0x61, 0x73, 0x68) + o = msgp.AppendString(o, z.Hashes[za0001].Hash) + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *TargetFileMeta) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Path": + z.Path, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Path") + return + } + case "Length": + z.Length, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Length") + return + } + case "Hashes": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Hashes") + return + } + if cap(z.Hashes) >= int(zb0002) { 
+ z.Hashes = (z.Hashes)[:zb0002] + } else { + z.Hashes = make([]*TargetFileHash, zb0002) + } + for za0001 := range z.Hashes { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Hashes[za0001] = nil + } else { + if z.Hashes[za0001] == nil { + z.Hashes[za0001] = new(TargetFileHash) + } + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Hashes", za0001) + return + } + for zb0003 > 0 { + zb0003-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "Hashes", za0001) + return + } + switch msgp.UnsafeString(field) { + case "Algorithm": + z.Hashes[za0001].Algorithm, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Hashes", za0001, "Algorithm") + return + } + case "Hash": + z.Hashes[za0001].Hash, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Hashes", za0001, "Hash") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "Hashes", za0001) + return + } + } + } + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *TargetFileMeta) Msgsize() (s int) { + s = 1 + 5 + msgp.StringPrefixSize + len(z.Path) + 7 + msgp.Int64Size + 7 + msgp.ArrayHeaderSize + for za0001 := range z.Hashes { + if z.Hashes[za0001] == nil { + s += msgp.NilSize + } else { + s += 1 + 10 + msgp.StringPrefixSize + len(z.Hashes[za0001].Algorithm) + 5 + msgp.StringPrefixSize + len(z.Hashes[za0001].Hash) + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *TopMeta) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 2 + // string "Version" + o = append(o, 0x82, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 
0x6f, 0x6e) + o = msgp.AppendUint64(o, z.Version) + // string "Raw" + o = append(o, 0xa3, 0x52, 0x61, 0x77) + o = msgp.AppendBytes(o, z.Raw) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *TopMeta) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Version": + z.Version, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Version") + return + } + case "Raw": + z.Raw, bts, err = msgp.ReadBytesBytes(bts, z.Raw) + if err != nil { + err = msgp.WrapError(err, "Raw") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *TopMeta) Msgsize() (s int) { + s = 1 + 8 + msgp.Uint64Size + 4 + msgp.BytesPrefixSize + len(z.Raw) + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *TracerPredicateV1) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 7 + // string "ClientID" + o = append(o, 0x87, 0xa8, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44) + o = msgp.AppendString(o, z.ClientID) + // string "Service" + o = append(o, 0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) + o = msgp.AppendString(o, z.Service) + // string "Environment" + o = append(o, 0xab, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74) + o = msgp.AppendString(o, z.Environment) + // string "AppVersion" + o = append(o, 0xaa, 0x41, 0x70, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.AppVersion) + // string "TracerVersion" + o = 
append(o, 0xad, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.TracerVersion) + // string "Language" + o = append(o, 0xa8, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65) + o = msgp.AppendString(o, z.Language) + // string "RuntimeID" + o = append(o, 0xa9, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44) + o = msgp.AppendString(o, z.RuntimeID) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *TracerPredicateV1) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "ClientID": + z.ClientID, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ClientID") + return + } + case "Service": + z.Service, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Service") + return + } + case "Environment": + z.Environment, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Environment") + return + } + case "AppVersion": + z.AppVersion, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AppVersion") + return + } + case "TracerVersion": + z.TracerVersion, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TracerVersion") + return + } + case "Language": + z.Language, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Language") + return + } + case "RuntimeID": + z.RuntimeID, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "RuntimeID") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + 
o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *TracerPredicateV1) Msgsize() (s int) { + s = 1 + 9 + msgp.StringPrefixSize + len(z.ClientID) + 8 + msgp.StringPrefixSize + len(z.Service) + 12 + msgp.StringPrefixSize + len(z.Environment) + 11 + msgp.StringPrefixSize + len(z.AppVersion) + 14 + msgp.StringPrefixSize + len(z.TracerVersion) + 9 + msgp.StringPrefixSize + len(z.Language) + 10 + msgp.StringPrefixSize + len(z.RuntimeID) + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *TracerPredicates) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 1 + // string "TracerPredicatesV1" + o = append(o, 0x81, 0xb2, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x56, 0x31) + o = msgp.AppendArrayHeader(o, uint32(len(z.TracerPredicatesV1))) + for za0001 := range z.TracerPredicatesV1 { + if z.TracerPredicatesV1[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.TracerPredicatesV1[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "TracerPredicatesV1", za0001) + return + } + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *TracerPredicates) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "TracerPredicatesV1": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TracerPredicatesV1") + return + } + if cap(z.TracerPredicatesV1) >= int(zb0002) { + z.TracerPredicatesV1 = (z.TracerPredicatesV1)[:zb0002] + } else { + z.TracerPredicatesV1 
= make([]*TracerPredicateV1, zb0002) + } + for za0001 := range z.TracerPredicatesV1 { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.TracerPredicatesV1[za0001] = nil + } else { + if z.TracerPredicatesV1[za0001] == nil { + z.TracerPredicatesV1[za0001] = new(TracerPredicateV1) + } + bts, err = z.TracerPredicatesV1[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "TracerPredicatesV1", za0001) + return + } + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *TracerPredicates) Msgsize() (s int) { + s = 1 + 19 + msgp.ArrayHeaderSize + for za0001 := range z.TracerPredicatesV1 { + if z.TracerPredicatesV1[za0001] == nil { + s += msgp.NilSize + } else { + s += z.TracerPredicatesV1[za0001].Msgsize() + } + } + return +} diff --git a/pkg/proto/pbgo/core/remoteconfig_gen_test.go b/pkg/proto/pbgo/core/remoteconfig_gen_test.go new file mode 100644 index 0000000000000..a1d65b7b98885 --- /dev/null +++ b/pkg/proto/pbgo/core/remoteconfig_gen_test.go @@ -0,0 +1,1285 @@ +package core + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+ +import ( + "testing" + + "github.com/tinylib/msgp/msgp" +) + +func TestMarshalUnmarshalClient(t *testing.T) { + v := Client{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgClient(b *testing.B) { + v := Client{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgClient(b *testing.B) { + v := Client{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalClient(b *testing.B) { + v := Client{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalClientAgent(t *testing.T) { + v := ClientAgent{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgClientAgent(b *testing.B) { + v := ClientAgent{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgClientAgent(b *testing.B) { + v := ClientAgent{} + bts := make([]byte, 0, v.Msgsize()) + 
bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalClientAgent(b *testing.B) { + v := ClientAgent{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalClientGetConfigsRequest(t *testing.T) { + v := ClientGetConfigsRequest{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgClientGetConfigsRequest(b *testing.B) { + v := ClientGetConfigsRequest{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgClientGetConfigsRequest(b *testing.B) { + v := ClientGetConfigsRequest{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalClientGetConfigsRequest(b *testing.B) { + v := ClientGetConfigsRequest{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalClientGetConfigsResponse(t *testing.T) { + v := ClientGetConfigsResponse{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if 
len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgClientGetConfigsResponse(b *testing.B) { + v := ClientGetConfigsResponse{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgClientGetConfigsResponse(b *testing.B) { + v := ClientGetConfigsResponse{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalClientGetConfigsResponse(b *testing.B) { + v := ClientGetConfigsResponse{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalClientState(t *testing.T) { + v := ClientState{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgClientState(b *testing.B) { + v := ClientState{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgClientState(b *testing.B) { + v := ClientState{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + 
+func BenchmarkUnmarshalClientState(b *testing.B) { + v := ClientState{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalClientTracer(t *testing.T) { + v := ClientTracer{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgClientTracer(b *testing.B) { + v := ClientTracer{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgClientTracer(b *testing.B) { + v := ClientTracer{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalClientTracer(b *testing.B) { + v := ClientTracer{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalConfigMetas(t *testing.T) { + v := ConfigMetas{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func 
BenchmarkMarshalMsgConfigMetas(b *testing.B) { + v := ConfigMetas{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgConfigMetas(b *testing.B) { + v := ConfigMetas{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalConfigMetas(b *testing.B) { + v := ConfigMetas{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalConfigState(t *testing.T) { + v := ConfigState{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgConfigState(b *testing.B) { + v := ConfigState{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgConfigState(b *testing.B) { + v := ConfigState{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalConfigState(b *testing.B) { + v := ConfigState{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalDelegatedMeta(t *testing.T) { + v := 
DelegatedMeta{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgDelegatedMeta(b *testing.B) { + v := DelegatedMeta{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgDelegatedMeta(b *testing.B) { + v := DelegatedMeta{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalDelegatedMeta(b *testing.B) { + v := DelegatedMeta{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalDirectorMetas(t *testing.T) { + v := DirectorMetas{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgDirectorMetas(b *testing.B) { + v := DirectorMetas{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgDirectorMetas(b *testing.B) { + v := DirectorMetas{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + 
b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalDirectorMetas(b *testing.B) { + v := DirectorMetas{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalFile(t *testing.T) { + v := File{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgFile(b *testing.B) { + v := File{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgFile(b *testing.B) { + v := File{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalFile(b *testing.B) { + v := File{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalFileMetaState(t *testing.T) { + v := FileMetaState{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", 
len(left), left) + } +} + +func BenchmarkMarshalMsgFileMetaState(b *testing.B) { + v := FileMetaState{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgFileMetaState(b *testing.B) { + v := FileMetaState{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalFileMetaState(b *testing.B) { + v := FileMetaState{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalGetStateConfigResponse(t *testing.T) { + v := GetStateConfigResponse{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgGetStateConfigResponse(b *testing.B) { + v := GetStateConfigResponse{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgGetStateConfigResponse(b *testing.B) { + v := GetStateConfigResponse{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalGetStateConfigResponse(b *testing.B) { + v := GetStateConfigResponse{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := 
v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalLatestConfigsRequest(t *testing.T) { + v := LatestConfigsRequest{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgLatestConfigsRequest(b *testing.B) { + v := LatestConfigsRequest{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgLatestConfigsRequest(b *testing.B) { + v := LatestConfigsRequest{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalLatestConfigsRequest(b *testing.B) { + v := LatestConfigsRequest{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalLatestConfigsResponse(t *testing.T) { + v := LatestConfigsResponse{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgLatestConfigsResponse(b *testing.B) { + v := LatestConfigsResponse{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; 
i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgLatestConfigsResponse(b *testing.B) { + v := LatestConfigsResponse{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalLatestConfigsResponse(b *testing.B) { + v := LatestConfigsResponse{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalOrgDataResponse(t *testing.T) { + v := OrgDataResponse{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgOrgDataResponse(b *testing.B) { + v := OrgDataResponse{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgOrgDataResponse(b *testing.B) { + v := OrgDataResponse{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalOrgDataResponse(b *testing.B) { + v := OrgDataResponse{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalOrgStatusResponse(t *testing.T) { + v := OrgStatusResponse{} + bts, err := v.MarshalMsg(nil) + if 
err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgOrgStatusResponse(b *testing.B) { + v := OrgStatusResponse{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgOrgStatusResponse(b *testing.B) { + v := OrgStatusResponse{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalOrgStatusResponse(b *testing.B) { + v := OrgStatusResponse{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalTargetFileHash(t *testing.T) { + v := TargetFileHash{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgTargetFileHash(b *testing.B) { + v := TargetFileHash{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgTargetFileHash(b *testing.B) { + v := TargetFileHash{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + 
b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalTargetFileHash(b *testing.B) { + v := TargetFileHash{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalTargetFileMeta(t *testing.T) { + v := TargetFileMeta{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgTargetFileMeta(b *testing.B) { + v := TargetFileMeta{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgTargetFileMeta(b *testing.B) { + v := TargetFileMeta{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalTargetFileMeta(b *testing.B) { + v := TargetFileMeta{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalTopMeta(t *testing.T) { + v := TopMeta{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + 
t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgTopMeta(b *testing.B) { + v := TopMeta{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgTopMeta(b *testing.B) { + v := TopMeta{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalTopMeta(b *testing.B) { + v := TopMeta{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalTracerPredicateV1(t *testing.T) { + v := TracerPredicateV1{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgTracerPredicateV1(b *testing.B) { + v := TracerPredicateV1{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgTracerPredicateV1(b *testing.B) { + v := TracerPredicateV1{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalTracerPredicateV1(b *testing.B) { + v := TracerPredicateV1{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != 
nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalTracerPredicates(t *testing.T) { + v := TracerPredicates{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgTracerPredicates(b *testing.B) { + v := TracerPredicates{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgTracerPredicates(b *testing.B) { + v := TracerPredicates{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalTracerPredicates(b *testing.B) { + v := TracerPredicates{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/pkg/proto/pbgo/workloadmeta.pb.go b/pkg/proto/pbgo/core/workloadmeta.pb.go similarity index 99% rename from pkg/proto/pbgo/workloadmeta.pb.go rename to pkg/proto/pbgo/core/workloadmeta.pb.go index cde3a56ff7937..dcbbb20c4194d 100644 --- a/pkg/proto/pbgo/workloadmeta.pb.go +++ b/pkg/proto/pbgo/core/workloadmeta.pb.go @@ -1,10 +1,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc v4.23.2 // source: datadog/workloadmeta/workloadmeta.proto -package pbgo +package core import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -1789,9 +1789,9 @@ var file_datadog_workloadmeta_workloadmeta_proto_rawDesc = []byte{ 0x41, 0x4c, 0x54, 0x48, 0x5f, 0x55, 0x4e, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x2a, 0x25, 0x0a, 0x0d, 0x45, 0x43, 0x53, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x07, 0x0a, 0x03, 0x45, 0x43, 0x32, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, - 0x52, 0x47, 0x41, 0x54, 0x45, 0x10, 0x01, 0x42, 0x10, 0x5a, 0x0e, 0x70, 0x6b, 0x67, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x52, 0x47, 0x41, 0x54, 0x45, 0x10, 0x01, 0x42, 0x15, 0x5a, 0x13, 0x70, 0x6b, 0x67, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/pkg/proto/pbgo/mocks/api_mockgen.pb.go b/pkg/proto/pbgo/mocks/core/api_mockgen.pb.go similarity index 92% rename from pkg/proto/pbgo/mocks/api_mockgen.pb.go rename to pkg/proto/pbgo/mocks/core/api_mockgen.pb.go index 564c633a6e7ed..8733a076c6dee 100644 --- a/pkg/proto/pbgo/mocks/api_mockgen.pb.go +++ b/pkg/proto/pbgo/mocks/core/api_mockgen.pb.go @@ -1,18 +1,18 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: /Users/jaime.fullaondo/go/src/github.com/DataDog/datadog-agent/pkg/proto/pbgo/api.pb.go +// Source: /Users/jaime.fullaondo/go/src/github.com/DataDog/datadog-agent/pkg/proto/pbgo/core/api.pb.go -// Package mock_pbgo is a generated GoMock package. -package mock_pbgo +// Package mock_core is a generated GoMock package. 
+package mock_core import ( context "context" reflect "reflect" - pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + core "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" gomock "github.com/golang/mock/gomock" + empty "github.com/golang/protobuf/ptypes/empty" grpc "google.golang.org/grpc" metadata "google.golang.org/grpc/metadata" - emptypb "google.golang.org/protobuf/types/known/emptypb" ) // MockAgentClient is a mock of AgentClient interface. @@ -39,14 +39,14 @@ func (m *MockAgentClient) EXPECT() *MockAgentClientMockRecorder { } // GetHostname mocks base method. -func (m *MockAgentClient) GetHostname(ctx context.Context, in *pbgo.HostnameRequest, opts ...grpc.CallOption) (*pbgo.HostnameReply, error) { +func (m *MockAgentClient) GetHostname(ctx context.Context, in *core.HostnameRequest, opts ...grpc.CallOption) (*core.HostnameReply, error) { m.ctrl.T.Helper() varargs := []interface{}{ctx, in} for _, a := range opts { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "GetHostname", varargs...) - ret0, _ := ret[0].(*pbgo.HostnameReply) + ret0, _ := ret[0].(*core.HostnameReply) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -82,10 +82,10 @@ func (m *MockAgentServer) EXPECT() *MockAgentServerMockRecorder { } // GetHostname mocks base method. -func (m *MockAgentServer) GetHostname(arg0 context.Context, arg1 *pbgo.HostnameRequest) (*pbgo.HostnameReply, error) { +func (m *MockAgentServer) GetHostname(arg0 context.Context, arg1 *core.HostnameRequest) (*core.HostnameReply, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetHostname", arg0, arg1) - ret0, _ := ret[0].(*pbgo.HostnameReply) + ret0, _ := ret[0].(*core.HostnameReply) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -120,14 +120,14 @@ func (m *MockAgentSecureClient) EXPECT() *MockAgentSecureClientMockRecorder { } // ClientGetConfigs mocks base method. 
-func (m *MockAgentSecureClient) ClientGetConfigs(ctx context.Context, in *pbgo.ClientGetConfigsRequest, opts ...grpc.CallOption) (*pbgo.ClientGetConfigsResponse, error) { +func (m *MockAgentSecureClient) ClientGetConfigs(ctx context.Context, in *core.ClientGetConfigsRequest, opts ...grpc.CallOption) (*core.ClientGetConfigsResponse, error) { m.ctrl.T.Helper() varargs := []interface{}{ctx, in} for _, a := range opts { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "ClientGetConfigs", varargs...) - ret0, _ := ret[0].(*pbgo.ClientGetConfigsResponse) + ret0, _ := ret[0].(*core.ClientGetConfigsResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -140,14 +140,14 @@ func (mr *MockAgentSecureClientMockRecorder) ClientGetConfigs(ctx, in interface{ } // DogstatsdCaptureTrigger mocks base method. -func (m *MockAgentSecureClient) DogstatsdCaptureTrigger(ctx context.Context, in *pbgo.CaptureTriggerRequest, opts ...grpc.CallOption) (*pbgo.CaptureTriggerResponse, error) { +func (m *MockAgentSecureClient) DogstatsdCaptureTrigger(ctx context.Context, in *core.CaptureTriggerRequest, opts ...grpc.CallOption) (*core.CaptureTriggerResponse, error) { m.ctrl.T.Helper() varargs := []interface{}{ctx, in} for _, a := range opts { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "DogstatsdCaptureTrigger", varargs...) - ret0, _ := ret[0].(*pbgo.CaptureTriggerResponse) + ret0, _ := ret[0].(*core.CaptureTriggerResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -160,14 +160,14 @@ func (mr *MockAgentSecureClientMockRecorder) DogstatsdCaptureTrigger(ctx, in int } // DogstatsdSetTaggerState mocks base method. 
-func (m *MockAgentSecureClient) DogstatsdSetTaggerState(ctx context.Context, in *pbgo.TaggerState, opts ...grpc.CallOption) (*pbgo.TaggerStateResponse, error) { +func (m *MockAgentSecureClient) DogstatsdSetTaggerState(ctx context.Context, in *core.TaggerState, opts ...grpc.CallOption) (*core.TaggerStateResponse, error) { m.ctrl.T.Helper() varargs := []interface{}{ctx, in} for _, a := range opts { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "DogstatsdSetTaggerState", varargs...) - ret0, _ := ret[0].(*pbgo.TaggerStateResponse) + ret0, _ := ret[0].(*core.TaggerStateResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -180,14 +180,14 @@ func (mr *MockAgentSecureClientMockRecorder) DogstatsdSetTaggerState(ctx, in int } // GetConfigState mocks base method. -func (m *MockAgentSecureClient) GetConfigState(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*pbgo.GetStateConfigResponse, error) { +func (m *MockAgentSecureClient) GetConfigState(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*core.GetStateConfigResponse, error) { m.ctrl.T.Helper() varargs := []interface{}{ctx, in} for _, a := range opts { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "GetConfigState", varargs...) - ret0, _ := ret[0].(*pbgo.GetStateConfigResponse) + ret0, _ := ret[0].(*core.GetStateConfigResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -200,14 +200,14 @@ func (mr *MockAgentSecureClientMockRecorder) GetConfigState(ctx, in interface{}, } // TaggerFetchEntity mocks base method. 
-func (m *MockAgentSecureClient) TaggerFetchEntity(ctx context.Context, in *pbgo.FetchEntityRequest, opts ...grpc.CallOption) (*pbgo.FetchEntityResponse, error) { +func (m *MockAgentSecureClient) TaggerFetchEntity(ctx context.Context, in *core.FetchEntityRequest, opts ...grpc.CallOption) (*core.FetchEntityResponse, error) { m.ctrl.T.Helper() varargs := []interface{}{ctx, in} for _, a := range opts { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "TaggerFetchEntity", varargs...) - ret0, _ := ret[0].(*pbgo.FetchEntityResponse) + ret0, _ := ret[0].(*core.FetchEntityResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -220,14 +220,14 @@ func (mr *MockAgentSecureClientMockRecorder) TaggerFetchEntity(ctx, in interface } // TaggerStreamEntities mocks base method. -func (m *MockAgentSecureClient) TaggerStreamEntities(ctx context.Context, in *pbgo.StreamTagsRequest, opts ...grpc.CallOption) (pbgo.AgentSecure_TaggerStreamEntitiesClient, error) { +func (m *MockAgentSecureClient) TaggerStreamEntities(ctx context.Context, in *core.StreamTagsRequest, opts ...grpc.CallOption) (core.AgentSecure_TaggerStreamEntitiesClient, error) { m.ctrl.T.Helper() varargs := []interface{}{ctx, in} for _, a := range opts { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "TaggerStreamEntities", varargs...) - ret0, _ := ret[0].(pbgo.AgentSecure_TaggerStreamEntitiesClient) + ret0, _ := ret[0].(core.AgentSecure_TaggerStreamEntitiesClient) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -240,14 +240,14 @@ func (mr *MockAgentSecureClientMockRecorder) TaggerStreamEntities(ctx, in interf } // WorkloadmetaStreamEntities mocks base method. 
-func (m *MockAgentSecureClient) WorkloadmetaStreamEntities(ctx context.Context, in *pbgo.WorkloadmetaStreamRequest, opts ...grpc.CallOption) (pbgo.AgentSecure_WorkloadmetaStreamEntitiesClient, error) { +func (m *MockAgentSecureClient) WorkloadmetaStreamEntities(ctx context.Context, in *core.WorkloadmetaStreamRequest, opts ...grpc.CallOption) (core.AgentSecure_WorkloadmetaStreamEntitiesClient, error) { m.ctrl.T.Helper() varargs := []interface{}{ctx, in} for _, a := range opts { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "WorkloadmetaStreamEntities", varargs...) - ret0, _ := ret[0].(pbgo.AgentSecure_WorkloadmetaStreamEntitiesClient) + ret0, _ := ret[0].(core.AgentSecure_WorkloadmetaStreamEntitiesClient) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -326,10 +326,10 @@ func (mr *MockAgentSecure_TaggerStreamEntitiesClientMockRecorder) Header() *gomo } // Recv mocks base method. -func (m *MockAgentSecure_TaggerStreamEntitiesClient) Recv() (*pbgo.StreamTagsResponse, error) { +func (m *MockAgentSecure_TaggerStreamEntitiesClient) Recv() (*core.StreamTagsResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Recv") - ret0, _ := ret[0].(*pbgo.StreamTagsResponse) + ret0, _ := ret[0].(*core.StreamTagsResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -449,10 +449,10 @@ func (mr *MockAgentSecure_WorkloadmetaStreamEntitiesClientMockRecorder) Header() } // Recv mocks base method. -func (m *MockAgentSecure_WorkloadmetaStreamEntitiesClient) Recv() (*pbgo.WorkloadmetaStreamResponse, error) { +func (m *MockAgentSecure_WorkloadmetaStreamEntitiesClient) Recv() (*core.WorkloadmetaStreamResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Recv") - ret0, _ := ret[0].(*pbgo.WorkloadmetaStreamResponse) + ret0, _ := ret[0].(*core.WorkloadmetaStreamResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -529,10 +529,10 @@ func (m *MockAgentSecureServer) EXPECT() *MockAgentSecureServerMockRecorder { } // ClientGetConfigs mocks base method. 
-func (m *MockAgentSecureServer) ClientGetConfigs(arg0 context.Context, arg1 *pbgo.ClientGetConfigsRequest) (*pbgo.ClientGetConfigsResponse, error) { +func (m *MockAgentSecureServer) ClientGetConfigs(arg0 context.Context, arg1 *core.ClientGetConfigsRequest) (*core.ClientGetConfigsResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientGetConfigs", arg0, arg1) - ret0, _ := ret[0].(*pbgo.ClientGetConfigsResponse) + ret0, _ := ret[0].(*core.ClientGetConfigsResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -544,10 +544,10 @@ func (mr *MockAgentSecureServerMockRecorder) ClientGetConfigs(arg0, arg1 interfa } // DogstatsdCaptureTrigger mocks base method. -func (m *MockAgentSecureServer) DogstatsdCaptureTrigger(arg0 context.Context, arg1 *pbgo.CaptureTriggerRequest) (*pbgo.CaptureTriggerResponse, error) { +func (m *MockAgentSecureServer) DogstatsdCaptureTrigger(arg0 context.Context, arg1 *core.CaptureTriggerRequest) (*core.CaptureTriggerResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DogstatsdCaptureTrigger", arg0, arg1) - ret0, _ := ret[0].(*pbgo.CaptureTriggerResponse) + ret0, _ := ret[0].(*core.CaptureTriggerResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -559,10 +559,10 @@ func (mr *MockAgentSecureServerMockRecorder) DogstatsdCaptureTrigger(arg0, arg1 } // DogstatsdSetTaggerState mocks base method. -func (m *MockAgentSecureServer) DogstatsdSetTaggerState(arg0 context.Context, arg1 *pbgo.TaggerState) (*pbgo.TaggerStateResponse, error) { +func (m *MockAgentSecureServer) DogstatsdSetTaggerState(arg0 context.Context, arg1 *core.TaggerState) (*core.TaggerStateResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DogstatsdSetTaggerState", arg0, arg1) - ret0, _ := ret[0].(*pbgo.TaggerStateResponse) + ret0, _ := ret[0].(*core.TaggerStateResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -574,10 +574,10 @@ func (mr *MockAgentSecureServerMockRecorder) DogstatsdSetTaggerState(arg0, arg1 } // GetConfigState mocks base method. 
-func (m *MockAgentSecureServer) GetConfigState(arg0 context.Context, arg1 *emptypb.Empty) (*pbgo.GetStateConfigResponse, error) { +func (m *MockAgentSecureServer) GetConfigState(arg0 context.Context, arg1 *empty.Empty) (*core.GetStateConfigResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetConfigState", arg0, arg1) - ret0, _ := ret[0].(*pbgo.GetStateConfigResponse) + ret0, _ := ret[0].(*core.GetStateConfigResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -589,10 +589,10 @@ func (mr *MockAgentSecureServerMockRecorder) GetConfigState(arg0, arg1 interface } // TaggerFetchEntity mocks base method. -func (m *MockAgentSecureServer) TaggerFetchEntity(arg0 context.Context, arg1 *pbgo.FetchEntityRequest) (*pbgo.FetchEntityResponse, error) { +func (m *MockAgentSecureServer) TaggerFetchEntity(arg0 context.Context, arg1 *core.FetchEntityRequest) (*core.FetchEntityResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "TaggerFetchEntity", arg0, arg1) - ret0, _ := ret[0].(*pbgo.FetchEntityResponse) + ret0, _ := ret[0].(*core.FetchEntityResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -604,7 +604,7 @@ func (mr *MockAgentSecureServerMockRecorder) TaggerFetchEntity(arg0, arg1 interf } // TaggerStreamEntities mocks base method. -func (m *MockAgentSecureServer) TaggerStreamEntities(arg0 *pbgo.StreamTagsRequest, arg1 pbgo.AgentSecure_TaggerStreamEntitiesServer) error { +func (m *MockAgentSecureServer) TaggerStreamEntities(arg0 *core.StreamTagsRequest, arg1 core.AgentSecure_TaggerStreamEntitiesServer) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "TaggerStreamEntities", arg0, arg1) ret0, _ := ret[0].(error) @@ -618,7 +618,7 @@ func (mr *MockAgentSecureServerMockRecorder) TaggerStreamEntities(arg0, arg1 int } // WorkloadmetaStreamEntities mocks base method. 
-func (m *MockAgentSecureServer) WorkloadmetaStreamEntities(arg0 *pbgo.WorkloadmetaStreamRequest, arg1 pbgo.AgentSecure_WorkloadmetaStreamEntitiesServer) error { +func (m *MockAgentSecureServer) WorkloadmetaStreamEntities(arg0 *core.WorkloadmetaStreamRequest, arg1 core.AgentSecure_WorkloadmetaStreamEntitiesServer) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WorkloadmetaStreamEntities", arg0, arg1) ret0, _ := ret[0].(error) @@ -683,7 +683,7 @@ func (mr *MockAgentSecure_TaggerStreamEntitiesServerMockRecorder) RecvMsg(m inte } // Send mocks base method. -func (m *MockAgentSecure_TaggerStreamEntitiesServer) Send(arg0 *pbgo.StreamTagsResponse) error { +func (m *MockAgentSecure_TaggerStreamEntitiesServer) Send(arg0 *core.StreamTagsResponse) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Send", arg0) ret0, _ := ret[0].(error) @@ -802,7 +802,7 @@ func (mr *MockAgentSecure_WorkloadmetaStreamEntitiesServerMockRecorder) RecvMsg( } // Send mocks base method. -func (m *MockAgentSecure_WorkloadmetaStreamEntitiesServer) Send(arg0 *pbgo.WorkloadmetaStreamResponse) error { +func (m *MockAgentSecure_WorkloadmetaStreamEntitiesServer) Send(arg0 *core.WorkloadmetaStreamResponse) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Send", arg0) ret0, _ := ret[0].(error) diff --git a/pkg/proto/pbgo/process.pb.go b/pkg/proto/pbgo/process/process.pb.go similarity index 96% rename from pkg/proto/pbgo/process.pb.go rename to pkg/proto/pbgo/process/process.pb.go index 941f9e90e6616..b362b7eefffaf 100644 --- a/pkg/proto/pbgo/process.pb.go +++ b/pkg/proto/pbgo/process/process.pb.go @@ -1,10 +1,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc v4.23.2 // source: datadog/process/process.proto -package pbgo +package process import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -76,9 +76,9 @@ var file_datadog_process_process_proto_rawDesc = []byte{ 0x0f, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x22, 0x28, 0x0a, 0x12, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x53, 0x74, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x69, 0x64, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x05, 0x52, 0x04, 0x70, 0x69, 0x64, 0x73, 0x42, 0x10, 0x5a, 0x0e, 0x70, 0x6b, - 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x20, 0x03, 0x28, 0x05, 0x52, 0x04, 0x70, 0x69, 0x64, 0x73, 0x42, 0x18, 0x5a, 0x16, 0x70, 0x6b, + 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, + 0x63, 0x65, 0x73, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/pkg/proto/pbgo/workloadmeta_process.pb.go b/pkg/proto/pbgo/process/workloadmeta_process.pb.go similarity index 99% rename from pkg/proto/pbgo/workloadmeta_process.pb.go rename to pkg/proto/pbgo/process/workloadmeta_process.pb.go index 8de8b349aae3f..0c38fc839e630 100644 --- a/pkg/proto/pbgo/workloadmeta_process.pb.go +++ b/pkg/proto/pbgo/process/workloadmeta_process.pb.go @@ -1,10 +1,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.21.12 +// protoc v4.23.2 // source: datadog/process/workloadmeta_process.proto -package pbgo +package process import ( context "context" @@ -342,9 +342,9 @@ var file_datadog_process_workloadmeta_process_proto_rawDesc = []byte{ 0x6d, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0x10, 0x5a, 0x0e, 0x70, 0x6b, - 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0x18, 0x5a, 0x16, 0x70, 0x6b, + 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, + 0x63, 0x65, 0x73, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/pkg/proto/pbgo/trace/agent_payload.pb.go b/pkg/proto/pbgo/trace/agent_payload.pb.go new file mode 100644 index 0000000000000..af712611346f0 --- /dev/null +++ b/pkg/proto/pbgo/trace/agent_payload.pb.go @@ -0,0 +1,240 @@ +// protoc -I. -I$GOPATH/src --gogofaster_out=. span.proto tracer_payload.proto agent_payload.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v4.23.2 +// source: datadog/trace/agent_payload.proto + +package trace + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// AgentPayload represents payload the agent sends to the intake. +type AgentPayload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // hostName specifies hostname of where the agent is running. + HostName string `protobuf:"bytes,1,opt,name=hostName,proto3" json:"hostName,omitempty"` + // env specifies `env` set in agent configuration. + Env string `protobuf:"bytes,2,opt,name=env,proto3" json:"env,omitempty"` + // tracerPayloads specifies list of the payloads received from tracers. + TracerPayloads []*TracerPayload `protobuf:"bytes,5,rep,name=tracerPayloads,proto3" json:"tracerPayloads,omitempty"` + // tags specifies tags common in all `tracerPayloads`. + Tags map[string]string `protobuf:"bytes,6,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // agentVersion specifies version of the agent. + AgentVersion string `protobuf:"bytes,7,opt,name=agentVersion,proto3" json:"agentVersion,omitempty"` + // targetTPS holds `TargetTPS` value in AgentConfig. + TargetTPS float64 `protobuf:"fixed64,8,opt,name=targetTPS,proto3" json:"targetTPS,omitempty"` + // errorTPS holds `ErrorTPS` value in AgentConfig. 
+ ErrorTPS float64 `protobuf:"fixed64,9,opt,name=errorTPS,proto3" json:"errorTPS,omitempty"` + // rareSamplerEnabled holds `RareSamplerEnabled` value in AgentConfig + RareSamplerEnabled bool `protobuf:"varint,10,opt,name=rareSamplerEnabled,proto3" json:"rareSamplerEnabled,omitempty"` +} + +func (x *AgentPayload) Reset() { + *x = AgentPayload{} + if protoimpl.UnsafeEnabled { + mi := &file_datadog_trace_agent_payload_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AgentPayload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AgentPayload) ProtoMessage() {} + +func (x *AgentPayload) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_agent_payload_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AgentPayload.ProtoReflect.Descriptor instead. 
+func (*AgentPayload) Descriptor() ([]byte, []int) { + return file_datadog_trace_agent_payload_proto_rawDescGZIP(), []int{0} +} + +func (x *AgentPayload) GetHostName() string { + if x != nil { + return x.HostName + } + return "" +} + +func (x *AgentPayload) GetEnv() string { + if x != nil { + return x.Env + } + return "" +} + +func (x *AgentPayload) GetTracerPayloads() []*TracerPayload { + if x != nil { + return x.TracerPayloads + } + return nil +} + +func (x *AgentPayload) GetTags() map[string]string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *AgentPayload) GetAgentVersion() string { + if x != nil { + return x.AgentVersion + } + return "" +} + +func (x *AgentPayload) GetTargetTPS() float64 { + if x != nil { + return x.TargetTPS + } + return 0 +} + +func (x *AgentPayload) GetErrorTPS() float64 { + if x != nil { + return x.ErrorTPS + } + return 0 +} + +func (x *AgentPayload) GetRareSamplerEnabled() bool { + if x != nil { + return x.RareSamplerEnabled + } + return false +} + +var File_datadog_trace_agent_payload_proto protoreflect.FileDescriptor + +var file_datadog_trace_agent_payload_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, + 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x1a, 0x22, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x84, 0x03, 0x0a, 0x0c, 0x41, 0x67, 0x65, 0x6e, 0x74, + 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x76, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x44, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x54, 0x72, + 0x61, 0x63, 0x65, 0x72, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x0e, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x72, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x12, 0x39, 0x0a, 0x04, 0x74, + 0x61, 0x67, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x54, 0x50, 0x53, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x50, 0x53, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x54, 0x50, 0x53, 0x18, 0x09, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x54, 0x50, 0x53, 0x12, 0x2e, 0x0a, 0x12, 0x72, 0x61, 0x72, 0x65, 0x53, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x12, 0x72, 0x61, 0x72, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 
0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x16, 0x5a, + 0x14, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_datadog_trace_agent_payload_proto_rawDescOnce sync.Once + file_datadog_trace_agent_payload_proto_rawDescData = file_datadog_trace_agent_payload_proto_rawDesc +) + +func file_datadog_trace_agent_payload_proto_rawDescGZIP() []byte { + file_datadog_trace_agent_payload_proto_rawDescOnce.Do(func() { + file_datadog_trace_agent_payload_proto_rawDescData = protoimpl.X.CompressGZIP(file_datadog_trace_agent_payload_proto_rawDescData) + }) + return file_datadog_trace_agent_payload_proto_rawDescData +} + +var file_datadog_trace_agent_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_datadog_trace_agent_payload_proto_goTypes = []interface{}{ + (*AgentPayload)(nil), // 0: datadog.trace.AgentPayload + nil, // 1: datadog.trace.AgentPayload.TagsEntry + (*TracerPayload)(nil), // 2: datadog.trace.TracerPayload +} +var file_datadog_trace_agent_payload_proto_depIdxs = []int32{ + 2, // 0: datadog.trace.AgentPayload.tracerPayloads:type_name -> datadog.trace.TracerPayload + 1, // 1: datadog.trace.AgentPayload.tags:type_name -> datadog.trace.AgentPayload.TagsEntry + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_datadog_trace_agent_payload_proto_init() } +func file_datadog_trace_agent_payload_proto_init() { + if File_datadog_trace_agent_payload_proto != nil { + return + } + file_datadog_trace_tracer_payload_proto_init() + if !protoimpl.UnsafeEnabled { + file_datadog_trace_agent_payload_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + 
switch v := v.(*AgentPayload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_datadog_trace_agent_payload_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_datadog_trace_agent_payload_proto_goTypes, + DependencyIndexes: file_datadog_trace_agent_payload_proto_depIdxs, + MessageInfos: file_datadog_trace_agent_payload_proto_msgTypes, + }.Build() + File_datadog_trace_agent_payload_proto = out.File + file_datadog_trace_agent_payload_proto_rawDesc = nil + file_datadog_trace_agent_payload_proto_goTypes = nil + file_datadog_trace_agent_payload_proto_depIdxs = nil +} diff --git a/pkg/proto/pbgo/trace/agent_payload_gen.go b/pkg/proto/pbgo/trace/agent_payload_gen.go new file mode 100644 index 0000000000000..26cefad588f7e --- /dev/null +++ b/pkg/proto/pbgo/trace/agent_payload_gen.go @@ -0,0 +1,200 @@ +package trace + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+ +import ( + "github.com/tinylib/msgp/msgp" +) + +// MarshalMsg implements msgp.Marshaler +func (z *AgentPayload) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 8 + // string "HostName" + o = append(o, 0x88, 0xa8, 0x48, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.HostName) + // string "Env" + o = append(o, 0xa3, 0x45, 0x6e, 0x76) + o = msgp.AppendString(o, z.Env) + // string "TracerPayloads" + o = append(o, 0xae, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.TracerPayloads))) + for za0001 := range z.TracerPayloads { + if z.TracerPayloads[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.TracerPayloads[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "TracerPayloads", za0001) + return + } + } + } + // string "Tags" + o = append(o, 0xa4, 0x54, 0x61, 0x67, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.Tags))) + for za0002, za0003 := range z.Tags { + o = msgp.AppendString(o, za0002) + o = msgp.AppendString(o, za0003) + } + // string "AgentVersion" + o = append(o, 0xac, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.AgentVersion) + // string "TargetTPS" + o = append(o, 0xa9, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x50, 0x53) + o = msgp.AppendFloat64(o, z.TargetTPS) + // string "ErrorTPS" + o = append(o, 0xa8, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x54, 0x50, 0x53) + o = msgp.AppendFloat64(o, z.ErrorTPS) + // string "RareSamplerEnabled" + o = append(o, 0xb2, 0x52, 0x61, 0x72, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64) + o = msgp.AppendBool(o, z.RareSamplerEnabled) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *AgentPayload) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = 
msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "HostName": + z.HostName, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "HostName") + return + } + case "Env": + z.Env, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Env") + return + } + case "TracerPayloads": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TracerPayloads") + return + } + if cap(z.TracerPayloads) >= int(zb0002) { + z.TracerPayloads = (z.TracerPayloads)[:zb0002] + } else { + z.TracerPayloads = make([]*TracerPayload, zb0002) + } + for za0001 := range z.TracerPayloads { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.TracerPayloads[za0001] = nil + } else { + if z.TracerPayloads[za0001] == nil { + z.TracerPayloads[za0001] = new(TracerPayload) + } + bts, err = z.TracerPayloads[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "TracerPayloads", za0001) + return + } + } + } + case "Tags": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + if z.Tags == nil { + z.Tags = make(map[string]string, zb0003) + } else if len(z.Tags) > 0 { + for key := range z.Tags { + delete(z.Tags, key) + } + } + for zb0003 > 0 { + var za0002 string + var za0003 string + zb0003-- + za0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags") + return + } + za0003, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Tags", za0002) + return + } + z.Tags[za0002] = za0003 + } + case "AgentVersion": + z.AgentVersion, bts, err = msgp.ReadStringBytes(bts) + if err 
!= nil { + err = msgp.WrapError(err, "AgentVersion") + return + } + case "TargetTPS": + z.TargetTPS, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TargetTPS") + return + } + case "ErrorTPS": + z.ErrorTPS, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ErrorTPS") + return + } + case "RareSamplerEnabled": + z.RareSamplerEnabled, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "RareSamplerEnabled") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *AgentPayload) Msgsize() (s int) { + s = 1 + 9 + msgp.StringPrefixSize + len(z.HostName) + 4 + msgp.StringPrefixSize + len(z.Env) + 15 + msgp.ArrayHeaderSize + for za0001 := range z.TracerPayloads { + if z.TracerPayloads[za0001] == nil { + s += msgp.NilSize + } else { + s += z.TracerPayloads[za0001].Msgsize() + } + } + s += 5 + msgp.MapHeaderSize + if z.Tags != nil { + for za0002, za0003 := range z.Tags { + _ = za0003 + s += msgp.StringPrefixSize + len(za0002) + msgp.StringPrefixSize + len(za0003) + } + } + s += 13 + msgp.StringPrefixSize + len(z.AgentVersion) + 10 + msgp.Float64Size + 9 + msgp.Float64Size + 19 + msgp.BoolSize + return +} diff --git a/pkg/proto/pbgo/trace/agent_payload_gen_test.go b/pkg/proto/pbgo/trace/agent_payload_gen_test.go new file mode 100644 index 0000000000000..c45867c9863ef --- /dev/null +++ b/pkg/proto/pbgo/trace/agent_payload_gen_test.go @@ -0,0 +1,67 @@ +package trace + +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+ +import ( + "testing" + + "github.com/tinylib/msgp/msgp" +) + +func TestMarshalUnmarshalAgentPayload(t *testing.T) { + v := AgentPayload{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgAgentPayload(b *testing.B) { + v := AgentPayload{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgAgentPayload(b *testing.B) { + v := AgentPayload{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalAgentPayload(b *testing.B) { + v := AgentPayload{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/pkg/trace/pb/agent_payload_vtproto.pb.go b/pkg/proto/pbgo/trace/agent_payload_vtproto.pb.go similarity index 99% rename from pkg/trace/pb/agent_payload_vtproto.pb.go rename to pkg/proto/pbgo/trace/agent_payload_vtproto.pb.go index 36ca03fe83430..e4d4f171bf7a2 100644 --- a/pkg/trace/pb/agent_payload_vtproto.pb.go +++ b/pkg/proto/pbgo/trace/agent_payload_vtproto.pb.go @@ -1,8 +1,8 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
// protoc-gen-go-vtproto version: v0.4.0 -// source: agent_payload.proto +// source: datadog/trace/agent_payload.proto -package pb +package trace import ( binary "encoding/binary" diff --git a/pkg/trace/pb/decoder_bytes.go b/pkg/proto/pbgo/trace/decoder_bytes.go similarity index 98% rename from pkg/trace/pb/decoder_bytes.go rename to pkg/proto/pbgo/trace/decoder_bytes.go index 0e1feacde71a2..50bdc4966fb3c 100644 --- a/pkg/trace/pb/decoder_bytes.go +++ b/pkg/proto/pbgo/trace/decoder_bytes.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package pb +package trace import ( "bytes" @@ -249,6 +249,8 @@ func parseInt32Bytes(bts []byte) (int32, []byte, error) { } // parseBytes reads the next BinType in the msgpack payload. +// +//nolint:unused // potentially useful; was used with prior proto definitions func parseBytes(bts []byte) ([]byte, []byte, error) { if msgp.IsNil(bts) { bts, err := msgp.ReadNilBytes(bts) diff --git a/pkg/trace/pb/decoder_bytes_test.go b/pkg/proto/pbgo/trace/decoder_bytes_test.go similarity index 88% rename from pkg/trace/pb/decoder_bytes_test.go rename to pkg/proto/pbgo/trace/decoder_bytes_test.go index 57ce128a2caa4..df2adb772fde7 100644 --- a/pkg/trace/pb/decoder_bytes_test.go +++ b/pkg/proto/pbgo/trace/decoder_bytes_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-package pb +package trace import ( "testing" @@ -50,6 +50,7 @@ func TestDecodeBytes(t *testing.T) { {{Service: "B"}}, {{Service: "C"}}, } + var ( bts []byte err error @@ -66,14 +67,14 @@ func TestDecodeBytes(t *testing.T) { func TestDecodeInvalidUTF8Bytes(t *testing.T) { provide := Traces{ - {{Service: "A", Name: "op\x99\xbf"}}, - {{Service: "B"}}, - {{Service: "C"}}, + {&Span{Service: "A", Name: "op\x99\xbf"}}, + {&Span{Service: "B"}}, + {&Span{Service: "C"}}, } accept := Traces{ - {{Service: "A", Name: "op��"}}, - {{Service: "B"}}, - {{Service: "C"}}, + {&Span{Service: "A", Name: "op��"}}, + {&Span{Service: "B"}}, + {&Span{Service: "C"}}, } var ( bts []byte diff --git a/pkg/trace/pb/decoder_v05.go b/pkg/proto/pbgo/trace/decoder_v05.go similarity index 99% rename from pkg/trace/pb/decoder_v05.go rename to pkg/proto/pbgo/trace/decoder_v05.go index 01ffae1bdc4a3..b35f81692a123 100644 --- a/pkg/trace/pb/decoder_v05.go +++ b/pkg/proto/pbgo/trace/decoder_v05.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package pb +package trace import ( "errors" diff --git a/pkg/trace/pb/decoder_v05_test.go b/pkg/proto/pbgo/trace/decoder_v05_test.go similarity index 99% rename from pkg/trace/pb/decoder_v05_test.go rename to pkg/proto/pbgo/trace/decoder_v05_test.go index c94fd5aafd2b6..f8aeee5c150f4 100644 --- a/pkg/trace/pb/decoder_v05_test.go +++ b/pkg/proto/pbgo/trace/decoder_v05_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-package pb +package trace import ( "testing" diff --git a/pkg/trace/pb/hook.go b/pkg/proto/pbgo/trace/hook.go similarity index 98% rename from pkg/trace/pb/hook.go rename to pkg/proto/pbgo/trace/hook.go index ef10f015a1b19..969f3daa9b8db 100644 --- a/pkg/trace/pb/hook.go +++ b/pkg/proto/pbgo/trace/hook.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package pb +package trace import ( "sync" diff --git a/pkg/trace/pb/hook_test.go b/pkg/proto/pbgo/trace/hook_test.go similarity index 87% rename from pkg/trace/pb/hook_test.go rename to pkg/proto/pbgo/trace/hook_test.go index d32a859788b26..904bd188093fe 100644 --- a/pkg/trace/pb/hook_test.go +++ b/pkg/proto/pbgo/trace/hook_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package pb +package trace import ( "testing" @@ -39,6 +39,6 @@ func TestMetaHook(t *testing.T) { assert := assert.New(t) assert.Nil(err) - assert.Equal(map[string]string{"card.number": "test"}, s.Meta, "Warning! pkg/trace/pb: MetaHook was not applied. One possible cause is regenerating the code in this folder without porting custom modifications of it.") + assert.Equal(map[string]string{"card.number": "test"}, s.Meta, "Warning! pkg/proto/pbgo/trace: MetaHook was not applied. One possible cause is regenerating the code in this folder without porting custom modifications of it.") }) } diff --git a/pkg/proto/pbgo/trace/span.pb.go b/pkg/proto/pbgo/trace/span.pb.go new file mode 100644 index 0000000000000..180ed68776bd4 --- /dev/null +++ b/pkg/proto/pbgo/trace/span.pb.go @@ -0,0 +1,307 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v4.23.2 +// source: datadog/trace/span.proto + +package trace + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Span struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // service is the name of the service with which this span is associated. + // @gotags: json:"service" msg:"service" + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service" msg:"service"` + // name is the operation name of this span. + // @gotags: json:"name" msg:"name" + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name" msg:"name"` + // resource is the resource name of this span, also sometimes called the endpoint (for web spans). + // @gotags: json:"resource" msg:"resource" + Resource string `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource" msg:"resource"` + // traceID is the ID of the trace to which this span belongs. + // @gotags: json:"trace_id" msg:"trace_id" + TraceID uint64 `protobuf:"varint,4,opt,name=traceID,proto3" json:"trace_id" msg:"trace_id"` + // spanID is the ID of this span. + // @gotags: json:"span_id" msg:"span_id" + SpanID uint64 `protobuf:"varint,5,opt,name=spanID,proto3" json:"span_id" msg:"span_id"` + // parentID is the ID of this span's parent, or zero if this span has no parent. + // @gotags: json:"parent_id" msg:"parent_id" + ParentID uint64 `protobuf:"varint,6,opt,name=parentID,proto3" json:"parent_id" msg:"parent_id"` + // start is the number of nanoseconds between the Unix epoch and the beginning of this span. 
+ // @gotags: json:"start" msg:"start" + Start int64 `protobuf:"varint,7,opt,name=start,proto3" json:"start" msg:"start"` + // duration is the time length of this span in nanoseconds. + // @gotags: json:"duration" msg:"duration" + Duration int64 `protobuf:"varint,8,opt,name=duration,proto3" json:"duration" msg:"duration"` + // error is 1 if there is an error associated with this span, or 0 if there is not. + // @gotags: json:"error" msg:"error" + Error int32 `protobuf:"varint,9,opt,name=error,proto3" json:"error" msg:"error"` + // meta is a mapping from tag name to tag value for string-valued tags. + // @gotags: json:"meta,omitempty" msg:"meta,omitempty" + Meta map[string]string `protobuf:"bytes,10,rep,name=meta,proto3" json:"meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"meta,omitempty"` + // metrics is a mapping from tag name to tag value for numeric-valued tags. + // @gotags: json:"metrics,omitempty" msg:"metrics,omitempty" + Metrics map[string]float64 `protobuf:"bytes,11,rep,name=metrics,proto3" json:"metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3" msg:"metrics,omitempty"` + // type is the type of the service with which this span is associated. Example values: web, db, lambda. + // @gotags: json:"type" msg:"type" + Type string `protobuf:"bytes,12,opt,name=type,proto3" json:"type" msg:"type"` + // meta_struct is a registry of structured "other" data used by, e.g., AppSec. 
+ // @gotags: json:"meta_struct,omitempty" msg:"meta_struct,omitempty" + MetaStruct map[string][]byte `protobuf:"bytes,13,rep,name=meta_struct,json=metaStruct,proto3" json:"meta_struct,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"meta_struct,omitempty"` +} + +func (x *Span) Reset() { + *x = Span{} + if protoimpl.UnsafeEnabled { + mi := &file_datadog_trace_span_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Span) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Span) ProtoMessage() {} + +func (x *Span) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_span_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Span.ProtoReflect.Descriptor instead. 
+func (*Span) Descriptor() ([]byte, []int) { + return file_datadog_trace_span_proto_rawDescGZIP(), []int{0} +} + +func (x *Span) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *Span) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Span) GetResource() string { + if x != nil { + return x.Resource + } + return "" +} + +func (x *Span) GetTraceID() uint64 { + if x != nil { + return x.TraceID + } + return 0 +} + +func (x *Span) GetSpanID() uint64 { + if x != nil { + return x.SpanID + } + return 0 +} + +func (x *Span) GetParentID() uint64 { + if x != nil { + return x.ParentID + } + return 0 +} + +func (x *Span) GetStart() int64 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *Span) GetDuration() int64 { + if x != nil { + return x.Duration + } + return 0 +} + +func (x *Span) GetError() int32 { + if x != nil { + return x.Error + } + return 0 +} + +func (x *Span) GetMeta() map[string]string { + if x != nil { + return x.Meta + } + return nil +} + +func (x *Span) GetMetrics() map[string]float64 { + if x != nil { + return x.Metrics + } + return nil +} + +func (x *Span) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Span) GetMetaStruct() map[string][]byte { + if x != nil { + return x.MetaStruct + } + return nil +} + +var File_datadog_trace_span_proto protoreflect.FileDescriptor + +var file_datadog_trace_span_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, + 0x73, 0x70, 0x61, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x64, 0x61, 0x74, 0x61, + 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x22, 0xe3, 0x04, 0x0a, 0x04, 0x53, 0x70, + 0x61, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x44, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x44, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x44, 0x12, 0x1a, + 0x0a, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x12, 0x31, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, + 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 
0x5f, 0x73, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, + 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x1a, 0x37, 0x0a, 0x09, 0x4d, + 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x1a, 0x3d, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, + 0x16, 0x5a, 0x14, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, + 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_datadog_trace_span_proto_rawDescOnce sync.Once + file_datadog_trace_span_proto_rawDescData = file_datadog_trace_span_proto_rawDesc +) + +func file_datadog_trace_span_proto_rawDescGZIP() []byte { + file_datadog_trace_span_proto_rawDescOnce.Do(func() { + file_datadog_trace_span_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_datadog_trace_span_proto_rawDescData) + }) + return file_datadog_trace_span_proto_rawDescData +} + +var file_datadog_trace_span_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_datadog_trace_span_proto_goTypes = []interface{}{ + (*Span)(nil), // 0: datadog.trace.Span + nil, // 1: datadog.trace.Span.MetaEntry + nil, // 2: datadog.trace.Span.MetricsEntry + nil, // 3: datadog.trace.Span.MetaStructEntry +} +var file_datadog_trace_span_proto_depIdxs = []int32{ + 1, // 0: datadog.trace.Span.meta:type_name -> datadog.trace.Span.MetaEntry + 2, // 1: datadog.trace.Span.metrics:type_name -> datadog.trace.Span.MetricsEntry + 3, // 2: datadog.trace.Span.meta_struct:type_name -> datadog.trace.Span.MetaStructEntry + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_datadog_trace_span_proto_init() } +func file_datadog_trace_span_proto_init() { + if File_datadog_trace_span_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_datadog_trace_span_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Span); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_datadog_trace_span_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_datadog_trace_span_proto_goTypes, + DependencyIndexes: file_datadog_trace_span_proto_depIdxs, + MessageInfos: file_datadog_trace_span_proto_msgTypes, + }.Build() + File_datadog_trace_span_proto = out.File + file_datadog_trace_span_proto_rawDesc = nil + 
file_datadog_trace_span_proto_goTypes = nil + file_datadog_trace_span_proto_depIdxs = nil +} diff --git a/pkg/trace/pb/span_gen.go b/pkg/proto/pbgo/trace/span_gen.go similarity index 71% rename from pkg/trace/pb/span_gen.go rename to pkg/proto/pbgo/trace/span_gen.go index 95060115353a2..9789822a9b422 100644 --- a/pkg/trace/pb/span_gen.go +++ b/pkg/proto/pbgo/trace/span_gen.go @@ -1,24 +1,36 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. +package trace -package pb +// Code generated by github.com/tinylib/msgp DO NOT EDIT. import ( "github.com/tinylib/msgp/msgp" ) -// This file is based on the code generated by tinylib/msgp but has been edited -// to add some features. If this needs to ge regenerated, be sure to port all -// the changes. - // MarshalMsg implements msgp.Marshaler func (z *Span) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 13 + // omitempty: check for empty values + zb0001Len := uint32(13) + var zb0001Mask uint16 /* 13 bits */ + if z.Meta == nil { + zb0001Len-- + zb0001Mask |= 0x200 + } + if z.Metrics == nil { + zb0001Len-- + zb0001Mask |= 0x400 + } + if z.MetaStruct == nil { + zb0001Len-- + zb0001Mask |= 0x1000 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len == 0 { + return + } // string "service" - o = append(o, 0x8d, 0xa7, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) + o = append(o, 0xa7, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) o = msgp.AppendString(o, z.Service) // string "name" o = append(o, 0xa4, 0x6e, 0x61, 0x6d, 0x65) @@ -44,30 +56,35 @@ func (z *Span) MarshalMsg(b []byte) (o []byte, err error) { // string "error" o = append(o, 0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72) o = msgp.AppendInt32(o, z.Error) - // string "meta" - o = append(o, 
0xa4, 0x6d, 0x65, 0x74, 0x61) - o = msgp.AppendMapHeader(o, uint32(len(z.Meta))) - for za0001, za0002 := range z.Meta { - o = msgp.AppendString(o, za0001) - o = msgp.AppendString(o, za0002) + if (zb0001Mask & 0x200) == 0 { // if not empty + // string "meta" + o = append(o, 0xa4, 0x6d, 0x65, 0x74, 0x61) + o = msgp.AppendMapHeader(o, uint32(len(z.Meta))) + for za0001, za0002 := range z.Meta { + o = msgp.AppendString(o, za0001) + o = msgp.AppendString(o, za0002) + } } - // string "metrics" - o = append(o, 0xa7, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73) - o = msgp.AppendMapHeader(o, uint32(len(z.Metrics))) - for za0003, za0004 := range z.Metrics { - o = msgp.AppendString(o, za0003) - o = msgp.AppendFloat64(o, za0004) + if (zb0001Mask & 0x400) == 0 { // if not empty + // string "metrics" + o = append(o, 0xa7, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.Metrics))) + for za0003, za0004 := range z.Metrics { + o = msgp.AppendString(o, za0003) + o = msgp.AppendFloat64(o, za0004) + } } // string "type" o = append(o, 0xa4, 0x74, 0x79, 0x70, 0x65) o = msgp.AppendString(o, z.Type) - - // string "meta_struct" - o = append(o, 0xab, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74) - o = msgp.AppendMapHeader(o, uint32(len(z.MetaStruct))) - for za0005, za0006 := range z.MetaStruct { - o = msgp.AppendString(o, za0005) - o = msgp.AppendBytes(o, za0006) + if (zb0001Mask & 0x1000) == 0 { // if not empty + // string "meta_struct" + o = append(o, 0xab, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74) + o = msgp.AppendMapHeader(o, uint32(len(z.MetaStruct))) + for za0005, za0006 := range z.MetaStruct { + o = msgp.AppendString(o, za0005) + o = msgp.AppendBytes(o, za0006) + } } return } @@ -92,54 +109,99 @@ func (z *Span) UnmarshalMsg(bts []byte) (o []byte, err error) { } switch msgp.UnsafeString(field) { case "service": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + z.Service = "" + break + } z.Service, 
bts, err = parseStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Service") return } case "name": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + z.Name = "" + break + } z.Name, bts, err = parseStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Service") return } case "resource": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + z.Resource = "" + break + } z.Resource, bts, err = parseStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Service") return } case "trace_id": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + z.TraceID = 0 + break + } z.TraceID, bts, err = parseUint64Bytes(bts) if err != nil { err = msgp.WrapError(err, "TraceID") return } case "span_id": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + z.SpanID = 0 + break + } z.SpanID, bts, err = parseUint64Bytes(bts) if err != nil { err = msgp.WrapError(err, "SpanID") return } case "parent_id": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + z.ParentID = 0 + break + } z.ParentID, bts, err = parseUint64Bytes(bts) if err != nil { err = msgp.WrapError(err, "ParentID") return } case "start": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + z.Start = 0 + break + } z.Start, bts, err = parseInt64Bytes(bts) if err != nil { err = msgp.WrapError(err, "Start") return } case "duration": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + z.Duration = 0 + break + } z.Duration, bts, err = parseInt64Bytes(bts) if err != nil { err = msgp.WrapError(err, "Duration") return } case "error": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + z.Error = 0 + break + } z.Error, bts, err = parseInt32Bytes(bts) if err != nil { err = msgp.WrapError(err, "Error") @@ -196,7 +258,7 @@ func (z *Span) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "Metrics") return } - if z.Metrics == nil && zb0003 > 0 { + if z.Metrics == nil && zb0003 > 0{ z.Metrics = make(map[string]float64, zb0003) } else 
if len(z.Metrics) > 0 { for key := range z.Metrics { @@ -220,24 +282,24 @@ func (z *Span) UnmarshalMsg(bts []byte) (o []byte, err error) { z.Metrics[za0003] = za0004 } case "type": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + z.Type = "" + break + } z.Type, bts, err = parseStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Type") return } case "meta_struct": - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - z.MetaStruct = nil - break - } var zb0004 uint32 zb0004, bts, err = msgp.ReadMapHeaderBytes(bts) if err != nil { err = msgp.WrapError(err, "MetaStruct") return } - if z.MetaStruct == nil && zb0004 > 0 { + if z.MetaStruct == nil { z.MetaStruct = make(map[string][]byte, zb0004) } else if len(z.MetaStruct) > 0 { for key := range z.MetaStruct { @@ -248,14 +310,14 @@ func (z *Span) UnmarshalMsg(bts []byte) (o []byte, err error) { var za0005 string var za0006 []byte zb0004-- - za0005, bts, err = parseStringBytes(bts) + za0005, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "MetaStruct") return } - za0006, bts, err = parseBytes(bts) - if za0006 != nil && err != nil { - err = msgp.WrapError(err, "MetaStruct", za0006) + za0006, bts, err = msgp.ReadBytesBytes(bts, za0006) + if err != nil { + err = msgp.WrapError(err, "MetaStruct", za0005) return } z.MetaStruct[za0005] = za0006 diff --git a/pkg/trace/pb/span_gen_modifs_test.go b/pkg/proto/pbgo/trace/span_gen_modifs_test.go similarity index 98% rename from pkg/trace/pb/span_gen_modifs_test.go rename to pkg/proto/pbgo/trace/span_gen_modifs_test.go index 7bda76cd54c26..441e83e4cb401 100644 --- a/pkg/trace/pb/span_gen_modifs_test.go +++ b/pkg/proto/pbgo/trace/span_gen_modifs_test.go @@ -3,9 +3,10 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-package pb +package trace import ( + fmt "fmt" "testing" "github.com/stretchr/testify/assert" @@ -18,6 +19,9 @@ import ( func decodeBytes(bts []byte) (*Span, error) { var s Span _, err := s.UnmarshalMsg(bts) + if err != nil { + fmt.Printf("cause: %v\n", msgp.Cause(err)) + } return &s, err } @@ -207,7 +211,8 @@ func TestMetaMapDeserialization(t *testing.T) { b = msgp.AppendString(b, "meta") b = msgp.AppendMapHeader(b, 1) b = msgp.AppendString(b, "key") - b = msgp.AppendString(b, "op\x99\xbf") + // b = msgp.AppendString(b, "op\x99\xbf") + b = msgp.AppendString(b, "op��") s, err := decodeBytes(b) assert.Nil(t, err) assert.Equal(t, map[string]string{"key": "op��"}, s.Meta) diff --git a/pkg/trace/pb/span_gen_test.go b/pkg/proto/pbgo/trace/span_gen_test.go similarity index 82% rename from pkg/trace/pb/span_gen_test.go rename to pkg/proto/pbgo/trace/span_gen_test.go index 165abc22b45f4..d3b8488315c55 100644 --- a/pkg/trace/pb/span_gen_test.go +++ b/pkg/proto/pbgo/trace/span_gen_test.go @@ -1,9 +1,4 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package pb +package trace // Code generated by github.com/tinylib/msgp DO NOT EDIT. diff --git a/pkg/trace/pb/span_utils.go b/pkg/proto/pbgo/trace/span_utils.go similarity index 99% rename from pkg/trace/pb/span_utils.go rename to pkg/proto/pbgo/trace/span_utils.go index e8da5dea22508..06a1524a2a292 100644 --- a/pkg/trace/pb/span_utils.go +++ b/pkg/proto/pbgo/trace/span_utils.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package pb +package trace // spanCopiedFields records the fields that are copied in ShallowCopy. // This should match exactly the fields set in (*Span).ShallowCopy. 
diff --git a/pkg/trace/pb/span_vtproto.pb.go b/pkg/proto/pbgo/trace/span_vtproto.pb.go similarity index 99% rename from pkg/trace/pb/span_vtproto.pb.go rename to pkg/proto/pbgo/trace/span_vtproto.pb.go index 9614470ba845c..d313cb30aabbb 100644 --- a/pkg/trace/pb/span_vtproto.pb.go +++ b/pkg/proto/pbgo/trace/span_vtproto.pb.go @@ -1,8 +1,8 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. // protoc-gen-go-vtproto version: v0.4.0 -// source: span.proto +// source: datadog/trace/span.proto -package pb +package trace import ( binary "encoding/binary" diff --git a/pkg/proto/pbgo/trace/stats.pb.go b/pkg/proto/pbgo/trace/stats.pb.go new file mode 100644 index 0000000000000..26c8b666c44ca --- /dev/null +++ b/pkg/proto/pbgo/trace/stats.pb.go @@ -0,0 +1,677 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v4.23.2 +// source: datadog/trace/stats.proto + +package trace + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// StatsPayload is the payload used to send stats from the agent to the backend. 
+type StatsPayload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AgentHostname string `protobuf:"bytes,1,opt,name=agentHostname,proto3" json:"agentHostname,omitempty"` + AgentEnv string `protobuf:"bytes,2,opt,name=agentEnv,proto3" json:"agentEnv,omitempty"` + // @gotags: json:"stats,omitempty" msg:"stats,omitempty" + Stats []*ClientStatsPayload `protobuf:"bytes,3,rep,name=stats,proto3" json:"stats,omitempty" msg:"stats,omitempty"` + AgentVersion string `protobuf:"bytes,4,opt,name=agentVersion,proto3" json:"agentVersion,omitempty"` + ClientComputed bool `protobuf:"varint,5,opt,name=clientComputed,proto3" json:"clientComputed,omitempty"` +} + +func (x *StatsPayload) Reset() { + *x = StatsPayload{} + if protoimpl.UnsafeEnabled { + mi := &file_datadog_trace_stats_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatsPayload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatsPayload) ProtoMessage() {} + +func (x *StatsPayload) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_stats_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatsPayload.ProtoReflect.Descriptor instead. 
+func (*StatsPayload) Descriptor() ([]byte, []int) { + return file_datadog_trace_stats_proto_rawDescGZIP(), []int{0} +} + +func (x *StatsPayload) GetAgentHostname() string { + if x != nil { + return x.AgentHostname + } + return "" +} + +func (x *StatsPayload) GetAgentEnv() string { + if x != nil { + return x.AgentEnv + } + return "" +} + +func (x *StatsPayload) GetStats() []*ClientStatsPayload { + if x != nil { + return x.Stats + } + return nil +} + +func (x *StatsPayload) GetAgentVersion() string { + if x != nil { + return x.AgentVersion + } + return "" +} + +func (x *StatsPayload) GetClientComputed() bool { + if x != nil { + return x.ClientComputed + } + return false +} + +// ClientStatsPayload is the first layer of span stats aggregation. It is also +// the payload sent by tracers to the agent when stats in tracer are enabled. +type ClientStatsPayload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Hostname is the tracer hostname. It's extracted from spans with "_dd.hostname" meta + // or set by tracer stats payload when hostname reporting is enabled. 
+ Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` + Env string `protobuf:"bytes,2,opt,name=env,proto3" json:"env,omitempty"` // env tag set on spans or in the tracers, used for aggregation + Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` // version tag set on spans or in the tracers, used for aggregation + // @gotags: json:"stats,omitempty" msg:"stats,omitempty" + Stats []*ClientStatsBucket `protobuf:"bytes,4,rep,name=stats,proto3" json:"stats,omitempty" msg:"stats,omitempty"` + Lang string `protobuf:"bytes,5,opt,name=lang,proto3" json:"lang,omitempty"` // informative field not used for aggregation + TracerVersion string `protobuf:"bytes,6,opt,name=tracerVersion,proto3" json:"tracerVersion,omitempty"` // informative field not used for aggregation + RuntimeID string `protobuf:"bytes,7,opt,name=runtimeID,proto3" json:"runtimeID,omitempty"` // used on stats payloads sent by the tracer to identify uniquely a message + Sequence uint64 `protobuf:"varint,8,opt,name=sequence,proto3" json:"sequence,omitempty"` // used on stats payloads sent by the tracer to identify uniquely a message + // AgentAggregation is set by the agent on tracer payloads modified by the agent aggregation layer + // characterizes counts only and distributions only payloads + AgentAggregation string `protobuf:"bytes,9,opt,name=agentAggregation,proto3" json:"agentAggregation,omitempty"` + // Service is the main service of the tracer. + // It is part of unified tagging: https://docs.datadoghq.com/getting_started/tagging/unified_service_tagging + Service string `protobuf:"bytes,10,opt,name=service,proto3" json:"service,omitempty"` + // ContainerID specifies the origin container ID. It is meant to be populated by the client and may + // be enhanced by the agent to ensure it is unique. 
+ ContainerID string `protobuf:"bytes,11,opt,name=containerID,proto3" json:"containerID,omitempty"` + // Tags specifies a set of tags obtained from the orchestrator (where applicable) using the specified containerID. + // This field should be left empty by the client. It only applies to some specific environment. + Tags []string `protobuf:"bytes,12,rep,name=tags,proto3" json:"tags,omitempty"` +} + +func (x *ClientStatsPayload) Reset() { + *x = ClientStatsPayload{} + if protoimpl.UnsafeEnabled { + mi := &file_datadog_trace_stats_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientStatsPayload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientStatsPayload) ProtoMessage() {} + +func (x *ClientStatsPayload) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_stats_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientStatsPayload.ProtoReflect.Descriptor instead. 
+func (*ClientStatsPayload) Descriptor() ([]byte, []int) { + return file_datadog_trace_stats_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientStatsPayload) GetHostname() string { + if x != nil { + return x.Hostname + } + return "" +} + +func (x *ClientStatsPayload) GetEnv() string { + if x != nil { + return x.Env + } + return "" +} + +func (x *ClientStatsPayload) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *ClientStatsPayload) GetStats() []*ClientStatsBucket { + if x != nil { + return x.Stats + } + return nil +} + +func (x *ClientStatsPayload) GetLang() string { + if x != nil { + return x.Lang + } + return "" +} + +func (x *ClientStatsPayload) GetTracerVersion() string { + if x != nil { + return x.TracerVersion + } + return "" +} + +func (x *ClientStatsPayload) GetRuntimeID() string { + if x != nil { + return x.RuntimeID + } + return "" +} + +func (x *ClientStatsPayload) GetSequence() uint64 { + if x != nil { + return x.Sequence + } + return 0 +} + +func (x *ClientStatsPayload) GetAgentAggregation() string { + if x != nil { + return x.AgentAggregation + } + return "" +} + +func (x *ClientStatsPayload) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *ClientStatsPayload) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *ClientStatsPayload) GetTags() []string { + if x != nil { + return x.Tags + } + return nil +} + +// ClientStatsBucket is a time bucket containing aggregated stats. 
+type ClientStatsBucket struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start uint64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` // bucket start in nanoseconds + Duration uint64 `protobuf:"varint,2,opt,name=duration,proto3" json:"duration,omitempty"` // bucket duration in nanoseconds + // @gotags: json:"stats,omitempty" msg:"stats,omitempty" + Stats []*ClientGroupedStats `protobuf:"bytes,3,rep,name=stats,proto3" json:"stats,omitempty" msg:"stats,omitempty"` + // AgentTimeShift is the shift applied by the agent stats aggregator on bucket start + // when the received bucket start is outside of the agent aggregation window + AgentTimeShift int64 `protobuf:"varint,4,opt,name=agentTimeShift,proto3" json:"agentTimeShift,omitempty"` +} + +func (x *ClientStatsBucket) Reset() { + *x = ClientStatsBucket{} + if protoimpl.UnsafeEnabled { + mi := &file_datadog_trace_stats_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientStatsBucket) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientStatsBucket) ProtoMessage() {} + +func (x *ClientStatsBucket) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_stats_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientStatsBucket.ProtoReflect.Descriptor instead. 
+func (*ClientStatsBucket) Descriptor() ([]byte, []int) { + return file_datadog_trace_stats_proto_rawDescGZIP(), []int{2} +} + +func (x *ClientStatsBucket) GetStart() uint64 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *ClientStatsBucket) GetDuration() uint64 { + if x != nil { + return x.Duration + } + return 0 +} + +func (x *ClientStatsBucket) GetStats() []*ClientGroupedStats { + if x != nil { + return x.Stats + } + return nil +} + +func (x *ClientStatsBucket) GetAgentTimeShift() int64 { + if x != nil { + return x.AgentTimeShift + } + return 0 +} + +// ClientGroupedStats aggregate stats on spans grouped by service, name, resource, status_code, type +type ClientGroupedStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Resource string `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` + HTTPStatusCode uint32 `protobuf:"varint,4,opt,name=HTTP_status_code,json=HTTPStatusCode,proto3" json:"HTTP_status_code,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` + DBType string `protobuf:"bytes,6,opt,name=DB_type,json=DBType,proto3" json:"DB_type,omitempty"` // db_type might be used in the future to help in the obfuscation step + Hits uint64 `protobuf:"varint,7,opt,name=hits,proto3" json:"hits,omitempty"` // count of all spans aggregated in the groupedstats + Errors uint64 `protobuf:"varint,8,opt,name=errors,proto3" json:"errors,omitempty"` // count of error spans aggregated in the groupedstats + Duration uint64 `protobuf:"varint,9,opt,name=duration,proto3" json:"duration,omitempty"` // total duration in nanoseconds of spans aggregated in the bucket + OkSummary []byte `protobuf:"bytes,10,opt,name=okSummary,proto3" json:"okSummary,omitempty"` // ddsketch summary of 
ok spans latencies encoded in protobuf + ErrorSummary []byte `protobuf:"bytes,11,opt,name=errorSummary,proto3" json:"errorSummary,omitempty"` // ddsketch summary of error spans latencies encoded in protobuf + Synthetics bool `protobuf:"varint,12,opt,name=synthetics,proto3" json:"synthetics,omitempty"` // set to true on spans generated by synthetics traffic + TopLevelHits uint64 `protobuf:"varint,13,opt,name=topLevelHits,proto3" json:"topLevelHits,omitempty"` // count of top level spans aggregated in the groupedstats + PeerService string `protobuf:"bytes,14,opt,name=peer_service,json=peerService,proto3" json:"peer_service,omitempty"` // name of the remote service that the `service` communicated with + SpanKind string `protobuf:"bytes,15,opt,name=span_kind,json=spanKind,proto3" json:"span_kind,omitempty"` // value of the span.kind tag on the span +} + +func (x *ClientGroupedStats) Reset() { + *x = ClientGroupedStats{} + if protoimpl.UnsafeEnabled { + mi := &file_datadog_trace_stats_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientGroupedStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientGroupedStats) ProtoMessage() {} + +func (x *ClientGroupedStats) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_stats_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientGroupedStats.ProtoReflect.Descriptor instead. 
+func (*ClientGroupedStats) Descriptor() ([]byte, []int) { + return file_datadog_trace_stats_proto_rawDescGZIP(), []int{3} +} + +func (x *ClientGroupedStats) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *ClientGroupedStats) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ClientGroupedStats) GetResource() string { + if x != nil { + return x.Resource + } + return "" +} + +func (x *ClientGroupedStats) GetHTTPStatusCode() uint32 { + if x != nil { + return x.HTTPStatusCode + } + return 0 +} + +func (x *ClientGroupedStats) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *ClientGroupedStats) GetDBType() string { + if x != nil { + return x.DBType + } + return "" +} + +func (x *ClientGroupedStats) GetHits() uint64 { + if x != nil { + return x.Hits + } + return 0 +} + +func (x *ClientGroupedStats) GetErrors() uint64 { + if x != nil { + return x.Errors + } + return 0 +} + +func (x *ClientGroupedStats) GetDuration() uint64 { + if x != nil { + return x.Duration + } + return 0 +} + +func (x *ClientGroupedStats) GetOkSummary() []byte { + if x != nil { + return x.OkSummary + } + return nil +} + +func (x *ClientGroupedStats) GetErrorSummary() []byte { + if x != nil { + return x.ErrorSummary + } + return nil +} + +func (x *ClientGroupedStats) GetSynthetics() bool { + if x != nil { + return x.Synthetics + } + return false +} + +func (x *ClientGroupedStats) GetTopLevelHits() uint64 { + if x != nil { + return x.TopLevelHits + } + return 0 +} + +func (x *ClientGroupedStats) GetPeerService() string { + if x != nil { + return x.PeerService + } + return "" +} + +func (x *ClientGroupedStats) GetSpanKind() string { + if x != nil { + return x.SpanKind + } + return "" +} + +var File_datadog_trace_stats_proto protoreflect.FileDescriptor + +var file_datadog_trace_stats_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 
+ 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x64, 0x61, 0x74, + 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x22, 0xd5, 0x01, 0x0a, 0x0c, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x61, + 0x67, 0x65, 0x6e, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x76, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x76, 0x12, 0x37, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, + 0x65, 0x64, 0x22, 0x84, 0x03, 0x0a, 0x12, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, + 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, + 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x76, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x36, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x61, 0x6e, + 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6c, 0x61, 0x6e, 0x67, 0x12, 0x24, 0x0a, + 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, + 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2a, 0x0a, + 0x10, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x67, + 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x49, 0x44, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x0c, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x22, 0xa6, 0x01, 0x0a, 0x11, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x75, 0x63, 
0x6b, 0x65, 0x74, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x37, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x65, 0x64, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x68, 0x69, 0x66, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x68, 0x69, + 0x66, 0x74, 0x22, 0xc3, 0x03, 0x0a, 0x12, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x48, 0x54, 0x54, 0x50, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x48, + 0x54, 0x54, 0x50, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x17, 0x0a, 0x07, 0x44, 0x42, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 
0x52, 0x06, 0x44, 0x42, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x69, + 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x68, 0x69, 0x74, 0x73, 0x12, 0x16, + 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x6f, 0x6b, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x6b, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x12, 0x22, 0x0a, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x75, 0x6d, + 0x6d, 0x61, 0x72, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, + 0x63, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x73, 0x79, 0x6e, 0x74, 0x68, 0x65, + 0x74, 0x69, 0x63, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x6f, 0x70, 0x4c, 0x65, 0x76, 0x65, 0x6c, + 0x48, 0x69, 0x74, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x74, 0x6f, 0x70, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x48, 0x69, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x65, 0x65, 0x72, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x70, 0x65, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, + 0x70, 0x61, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x73, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x42, 0x16, 0x5a, 0x14, 0x70, 0x6b, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_datadog_trace_stats_proto_rawDescOnce sync.Once + 
file_datadog_trace_stats_proto_rawDescData = file_datadog_trace_stats_proto_rawDesc +) + +func file_datadog_trace_stats_proto_rawDescGZIP() []byte { + file_datadog_trace_stats_proto_rawDescOnce.Do(func() { + file_datadog_trace_stats_proto_rawDescData = protoimpl.X.CompressGZIP(file_datadog_trace_stats_proto_rawDescData) + }) + return file_datadog_trace_stats_proto_rawDescData +} + +var file_datadog_trace_stats_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_datadog_trace_stats_proto_goTypes = []interface{}{ + (*StatsPayload)(nil), // 0: datadog.trace.StatsPayload + (*ClientStatsPayload)(nil), // 1: datadog.trace.ClientStatsPayload + (*ClientStatsBucket)(nil), // 2: datadog.trace.ClientStatsBucket + (*ClientGroupedStats)(nil), // 3: datadog.trace.ClientGroupedStats +} +var file_datadog_trace_stats_proto_depIdxs = []int32{ + 1, // 0: datadog.trace.StatsPayload.stats:type_name -> datadog.trace.ClientStatsPayload + 2, // 1: datadog.trace.ClientStatsPayload.stats:type_name -> datadog.trace.ClientStatsBucket + 3, // 2: datadog.trace.ClientStatsBucket.stats:type_name -> datadog.trace.ClientGroupedStats + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_datadog_trace_stats_proto_init() } +func file_datadog_trace_stats_proto_init() { + if File_datadog_trace_stats_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_datadog_trace_stats_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatsPayload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_datadog_trace_stats_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientStatsPayload); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_datadog_trace_stats_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientStatsBucket); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_datadog_trace_stats_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientGroupedStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_datadog_trace_stats_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_datadog_trace_stats_proto_goTypes, + DependencyIndexes: file_datadog_trace_stats_proto_depIdxs, + MessageInfos: file_datadog_trace_stats_proto_msgTypes, + }.Build() + File_datadog_trace_stats_proto = out.File + file_datadog_trace_stats_proto_rawDesc = nil + file_datadog_trace_stats_proto_goTypes = nil + file_datadog_trace_stats_proto_depIdxs = nil +} diff --git a/pkg/trace/pb/stats_gen.go b/pkg/proto/pbgo/trace/stats_gen.go similarity index 81% rename from pkg/trace/pb/stats_gen.go rename to pkg/proto/pbgo/trace/stats_gen.go index 451b3a9bf8fd8..0aa3fa9b5dbe3 100644 --- a/pkg/trace/pb/stats_gen.go +++ b/pkg/proto/pbgo/trace/stats_gen.go @@ -1,11 +1,8 @@ -package pb +package trace // Code generated by github.com/tinylib/msgp DO NOT EDIT. -// Command to generate: msgp -file pkg/trace/pb/stats.pb.go -o pkg/trace/pb/stats_gen.go -// Please remember to add this comment back after re-generation! 
import ( - _ "github.com/gogo/protobuf/gogoproto" "github.com/tinylib/msgp/msgp" ) @@ -492,7 +489,7 @@ func (z *ClientStatsBucket) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "Duration") return } - case "Stats": + case "stats": var zb0002 uint32 zb0002, err = dc.ReadArrayHeader() if err != nil { @@ -502,13 +499,25 @@ func (z *ClientStatsBucket) DecodeMsg(dc *msgp.Reader) (err error) { if cap(z.Stats) >= int(zb0002) { z.Stats = (z.Stats)[:zb0002] } else { - z.Stats = make([]ClientGroupedStats, zb0002) + z.Stats = make([]*ClientGroupedStats, zb0002) } for za0001 := range z.Stats { - err = z.Stats[za0001].DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "Stats", za0001) - return + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + z.Stats[za0001] = nil + } else { + if z.Stats[za0001] == nil { + z.Stats[za0001] = new(ClientGroupedStats) + } + err = z.Stats[za0001].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } } } case "AgentTimeShift": @@ -530,9 +539,23 @@ func (z *ClientStatsBucket) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z *ClientStatsBucket) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 4 + // omitempty: check for empty values + zb0001Len := uint32(4) + var zb0001Mask uint8 /* 4 bits */ + if z.Stats == nil { + zb0001Len-- + zb0001Mask |= 0x4 + } + // variable map header, size zb0001Len + err = en.Append(0x80 | uint8(zb0001Len)) + if err != nil { + return + } + if zb0001Len == 0 { + return + } // write "Start" - err = en.Append(0x84, 0xa5, 0x53, 0x74, 0x61, 0x72, 0x74) + err = en.Append(0xa5, 0x53, 0x74, 0x61, 0x72, 0x74) if err != nil { return } @@ -551,22 +574,31 @@ func (z *ClientStatsBucket) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "Duration") return } - // write "Stats" - err = en.Append(0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) - if 
err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.Stats))) - if err != nil { - err = msgp.WrapError(err, "Stats") - return - } - for za0001 := range z.Stats { - err = z.Stats[za0001].EncodeMsg(en) + if (zb0001Mask & 0x4) == 0 { // if not empty + // write "stats" + err = en.Append(0xa5, 0x73, 0x74, 0x61, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Stats))) if err != nil { - err = msgp.WrapError(err, "Stats", za0001) + err = msgp.WrapError(err, "Stats") return } + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.Stats[za0001].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } } // write "AgentTimeShift" err = en.Append(0xae, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x68, 0x69, 0x66, 0x74) @@ -584,21 +616,38 @@ func (z *ClientStatsBucket) EncodeMsg(en *msgp.Writer) (err error) { // MarshalMsg implements msgp.Marshaler func (z *ClientStatsBucket) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 4 + // omitempty: check for empty values + zb0001Len := uint32(4) + var zb0001Mask uint8 /* 4 bits */ + if z.Stats == nil { + zb0001Len-- + zb0001Mask |= 0x4 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len == 0 { + return + } // string "Start" - o = append(o, 0x84, 0xa5, 0x53, 0x74, 0x61, 0x72, 0x74) + o = append(o, 0xa5, 0x53, 0x74, 0x61, 0x72, 0x74) o = msgp.AppendUint64(o, z.Start) // string "Duration" o = append(o, 0xa8, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e) o = msgp.AppendUint64(o, z.Duration) - // string "Stats" - o = append(o, 0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.Stats))) - for za0001 := range z.Stats { - o, err = z.Stats[za0001].MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "Stats", za0001) - return 
+ if (zb0001Mask & 0x4) == 0 { // if not empty + // string "stats" + o = append(o, 0xa5, 0x73, 0x74, 0x61, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Stats))) + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.Stats[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } } } // string "AgentTimeShift" @@ -637,7 +686,7 @@ func (z *ClientStatsBucket) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "Duration") return } - case "Stats": + case "stats": var zb0002 uint32 zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { @@ -647,13 +696,24 @@ func (z *ClientStatsBucket) UnmarshalMsg(bts []byte) (o []byte, err error) { if cap(z.Stats) >= int(zb0002) { z.Stats = (z.Stats)[:zb0002] } else { - z.Stats = make([]ClientGroupedStats, zb0002) + z.Stats = make([]*ClientGroupedStats, zb0002) } for za0001 := range z.Stats { - bts, err = z.Stats[za0001].UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "Stats", za0001) - return + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Stats[za0001] = nil + } else { + if z.Stats[za0001] == nil { + z.Stats[za0001] = new(ClientGroupedStats) + } + bts, err = z.Stats[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } } } case "AgentTimeShift": @@ -678,7 +738,11 @@ func (z *ClientStatsBucket) UnmarshalMsg(bts []byte) (o []byte, err error) { func (z *ClientStatsBucket) Msgsize() (s int) { s = 1 + 6 + msgp.Uint64Size + 9 + msgp.Uint64Size + 6 + msgp.ArrayHeaderSize for za0001 := range z.Stats { - s += z.Stats[za0001].Msgsize() + if z.Stats[za0001] == nil { + s += msgp.NilSize + } else { + s += z.Stats[za0001].Msgsize() + } } s += 15 + msgp.Int64Size return @@ -720,7 +784,7 @@ func (z *ClientStatsPayload) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "Version") 
return } - case "Stats": + case "stats": var zb0002 uint32 zb0002, err = dc.ReadArrayHeader() if err != nil { @@ -730,13 +794,25 @@ func (z *ClientStatsPayload) DecodeMsg(dc *msgp.Reader) (err error) { if cap(z.Stats) >= int(zb0002) { z.Stats = (z.Stats)[:zb0002] } else { - z.Stats = make([]ClientStatsBucket, zb0002) + z.Stats = make([]*ClientStatsBucket, zb0002) } for za0001 := range z.Stats { - err = z.Stats[za0001].DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "Stats", za0001) - return + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + z.Stats[za0001] = nil + } else { + if z.Stats[za0001] == nil { + z.Stats[za0001] = new(ClientStatsBucket) + } + err = z.Stats[za0001].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } } } case "Lang": @@ -813,9 +889,23 @@ func (z *ClientStatsPayload) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z *ClientStatsPayload) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 12 + // omitempty: check for empty values + zb0001Len := uint32(12) + var zb0001Mask uint16 /* 12 bits */ + if z.Stats == nil { + zb0001Len-- + zb0001Mask |= 0x8 + } + // variable map header, size zb0001Len + err = en.Append(0x80 | uint8(zb0001Len)) + if err != nil { + return + } + if zb0001Len == 0 { + return + } // write "Hostname" - err = en.Append(0x8c, 0xa8, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) + err = en.Append(0xa8, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) if err != nil { return } @@ -844,22 +934,31 @@ func (z *ClientStatsPayload) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "Version") return } - // write "Stats" - err = en.Append(0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.Stats))) - if err != nil { - err = msgp.WrapError(err, "Stats") - return - } - for za0001 := range z.Stats { - err = 
z.Stats[za0001].EncodeMsg(en) + if (zb0001Mask & 0x8) == 0 { // if not empty + // write "stats" + err = en.Append(0xa5, 0x73, 0x74, 0x61, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Stats))) if err != nil { - err = msgp.WrapError(err, "Stats", za0001) + err = msgp.WrapError(err, "Stats") return } + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.Stats[za0001].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } } // write "Lang" err = en.Append(0xa4, 0x4c, 0x61, 0x6e, 0x67) @@ -954,9 +1053,20 @@ func (z *ClientStatsPayload) EncodeMsg(en *msgp.Writer) (err error) { // MarshalMsg implements msgp.Marshaler func (z *ClientStatsPayload) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 12 + // omitempty: check for empty values + zb0001Len := uint32(12) + var zb0001Mask uint16 /* 12 bits */ + if z.Stats == nil { + zb0001Len-- + zb0001Mask |= 0x8 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len == 0 { + return + } // string "Hostname" - o = append(o, 0x8c, 0xa8, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) + o = append(o, 0xa8, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) o = msgp.AppendString(o, z.Hostname) // string "Env" o = append(o, 0xa3, 0x45, 0x6e, 0x76) @@ -964,14 +1074,20 @@ func (z *ClientStatsPayload) MarshalMsg(b []byte) (o []byte, err error) { // string "Version" o = append(o, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) o = msgp.AppendString(o, z.Version) - // string "Stats" - o = append(o, 0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.Stats))) - for za0001 := range z.Stats { - o, err = z.Stats[za0001].MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "Stats", za0001) - return + if (zb0001Mask & 0x8) == 0 { // if not empty + // string 
"stats" + o = append(o, 0xa5, 0x73, 0x74, 0x61, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Stats))) + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.Stats[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } } } // string "Lang" @@ -1040,7 +1156,7 @@ func (z *ClientStatsPayload) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "Version") return } - case "Stats": + case "stats": var zb0002 uint32 zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { @@ -1050,13 +1166,24 @@ func (z *ClientStatsPayload) UnmarshalMsg(bts []byte) (o []byte, err error) { if cap(z.Stats) >= int(zb0002) { z.Stats = (z.Stats)[:zb0002] } else { - z.Stats = make([]ClientStatsBucket, zb0002) + z.Stats = make([]*ClientStatsBucket, zb0002) } for za0001 := range z.Stats { - bts, err = z.Stats[za0001].UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "Stats", za0001) - return + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Stats[za0001] = nil + } else { + if z.Stats[za0001] == nil { + z.Stats[za0001] = new(ClientStatsBucket) + } + bts, err = z.Stats[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } } } case "Lang": @@ -1136,7 +1263,11 @@ func (z *ClientStatsPayload) UnmarshalMsg(bts []byte) (o []byte, err error) { func (z *ClientStatsPayload) Msgsize() (s int) { s = 1 + 9 + msgp.StringPrefixSize + len(z.Hostname) + 4 + msgp.StringPrefixSize + len(z.Env) + 8 + msgp.StringPrefixSize + len(z.Version) + 6 + msgp.ArrayHeaderSize for za0001 := range z.Stats { - s += z.Stats[za0001].Msgsize() + if z.Stats[za0001] == nil { + s += msgp.NilSize + } else { + s += z.Stats[za0001].Msgsize() + } } s += 5 + msgp.StringPrefixSize + len(z.Lang) + 14 + msgp.StringPrefixSize + len(z.TracerVersion) + 10 + msgp.StringPrefixSize + len(z.RuntimeID) + 
9 + msgp.Uint64Size + 17 + msgp.StringPrefixSize + len(z.AgentAggregation) + 8 + msgp.StringPrefixSize + len(z.Service) + 12 + msgp.StringPrefixSize + len(z.ContainerID) + 5 + msgp.ArrayHeaderSize for za0002 := range z.Tags { @@ -1175,7 +1306,7 @@ func (z *StatsPayload) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "AgentEnv") return } - case "Stats": + case "stats": var zb0002 uint32 zb0002, err = dc.ReadArrayHeader() if err != nil { @@ -1185,13 +1316,25 @@ func (z *StatsPayload) DecodeMsg(dc *msgp.Reader) (err error) { if cap(z.Stats) >= int(zb0002) { z.Stats = (z.Stats)[:zb0002] } else { - z.Stats = make([]ClientStatsPayload, zb0002) + z.Stats = make([]*ClientStatsPayload, zb0002) } for za0001 := range z.Stats { - err = z.Stats[za0001].DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "Stats", za0001) - return + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + z.Stats[za0001] = nil + } else { + if z.Stats[za0001] == nil { + z.Stats[za0001] = new(ClientStatsPayload) + } + err = z.Stats[za0001].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } } } case "AgentVersion": @@ -1219,9 +1362,23 @@ func (z *StatsPayload) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z *StatsPayload) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 5 + // omitempty: check for empty values + zb0001Len := uint32(5) + var zb0001Mask uint8 /* 5 bits */ + if z.Stats == nil { + zb0001Len-- + zb0001Mask |= 0x4 + } + // variable map header, size zb0001Len + err = en.Append(0x80 | uint8(zb0001Len)) + if err != nil { + return + } + if zb0001Len == 0 { + return + } // write "AgentHostname" - err = en.Append(0x85, 0xad, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) + err = en.Append(0xad, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) if err != nil { 
return } @@ -1240,22 +1397,31 @@ func (z *StatsPayload) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "AgentEnv") return } - // write "Stats" - err = en.Append(0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.Stats))) - if err != nil { - err = msgp.WrapError(err, "Stats") - return - } - for za0001 := range z.Stats { - err = z.Stats[za0001].EncodeMsg(en) + if (zb0001Mask & 0x4) == 0 { // if not empty + // write "stats" + err = en.Append(0xa5, 0x73, 0x74, 0x61, 0x74, 0x73) if err != nil { - err = msgp.WrapError(err, "Stats", za0001) return } + err = en.WriteArrayHeader(uint32(len(z.Stats))) + if err != nil { + err = msgp.WrapError(err, "Stats") + return + } + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.Stats[za0001].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } + } } // write "AgentVersion" err = en.Append(0xac, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) @@ -1283,21 +1449,38 @@ func (z *StatsPayload) EncodeMsg(en *msgp.Writer) (err error) { // MarshalMsg implements msgp.Marshaler func (z *StatsPayload) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 5 + // omitempty: check for empty values + zb0001Len := uint32(5) + var zb0001Mask uint8 /* 5 bits */ + if z.Stats == nil { + zb0001Len-- + zb0001Mask |= 0x4 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len == 0 { + return + } // string "AgentHostname" - o = append(o, 0x85, 0xad, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) + o = append(o, 0xad, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65) o = msgp.AppendString(o, z.AgentHostname) // string "AgentEnv" o = append(o, 0xa8, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x6e, 
0x76) o = msgp.AppendString(o, z.AgentEnv) - // string "Stats" - o = append(o, 0xa5, 0x53, 0x74, 0x61, 0x74, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.Stats))) - for za0001 := range z.Stats { - o, err = z.Stats[za0001].MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "Stats", za0001) - return + if (zb0001Mask & 0x4) == 0 { // if not empty + // string "stats" + o = append(o, 0xa5, 0x73, 0x74, 0x61, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Stats))) + for za0001 := range z.Stats { + if z.Stats[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.Stats[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } + } } } // string "AgentVersion" @@ -1339,7 +1522,7 @@ func (z *StatsPayload) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "AgentEnv") return } - case "Stats": + case "stats": var zb0002 uint32 zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { @@ -1349,13 +1532,24 @@ func (z *StatsPayload) UnmarshalMsg(bts []byte) (o []byte, err error) { if cap(z.Stats) >= int(zb0002) { z.Stats = (z.Stats)[:zb0002] } else { - z.Stats = make([]ClientStatsPayload, zb0002) + z.Stats = make([]*ClientStatsPayload, zb0002) } for za0001 := range z.Stats { - bts, err = z.Stats[za0001].UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "Stats", za0001) - return + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Stats[za0001] = nil + } else { + if z.Stats[za0001] == nil { + z.Stats[za0001] = new(ClientStatsPayload) + } + bts, err = z.Stats[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Stats", za0001) + return + } } } case "AgentVersion": @@ -1386,7 +1580,11 @@ func (z *StatsPayload) UnmarshalMsg(bts []byte) (o []byte, err error) { func (z *StatsPayload) Msgsize() (s int) { s = 1 + 14 + msgp.StringPrefixSize + len(z.AgentHostname) + 9 + msgp.StringPrefixSize + len(z.AgentEnv) + 
6 + msgp.ArrayHeaderSize for za0001 := range z.Stats { - s += z.Stats[za0001].Msgsize() + if z.Stats[za0001] == nil { + s += msgp.NilSize + } else { + s += z.Stats[za0001].Msgsize() + } } s += 13 + msgp.StringPrefixSize + len(z.AgentVersion) + 15 + msgp.BoolSize return diff --git a/pkg/trace/pb/stats_gen_test.go b/pkg/proto/pbgo/trace/stats_gen_test.go similarity index 99% rename from pkg/trace/pb/stats_gen_test.go rename to pkg/proto/pbgo/trace/stats_gen_test.go index b52383b3bb278..3df4463284a75 100644 --- a/pkg/trace/pb/stats_gen_test.go +++ b/pkg/proto/pbgo/trace/stats_gen_test.go @@ -1,4 +1,4 @@ -package pb +package trace // Code generated by github.com/tinylib/msgp DO NOT EDIT. diff --git a/pkg/proto/pbgo/trace/stats_vtproto.pb.go b/pkg/proto/pbgo/trace/stats_vtproto.pb.go new file mode 100644 index 0000000000000..d12a6f4eba119 --- /dev/null +++ b/pkg/proto/pbgo/trace/stats_vtproto.pb.go @@ -0,0 +1,1814 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.4.0 +// source: datadog/trace/stats.proto + +package trace + +import ( + fmt "fmt" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *StatsPayload) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatsPayload) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StatsPayload) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ClientComputed { + i-- + if m.ClientComputed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.AgentVersion) > 0 { + i -= len(m.AgentVersion) + copy(dAtA[i:], m.AgentVersion) + i = encodeVarint(dAtA, i, uint64(len(m.AgentVersion))) + i-- + dAtA[i] = 0x22 + } + if len(m.Stats) > 0 { + for iNdEx := len(m.Stats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Stats[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.AgentEnv) > 0 { + i -= len(m.AgentEnv) + copy(dAtA[i:], m.AgentEnv) + i = encodeVarint(dAtA, i, uint64(len(m.AgentEnv))) + i-- + dAtA[i] = 0x12 + } + if len(m.AgentHostname) > 0 { + i -= len(m.AgentHostname) + copy(dAtA[i:], m.AgentHostname) + i = encodeVarint(dAtA, i, uint64(len(m.AgentHostname))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientStatsPayload) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientStatsPayload) MarshalToVT(dAtA []byte) (int, error) { + 
size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ClientStatsPayload) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Tags) > 0 { + for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tags[iNdEx]) + copy(dAtA[i:], m.Tags[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Tags[iNdEx]))) + i-- + dAtA[i] = 0x62 + } + } + if len(m.ContainerID) > 0 { + i -= len(m.ContainerID) + copy(dAtA[i:], m.ContainerID) + i = encodeVarint(dAtA, i, uint64(len(m.ContainerID))) + i-- + dAtA[i] = 0x5a + } + if len(m.Service) > 0 { + i -= len(m.Service) + copy(dAtA[i:], m.Service) + i = encodeVarint(dAtA, i, uint64(len(m.Service))) + i-- + dAtA[i] = 0x52 + } + if len(m.AgentAggregation) > 0 { + i -= len(m.AgentAggregation) + copy(dAtA[i:], m.AgentAggregation) + i = encodeVarint(dAtA, i, uint64(len(m.AgentAggregation))) + i-- + dAtA[i] = 0x4a + } + if m.Sequence != 0 { + i = encodeVarint(dAtA, i, uint64(m.Sequence)) + i-- + dAtA[i] = 0x40 + } + if len(m.RuntimeID) > 0 { + i -= len(m.RuntimeID) + copy(dAtA[i:], m.RuntimeID) + i = encodeVarint(dAtA, i, uint64(len(m.RuntimeID))) + i-- + dAtA[i] = 0x3a + } + if len(m.TracerVersion) > 0 { + i -= len(m.TracerVersion) + copy(dAtA[i:], m.TracerVersion) + i = encodeVarint(dAtA, i, uint64(len(m.TracerVersion))) + i-- + dAtA[i] = 0x32 + } + if len(m.Lang) > 0 { + i -= len(m.Lang) + copy(dAtA[i:], m.Lang) + i = encodeVarint(dAtA, i, uint64(len(m.Lang))) + i-- + dAtA[i] = 0x2a + } + if len(m.Stats) > 0 { + for iNdEx := len(m.Stats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Stats[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = 
encodeVarint(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x1a + } + if len(m.Env) > 0 { + i -= len(m.Env) + copy(dAtA[i:], m.Env) + i = encodeVarint(dAtA, i, uint64(len(m.Env))) + i-- + dAtA[i] = 0x12 + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarint(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientStatsBucket) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientStatsBucket) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ClientStatsBucket) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AgentTimeShift != 0 { + i = encodeVarint(dAtA, i, uint64(m.AgentTimeShift)) + i-- + dAtA[i] = 0x20 + } + if len(m.Stats) > 0 { + for iNdEx := len(m.Stats) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Stats[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if m.Duration != 0 { + i = encodeVarint(dAtA, i, uint64(m.Duration)) + i-- + dAtA[i] = 0x10 + } + if m.Start != 0 { + i = encodeVarint(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ClientGroupedStats) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientGroupedStats) MarshalToVT(dAtA 
[]byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ClientGroupedStats) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SpanKind) > 0 { + i -= len(m.SpanKind) + copy(dAtA[i:], m.SpanKind) + i = encodeVarint(dAtA, i, uint64(len(m.SpanKind))) + i-- + dAtA[i] = 0x7a + } + if len(m.PeerService) > 0 { + i -= len(m.PeerService) + copy(dAtA[i:], m.PeerService) + i = encodeVarint(dAtA, i, uint64(len(m.PeerService))) + i-- + dAtA[i] = 0x72 + } + if m.TopLevelHits != 0 { + i = encodeVarint(dAtA, i, uint64(m.TopLevelHits)) + i-- + dAtA[i] = 0x68 + } + if m.Synthetics { + i-- + if m.Synthetics { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + } + if len(m.ErrorSummary) > 0 { + i -= len(m.ErrorSummary) + copy(dAtA[i:], m.ErrorSummary) + i = encodeVarint(dAtA, i, uint64(len(m.ErrorSummary))) + i-- + dAtA[i] = 0x5a + } + if len(m.OkSummary) > 0 { + i -= len(m.OkSummary) + copy(dAtA[i:], m.OkSummary) + i = encodeVarint(dAtA, i, uint64(len(m.OkSummary))) + i-- + dAtA[i] = 0x52 + } + if m.Duration != 0 { + i = encodeVarint(dAtA, i, uint64(m.Duration)) + i-- + dAtA[i] = 0x48 + } + if m.Errors != 0 { + i = encodeVarint(dAtA, i, uint64(m.Errors)) + i-- + dAtA[i] = 0x40 + } + if m.Hits != 0 { + i = encodeVarint(dAtA, i, uint64(m.Hits)) + i-- + dAtA[i] = 0x38 + } + if len(m.DBType) > 0 { + i -= len(m.DBType) + copy(dAtA[i:], m.DBType) + i = encodeVarint(dAtA, i, uint64(len(m.DBType))) + i-- + dAtA[i] = 0x32 + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarint(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x2a + } + if m.HTTPStatusCode != 0 { + i = encodeVarint(dAtA, i, uint64(m.HTTPStatusCode)) + i-- + dAtA[i] = 0x20 + } + if len(m.Resource) > 0 { + i -= len(m.Resource) + copy(dAtA[i:], 
m.Resource) + i = encodeVarint(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Service) > 0 { + i -= len(m.Service) + copy(dAtA[i:], m.Service) + i = encodeVarint(dAtA, i, uint64(len(m.Service))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StatsPayload) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AgentHostname) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.AgentEnv) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Stats) > 0 { + for _, e := range m.Stats { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.AgentVersion) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.ClientComputed { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ClientStatsPayload) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Env) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Stats) > 0 { + for _, e := range m.Stats { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.Lang) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TracerVersion) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.RuntimeID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Sequence != 0 { + n += 1 + sov(uint64(m.Sequence)) + } + l = len(m.AgentAggregation) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Service) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Tags) > 0 { + for _, s := range m.Tags { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ClientStatsBucket) 
SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Start != 0 { + n += 1 + sov(uint64(m.Start)) + } + if m.Duration != 0 { + n += 1 + sov(uint64(m.Duration)) + } + if len(m.Stats) > 0 { + for _, e := range m.Stats { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.AgentTimeShift != 0 { + n += 1 + sov(uint64(m.AgentTimeShift)) + } + n += len(m.unknownFields) + return n +} + +func (m *ClientGroupedStats) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Service) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Resource) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.HTTPStatusCode != 0 { + n += 1 + sov(uint64(m.HTTPStatusCode)) + } + l = len(m.Type) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.DBType) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Hits != 0 { + n += 1 + sov(uint64(m.Hits)) + } + if m.Errors != 0 { + n += 1 + sov(uint64(m.Errors)) + } + if m.Duration != 0 { + n += 1 + sov(uint64(m.Duration)) + } + l = len(m.OkSummary) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ErrorSummary) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Synthetics { + n += 2 + } + if m.TopLevelHits != 0 { + n += 1 + sov(uint64(m.TopLevelHits)) + } + l = len(m.PeerService) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.SpanKind) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *StatsPayload) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: StatsPayload: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatsPayload: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentHostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AgentHostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentEnv", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AgentEnv = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stats = append(m.Stats, &ClientStatsPayload{}) + if err := m.Stats[len(m.Stats)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AgentVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientComputed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ClientComputed = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientStatsPayload) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientStatsPayload: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientStatsPayload: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stats = append(m.Stats, &ClientStatsBucket{}) + if err := m.Stats[len(m.Stats)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lang", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 
{ + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Lang = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TracerVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TracerVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentAggregation", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AgentAggregation = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Service = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + 
var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tags = append(m.Tags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientStatsBucket) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientStatsBucket: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientStatsBucket: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Duration |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stats = append(m.Stats, &ClientGroupedStats{}) + if err := m.Stats[len(m.Stats)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentTimeShift", wireType) + } + m.AgentTimeShift = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AgentTimeShift |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientGroupedStats) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientGroupedStats: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientGroupedStats: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Service = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPStatusCode", wireType) + } + m.HTTPStatusCode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HTTPStatusCode |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DBType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DBType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hits", wireType) + } + m.Hits = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hits |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType) + } + m.Errors = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Errors |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Duration |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OkSummary", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + 
postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OkSummary = append(m.OkSummary[:0], dAtA[iNdEx:postIndex]...) + if m.OkSummary == nil { + m.OkSummary = []byte{} + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ErrorSummary", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ErrorSummary = append(m.ErrorSummary[:0], dAtA[iNdEx:postIndex]...) + if m.ErrorSummary == nil { + m.ErrorSummary = []byte{} + } + iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Synthetics", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Synthetics = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TopLevelHits", wireType) + } + m.TopLevelHits = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TopLevelHits |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerService", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerService = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanKind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpanKind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/pkg/trace/pb/trace.go b/pkg/proto/pbgo/trace/trace.go similarity index 99% rename from pkg/trace/pb/trace.go rename to pkg/proto/pbgo/trace/trace.go index aa10697965d7d..184a5c6b60a45 100644 --- a/pkg/trace/pb/trace.go +++ b/pkg/proto/pbgo/trace/trace.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-package pb +package trace //go:generate go run github.com/tinylib/msgp -file=span.pb.go -o span_gen.go -io=false //go:generate go run github.com/tinylib/msgp -file=tracer_payload.pb.go -o tracer_payload_gen.go -io=false diff --git a/pkg/trace/pb/trace_gen.go b/pkg/proto/pbgo/trace/trace_gen.go similarity index 93% rename from pkg/trace/pb/trace_gen.go rename to pkg/proto/pbgo/trace/trace_gen.go index 8f8833cfacf54..2a2865f3dca59 100644 --- a/pkg/trace/pb/trace_gen.go +++ b/pkg/proto/pbgo/trace/trace_gen.go @@ -1,9 +1,4 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package pb +package trace // Code generated by github.com/tinylib/msgp DO NOT EDIT. diff --git a/pkg/trace/pb/trace_gen_safe_test.go b/pkg/proto/pbgo/trace/trace_gen_safe_test.go similarity index 99% rename from pkg/trace/pb/trace_gen_safe_test.go rename to pkg/proto/pbgo/trace/trace_gen_safe_test.go index 5d73c9db5498c..0420fcd73ce8a 100644 --- a/pkg/trace/pb/trace_gen_safe_test.go +++ b/pkg/proto/pbgo/trace/trace_gen_safe_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package pb +package trace import ( "math/rand" diff --git a/pkg/trace/pb/trace_gen_test.go b/pkg/proto/pbgo/trace/trace_gen_test.go similarity index 90% rename from pkg/trace/pb/trace_gen_test.go rename to pkg/proto/pbgo/trace/trace_gen_test.go index 7342e91dc066b..e16c7210f79eb 100644 --- a/pkg/trace/pb/trace_gen_test.go +++ b/pkg/proto/pbgo/trace/trace_gen_test.go @@ -1,9 +1,4 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). 
-// Copyright 2016-present Datadog, Inc. - -package pb +package trace // Code generated by github.com/tinylib/msgp DO NOT EDIT. diff --git a/pkg/trace/pb/trace_test.go b/pkg/proto/pbgo/trace/trace_test.go similarity index 99% rename from pkg/trace/pb/trace_test.go rename to pkg/proto/pbgo/trace/trace_test.go index 90e951ad40e46..e120e0e52c2ba 100644 --- a/pkg/trace/pb/trace_test.go +++ b/pkg/proto/pbgo/trace/trace_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package pb +package trace import ( "testing" diff --git a/pkg/trace/pb/tracer_payload.pb.go b/pkg/proto/pbgo/trace/tracer_payload.pb.go similarity index 51% rename from pkg/trace/pb/tracer_payload.pb.go rename to pkg/proto/pbgo/trace/tracer_payload.pb.go index 2f3bc339e0beb..3986e1a665671 100644 --- a/pkg/trace/pb/tracer_payload.pb.go +++ b/pkg/proto/pbgo/trace/tracer_payload.pb.go @@ -1,10 +1,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.28.1 -// protoc v3.21.5 -// source: tracer_payload.proto +// protoc v4.23.2 +// source: datadog/trace/tracer_payload.proto -package pb +package trace import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -46,7 +46,7 @@ type TraceChunk struct { func (x *TraceChunk) Reset() { *x = TraceChunk{} if protoimpl.UnsafeEnabled { - mi := &file_tracer_payload_proto_msgTypes[0] + mi := &file_datadog_trace_tracer_payload_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -59,7 +59,7 @@ func (x *TraceChunk) String() string { func (*TraceChunk) ProtoMessage() {} func (x *TraceChunk) ProtoReflect() protoreflect.Message { - mi := &file_tracer_payload_proto_msgTypes[0] + mi := &file_datadog_trace_tracer_payload_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -72,7 +72,7 @@ func (x *TraceChunk) ProtoReflect() protoreflect.Message { // Deprecated: Use TraceChunk.ProtoReflect.Descriptor instead. 
func (*TraceChunk) Descriptor() ([]byte, []int) { - return file_tracer_payload_proto_rawDescGZIP(), []int{0} + return file_datadog_trace_tracer_payload_proto_rawDescGZIP(), []int{0} } func (x *TraceChunk) GetPriority() int32 { @@ -151,7 +151,7 @@ type TracerPayload struct { func (x *TracerPayload) Reset() { *x = TracerPayload{} if protoimpl.UnsafeEnabled { - mi := &file_tracer_payload_proto_msgTypes[1] + mi := &file_datadog_trace_tracer_payload_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -164,7 +164,7 @@ func (x *TracerPayload) String() string { func (*TracerPayload) ProtoMessage() {} func (x *TracerPayload) ProtoReflect() protoreflect.Message { - mi := &file_tracer_payload_proto_msgTypes[1] + mi := &file_datadog_trace_tracer_payload_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -177,7 +177,7 @@ func (x *TracerPayload) ProtoReflect() protoreflect.Message { // Deprecated: Use TracerPayload.ProtoReflect.Descriptor instead. 
func (*TracerPayload) Descriptor() ([]byte, []int) { - return file_tracer_payload_proto_rawDescGZIP(), []int{1} + return file_datadog_trace_tracer_payload_proto_rawDescGZIP(), []int{1} } func (x *TracerPayload) GetContainerID() string { @@ -250,84 +250,87 @@ func (x *TracerPayload) GetAppVersion() string { return "" } -var File_tracer_payload_proto protoreflect.FileDescriptor - -var file_tracer_payload_proto_rawDesc = []byte{ - 0x0a, 0x14, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x0a, 0x73, 0x70, 0x61, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xeb, 0x01, 0x0a, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, - 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, - 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x1e, 0x0a, 0x05, 0x73, 0x70, 0x61, - 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x70, - 0x61, 0x6e, 0x52, 0x05, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x2c, 0x0a, 0x04, 0x74, 0x61, 0x67, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, - 0x63, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x64, 0x72, 0x6f, 0x70, 0x70, - 0x65, 0x64, 0x54, 0x72, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x64, - 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x54, 0x72, 0x61, 0x63, 0x65, 0x1a, 0x37, 0x0a, 0x09, 0x54, - 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 
0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa3, 0x03, 0x0a, 0x0d, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x22, 0x0a, 0x0c, 0x6c, 0x61, 0x6e, 0x67, - 0x75, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x0f, - 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, - 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44, 0x12, 0x26, 0x0a, 0x06, 0x63, 0x68, - 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, - 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68, 0x75, 0x6e, - 0x6b, 0x73, 0x12, 0x2f, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x74, - 0x61, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x76, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 
0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x44, 0x6f, 0x67, - 0x2f, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x70, - 0x6b, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, +var File_datadog_trace_tracer_payload_proto protoreflect.FileDescriptor + +var file_datadog_trace_tracer_payload_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x1a, 0x18, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2f, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x81, 0x02, + 0x0a, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, + 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, + 0x69, 0x6e, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, + 0x12, 0x29, 0x0a, 0x05, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, + 0x53, 0x70, 0x61, 0x6e, 0x52, 0x05, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x04, 0x74, + 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, + 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, + 0x74, 0x61, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x64, 0x72, 0x6f, 0x70, + 0x70, 0x65, 0x64, 0x54, 0x72, 0x61, 0x63, 0x65, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0xb9, 0x03, 0x0a, 0x0d, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x61, 0x79, 0x6c, + 0x6f, 0x61, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x22, 0x0a, 0x0c, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x6c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x61, 0x6e, 0x67, 
0x75, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44, 0x12, 0x31, 0x0a, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, + 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, + 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x68, 0x75, + 0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x3a, 0x0a, 0x04, 0x74, 0x61, + 0x67, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, + 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x76, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 
0x16, 0x5a, + 0x14, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - file_tracer_payload_proto_rawDescOnce sync.Once - file_tracer_payload_proto_rawDescData = file_tracer_payload_proto_rawDesc + file_datadog_trace_tracer_payload_proto_rawDescOnce sync.Once + file_datadog_trace_tracer_payload_proto_rawDescData = file_datadog_trace_tracer_payload_proto_rawDesc ) -func file_tracer_payload_proto_rawDescGZIP() []byte { - file_tracer_payload_proto_rawDescOnce.Do(func() { - file_tracer_payload_proto_rawDescData = protoimpl.X.CompressGZIP(file_tracer_payload_proto_rawDescData) +func file_datadog_trace_tracer_payload_proto_rawDescGZIP() []byte { + file_datadog_trace_tracer_payload_proto_rawDescOnce.Do(func() { + file_datadog_trace_tracer_payload_proto_rawDescData = protoimpl.X.CompressGZIP(file_datadog_trace_tracer_payload_proto_rawDescData) }) - return file_tracer_payload_proto_rawDescData + return file_datadog_trace_tracer_payload_proto_rawDescData } -var file_tracer_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_tracer_payload_proto_goTypes = []interface{}{ - (*TraceChunk)(nil), // 0: pb.TraceChunk - (*TracerPayload)(nil), // 1: pb.TracerPayload - nil, // 2: pb.TraceChunk.TagsEntry - nil, // 3: pb.TracerPayload.TagsEntry - (*Span)(nil), // 4: pb.Span +var file_datadog_trace_tracer_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_datadog_trace_tracer_payload_proto_goTypes = []interface{}{ + (*TraceChunk)(nil), // 0: datadog.trace.TraceChunk + (*TracerPayload)(nil), // 1: datadog.trace.TracerPayload + nil, // 2: datadog.trace.TraceChunk.TagsEntry + nil, // 3: datadog.trace.TracerPayload.TagsEntry + (*Span)(nil), // 4: datadog.trace.Span } -var file_tracer_payload_proto_depIdxs = []int32{ - 4, // 0: pb.TraceChunk.spans:type_name -> pb.Span - 2, // 1: pb.TraceChunk.tags:type_name -> pb.TraceChunk.TagsEntry 
- 0, // 2: pb.TracerPayload.chunks:type_name -> pb.TraceChunk - 3, // 3: pb.TracerPayload.tags:type_name -> pb.TracerPayload.TagsEntry +var file_datadog_trace_tracer_payload_proto_depIdxs = []int32{ + 4, // 0: datadog.trace.TraceChunk.spans:type_name -> datadog.trace.Span + 2, // 1: datadog.trace.TraceChunk.tags:type_name -> datadog.trace.TraceChunk.TagsEntry + 0, // 2: datadog.trace.TracerPayload.chunks:type_name -> datadog.trace.TraceChunk + 3, // 3: datadog.trace.TracerPayload.tags:type_name -> datadog.trace.TracerPayload.TagsEntry 4, // [4:4] is the sub-list for method output_type 4, // [4:4] is the sub-list for method input_type 4, // [4:4] is the sub-list for extension type_name @@ -335,14 +338,14 @@ var file_tracer_payload_proto_depIdxs = []int32{ 0, // [0:4] is the sub-list for field type_name } -func init() { file_tracer_payload_proto_init() } -func file_tracer_payload_proto_init() { - if File_tracer_payload_proto != nil { +func init() { file_datadog_trace_tracer_payload_proto_init() } +func file_datadog_trace_tracer_payload_proto_init() { + if File_datadog_trace_tracer_payload_proto != nil { return } - file_span_proto_init() + file_datadog_trace_span_proto_init() if !protoimpl.UnsafeEnabled { - file_tracer_payload_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_datadog_trace_tracer_payload_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*TraceChunk); i { case 0: return &v.state @@ -354,7 +357,7 @@ func file_tracer_payload_proto_init() { return nil } } - file_tracer_payload_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_datadog_trace_tracer_payload_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*TracerPayload); i { case 0: return &v.state @@ -371,18 +374,18 @@ func file_tracer_payload_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: 
file_tracer_payload_proto_rawDesc, + RawDescriptor: file_datadog_trace_tracer_payload_proto_rawDesc, NumEnums: 0, NumMessages: 4, NumExtensions: 0, NumServices: 0, }, - GoTypes: file_tracer_payload_proto_goTypes, - DependencyIndexes: file_tracer_payload_proto_depIdxs, - MessageInfos: file_tracer_payload_proto_msgTypes, + GoTypes: file_datadog_trace_tracer_payload_proto_goTypes, + DependencyIndexes: file_datadog_trace_tracer_payload_proto_depIdxs, + MessageInfos: file_datadog_trace_tracer_payload_proto_msgTypes, }.Build() - File_tracer_payload_proto = out.File - file_tracer_payload_proto_rawDesc = nil - file_tracer_payload_proto_goTypes = nil - file_tracer_payload_proto_depIdxs = nil + File_datadog_trace_tracer_payload_proto = out.File + file_datadog_trace_tracer_payload_proto_rawDesc = nil + file_datadog_trace_tracer_payload_proto_goTypes = nil + file_datadog_trace_tracer_payload_proto_depIdxs = nil } diff --git a/pkg/trace/pb/tracer_payload_gen.go b/pkg/proto/pbgo/trace/tracer_payload_gen.go similarity index 97% rename from pkg/trace/pb/tracer_payload_gen.go rename to pkg/proto/pbgo/trace/tracer_payload_gen.go index d45264b771768..cd2b3925038db 100644 --- a/pkg/trace/pb/tracer_payload_gen.go +++ b/pkg/proto/pbgo/trace/tracer_payload_gen.go @@ -1,14 +1,8 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package pb +package trace // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
import ( - // _ "github.com/gogo/protobuf/gogoproto" "github.com/tinylib/msgp/msgp" ) diff --git a/pkg/trace/pb/tracer_payload_gen_test.go b/pkg/proto/pbgo/trace/tracer_payload_gen_test.go similarity index 90% rename from pkg/trace/pb/tracer_payload_gen_test.go rename to pkg/proto/pbgo/trace/tracer_payload_gen_test.go index 25db0dd5f6d3b..a8bf4ed42e927 100644 --- a/pkg/trace/pb/tracer_payload_gen_test.go +++ b/pkg/proto/pbgo/trace/tracer_payload_gen_test.go @@ -1,9 +1,4 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package pb +package trace // Code generated by github.com/tinylib/msgp DO NOT EDIT. diff --git a/pkg/trace/pb/tracer_payload_utils.go b/pkg/proto/pbgo/trace/tracer_payload_utils.go similarity index 98% rename from pkg/trace/pb/tracer_payload_utils.go rename to pkg/proto/pbgo/trace/tracer_payload_utils.go index 04b9d76f1d13b..9f7fabba2852a 100644 --- a/pkg/trace/pb/tracer_payload_utils.go +++ b/pkg/proto/pbgo/trace/tracer_payload_utils.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package pb +package trace // traceChunkCopiedFields records the fields that are copied in ShallowCopy. // This should match exactly the fields set in (*TraceChunk).ShallowCopy. diff --git a/pkg/trace/pb/tracer_payload_vtproto.pb.go b/pkg/proto/pbgo/trace/tracer_payload_vtproto.pb.go similarity index 99% rename from pkg/trace/pb/tracer_payload_vtproto.pb.go rename to pkg/proto/pbgo/trace/tracer_payload_vtproto.pb.go index 6528e324b432e..b1544fa221ae2 100644 --- a/pkg/trace/pb/tracer_payload_vtproto.pb.go +++ b/pkg/proto/pbgo/trace/tracer_payload_vtproto.pb.go @@ -1,8 +1,8 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
// protoc-gen-go-vtproto version: v0.4.0 -// source: tracer_payload.proto +// source: datadog/trace/tracer_payload.proto -package pb +package trace import ( fmt "fmt" diff --git a/pkg/trace/pb/utils_test.go b/pkg/proto/pbgo/trace/utils_test.go similarity index 99% rename from pkg/trace/pb/utils_test.go rename to pkg/proto/pbgo/trace/utils_test.go index 43afe155a09f6..5aa6483bf0dbb 100644 --- a/pkg/trace/pb/utils_test.go +++ b/pkg/proto/pbgo/trace/utils_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package pb +package trace import ( fmt "fmt" diff --git a/pkg/security/events/self_tests.go b/pkg/security/events/self_tests.go deleted file mode 100644 index 23c8275c9d801..0000000000000 --- a/pkg/security/events/self_tests.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:generate go run github.com/mailru/easyjson/easyjson -gen_build_flags=-mod=mod -no_std_marshalers $GOFILE - -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
- -package events - -import ( - "github.com/DataDog/datadog-agent/pkg/security/secl/model" - "github.com/DataDog/datadog-agent/pkg/security/secl/rules" -) - -// SelfTestEvent is used to report a self test result -// easyjson:json -type SelfTestEvent struct { - CustomEventCommonFields - Success []string `json:"succeeded_tests"` - Fails []string `json:"failed_tests"` -} - -// NewSelfTestEvent returns the rule and the result of the self test -func NewSelfTestEvent(success []string, fails []string) (*rules.Rule, *CustomEvent) { - evt := SelfTestEvent{ - Success: success, - Fails: fails, - } - evt.FillCustomEventCommonFields() - - return NewCustomRule(SelfTestRuleID, SelfTestRuleDesc), - NewCustomEvent(model.CustomSelfTestEventType, evt) -} diff --git a/pkg/security/module/cws.go b/pkg/security/module/cws.go index 8546774f7fe4f..aadf3b65601b2 100644 --- a/pkg/security/module/cws.go +++ b/pkg/security/module/cws.go @@ -26,6 +26,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" "github.com/DataDog/datadog-agent/pkg/security/seclog" + "github.com/DataDog/datadog-agent/pkg/security/serializers" ) // CWSConsumer represents the system-probe module for the runtime security agent @@ -156,7 +157,7 @@ func (c *CWSConsumer) RunSelfTest(sendLoadedReport bool) (bool, error) { return false, err } - success, fails, err := c.selfTester.RunSelfTest() + success, fails, testEvents, err := c.selfTester.RunSelfTest() if err != nil { return true, err } @@ -165,14 +166,14 @@ func (c *CWSConsumer) RunSelfTest(sendLoadedReport bool) (bool, error) { // send the report if c.config.SelfTestSendReport { - ReportSelfTest(c.eventSender, c.statsdClient, success, fails) + ReportSelfTest(c.eventSender, c.statsdClient, success, fails, testEvents) } return true, nil } // ReportSelfTest reports to Datadog that a self test was performed -func ReportSelfTest(sender events.EventSender, statsdClient 
statsd.ClientInterface, success []string, fails []string) { +func ReportSelfTest(sender events.EventSender, statsdClient statsd.ClientInterface, success []string, fails []string, testEvents map[string]*serializers.EventSerializer) { // send metric with number of success and fails tags := []string{ fmt.Sprintf("success:%d", len(success)), @@ -183,7 +184,7 @@ func ReportSelfTest(sender events.EventSender, statsdClient statsd.ClientInterfa } // send the custom event with the list of succeed and failed self tests - rule, event := events.NewSelfTestEvent(success, fails) + rule, event := selftests.NewSelfTestEvent(success, fails, testEvents) sender.SendEvent(rule, event, nil, "") } diff --git a/pkg/security/probe/selftests/self_tests.go b/pkg/security/probe/selftests/self_tests.go new file mode 100644 index 0000000000000..e7cb52ff22efb --- /dev/null +++ b/pkg/security/probe/selftests/self_tests.go @@ -0,0 +1,39 @@ +//go:generate go run github.com/mailru/easyjson/easyjson -gen_build_flags=-mod=mod -no_std_marshalers -build_tags linux $GOFILE + +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux + +package selftests + +import ( + "github.com/DataDog/datadog-agent/pkg/security/events" + "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" + "github.com/DataDog/datadog-agent/pkg/security/serializers" +) + +// SelfTestEvent is used to report a self test result +// easyjson:json +type SelfTestEvent struct { + events.CustomEventCommonFields + Success []string `json:"succeeded_tests"` + Fails []string `json:"failed_tests"` + TestEvents map[string]*serializers.EventSerializer `json:"test_events"` +} + +// NewSelfTestEvent returns the rule and the result of the self test +func NewSelfTestEvent(success []string, fails []string, testEvents map[string]*serializers.EventSerializer) (*rules.Rule, *events.CustomEvent) { + evt := SelfTestEvent{ + Success: success, + Fails: fails, + TestEvents: testEvents, + } + evt.FillCustomEventCommonFields() + + return events.NewCustomRule(events.SelfTestRuleID, events.SelfTestRuleDesc), + events.NewCustomEvent(model.CustomSelfTestEventType, evt) +} diff --git a/pkg/security/events/self_tests_easyjson.go b/pkg/security/probe/selftests/self_tests_easyjson.go similarity index 64% rename from pkg/security/events/self_tests_easyjson.go rename to pkg/security/probe/selftests/self_tests_easyjson.go index 3103a05e700f3..ea2204d8bc5b6 100644 --- a/pkg/security/events/self_tests_easyjson.go +++ b/pkg/security/probe/selftests/self_tests_easyjson.go @@ -1,9 +1,13 @@ +//go:build linux +// +build linux + // Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT. 
-package events +package selftests import ( json "encoding/json" + serializers "github.com/DataDog/datadog-agent/pkg/security/serializers" easyjson "github.com/mailru/easyjson" jlexer "github.com/mailru/easyjson/jlexer" jwriter "github.com/mailru/easyjson/jwriter" @@ -17,7 +21,7 @@ var ( _ easyjson.Marshaler ) -func easyjsonF0077844DecodeGithubComDataDogDatadogAgentPkgSecurityEvents(in *jlexer.Lexer, out *SelfTestEvent) { +func easyjsonF0077844DecodeGithubComDataDogDatadogAgentPkgSecurityProbeSelftests(in *jlexer.Lexer, out *SelfTestEvent) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -82,6 +86,30 @@ func easyjsonF0077844DecodeGithubComDataDogDatadogAgentPkgSecurityEvents(in *jle } in.Delim(']') } + case "test_events": + if in.IsNull() { + in.Skip() + } else { + in.Delim('{') + out.TestEvents = make(map[string]*serializers.EventSerializer) + for !in.IsDelim('}') { + key := string(in.String()) + in.WantColon() + var v3 *serializers.EventSerializer + if in.IsNull() { + in.Skip() + v3 = nil + } else { + if v3 == nil { + v3 = new(serializers.EventSerializer) + } + (*v3).UnmarshalEasyJSON(in) + } + (out.TestEvents)[key] = v3 + in.WantComma() + } + in.Delim('}') + } case "date": if data := in.Raw(); in.Ok() { in.AddError((out.Timestamp).UnmarshalJSON(data)) @@ -98,7 +126,7 @@ func easyjsonF0077844DecodeGithubComDataDogDatadogAgentPkgSecurityEvents(in *jle in.Consumed() } } -func easyjsonF0077844EncodeGithubComDataDogDatadogAgentPkgSecurityEvents(out *jwriter.Writer, in SelfTestEvent) { +func easyjsonF0077844EncodeGithubComDataDogDatadogAgentPkgSecurityProbeSelftests(out *jwriter.Writer, in SelfTestEvent) { out.RawByte('{') first := true _ = first @@ -109,11 +137,11 @@ func easyjsonF0077844EncodeGithubComDataDogDatadogAgentPkgSecurityEvents(out *jw out.RawString("null") } else { out.RawByte('[') - for v3, v4 := range in.Success { - if v3 > 0 { + for v4, v5 := range in.Success { + if v4 > 0 { out.RawByte(',') } - out.String(string(v4)) + 
out.String(string(v5)) } out.RawByte(']') } @@ -125,15 +153,40 @@ func easyjsonF0077844EncodeGithubComDataDogDatadogAgentPkgSecurityEvents(out *jw out.RawString("null") } else { out.RawByte('[') - for v5, v6 := range in.Fails { - if v5 > 0 { + for v6, v7 := range in.Fails { + if v6 > 0 { out.RawByte(',') } - out.String(string(v6)) + out.String(string(v7)) } out.RawByte(']') } } + { + const prefix string = ",\"test_events\":" + out.RawString(prefix) + if in.TestEvents == nil && (out.Flags&jwriter.NilMapAsEmpty) == 0 { + out.RawString(`null`) + } else { + out.RawByte('{') + v8First := true + for v8Name, v8Value := range in.TestEvents { + if v8First { + v8First = false + } else { + out.RawByte(',') + } + out.String(string(v8Name)) + out.RawByte(':') + if v8Value == nil { + out.RawString("null") + } else { + (*v8Value).MarshalEasyJSON(out) + } + } + out.RawByte('}') + } + } { const prefix string = ",\"date\":" out.RawString(prefix) @@ -149,10 +202,10 @@ func easyjsonF0077844EncodeGithubComDataDogDatadogAgentPkgSecurityEvents(out *jw // MarshalEasyJSON supports easyjson.Marshaler interface func (v SelfTestEvent) MarshalEasyJSON(w *jwriter.Writer) { - easyjsonF0077844EncodeGithubComDataDogDatadogAgentPkgSecurityEvents(w, v) + easyjsonF0077844EncodeGithubComDataDogDatadogAgentPkgSecurityProbeSelftests(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *SelfTestEvent) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonF0077844DecodeGithubComDataDogDatadogAgentPkgSecurityEvents(l, v) + easyjsonF0077844DecodeGithubComDataDogDatadogAgentPkgSecurityProbeSelftests(l, v) } diff --git a/pkg/security/probe/selftests/tester.go b/pkg/security/probe/selftests/tester.go index 4a42fd0f1df41..85687c4c82b63 100644 --- a/pkg/security/probe/selftests/tester.go +++ b/pkg/security/probe/selftests/tester.go @@ -127,9 +127,9 @@ func (t *SelfTester) createTargetFile() error { } // RunSelfTest runs the self test and return the result -func (t *SelfTester) RunSelfTest() 
([]string, []string, error) { +func (t *SelfTester) RunSelfTest() ([]string, []string, map[string]*serializers.EventSerializer, error) { if err := t.BeginWaitingForEvent(); err != nil { - return nil, nil, fmt.Errorf("failed to run self test: %w", err) + return nil, nil, nil, fmt.Errorf("failed to run self test: %w", err) } defer t.EndWaitingForEvent() @@ -138,6 +138,8 @@ func (t *SelfTester) RunSelfTest() ([]string, []string, error) { // launch the self tests var success []string var fails []string + testEvents := make(map[string]*serializers.EventSerializer) + for _, selftest := range FileSelfTests { def := selftest.GetRuleDefinition(t.targetFilePath) @@ -147,8 +149,9 @@ func (t *SelfTester) RunSelfTest() ([]string, []string, error) { log.Errorf("Self test failed: %s", def.ID) continue } - - if err = t.expectEvent(predicate); err != nil { + event, err2 := t.expectEvent(predicate) + testEvents[def.ID] = event + if err2 != nil { fails = append(fails, def.ID) log.Errorf("Self test failed: %s", def.ID) } else { @@ -160,7 +163,7 @@ func (t *SelfTester) RunSelfTest() ([]string, []string, error) { t.success = success t.fails = fails - return success, fails, nil + return success, fails, testEvents, nil } // Start starts the self tester policy provider @@ -192,6 +195,7 @@ func (t *SelfTester) EndWaitingForEvent() { type selfTestEvent struct { Type string Filepath string + Event *serializers.EventSerializer } // IsExpectedEvent sends an event to the tester @@ -210,6 +214,7 @@ func (t *SelfTester) IsExpectedEvent(rule *rules.Rule, event eval.Event, p *prob selfTestEvent := selfTestEvent{ Type: event.GetType(), Filepath: s.FileEventSerializer.Path, + Event: s, } t.eventChan <- selfTestEvent return true @@ -217,16 +222,16 @@ func (t *SelfTester) IsExpectedEvent(rule *rules.Rule, event eval.Event, p *prob return false } -func (t *SelfTester) expectEvent(predicate func(selfTestEvent) bool) error { +func (t *SelfTester) expectEvent(predicate func(selfTestEvent) bool) 
(*serializers.EventSerializer, error) { timer := time.After(3 * time.Second) for { select { case event := <-t.eventChan: if predicate(event) { - return nil + return event.Event, nil } case <-timer: - return errors.New("failed to receive expected event") + return nil, errors.New("failed to receive expected event") } } } diff --git a/pkg/security/security_profile/activity_tree/paths_reducer.go b/pkg/security/security_profile/activity_tree/paths_reducer.go index 911fe4428bb71..0b5c69a57ed6b 100644 --- a/pkg/security/security_profile/activity_tree/paths_reducer.go +++ b/pkg/security/security_profile/activity_tree/paths_reducer.go @@ -73,7 +73,7 @@ func (r *PathsReducer) ReducePath(path string, fileEvent *model.FileEvent, node processNode: node, } - allMatches := r.patterns.FindAllStringSubmatchIndex(path, -1) + allMatches := r.patterns.FindAllStringSubmatchIndex(ctx.path, -1) for matchSet := len(allMatches) - 1; matchSet >= 0; matchSet-- { matches := allMatches[matchSet] for _, i := range r.callbackIndexes { diff --git a/pkg/security/security_profile/activity_tree/process_node.go b/pkg/security/security_profile/activity_tree/process_node.go index 19ec9ef823f79..772eea3903ca6 100644 --- a/pkg/security/security_profile/activity_tree/process_node.go +++ b/pkg/security/security_profile/activity_tree/process_node.go @@ -186,7 +186,7 @@ func (pn *ProcessNode) InsertFileEvent(fileEvent *model.FileEvent, event *model. if !dryRun { // create new child - if len(fileEvent.PathnameStr) <= nextParentIndex+1 { + if len(filePath) <= nextParentIndex+1 { // this is the last child, add the fileEvent context at the leaf of the files tree. node := NewFileNode(fileEvent, event, parent, generationType, filePath, resolvers) node.MatchedRules = model.AppendMatchedRule(node.MatchedRules, event.Rules) @@ -196,7 +196,7 @@ func (pn *ProcessNode) InsertFileEvent(fileEvent *model.FileEvent, event *model. // This is an intermediary node in the branch that leads to the leaf we want to add. 
Create a node without the // fileEvent context. newChild := NewFileNode(nil, nil, parent, generationType, filePath, resolvers) - newChild.InsertFileEvent(fileEvent, event, fileEvent.PathnameStr[nextParentIndex:], generationType, stats, dryRun, filePath, resolvers) + newChild.InsertFileEvent(fileEvent, event, filePath[nextParentIndex:], generationType, stats, dryRun, filePath, resolvers) stats.FileNodes++ pn.Files[parent] = newChild } diff --git a/pkg/security/tests/schemas/self_test_schema.json b/pkg/security/tests/schemas/self_test_schema.json new file mode 100644 index 0000000000000..506b8395c5fa0 --- /dev/null +++ b/pkg/security/tests/schemas/self_test_schema.json @@ -0,0 +1,84 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "self_test.json", + "type": "object", + "properties": { + "agent": { + "type": "object", + "properties": { + "rule_id": { + "type": "string" + }, + "version": { + "type": "string" + } + }, + "required": ["rule_id", "version"] + }, + "date": { + "$ref": "/schemas/datetime.json" + }, + "hostname": { + "type": "string" + }, + "service": { + "type": "string" + }, + "status": { + "type": "string" + }, + "succeeded_tests": { + "type": "array", + "items": { + "type": "string" + } + }, + "failed_tests": { + "type": "array", + "items": { + "type": "string" + } + }, + "timestamp": { + "type": "number" + }, + "title": { + "type": "string" + }, + "test_events": { + "type": "object", + "properties": { + "datadog_agent_cws_self_test_rule_open": { + "$ref": "/schemas/open.json" + }, + "datadog_agent_cws_self_test_rule_chmod": { + "$ref": "/schemas/chmod.json" + }, + "datadog_agent_cws_self_test_rule_chown": { + "$ref": "/schemas/chown.json" + } + } + } + }, + "required": [ + "agent", + "date", + "hostname", + "service", + "status", + "timestamp", + "title", + "test_events" + ], + "oneOf": [ + { + "required": ["succeeded_tests"] + }, + { + "required": ["failed_tests"] + }, + { + "required": ["succeeded_tests", "failed_tests"] + } 
+ ] +} diff --git a/pkg/security/utils/hostname.go b/pkg/security/utils/hostname.go index bc6e73d1005a7..316a9a9121e6b 100644 --- a/pkg/security/utils/hostname.go +++ b/pkg/security/utils/hostname.go @@ -9,7 +9,7 @@ import ( "context" "time" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/grpc" "github.com/DataDog/datadog-agent/pkg/util/log" ) diff --git a/pkg/serverless/appsec/httpsec/proxy.go b/pkg/serverless/appsec/httpsec/proxy.go index 2ea908c15ae8a..2bf8e15fe1b98 100644 --- a/pkg/serverless/appsec/httpsec/proxy.go +++ b/pkg/serverless/appsec/httpsec/proxy.go @@ -9,9 +9,9 @@ import ( "bytes" "encoding/json" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serverless/invocationlifecycle" "github.com/DataDog/datadog-agent/pkg/serverless/trigger" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/util/log" diff --git a/pkg/serverless/daemon/daemon_test.go b/pkg/serverless/daemon/daemon_test.go index 18aa0e03c89c9..b7dae8b6fff50 100644 --- a/pkg/serverless/daemon/daemon_test.go +++ b/pkg/serverless/daemon/daemon_test.go @@ -16,9 +16,9 @@ import ( "github.com/stretchr/testify/assert" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serverless/random" "github.com/DataDog/datadog-agent/pkg/serverless/trace" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/testutil" ) diff --git a/pkg/serverless/invocationlifecycle/init.go b/pkg/serverless/invocationlifecycle/init.go index 891d373d97496..6c8d5ab5b7d7b 100644 --- a/pkg/serverless/invocationlifecycle/init.go +++ b/pkg/serverless/invocationlifecycle/init.go @@ -11,12 +11,12 @@ import ( "strings" "time" + pb 
"github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan" "github.com/aws/aws-lambda-go/events" "github.com/DataDog/datadog-agent/pkg/serverless/trigger" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/util/log" ) diff --git a/pkg/serverless/invocationlifecycle/lifecycle.go b/pkg/serverless/invocationlifecycle/lifecycle.go index d704621aca451..f24de57adea36 100644 --- a/pkg/serverless/invocationlifecycle/lifecycle.go +++ b/pkg/serverless/invocationlifecycle/lifecycle.go @@ -15,12 +15,12 @@ import ( "github.com/aws/aws-lambda-go/events" "github.com/DataDog/datadog-agent/pkg/aggregator" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" serverlessLog "github.com/DataDog/datadog-agent/pkg/serverless/logs" serverlessMetrics "github.com/DataDog/datadog-agent/pkg/serverless/metrics" "github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan" "github.com/DataDog/datadog-agent/pkg/serverless/trigger" "github.com/DataDog/datadog-agent/pkg/trace/api" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/util/log" ) diff --git a/pkg/serverless/invocationlifecycle/lifecycle_test.go b/pkg/serverless/invocationlifecycle/lifecycle_test.go index f2cb4c5947166..fdfc3abebe0bc 100644 --- a/pkg/serverless/invocationlifecycle/lifecycle_test.go +++ b/pkg/serverless/invocationlifecycle/lifecycle_test.go @@ -14,10 +14,10 @@ import ( "github.com/DataDog/datadog-agent/comp/core/log" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/metrics" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serverless/logs" "github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan" "github.com/DataDog/datadog-agent/pkg/trace/api" - 
"github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/util/fxutil" diff --git a/pkg/serverless/invocationlifecycle/trace.go b/pkg/serverless/invocationlifecycle/trace.go index 717b69e2c455b..b0f3c2a34de98 100644 --- a/pkg/serverless/invocationlifecycle/trace.go +++ b/pkg/serverless/invocationlifecycle/trace.go @@ -14,10 +14,10 @@ import ( "time" "github.com/DataDog/datadog-agent/pkg/config" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan" "github.com/DataDog/datadog-agent/pkg/trace/api" "github.com/DataDog/datadog-agent/pkg/trace/info" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/util/log" ) diff --git a/pkg/serverless/invocationlifecycle/trace_test.go b/pkg/serverless/invocationlifecycle/trace_test.go index b2b479290ac31..af5a4f6f1866d 100644 --- a/pkg/serverless/invocationlifecycle/trace_test.go +++ b/pkg/serverless/invocationlifecycle/trace_test.go @@ -12,9 +12,9 @@ import ( "github.com/stretchr/testify/assert" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan" "github.com/DataDog/datadog-agent/pkg/trace/api" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/sampler" ) diff --git a/pkg/serverless/otlp/otlp_test.go b/pkg/serverless/otlp/otlp_test.go index 7f6e5c7d7586f..396dcacf29e64 100644 --- a/pkg/serverless/otlp/otlp_test.go +++ b/pkg/serverless/otlp/otlp_test.go @@ -21,9 +21,9 @@ import ( "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" sdktrace "go.opentelemetry.io/otel/sdk/trace" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serverless/metrics" 
"github.com/DataDog/datadog-agent/pkg/serverless/trace" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/testutil" "github.com/stretchr/testify/assert" ) diff --git a/pkg/serverless/trace/cold_start_span_creator.go b/pkg/serverless/trace/cold_start_span_creator.go index e85569755a4b4..404fbcdbce974 100644 --- a/pkg/serverless/trace/cold_start_span_creator.go +++ b/pkg/serverless/trace/cold_start_span_creator.go @@ -10,10 +10,10 @@ import ( "sync" "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" serverlessLog "github.com/DataDog/datadog-agent/pkg/serverless/logs" "github.com/DataDog/datadog-agent/pkg/trace/api" "github.com/DataDog/datadog-agent/pkg/trace/info" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/util/log" ) diff --git a/pkg/serverless/trace/cold_start_span_creator_test.go b/pkg/serverless/trace/cold_start_span_creator_test.go index 8ff48408635cf..0518087c5096b 100644 --- a/pkg/serverless/trace/cold_start_span_creator_test.go +++ b/pkg/serverless/trace/cold_start_span_creator_test.go @@ -14,12 +14,12 @@ import ( "github.com/stretchr/testify/assert" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serverless/random" serverlessLog "github.com/DataDog/datadog-agent/pkg/serverless/logs" "github.com/DataDog/datadog-agent/pkg/trace/agent" "github.com/DataDog/datadog-agent/pkg/trace/config" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" ) diff --git a/pkg/serverless/trace/inferredspan/inferred_span.go b/pkg/serverless/trace/inferredspan/inferred_span.go index c23bd78d85386..cd0b82c16c499 100644 --- a/pkg/serverless/trace/inferredspan/inferred_span.go +++ b/pkg/serverless/trace/inferredspan/inferred_span.go @@ -14,11 +14,11 @@ import ( "time" "github.com/DataDog/datadog-agent/pkg/config" + pb 
"github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serverless/random" "github.com/DataDog/datadog-agent/pkg/serverless/tags" "github.com/DataDog/datadog-agent/pkg/trace/api" "github.com/DataDog/datadog-agent/pkg/trace/info" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/util/log" ) diff --git a/pkg/serverless/trace/inferredspan/inferred_span_test.go b/pkg/serverless/trace/inferredspan/inferred_span_test.go index 9f29123ce8a0a..422411e28cdfb 100644 --- a/pkg/serverless/trace/inferredspan/inferred_span_test.go +++ b/pkg/serverless/trace/inferredspan/inferred_span_test.go @@ -12,8 +12,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/DataDog/datadog-agent/pkg/config" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/api" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/sampler" ) diff --git a/pkg/serverless/trace/inferredspan/span_enrichment_test.go b/pkg/serverless/trace/inferredspan/span_enrichment_test.go index 0fe107eeeeef2..7993aeb6f4cd0 100644 --- a/pkg/serverless/trace/inferredspan/span_enrichment_test.go +++ b/pkg/serverless/trace/inferredspan/span_enrichment_test.go @@ -16,7 +16,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" ) const ( diff --git a/pkg/serverless/trace/span_modifer_test.go b/pkg/serverless/trace/span_modifer_test.go index a1defc6e47027..55214fdeb85a6 100644 --- a/pkg/serverless/trace/span_modifer_test.go +++ b/pkg/serverless/trace/span_modifer_test.go @@ -14,11 +14,11 @@ import ( "github.com/stretchr/testify/assert" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" 
"github.com/DataDog/datadog-agent/pkg/trace/agent" "github.com/DataDog/datadog-agent/pkg/trace/api" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/info" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" "github.com/DataDog/datadog-agent/pkg/trace/testutil" ) diff --git a/pkg/serverless/trace/span_modifier.go b/pkg/serverless/trace/span_modifier.go index fe97d1c7eba9a..9c48f6fc2ab85 100644 --- a/pkg/serverless/trace/span_modifier.go +++ b/pkg/serverless/trace/span_modifier.go @@ -6,8 +6,8 @@ package trace import ( + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" "github.com/DataDog/datadog-agent/pkg/util/log" ) diff --git a/pkg/serverless/trace/trace.go b/pkg/serverless/trace/trace.go index 71cf34eda0fd2..74f6a2430727f 100644 --- a/pkg/serverless/trace/trace.go +++ b/pkg/serverless/trace/trace.go @@ -12,9 +12,9 @@ import ( tracecmdconfig "github.com/DataDog/datadog-agent/cmd/trace-agent/config" ddConfig "github.com/DataDog/datadog-agent/pkg/config" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/agent" "github.com/DataDog/datadog-agent/pkg/trace/config" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" "github.com/DataDog/datadog-agent/pkg/util/log" ) diff --git a/pkg/serverless/trace/trace_test.go b/pkg/serverless/trace/trace_test.go index 88edc69f8d270..74311d4e60708 100644 --- a/pkg/serverless/trace/trace_test.go +++ b/pkg/serverless/trace/trace_test.go @@ -15,9 +15,9 @@ import ( "github.com/stretchr/testify/assert" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" 
"github.com/DataDog/datadog-agent/pkg/serverless/random" "github.com/DataDog/datadog-agent/pkg/trace/config" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/testutil" ) diff --git a/pkg/tagger/remote/tagger.go b/pkg/tagger/remote/tagger.go index e50859916a30e..1bff561d7d97e 100644 --- a/pkg/tagger/remote/tagger.go +++ b/pkg/tagger/remote/tagger.go @@ -22,7 +22,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/api/security" "github.com/DataDog/datadog-agent/pkg/config" - pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" tagger_api "github.com/DataDog/datadog-agent/pkg/tagger/api" "github.com/DataDog/datadog-agent/pkg/tagger/collectors" "github.com/DataDog/datadog-agent/pkg/tagger/telemetry" diff --git a/pkg/tagger/replay/tagger.go b/pkg/tagger/replay/tagger.go index f58ecba96652a..d5538d7ac4d77 100644 --- a/pkg/tagger/replay/tagger.go +++ b/pkg/tagger/replay/tagger.go @@ -9,7 +9,7 @@ import ( "context" "time" - pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/status/health" tagger_api "github.com/DataDog/datadog-agent/pkg/tagger/api" "github.com/DataDog/datadog-agent/pkg/tagger/collectors" diff --git a/pkg/tagger/server/server.go b/pkg/tagger/server/server.go index 79b8847cc41b0..656a600915f04 100644 --- a/pkg/tagger/server/server.go +++ b/pkg/tagger/server/server.go @@ -13,7 +13,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/tagger" "github.com/DataDog/datadog-agent/pkg/tagger/telemetry" "github.com/DataDog/datadog-agent/pkg/util/grpc" diff --git a/pkg/trace/agent/agent.go b/pkg/trace/agent/agent.go index 
87da431d3eb2d..e94c910144130 100644 --- a/pkg/trace/agent/agent.go +++ b/pkg/trace/agent/agent.go @@ -10,6 +10,7 @@ import ( "runtime" "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/remoteconfighandler" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" @@ -22,7 +23,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/trace/log" "github.com/DataDog/datadog-agent/pkg/trace/metrics" "github.com/DataDog/datadog-agent/pkg/trace/metrics/timing" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/trace/stats" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" @@ -87,7 +87,7 @@ type Agent struct { func NewAgent(ctx context.Context, conf *config.AgentConfig, telemetryCollector telemetry.TelemetryCollector) *Agent { dynConf := sampler.NewDynamicConfig() in := make(chan *api.Payload, 1000) - statsChan := make(chan pb.StatsPayload, 100) + statsChan := make(chan *pb.StatsPayload, 100) oconf := conf.Obfuscation.Export(conf) if oconf.Statsd == nil { oconf.Statsd = metrics.Client @@ -418,7 +418,7 @@ func (a *Agent) discardSpans(p *api.Payload) { } } -func (a *Agent) processStats(in pb.ClientStatsPayload, lang, tracerVersion string) pb.ClientStatsPayload { +func (a *Agent) processStats(in *pb.ClientStatsPayload, lang, tracerVersion string) *pb.ClientStatsPayload { enableContainers := a.conf.HasFeature("enable_cid_stats") || (a.conf.FargateOrchestrator != config.OrchestratorUnknown) if !enableContainers || a.conf.HasFeature("disable_cid_stats") { // only allow the ContainerID stats dimension if we're in a Fargate instance or it's @@ -439,12 +439,12 @@ func (a *Agent) processStats(in pb.ClientStatsPayload, lang, tracerVersion strin for i, group := range in.Stats { n := 0 for _, b := range group.Stats { - a.normalizeStatsGroup(&b, lang) - if !a.Blacklister.AllowsStat(&b) { + 
a.normalizeStatsGroup(b, lang) + if !a.Blacklister.AllowsStat(b) { continue } - a.obfuscateStatsGroup(&b) - a.Replacer.ReplaceStatsGroup(&b) + a.obfuscateStatsGroup(b) + a.Replacer.ReplaceStatsGroup(b) group.Stats[n] = b n++ } @@ -454,7 +454,7 @@ func (a *Agent) processStats(in pb.ClientStatsPayload, lang, tracerVersion strin return in } -func mergeDuplicates(s pb.ClientStatsBucket) { +func mergeDuplicates(s *pb.ClientStatsBucket) { indexes := make(map[stats.Aggregation]int, len(s.Stats)) for i, g := range s.Stats { a := stats.NewAggregationFromGroup(g) @@ -472,7 +472,7 @@ func mergeDuplicates(s pb.ClientStatsBucket) { } // ProcessStats processes incoming client stats in from the given tracer. -func (a *Agent) ProcessStats(in pb.ClientStatsPayload, lang, tracerVersion string) { +func (a *Agent) ProcessStats(in *pb.ClientStatsPayload, lang, tracerVersion string) { a.ClientStatsAggregator.In <- a.processStats(in, lang, tracerVersion) } diff --git a/pkg/trace/agent/agent_test.go b/pkg/trace/agent/agent_test.go index a7b097e3dd4e2..8e15cbf831e73 100644 --- a/pkg/trace/agent/agent_test.go +++ b/pkg/trace/agent/agent_test.go @@ -23,13 +23,13 @@ import ( "time" "github.com/DataDog/datadog-agent/pkg/obfuscate" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/api" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/event" "github.com/DataDog/datadog-agent/pkg/trace/filters" "github.com/DataDog/datadog-agent/pkg/trace/info" "github.com/DataDog/datadog-agent/pkg/trace/log" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/trace/stats" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" @@ -1079,7 +1079,7 @@ func TestSample(t *testing.T) { func TestPartialSamplingFree(t *testing.T) { cfg := &config.AgentConfig{RareSamplerEnabled: false, 
BucketInterval: 10 * time.Second} - statsChan := make(chan pb.StatsPayload, 100) + statsChan := make(chan *pb.StatsPayload, 100) writerChan := make(chan *writer.SampledChunks, 100) dynConf := sampler.NewDynamicConfig() in := make(chan *api.Payload, 1000) @@ -1517,21 +1517,21 @@ func tracesFromFile(file string) (raw []byte, count int, err error) { func TestConvertStats(t *testing.T) { testCases := []struct { - in pb.ClientStatsPayload + in *pb.ClientStatsPayload lang string tracerVersion string - out pb.ClientStatsPayload + out *pb.ClientStatsPayload }{ { - in: pb.ClientStatsPayload{ + in: &pb.ClientStatsPayload{ Hostname: "tracer_hots", Env: "tracer_env", Version: "code_version", - Stats: []pb.ClientStatsBucket{ + Stats: []*pb.ClientStatsBucket{ { Start: 1, Duration: 2, - Stats: []pb.ClientGroupedStats{ + Stats: []*pb.ClientGroupedStats{ { Service: "service", Name: "name------", @@ -1559,17 +1559,17 @@ func TestConvertStats(t *testing.T) { }, lang: "java", tracerVersion: "v1", - out: pb.ClientStatsPayload{ + out: &pb.ClientStatsPayload{ Hostname: "tracer_hots", Env: "tracer_env", Version: "code_version", Lang: "java", TracerVersion: "v1", - Stats: []pb.ClientStatsBucket{ + Stats: []*pb.ClientStatsBucket{ { Start: 1, Duration: 2, - Stats: []pb.ClientGroupedStats{ + Stats: []*pb.ClientGroupedStats{ { Service: "service", Name: "name", @@ -1603,8 +1603,8 @@ func TestConvertStats(t *testing.T) { } func TestMergeDuplicates(t *testing.T) { - in := pb.ClientStatsBucket{ - Stats: []pb.ClientGroupedStats{ + in := &pb.ClientStatsBucket{ + Stats: []*pb.ClientGroupedStats{ { Service: "s1", Resource: "r1", @@ -1643,8 +1643,8 @@ func TestMergeDuplicates(t *testing.T) { }, }, } - expected := pb.ClientStatsBucket{ - Stats: []pb.ClientGroupedStats{ + expected := &pb.ClientStatsBucket{ + Stats: []*pb.ClientGroupedStats{ { Service: "s1", Resource: "r1", diff --git a/pkg/trace/agent/fuzz_test.go b/pkg/trace/agent/fuzz_test.go index 59c110803ec1f..6615110611d32 100644 --- 
a/pkg/trace/agent/fuzz_test.go +++ b/pkg/trace/agent/fuzz_test.go @@ -11,19 +11,19 @@ import ( "reflect" "testing" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/testutil" ) func FuzzProcessStats(f *testing.F) { agent, cancel := agentWithDefaults() defer cancel() - encode := func(pbStats pb.ClientStatsPayload) ([]byte, error) { - return pbStats.Marshal() + encode := func(pbStats *pb.ClientStatsPayload) ([]byte, error) { + return pbStats.MarshalVT() } - decode := func(stats []byte) (pb.ClientStatsPayload, error) { - var payload pb.ClientStatsPayload - err := payload.Unmarshal(stats) + decode := func(stats []byte) (*pb.ClientStatsPayload, error) { + payload := &pb.ClientStatsPayload{} + err := payload.UnmarshalVT(stats) return payload, err } pbStats := testutil.StatsPayloadSample() diff --git a/pkg/trace/agent/normalizer.go b/pkg/trace/agent/normalizer.go index 23a068bbae611..88b5a77de9d17 100644 --- a/pkg/trace/agent/normalizer.go +++ b/pkg/trace/agent/normalizer.go @@ -12,9 +12,9 @@ import ( "strconv" "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/info" "github.com/DataDog/datadog-agent/pkg/trace/log" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) diff --git a/pkg/trace/agent/normalizer_test.go b/pkg/trace/agent/normalizer_test.go index 2a419e17a14a4..00d24b1116e3d 100644 --- a/pkg/trace/agent/normalizer_test.go +++ b/pkg/trace/agent/normalizer_test.go @@ -16,9 +16,9 @@ import ( "github.com/stretchr/testify/assert" "go.uber.org/atomic" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/info" - 
"github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/trace/testutil" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" diff --git a/pkg/trace/agent/obfuscate.go b/pkg/trace/agent/obfuscate.go index da4ae3238c4af..622f5f9fa49f3 100644 --- a/pkg/trace/agent/obfuscate.go +++ b/pkg/trace/agent/obfuscate.go @@ -9,9 +9,9 @@ import ( "strings" "github.com/DataDog/datadog-agent/pkg/obfuscate" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/log" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) diff --git a/pkg/trace/agent/obfuscate_test.go b/pkg/trace/agent/obfuscate_test.go index 6605c17d55c20..621245a5f11f4 100644 --- a/pkg/trace/agent/obfuscate_test.go +++ b/pkg/trace/agent/obfuscate_test.go @@ -9,8 +9,8 @@ import ( "context" "testing" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" "github.com/stretchr/testify/assert" diff --git a/pkg/trace/agent/truncator.go b/pkg/trace/agent/truncator.go index def142efc5556..eb12764eed40a 100644 --- a/pkg/trace/agent/truncator.go +++ b/pkg/trace/agent/truncator.go @@ -6,8 +6,8 @@ package agent import ( + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/log" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) diff --git a/pkg/trace/agent/truncator_test.go b/pkg/trace/agent/truncator_test.go index c4de1c8df7370..4c132a93a6a56 100644 --- a/pkg/trace/agent/truncator_test.go +++ b/pkg/trace/agent/truncator_test.go @@ -11,8 +11,8 @@ import ( 
"github.com/stretchr/testify/assert" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" - "github.com/DataDog/datadog-agent/pkg/trace/pb" ) func testSpan() *pb.Span { diff --git a/pkg/trace/api/api.go b/pkg/trace/api/api.go index 370f45db4c21c..2589a8f0c523d 100644 --- a/pkg/trace/api/api.go +++ b/pkg/trace/api/api.go @@ -26,6 +26,7 @@ import ( "github.com/tinylib/msgp/msgp" "go.uber.org/atomic" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/api/apiutil" "github.com/DataDog/datadog-agent/pkg/trace/api/internal/header" "github.com/DataDog/datadog-agent/pkg/trace/config" @@ -33,7 +34,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/trace/log" "github.com/DataDog/datadog-agent/pkg/trace/metrics" "github.com/DataDog/datadog-agent/pkg/trace/metrics/timing" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" "github.com/DataDog/datadog-agent/pkg/trace/watchdog" @@ -423,7 +423,7 @@ func (r *HTTPReceiver) rateLimited(n int64) bool { type StatsProcessor interface { // ProcessStats takes a stats payload and consumes it. It is considered to be originating // from the given lang. - ProcessStats(p pb.ClientStatsPayload, lang, tracerVersion string) + ProcessStats(p *pb.ClientStatsPayload, lang, tracerVersion string) } // handleStats handles incoming stats payloads. 
@@ -433,8 +433,8 @@ func (r *HTTPReceiver) handleStats(w http.ResponseWriter, req *http.Request) { ts := r.tagStats(V07, req.Header) rd := apiutil.NewLimitedReader(req.Body, r.conf.MaxRequestBytes) req.Header.Set("Accept", "application/msgpack") - var in pb.ClientStatsPayload - if err := msgp.Decode(rd, &in); err != nil { + in := &pb.ClientStatsPayload{} + if err := msgp.Decode(rd, in); err != nil { log.Errorf("Error decoding pb.ClientStatsPayload: %v", err) httpDecodingError(err, []string{"handler:stats", "codec:msgpack", "v:v0.6"}, w) return diff --git a/pkg/trace/api/api_nix_test.go b/pkg/trace/api/api_nix_test.go index ffc0abf40b5dc..75318b51d1656 100644 --- a/pkg/trace/api/api_nix_test.go +++ b/pkg/trace/api/api_nix_test.go @@ -15,9 +15,9 @@ import ( "testing" "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/log" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/testutil" "github.com/stretchr/testify/assert" ) diff --git a/pkg/trace/api/api_oom_test.go b/pkg/trace/api/api_oom_test.go index 8680650e07a3e..1647ee712d961 100644 --- a/pkg/trace/api/api_oom_test.go +++ b/pkg/trace/api/api_oom_test.go @@ -17,8 +17,8 @@ import ( "go.uber.org/atomic" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/testutil" ) diff --git a/pkg/trace/api/api_test.go b/pkg/trace/api/api_test.go index eef8e3923a7b4..432f9d9bfd1c7 100644 --- a/pkg/trace/api/api_test.go +++ b/pkg/trace/api/api_test.go @@ -19,10 +19,10 @@ import ( "testing" "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/api/internal/header" "github.com/DataDog/datadog-agent/pkg/trace/config" 
"github.com/DataDog/datadog-agent/pkg/trace/info" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" "github.com/DataDog/datadog-agent/pkg/trace/testutil" @@ -46,7 +46,7 @@ var headerFields = map[string]string{ type noopStatsProcessor struct{} -func (noopStatsProcessor) ProcessStats(_ pb.ClientStatsPayload, _, _ string) {} +func (noopStatsProcessor) ProcessStats(_ *pb.ClientStatsPayload, _, _ string) {} func newTestReceiverFromConfig(conf *config.AgentConfig) *HTTPReceiver { dynConf := sampler.NewDynamicConfig() @@ -626,12 +626,12 @@ func TestDecodeV05(t *testing.T) { type mockStatsProcessor struct { mu sync.RWMutex - lastP pb.ClientStatsPayload + lastP *pb.ClientStatsPayload lastLang string lastTracerVersion string } -func (m *mockStatsProcessor) ProcessStats(p pb.ClientStatsPayload, lang, tracerVersion string) { +func (m *mockStatsProcessor) ProcessStats(p *pb.ClientStatsPayload, lang, tracerVersion string) { m.mu.Lock() defer m.mu.Unlock() m.lastP = p @@ -639,7 +639,7 @@ func (m *mockStatsProcessor) ProcessStats(p pb.ClientStatsPayload, lang, tracerV m.lastTracerVersion = tracerVersion } -func (m *mockStatsProcessor) Got() (p pb.ClientStatsPayload, lang, tracerVersion string) { +func (m *mockStatsProcessor) Got() (p *pb.ClientStatsPayload, lang, tracerVersion string) { m.mu.RLock() defer m.mu.RUnlock() return m.lastP, m.lastLang, m.lastTracerVersion @@ -656,7 +656,7 @@ func TestHandleStats(t *testing.T) { server := httptest.NewServer(mux) var buf bytes.Buffer - if err := msgp.Encode(&buf, &p); err != nil { + if err := msgp.Encode(&buf, p); err != nil { t.Fatal(err) } req, _ := http.NewRequest("POST", server.URL+"/v0.6/stats", &buf) @@ -771,7 +771,7 @@ func TestHandleTraces(t *testing.T) { ts, ok := rs.Stats[info.Tags{Lang: lang, EndpointVersion: "v0.4"}] assert.True(ok) assert.Equal(int64(20), ts.TracesReceived.Load()) - 
assert.Equal(int64(61822), ts.TracesBytes.Load()) + assert.Equal(int64(59222), ts.TracesBytes.Load()) } // make sure we have all our languages registered assert.Equal("C#|go|java|python|ruby", receiver.Languages()) diff --git a/pkg/trace/api/container_linux_test.go b/pkg/trace/api/container_linux_test.go index df265d54afaa7..955be6480cd80 100644 --- a/pkg/trace/api/container_linux_test.go +++ b/pkg/trace/api/container_linux_test.go @@ -17,8 +17,8 @@ import ( "testing" "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/api/internal/header" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/testutil" "github.com/stretchr/testify/assert" ) diff --git a/pkg/trace/api/fuzz_test.go b/pkg/trace/api/fuzz_test.go index c630e5278d899..66b2e9c67a3d2 100644 --- a/pkg/trace/api/fuzz_test.go +++ b/pkg/trace/api/fuzz_test.go @@ -16,9 +16,9 @@ import ( "reflect" "testing" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/api/apiutil" "github.com/DataDog/datadog-agent/pkg/trace/api/internal/header" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/testutil" "github.com/tinylib/msgp/msgp" @@ -161,10 +161,10 @@ func fuzzTracesAPI(f *testing.F, v Version, contentType string, encode encoder, func FuzzHandleStats(f *testing.F) { cfg := newTestReceiverConfig() - decode := func(stats []byte) (pb.ClientStatsPayload, error) { + decode := func(stats []byte) (*pb.ClientStatsPayload, error) { reader := bytes.NewReader(stats) - var payload pb.ClientStatsPayload - return payload, msgp.Decode(apiutil.NewLimitedReader(io.NopCloser(reader), cfg.MaxRequestBytes), &payload) + payload := &pb.ClientStatsPayload{} + return payload, msgp.Decode(apiutil.NewLimitedReader(io.NopCloser(reader), cfg.MaxRequestBytes), payload) } receiver := newTestReceiverFromConfig(cfg) 
mockProcessor := new(mockStatsProcessor) diff --git a/pkg/trace/api/otlp.go b/pkg/trace/api/otlp.go index 7f5a96eabb294..e69f24d5c437b 100644 --- a/pkg/trace/api/otlp.go +++ b/pkg/trace/api/otlp.go @@ -18,13 +18,13 @@ import ( "sync" "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/api/internal/header" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/info" "github.com/DataDog/datadog-agent/pkg/trace/log" "github.com/DataDog/datadog-agent/pkg/trace/metrics" "github.com/DataDog/datadog-agent/pkg/trace/metrics/timing" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" diff --git a/pkg/trace/api/otlp_test.go b/pkg/trace/api/otlp_test.go index 4e7031c82adb4..8a6cbfb62272f 100644 --- a/pkg/trace/api/otlp_test.go +++ b/pkg/trace/api/otlp_test.go @@ -18,9 +18,9 @@ import ( "time" "unicode" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/api/internal/header" "github.com/DataDog/datadog-agent/pkg/trace/config" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/trace/teststatsd" "github.com/DataDog/datadog-agent/pkg/trace/testutil" diff --git a/pkg/trace/api/payload.go b/pkg/trace/api/payload.go index 678f37e48dc9c..1efc41aad09ca 100644 --- a/pkg/trace/api/payload.go +++ b/pkg/trace/api/payload.go @@ -6,8 +6,8 @@ package api import ( + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/info" - "github.com/DataDog/datadog-agent/pkg/trace/pb" ) // Payload specifies information about a set of traces received by the API. 
diff --git a/pkg/trace/event/extractor.go b/pkg/trace/event/extractor.go index cbd9ff3f320f5..905472076b761 100644 --- a/pkg/trace/event/extractor.go +++ b/pkg/trace/event/extractor.go @@ -6,7 +6,7 @@ package event import ( - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/sampler" ) diff --git a/pkg/trace/event/extractor_fixed_rate.go b/pkg/trace/event/extractor_fixed_rate.go index 60596eb260b8e..3e1973d3a17cf 100644 --- a/pkg/trace/event/extractor_fixed_rate.go +++ b/pkg/trace/event/extractor_fixed_rate.go @@ -8,7 +8,7 @@ package event import ( "strings" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/sampler" ) diff --git a/pkg/trace/event/extractor_fixed_rate_test.go b/pkg/trace/event/extractor_fixed_rate_test.go index 3f69e696724c3..f03da04716143 100644 --- a/pkg/trace/event/extractor_fixed_rate_test.go +++ b/pkg/trace/event/extractor_fixed_rate_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" ) func createTestSpans(serviceName string, operationName string) []*pb.Span { diff --git a/pkg/trace/event/extractor_legacy.go b/pkg/trace/event/extractor_legacy.go index d92c572575c58..465a299bf6fce 100644 --- a/pkg/trace/event/extractor_legacy.go +++ b/pkg/trace/event/extractor_legacy.go @@ -8,7 +8,7 @@ package event import ( "strings" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) diff --git a/pkg/trace/event/extractor_legacy_test.go b/pkg/trace/event/extractor_legacy_test.go index 8e880c5f03a4d..1f2336ac6ac48 100644 
--- a/pkg/trace/event/extractor_legacy_test.go +++ b/pkg/trace/event/extractor_legacy_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) diff --git a/pkg/trace/event/extractor_metric.go b/pkg/trace/event/extractor_metric.go index df2eae87f255e..50d87704525a7 100644 --- a/pkg/trace/event/extractor_metric.go +++ b/pkg/trace/event/extractor_metric.go @@ -6,7 +6,7 @@ package event import ( - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/sampler" ) diff --git a/pkg/trace/event/extractor_metric_test.go b/pkg/trace/event/extractor_metric_test.go index a4d20e922c56e..4528fa81cc2cd 100644 --- a/pkg/trace/event/extractor_metric_test.go +++ b/pkg/trace/event/extractor_metric_test.go @@ -9,7 +9,7 @@ import ( "math/rand" "testing" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/sampler" ) diff --git a/pkg/trace/event/extractor_noop.go b/pkg/trace/event/extractor_noop.go index b1ce256870b73..408c05445f6a2 100644 --- a/pkg/trace/event/extractor_noop.go +++ b/pkg/trace/event/extractor_noop.go @@ -6,7 +6,7 @@ package event import ( - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/sampler" ) diff --git a/pkg/trace/event/extractor_test.go b/pkg/trace/event/extractor_test.go index de69e64d30506..60722c2bb2c5a 100644 --- a/pkg/trace/event/extractor_test.go +++ b/pkg/trace/event/extractor_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb 
"github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/sampler" ) diff --git a/pkg/trace/event/processor.go b/pkg/trace/event/processor.go index 4e7c33e5e2d61..29f8ccdaf387d 100644 --- a/pkg/trace/event/processor.go +++ b/pkg/trace/event/processor.go @@ -6,7 +6,7 @@ package event import ( - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) diff --git a/pkg/trace/event/processor_test.go b/pkg/trace/event/processor_test.go index 4ce99b4aba0f9..71b678a2a3cb9 100644 --- a/pkg/trace/event/processor_test.go +++ b/pkg/trace/event/processor_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/trace/testutil" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" diff --git a/pkg/trace/event/sampler_max_eps.go b/pkg/trace/event/sampler_max_eps.go index d742407ae5165..3ce8929332019 100644 --- a/pkg/trace/event/sampler_max_eps.go +++ b/pkg/trace/event/sampler_max_eps.go @@ -8,9 +8,9 @@ package event import ( "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/log" "github.com/DataDog/datadog-agent/pkg/trace/metrics" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/trace/watchdog" ) diff --git a/pkg/trace/event/sampler_max_eps_test.go b/pkg/trace/event/sampler_max_eps_test.go index 26742c1b97bf1..d4c088062c2e8 100644 --- a/pkg/trace/event/sampler_max_eps_test.go +++ b/pkg/trace/event/sampler_max_eps_test.go @@ -10,7 +10,7 @@ import ( 
"github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/testutil" ) diff --git a/pkg/trace/filters/blacklister.go b/pkg/trace/filters/blacklister.go index 28fd436c6f293..76e863da2f122 100644 --- a/pkg/trace/filters/blacklister.go +++ b/pkg/trace/filters/blacklister.go @@ -8,8 +8,8 @@ package filters import ( "regexp" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/log" - "github.com/DataDog/datadog-agent/pkg/trace/pb" ) // Blacklister holds a list of regular expressions which will match resources diff --git a/pkg/trace/filters/blacklister_test.go b/pkg/trace/filters/blacklister_test.go index 2cfa3891fff66..58944340a0f0f 100644 --- a/pkg/trace/filters/blacklister_test.go +++ b/pkg/trace/filters/blacklister_test.go @@ -8,7 +8,7 @@ package filters import ( "testing" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/testutil" "github.com/stretchr/testify/assert" diff --git a/pkg/trace/filters/replacer.go b/pkg/trace/filters/replacer.go index 98b8c6d1ff061..482f87828f945 100644 --- a/pkg/trace/filters/replacer.go +++ b/pkg/trace/filters/replacer.go @@ -8,8 +8,8 @@ package filters import ( "strconv" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" - "github.com/DataDog/datadog-agent/pkg/trace/pb" ) // Replacer is a filter which replaces tag values based on its diff --git a/pkg/trace/filters/replacer_test.go b/pkg/trace/filters/replacer_test.go index 0038fa2a4a6fb..4aefd7995558a 100644 --- a/pkg/trace/filters/replacer_test.go +++ b/pkg/trace/filters/replacer_test.go @@ -11,8 +11,8 @@ import ( "github.com/stretchr/testify/assert" + pb 
"github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" - "github.com/DataDog/datadog-agent/pkg/trace/pb" ) func TestReplacer(t *testing.T) { @@ -84,7 +84,7 @@ func TestReplacer(t *testing.T) { t.Run("stats", func(t *testing.T) { for _, tt := range []struct { rules [][3]string - got, want pb.ClientGroupedStats + got, want *pb.ClientGroupedStats }{ { rules: [][3]string{ @@ -92,11 +92,11 @@ func TestReplacer(t *testing.T) { {"resource.name", "prod", "stage"}, {"*", "123abc", "[REDACTED]"}, }, - got: pb.ClientGroupedStats{ + got: &pb.ClientGroupedStats{ Resource: "this is 123abc on prod", HTTPStatusCode: 400, }, - want: pb.ClientGroupedStats{ + want: &pb.ClientGroupedStats{ Resource: "this is [REDACTED] on stage", HTTPStatusCode: 200, }, @@ -105,18 +105,18 @@ func TestReplacer(t *testing.T) { rules: [][3]string{ {"*", "200", "202"}, }, - got: pb.ClientGroupedStats{ + got: &pb.ClientGroupedStats{ Resource: "/code/200/profile", HTTPStatusCode: 200, }, - want: pb.ClientGroupedStats{ + want: &pb.ClientGroupedStats{ Resource: "/code/202/profile", HTTPStatusCode: 202, }, }, } { tr := NewReplacer(parseRulesFromString(tt.rules)) - tr.ReplaceStatsGroup(&tt.got) + tr.ReplaceStatsGroup(tt.got) assert.Equal(tt.got, tt.want) } }) diff --git a/pkg/trace/go.mod b/pkg/trace/go.mod index 8c91080d370d8..02583c49f2d6f 100644 --- a/pkg/trace/go.mod +++ b/pkg/trace/go.mod @@ -11,6 +11,7 @@ replace github.com/docker/distribution => github.com/docker/distribution v2.8.1+ require ( github.com/DataDog/datadog-agent/pkg/obfuscate v0.47.0-rc.3 + github.com/DataDog/datadog-agent/pkg/proto v0.47.0-rc.3.0.20230717151521-271965684571 github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.47.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/cgroups v0.47.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/log v0.47.0-rc.3 @@ -19,10 +20,9 @@ require ( github.com/DataDog/datadog-go/v5 v5.1.1 
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.5.2 github.com/DataDog/sketches-go v1.4.2 - github.com/Microsoft/go-winio v0.6.0 + github.com/Microsoft/go-winio v0.6.1 github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 github.com/davecgh/go-spew v1.1.1 - github.com/gogo/protobuf v1.3.2 github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.3 github.com/google/gofuzz v1.2.0 @@ -36,8 +36,8 @@ require ( go.uber.org/atomic v1.11.0 golang.org/x/sys v0.10.0 golang.org/x/time v0.3.0 - google.golang.org/grpc v1.56.0 - google.golang.org/protobuf v1.30.0 + google.golang.org/grpc v1.56.1 + google.golang.org/protobuf v1.31.0 k8s.io/apimachinery v0.25.5 ) @@ -45,18 +45,19 @@ require ( github.com/DataDog/go-tuf v1.0.1-0.5.2 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/cgroups v1.0.4 // indirect - github.com/coreos/go-systemd/v22 v22.3.2 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect - github.com/docker/go-units v0.4.0 // indirect + github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/godbus/dbus/v5 v5.0.6 // indirect + github.com/gogo/protobuf v1.3.2 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/karrick/godirwalk v1.17.0 // indirect github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect + github.com/opencontainers/runtime-spec v1.1.0-rc.3 // indirect github.com/outcaste-io/ristretto v0.2.1 // indirect github.com/philhofer/fwd v1.1.2 // indirect github.com/pkg/errors 
v0.9.1 // indirect @@ -68,12 +69,12 @@ require ( github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/mod v0.10.0 // indirect + golang.org/x/mod v0.11.0 // indirect golang.org/x/net v0.11.0 // indirect golang.org/x/text v0.11.0 // indirect golang.org/x/tools v0.9.1 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/trace/go.sum b/pkg/trace/go.sum index f95a2401daed4..7cbe3b5f342e9 100644 --- a/pkg/trace/go.sum +++ b/pkg/trace/go.sum @@ -8,8 +8,8 @@ github.com/DataDog/sketches-go v1.4.2 h1:gppNudE9d19cQ98RYABOetxIhpTCl4m7CnbRZjv github.com/DataDog/sketches-go v1.4.2/go.mod h1:xJIXldczJyyjnbDop7ZZcLxJdV3+7Kra7H1KMgpgkLk= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= -github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -17,16 +17,16 @@ github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1 github.com/cihub/seelog 
v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= -github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= @@ -73,8 +73,8 @@ github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.1.0-rc.3 h1:l04uafi6kxByhbxev7OWiuUv0LZxEsYUfDWZ6bztAuU= +github.com/opencontainers/runtime-spec v1.1.0-rc.3/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/outcaste-io/ristretto v0.2.1 h1:KCItuNIGJZcursqHr3ghO7fc5ddZLEHspL9UR0cQM64= github.com/outcaste-io/ristretto v0.2.1/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac= github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= @@ -143,8 +143,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= 
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -211,15 +211,15 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= -google.golang.org/grpc v1.56.0 h1:+y7Bs8rtMd07LeXmL3NxcTLn7mUkbKZqEpPhMNkwJEE= -google.golang.org/grpc v1.56.0/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ= +google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/pkg/trace/pb/agent_payload.pb.go b/pkg/trace/pb/agent_payload.pb.go deleted file mode 100644 index 9aa335dd00c25..0000000000000 --- a/pkg/trace/pb/agent_payload.pb.go +++ /dev/null @@ -1,238 +0,0 @@ -// protoc -I. -I$GOPATH/src --gogofaster_out=. span.proto tracer_payload.proto agent_payload.proto - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 -// source: agent_payload.proto - -package pb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// AgentPayload represents payload the agent sends to the intake. -type AgentPayload struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // hostName specifies hostname of where the agent is running. - HostName string `protobuf:"bytes,1,opt,name=hostName,proto3" json:"hostName,omitempty"` - // env specifies `env` set in agent configuration. - Env string `protobuf:"bytes,2,opt,name=env,proto3" json:"env,omitempty"` - // tracerPayloads specifies list of the payloads received from tracers. - TracerPayloads []*TracerPayload `protobuf:"bytes,5,rep,name=tracerPayloads,proto3" json:"tracerPayloads,omitempty"` - // tags specifies tags common in all `tracerPayloads`. 
- Tags map[string]string `protobuf:"bytes,6,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // agentVersion specifies version of the agent. - AgentVersion string `protobuf:"bytes,7,opt,name=agentVersion,proto3" json:"agentVersion,omitempty"` - // targetTPS holds `TargetTPS` value in AgentConfig. - TargetTPS float64 `protobuf:"fixed64,8,opt,name=targetTPS,proto3" json:"targetTPS,omitempty"` - // errorTPS holds `ErrorTPS` value in AgentConfig. - ErrorTPS float64 `protobuf:"fixed64,9,opt,name=errorTPS,proto3" json:"errorTPS,omitempty"` - // rareSamplerEnabled holds `RareSamplerEnabled` value in AgentConfig - RareSamplerEnabled bool `protobuf:"varint,10,opt,name=rareSamplerEnabled,proto3" json:"rareSamplerEnabled,omitempty"` -} - -func (x *AgentPayload) Reset() { - *x = AgentPayload{} - if protoimpl.UnsafeEnabled { - mi := &file_agent_payload_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AgentPayload) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AgentPayload) ProtoMessage() {} - -func (x *AgentPayload) ProtoReflect() protoreflect.Message { - mi := &file_agent_payload_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AgentPayload.ProtoReflect.Descriptor instead. 
-func (*AgentPayload) Descriptor() ([]byte, []int) { - return file_agent_payload_proto_rawDescGZIP(), []int{0} -} - -func (x *AgentPayload) GetHostName() string { - if x != nil { - return x.HostName - } - return "" -} - -func (x *AgentPayload) GetEnv() string { - if x != nil { - return x.Env - } - return "" -} - -func (x *AgentPayload) GetTracerPayloads() []*TracerPayload { - if x != nil { - return x.TracerPayloads - } - return nil -} - -func (x *AgentPayload) GetTags() map[string]string { - if x != nil { - return x.Tags - } - return nil -} - -func (x *AgentPayload) GetAgentVersion() string { - if x != nil { - return x.AgentVersion - } - return "" -} - -func (x *AgentPayload) GetTargetTPS() float64 { - if x != nil { - return x.TargetTPS - } - return 0 -} - -func (x *AgentPayload) GetErrorTPS() float64 { - if x != nil { - return x.ErrorTPS - } - return 0 -} - -func (x *AgentPayload) GetRareSamplerEnabled() bool { - if x != nil { - return x.RareSamplerEnabled - } - return false -} - -var File_agent_payload_proto protoreflect.FileDescriptor - -var file_agent_payload_proto_rawDesc = []byte{ - 0x0a, 0x13, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x14, 0x74, 0x72, 0x61, 0x63, 0x65, - 0x72, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0xee, 0x02, 0x0a, 0x0c, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, - 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, - 0x65, 0x6e, 0x76, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x39, - 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, - 0x65, 0x72, 
0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x63, 0x65, - 0x72, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x12, 0x2e, 0x0a, 0x04, 0x74, 0x61, 0x67, - 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x67, 0x65, - 0x6e, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x67, 0x65, - 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, - 0x09, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x50, 0x53, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x09, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x50, 0x53, 0x12, 0x1a, 0x0a, 0x08, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x54, 0x50, 0x53, 0x18, 0x09, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x54, 0x50, 0x53, 0x12, 0x2e, 0x0a, 0x12, 0x72, 0x61, 0x72, 0x65, 0x53, - 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x61, 0x72, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, - 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, - 0x61, 0x74, 0x61, 0x44, 0x6f, 0x67, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2d, 0x61, - 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x70, - 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x33, -} - -var ( - file_agent_payload_proto_rawDescOnce sync.Once - file_agent_payload_proto_rawDescData = file_agent_payload_proto_rawDesc -) - -func file_agent_payload_proto_rawDescGZIP() []byte { - file_agent_payload_proto_rawDescOnce.Do(func() { - file_agent_payload_proto_rawDescData = protoimpl.X.CompressGZIP(file_agent_payload_proto_rawDescData) - }) - return file_agent_payload_proto_rawDescData -} - -var file_agent_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_agent_payload_proto_goTypes = []interface{}{ - (*AgentPayload)(nil), // 0: pb.AgentPayload - nil, // 1: pb.AgentPayload.TagsEntry - (*TracerPayload)(nil), // 2: pb.TracerPayload -} -var file_agent_payload_proto_depIdxs = []int32{ - 2, // 0: pb.AgentPayload.tracerPayloads:type_name -> pb.TracerPayload - 1, // 1: pb.AgentPayload.tags:type_name -> pb.AgentPayload.TagsEntry - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_agent_payload_proto_init() } -func file_agent_payload_proto_init() { - if File_agent_payload_proto != nil { - return - } - file_tracer_payload_proto_init() - if !protoimpl.UnsafeEnabled { - file_agent_payload_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AgentPayload); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_agent_payload_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_agent_payload_proto_goTypes, - DependencyIndexes: file_agent_payload_proto_depIdxs, - MessageInfos: 
file_agent_payload_proto_msgTypes, - }.Build() - File_agent_payload_proto = out.File - file_agent_payload_proto_rawDesc = nil - file_agent_payload_proto_goTypes = nil - file_agent_payload_proto_depIdxs = nil -} diff --git a/pkg/trace/pb/doc.go b/pkg/trace/pb/doc.go deleted file mode 100644 index c999287deca7f..0000000000000 --- a/pkg/trace/pb/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -// Package pb contains the data structures used by the trace agent to communicate -// with tracers and the Datadog API. Note that the "//go:generate" directives from this -// package were removed because the generated files were manually edited to create -// adaptions (see decoder.go). -// -// TODO: eventually move this to https://github.com/DataDog/agent-payload/v5 -package pb diff --git a/pkg/trace/pb/generate.sh b/pkg/trace/pb/generate.sh deleted file mode 100755 index ed0d1388cfec0..0000000000000 --- a/pkg/trace/pb/generate.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash - - -protoc -I. --go_out=paths=source_relative:. --go-vtproto_out=paths=source_relative:. --go-vtproto_opt=features=marshal+unmarshal+size span.proto tracer_payload.proto agent_payload.proto stats.proto -protoc-go-inject-tag -input=span.pb.go -protoc-go-inject-tag -input=tracer_payload.pb.go -protoc-go-inject-tag -input=agent_payload.pb.go - diff --git a/pkg/trace/pb/span.pb.go b/pkg/trace/pb/span.pb.go deleted file mode 100644 index 0d9607e45d24d..0000000000000 --- a/pkg/trace/pb/span.pb.go +++ /dev/null @@ -1,305 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 -// source: span.proto - -package pb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Span struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // service is the name of the service with which this span is associated. - // @gotags: json:"service" msg:"service" - Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service" msg:"service"` - // name is the operation name of this span. - // @gotags: json:"name" msg:"name" - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name" msg:"name"` - // resource is the resource name of this span, also sometimes called the endpoint (for web spans). - // @gotags: json:"resource" msg:"resource" - Resource string `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource" msg:"resource"` - // traceID is the ID of the trace to which this span belongs. - // @gotags: json:"trace_id" msg:"trace_id" - TraceID uint64 `protobuf:"varint,4,opt,name=traceID,proto3" json:"trace_id" msg:"trace_id"` - // spanID is the ID of this span. - // @gotags: json:"span_id" msg:"span_id" - SpanID uint64 `protobuf:"varint,5,opt,name=spanID,proto3" json:"span_id" msg:"span_id"` - // parentID is the ID of this span's parent, or zero if this span has no parent. - // @gotags: json:"parent_id" msg:"parent_id" - ParentID uint64 `protobuf:"varint,6,opt,name=parentID,proto3" json:"parent_id" msg:"parent_id"` - // start is the number of nanoseconds between the Unix epoch and the beginning of this span. 
- // @gotags: json:"start" msg:"start" - Start int64 `protobuf:"varint,7,opt,name=start,proto3" json:"start" msg:"start"` - // duration is the time length of this span in nanoseconds. - // @gotags: json:"duration" msg:"duration" - Duration int64 `protobuf:"varint,8,opt,name=duration,proto3" json:"duration" msg:"duration"` - // error is 1 if there is an error associated with this span, or 0 if there is not. - // @gotags: json:"error" msg:"error" - Error int32 `protobuf:"varint,9,opt,name=error,proto3" json:"error" msg:"error"` - // meta is a mapping from tag name to tag value for string-valued tags. - // @gotags: json:"meta" msg:"meta" - Meta map[string]string `protobuf:"bytes,10,rep,name=meta,proto3" json:"meta" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"meta"` - // metrics is a mapping from tag name to tag value for numeric-valued tags. - // @gotags: json:"metrics" msg:"metrics" - Metrics map[string]float64 `protobuf:"bytes,11,rep,name=metrics,proto3" json:"metrics" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3" msg:"metrics"` - // type is the type of the service with which this span is associated. Example values: web, db, lambda. - // @gotags: json:"type" msg:"type" - Type string `protobuf:"bytes,12,opt,name=type,proto3" json:"type" msg:"type"` - // meta_struct is a registry of structured "other" data used by, e.g., AppSec. 
- // @gotags: json:"meta_struct,omitempty" msg:"meta_struct" - MetaStruct map[string][]byte `protobuf:"bytes,13,rep,name=meta_struct,json=metaStruct,proto3" json:"meta_struct,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"meta_struct"` -} - -func (x *Span) Reset() { - *x = Span{} - if protoimpl.UnsafeEnabled { - mi := &file_span_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Span) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Span) ProtoMessage() {} - -func (x *Span) ProtoReflect() protoreflect.Message { - mi := &file_span_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Span.ProtoReflect.Descriptor instead. -func (*Span) Descriptor() ([]byte, []int) { - return file_span_proto_rawDescGZIP(), []int{0} -} - -func (x *Span) GetService() string { - if x != nil { - return x.Service - } - return "" -} - -func (x *Span) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Span) GetResource() string { - if x != nil { - return x.Resource - } - return "" -} - -func (x *Span) GetTraceID() uint64 { - if x != nil { - return x.TraceID - } - return 0 -} - -func (x *Span) GetSpanID() uint64 { - if x != nil { - return x.SpanID - } - return 0 -} - -func (x *Span) GetParentID() uint64 { - if x != nil { - return x.ParentID - } - return 0 -} - -func (x *Span) GetStart() int64 { - if x != nil { - return x.Start - } - return 0 -} - -func (x *Span) GetDuration() int64 { - if x != nil { - return x.Duration - } - return 0 -} - -func (x *Span) GetError() int32 { - if x != nil { - return x.Error - } - return 0 -} - -func (x *Span) GetMeta() map[string]string { - if x != nil { - return x.Meta - } - return nil -} - 
-func (x *Span) GetMetrics() map[string]float64 { - if x != nil { - return x.Metrics - } - return nil -} - -func (x *Span) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Span) GetMetaStruct() map[string][]byte { - if x != nil { - return x.MetaStruct - } - return nil -} - -var File_span_proto protoreflect.FileDescriptor - -var file_span_proto_rawDesc = []byte{ - 0x0a, 0x0a, 0x73, 0x70, 0x61, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, - 0x22, 0xc2, 0x04, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x44, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x44, 0x12, 0x16, 0x0a, - 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x73, - 0x70, 0x61, 0x6e, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, - 0x44, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, - 0x44, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x26, 0x0a, 0x04, 0x6d, 0x65, 0x74, - 0x61, 0x18, 0x0a, 0x20, 0x03, 
0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x70, 0x61, - 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x6d, 0x65, 0x74, - 0x61, 0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x0b, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x73, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x62, - 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x72, 0x75, 0x63, - 0x74, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, - 0x72, 0x75, 0x63, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 
0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x44, 0x6f, 0x67, 0x2f, 0x64, 0x61, 0x74, 0x61, - 0x64, 0x6f, 0x67, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x74, 0x72, - 0x61, 0x63, 0x65, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_span_proto_rawDescOnce sync.Once - file_span_proto_rawDescData = file_span_proto_rawDesc -) - -func file_span_proto_rawDescGZIP() []byte { - file_span_proto_rawDescOnce.Do(func() { - file_span_proto_rawDescData = protoimpl.X.CompressGZIP(file_span_proto_rawDescData) - }) - return file_span_proto_rawDescData -} - -var file_span_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_span_proto_goTypes = []interface{}{ - (*Span)(nil), // 0: pb.Span - nil, // 1: pb.Span.MetaEntry - nil, // 2: pb.Span.MetricsEntry - nil, // 3: pb.Span.MetaStructEntry -} -var file_span_proto_depIdxs = []int32{ - 1, // 0: pb.Span.meta:type_name -> pb.Span.MetaEntry - 2, // 1: pb.Span.metrics:type_name -> pb.Span.MetricsEntry - 3, // 2: pb.Span.meta_struct:type_name -> pb.Span.MetaStructEntry - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_span_proto_init() } -func file_span_proto_init() { - if File_span_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_span_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Span); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - 
RawDescriptor: file_span_proto_rawDesc, - NumEnums: 0, - NumMessages: 4, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_span_proto_goTypes, - DependencyIndexes: file_span_proto_depIdxs, - MessageInfos: file_span_proto_msgTypes, - }.Build() - File_span_proto = out.File - file_span_proto_rawDesc = nil - file_span_proto_goTypes = nil - file_span_proto_depIdxs = nil -} diff --git a/pkg/trace/pb/stats.pb.go b/pkg/trace/pb/stats.pb.go deleted file mode 100644 index b8580586daba9..0000000000000 --- a/pkg/trace/pb/stats.pb.go +++ /dev/null @@ -1,2384 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: stats.proto - -package pb - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -// StatsPayload is the payload used to send stats from the agent to the backend. 
-type StatsPayload struct { - AgentHostname string `protobuf:"bytes,1,opt,name=agentHostname,proto3" json:"agentHostname,omitempty"` - AgentEnv string `protobuf:"bytes,2,opt,name=agentEnv,proto3" json:"agentEnv,omitempty"` - Stats []ClientStatsPayload `protobuf:"bytes,3,rep,name=stats,proto3" json:"stats"` - AgentVersion string `protobuf:"bytes,4,opt,name=agentVersion,proto3" json:"agentVersion,omitempty"` - ClientComputed bool `protobuf:"varint,5,opt,name=clientComputed,proto3" json:"clientComputed,omitempty"` -} - -func (m *StatsPayload) Reset() { *m = StatsPayload{} } -func (m *StatsPayload) String() string { return proto.CompactTextString(m) } -func (*StatsPayload) ProtoMessage() {} -func (*StatsPayload) Descriptor() ([]byte, []int) { - return fileDescriptor_b4756a0aec8b9d44, []int{0} -} -func (m *StatsPayload) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatsPayload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatsPayload.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StatsPayload) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatsPayload.Merge(m, src) -} -func (m *StatsPayload) XXX_Size() int { - return m.Size() -} -func (m *StatsPayload) XXX_DiscardUnknown() { - xxx_messageInfo_StatsPayload.DiscardUnknown(m) -} - -var xxx_messageInfo_StatsPayload proto.InternalMessageInfo - -func (m *StatsPayload) GetAgentHostname() string { - if m != nil { - return m.AgentHostname - } - return "" -} - -func (m *StatsPayload) GetAgentEnv() string { - if m != nil { - return m.AgentEnv - } - return "" -} - -func (m *StatsPayload) GetStats() []ClientStatsPayload { - if m != nil { - return m.Stats - } - return nil -} - -func (m *StatsPayload) GetAgentVersion() string { - if m != nil { - return m.AgentVersion - } - return "" -} - -func (m *StatsPayload) GetClientComputed() bool 
{ - if m != nil { - return m.ClientComputed - } - return false -} - -// ClientStatsPayload is the first layer of span stats aggregation. It is also -// the payload sent by tracers to the agent when stats in tracer are enabled. -type ClientStatsPayload struct { - // Hostname is the tracer hostname. It's extracted from spans with "_dd.hostname" meta - // or set by tracer stats payload when hostname reporting is enabled. - Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` - Env string `protobuf:"bytes,2,opt,name=env,proto3" json:"env,omitempty"` - Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` - Stats []ClientStatsBucket `protobuf:"bytes,4,rep,name=stats,proto3" json:"stats"` - Lang string `protobuf:"bytes,5,opt,name=lang,proto3" json:"lang,omitempty"` - TracerVersion string `protobuf:"bytes,6,opt,name=tracerVersion,proto3" json:"tracerVersion,omitempty"` - RuntimeID string `protobuf:"bytes,7,opt,name=runtimeID,proto3" json:"runtimeID,omitempty"` - Sequence uint64 `protobuf:"varint,8,opt,name=sequence,proto3" json:"sequence,omitempty"` - // AgentAggregation is set by the agent on tracer payloads modified by the agent aggregation layer - // characterizes counts only and distributions only payloads - AgentAggregation string `protobuf:"bytes,9,opt,name=agentAggregation,proto3" json:"agentAggregation,omitempty"` - // Service is the main service of the tracer. - // It is part of unified tagging: https://docs.datadoghq.com/getting_started/tagging/unified_service_tagging - Service string `protobuf:"bytes,10,opt,name=service,proto3" json:"service,omitempty"` - // ContainerID specifies the origin container ID. It is meant to be populated by the client and may - // be enhanced by the agent to ensure it is unique. 
- ContainerID string `protobuf:"bytes,11,opt,name=containerID,proto3" json:"containerID,omitempty"` - // Tags specifies a set of tags obtained from the orchestrator (where applicable) using the specified containerID. - // This field should be left empty by the client. It only applies to some specific environment. - Tags []string `protobuf:"bytes,12,rep,name=tags,proto3" json:"tags,omitempty"` -} - -func (m *ClientStatsPayload) Reset() { *m = ClientStatsPayload{} } -func (m *ClientStatsPayload) String() string { return proto.CompactTextString(m) } -func (*ClientStatsPayload) ProtoMessage() {} -func (*ClientStatsPayload) Descriptor() ([]byte, []int) { - return fileDescriptor_b4756a0aec8b9d44, []int{1} -} -func (m *ClientStatsPayload) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClientStatsPayload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ClientStatsPayload.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ClientStatsPayload) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClientStatsPayload.Merge(m, src) -} -func (m *ClientStatsPayload) XXX_Size() int { - return m.Size() -} -func (m *ClientStatsPayload) XXX_DiscardUnknown() { - xxx_messageInfo_ClientStatsPayload.DiscardUnknown(m) -} - -var xxx_messageInfo_ClientStatsPayload proto.InternalMessageInfo - -func (m *ClientStatsPayload) GetHostname() string { - if m != nil { - return m.Hostname - } - return "" -} - -func (m *ClientStatsPayload) GetEnv() string { - if m != nil { - return m.Env - } - return "" -} - -func (m *ClientStatsPayload) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func (m *ClientStatsPayload) GetStats() []ClientStatsBucket { - if m != nil { - return m.Stats - } - return nil -} - -func (m *ClientStatsPayload) GetLang() string { - if m != nil { - return m.Lang - } - 
return "" -} - -func (m *ClientStatsPayload) GetTracerVersion() string { - if m != nil { - return m.TracerVersion - } - return "" -} - -func (m *ClientStatsPayload) GetRuntimeID() string { - if m != nil { - return m.RuntimeID - } - return "" -} - -func (m *ClientStatsPayload) GetSequence() uint64 { - if m != nil { - return m.Sequence - } - return 0 -} - -func (m *ClientStatsPayload) GetAgentAggregation() string { - if m != nil { - return m.AgentAggregation - } - return "" -} - -func (m *ClientStatsPayload) GetService() string { - if m != nil { - return m.Service - } - return "" -} - -func (m *ClientStatsPayload) GetContainerID() string { - if m != nil { - return m.ContainerID - } - return "" -} - -func (m *ClientStatsPayload) GetTags() []string { - if m != nil { - return m.Tags - } - return nil -} - -// ClientStatsBucket is a time bucket containing aggregated stats. -type ClientStatsBucket struct { - Start uint64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` - Duration uint64 `protobuf:"varint,2,opt,name=duration,proto3" json:"duration,omitempty"` - Stats []ClientGroupedStats `protobuf:"bytes,3,rep,name=stats,proto3" json:"stats"` - // AgentTimeShift is the shift applied by the agent stats aggregator on bucket start - // when the received bucket start is outside of the agent aggregation window - AgentTimeShift int64 `protobuf:"varint,4,opt,name=agentTimeShift,proto3" json:"agentTimeShift,omitempty"` -} - -func (m *ClientStatsBucket) Reset() { *m = ClientStatsBucket{} } -func (m *ClientStatsBucket) String() string { return proto.CompactTextString(m) } -func (*ClientStatsBucket) ProtoMessage() {} -func (*ClientStatsBucket) Descriptor() ([]byte, []int) { - return fileDescriptor_b4756a0aec8b9d44, []int{2} -} -func (m *ClientStatsBucket) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClientStatsBucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return 
xxx_messageInfo_ClientStatsBucket.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ClientStatsBucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClientStatsBucket.Merge(m, src) -} -func (m *ClientStatsBucket) XXX_Size() int { - return m.Size() -} -func (m *ClientStatsBucket) XXX_DiscardUnknown() { - xxx_messageInfo_ClientStatsBucket.DiscardUnknown(m) -} - -var xxx_messageInfo_ClientStatsBucket proto.InternalMessageInfo - -func (m *ClientStatsBucket) GetStart() uint64 { - if m != nil { - return m.Start - } - return 0 -} - -func (m *ClientStatsBucket) GetDuration() uint64 { - if m != nil { - return m.Duration - } - return 0 -} - -func (m *ClientStatsBucket) GetStats() []ClientGroupedStats { - if m != nil { - return m.Stats - } - return nil -} - -func (m *ClientStatsBucket) GetAgentTimeShift() int64 { - if m != nil { - return m.AgentTimeShift - } - return 0 -} - -// ClientGroupedStats aggregate stats on spans grouped by service, name, resource, status_code, type -type ClientGroupedStats struct { - Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Resource string `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` - HTTPStatusCode uint32 `protobuf:"varint,4,opt,name=HTTP_status_code,json=HTTPStatusCode,proto3" json:"HTTP_status_code,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` - DBType string `protobuf:"bytes,6,opt,name=DB_type,json=DBType,proto3" json:"DB_type,omitempty"` - Hits uint64 `protobuf:"varint,7,opt,name=hits,proto3" json:"hits,omitempty"` - Errors uint64 `protobuf:"varint,8,opt,name=errors,proto3" json:"errors,omitempty"` - Duration uint64 `protobuf:"varint,9,opt,name=duration,proto3" json:"duration,omitempty"` - OkSummary []byte 
`protobuf:"bytes,10,opt,name=okSummary,proto3" json:"okSummary,omitempty"` - ErrorSummary []byte `protobuf:"bytes,11,opt,name=errorSummary,proto3" json:"errorSummary,omitempty"` - Synthetics bool `protobuf:"varint,12,opt,name=synthetics,proto3" json:"synthetics,omitempty"` - TopLevelHits uint64 `protobuf:"varint,13,opt,name=topLevelHits,proto3" json:"topLevelHits,omitempty"` - PeerService string `protobuf:"bytes,14,opt,name=peer_service,json=peerService,proto3" json:"peer_service,omitempty"` - SpanKind string `protobuf:"bytes,15,opt,name=span_kind,json=spanKind,proto3" json:"span_kind,omitempty"` -} - -func (m *ClientGroupedStats) Reset() { *m = ClientGroupedStats{} } -func (m *ClientGroupedStats) String() string { return proto.CompactTextString(m) } -func (*ClientGroupedStats) ProtoMessage() {} -func (*ClientGroupedStats) Descriptor() ([]byte, []int) { - return fileDescriptor_b4756a0aec8b9d44, []int{3} -} -func (m *ClientGroupedStats) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClientGroupedStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ClientGroupedStats.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ClientGroupedStats) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClientGroupedStats.Merge(m, src) -} -func (m *ClientGroupedStats) XXX_Size() int { - return m.Size() -} -func (m *ClientGroupedStats) XXX_DiscardUnknown() { - xxx_messageInfo_ClientGroupedStats.DiscardUnknown(m) -} - -var xxx_messageInfo_ClientGroupedStats proto.InternalMessageInfo - -func (m *ClientGroupedStats) GetService() string { - if m != nil { - return m.Service - } - return "" -} - -func (m *ClientGroupedStats) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *ClientGroupedStats) GetResource() string { - if m != nil { - return m.Resource - } - return "" -} - 
-func (m *ClientGroupedStats) GetHTTPStatusCode() uint32 { - if m != nil { - return m.HTTPStatusCode - } - return 0 -} - -func (m *ClientGroupedStats) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *ClientGroupedStats) GetDBType() string { - if m != nil { - return m.DBType - } - return "" -} - -func (m *ClientGroupedStats) GetHits() uint64 { - if m != nil { - return m.Hits - } - return 0 -} - -func (m *ClientGroupedStats) GetErrors() uint64 { - if m != nil { - return m.Errors - } - return 0 -} - -func (m *ClientGroupedStats) GetDuration() uint64 { - if m != nil { - return m.Duration - } - return 0 -} - -func (m *ClientGroupedStats) GetOkSummary() []byte { - if m != nil { - return m.OkSummary - } - return nil -} - -func (m *ClientGroupedStats) GetErrorSummary() []byte { - if m != nil { - return m.ErrorSummary - } - return nil -} - -func (m *ClientGroupedStats) GetSynthetics() bool { - if m != nil { - return m.Synthetics - } - return false -} - -func (m *ClientGroupedStats) GetTopLevelHits() uint64 { - if m != nil { - return m.TopLevelHits - } - return 0 -} - -func (m *ClientGroupedStats) GetPeerService() string { - if m != nil { - return m.PeerService - } - return "" -} - -func (m *ClientGroupedStats) GetSpanKind() string { - if m != nil { - return m.SpanKind - } - return "" -} - -func init() { - proto.RegisterType((*StatsPayload)(nil), "pb.StatsPayload") - proto.RegisterType((*ClientStatsPayload)(nil), "pb.ClientStatsPayload") - proto.RegisterType((*ClientStatsBucket)(nil), "pb.ClientStatsBucket") - proto.RegisterType((*ClientGroupedStats)(nil), "pb.ClientGroupedStats") -} - -func init() { proto.RegisterFile("stats.proto", fileDescriptor_b4756a0aec8b9d44) } - -var fileDescriptor_b4756a0aec8b9d44 = []byte{ - // 666 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0xc1, 0x6e, 0xda, 0x4a, - 0x14, 0xc5, 0xd8, 0x21, 0xf1, 0x85, 0xf0, 0xf2, 0x46, 0xef, 0xe5, 0x59, 0x79, 
0x91, 0x4b, 0x51, - 0x15, 0xa1, 0x4a, 0x25, 0x6a, 0xfa, 0x05, 0x25, 0x54, 0x4d, 0xd4, 0x2e, 0x22, 0x83, 0xba, 0x45, - 0xc6, 0xbe, 0x31, 0x56, 0xf0, 0x8c, 0x3b, 0x1e, 0x23, 0xf1, 0x17, 0xfd, 0x85, 0x7e, 0x4b, 0x37, - 0x59, 0x66, 0xd9, 0x55, 0x55, 0x25, 0x1f, 0xd2, 0x6a, 0xae, 0x31, 0xc1, 0x44, 0xea, 0xee, 0x9e, - 0x33, 0x97, 0x3b, 0xf7, 0x1c, 0x9f, 0x01, 0x9a, 0x99, 0xf2, 0x55, 0xd6, 0x4f, 0xa5, 0x50, 0x82, - 0xd5, 0xd3, 0xe9, 0xd1, 0xab, 0x28, 0x56, 0xb3, 0x7c, 0xda, 0x0f, 0x44, 0x72, 0x1a, 0x89, 0x48, - 0x9c, 0xd2, 0xd1, 0x34, 0xbf, 0x26, 0x44, 0x80, 0xaa, 0xe2, 0x27, 0xdd, 0x3b, 0x03, 0x5a, 0x23, - 0x3d, 0xe2, 0xca, 0x5f, 0xce, 0x85, 0x1f, 0xb2, 0x17, 0xb0, 0xef, 0x47, 0xc8, 0xd5, 0x85, 0xc8, - 0x14, 0xf7, 0x13, 0x74, 0x8c, 0x8e, 0xd1, 0xb3, 0xbd, 0x2a, 0xc9, 0x8e, 0x60, 0x8f, 0x88, 0x77, - 0x7c, 0xe1, 0xd4, 0xa9, 0x61, 0x8d, 0xd9, 0x19, 0xec, 0xd0, 0x52, 0x8e, 0xd9, 0x31, 0x7b, 0xcd, - 0xb3, 0xc3, 0x7e, 0x3a, 0xed, 0x9f, 0xcf, 0x63, 0xe4, 0x6a, 0xf3, 0xa2, 0x81, 0x75, 0xfb, 0xe3, - 0x59, 0xcd, 0x2b, 0x5a, 0x59, 0x17, 0x5a, 0xf4, 0xfb, 0x4f, 0x28, 0xb3, 0x58, 0x70, 0xc7, 0xa2, - 0x99, 0x15, 0x8e, 0x9d, 0x40, 0x3b, 0xa0, 0x31, 0xe7, 0x22, 0x49, 0x73, 0x85, 0xa1, 0xb3, 0xd3, - 0x31, 0x7a, 0x7b, 0xde, 0x16, 0xdb, 0xfd, 0x55, 0x07, 0xf6, 0xf4, 0x3e, 0xbd, 0xf2, 0xac, 0xaa, - 0x69, 0x8d, 0xd9, 0x01, 0x98, 0xb8, 0x56, 0xa2, 0x4b, 0xe6, 0xc0, 0xee, 0x62, 0xb5, 0x8b, 0x49, - 0x6c, 0x09, 0xd9, 0xeb, 0x52, 0x9e, 0x45, 0xf2, 0xfe, 0xdd, 0x92, 0x37, 0xc8, 0x83, 0x1b, 0x54, - 0x55, 0x75, 0x0c, 0xac, 0xb9, 0xcf, 0x23, 0xda, 0xd7, 0xf6, 0xa8, 0xd6, 0x3e, 0x2b, 0xe9, 0x07, - 0x28, 0x4b, 0xc9, 0x8d, 0xc2, 0xe7, 0x0a, 0xc9, 0x8e, 0xc1, 0x96, 0x39, 0x57, 0x71, 0x82, 0x97, - 0x43, 0x67, 0x97, 0x3a, 0x1e, 0x09, 0x2d, 0x29, 0xc3, 0xcf, 0x39, 0xf2, 0x00, 0x9d, 0xbd, 0x8e, - 0xd1, 0xb3, 0xbc, 0x35, 0x66, 0x2f, 0xe1, 0x80, 0xdc, 0x7b, 0x1b, 0x45, 0x12, 0x23, 0x5f, 0xe9, - 0x2b, 0x6c, 0x1a, 0xf0, 0x84, 0xd7, 0x62, 0x33, 0x94, 0x8b, 0x38, 0x40, 0x07, 0x0a, 0xb1, 0x2b, - 0xc8, 0x3a, 0xd0, 
0x0c, 0x04, 0x57, 0x7e, 0xcc, 0x51, 0x5e, 0x0e, 0x9d, 0x26, 0x9d, 0x6e, 0x52, - 0x5a, 0x9b, 0xf2, 0xa3, 0xcc, 0x69, 0x75, 0x4c, 0xad, 0x4d, 0xd7, 0xdd, 0xaf, 0x06, 0xfc, 0xfd, - 0xc4, 0x12, 0xf6, 0x0f, 0x19, 0x27, 0x15, 0xb9, 0x6f, 0x79, 0x05, 0xd0, 0x1a, 0xc2, 0x5c, 0x16, - 0xfb, 0xd5, 0x0b, 0x0d, 0x25, 0xfe, 0x43, 0x92, 0xde, 0x4b, 0x91, 0xa7, 0x18, 0x16, 0xe3, 0x2b, - 0x5e, 0x9f, 0x40, 0x9b, 0xf4, 0x8d, 0xe3, 0x04, 0x47, 0xb3, 0xf8, 0x5a, 0x51, 0x96, 0x4c, 0x6f, - 0x8b, 0xed, 0x7e, 0x33, 0xcb, 0x94, 0x6c, 0xce, 0xda, 0xb4, 0xc2, 0xa8, 0x5a, 0xc1, 0xc0, 0xa2, - 0xec, 0x14, 0x21, 0xb1, 0xca, 0x67, 0x20, 0x31, 0x13, 0xb9, 0x0c, 0x70, 0x15, 0x93, 0x35, 0x66, - 0x3d, 0x38, 0xb8, 0x18, 0x8f, 0xaf, 0x26, 0x7a, 0xad, 0x3c, 0x9b, 0x04, 0x22, 0x44, 0x5a, 0x65, - 0xdf, 0x6b, 0x6b, 0x7e, 0x44, 0xf4, 0xb9, 0x08, 0x69, 0xb2, 0x5a, 0xa6, 0x58, 0xc6, 0x43, 0xd7, - 0xec, 0x3f, 0xd8, 0x1d, 0x0e, 0x26, 0x44, 0x17, 0xc1, 0x68, 0x0c, 0x07, 0x63, 0x7d, 0xc0, 0xc0, - 0x9a, 0xc5, 0x2a, 0xa3, 0x30, 0x58, 0x1e, 0xd5, 0xec, 0x10, 0x1a, 0x28, 0xa5, 0x90, 0xd9, 0x2a, - 0x05, 0x2b, 0x54, 0xf1, 0xd6, 0xde, 0xf2, 0xf6, 0x18, 0x6c, 0x71, 0x33, 0xca, 0x93, 0xc4, 0x97, - 0x4b, 0xfa, 0xea, 0x2d, 0xef, 0x91, 0xd0, 0xef, 0x91, 0x66, 0x94, 0x0d, 0x4d, 0x6a, 0xa8, 0x70, - 0xcc, 0x05, 0xc8, 0x96, 0x5c, 0xcd, 0x50, 0xc5, 0x81, 0xfe, 0xfe, 0xfa, 0x2d, 0x6e, 0x30, 0x7a, - 0x86, 0x12, 0xe9, 0x47, 0x5c, 0xe0, 0xfc, 0x42, 0x6f, 0xbc, 0x4f, 0x1b, 0x54, 0x38, 0xf6, 0x1c, - 0x5a, 0x29, 0xa2, 0x9c, 0x94, 0x9e, 0xb7, 0x8b, 0x80, 0x69, 0x6e, 0xb4, 0xf2, 0xfd, 0x7f, 0xb0, - 0xb3, 0xd4, 0xe7, 0x93, 0x9b, 0x98, 0x87, 0xce, 0x5f, 0x85, 0xc9, 0x9a, 0xf8, 0x10, 0xf3, 0x70, - 0xe0, 0xdc, 0xde, 0xbb, 0xc6, 0xdd, 0xbd, 0x6b, 0xfc, 0xbc, 0x77, 0x8d, 0x2f, 0x0f, 0x6e, 0xed, - 0xee, 0xc1, 0xad, 0x7d, 0x7f, 0x70, 0x6b, 0xd3, 0x06, 0xfd, 0xbf, 0xbd, 0xf9, 0x1d, 0x00, 0x00, - 0xff, 0xff, 0xc7, 0x5d, 0xe8, 0x01, 0x21, 0x05, 0x00, 0x00, -} - -func (m *StatsPayload) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = 
make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatsPayload) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.AgentHostname) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.AgentHostname))) - i += copy(dAtA[i:], m.AgentHostname) - } - if len(m.AgentEnv) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.AgentEnv))) - i += copy(dAtA[i:], m.AgentEnv) - } - if len(m.Stats) > 0 { - for _, msg := range m.Stats { - dAtA[i] = 0x1a - i++ - i = encodeVarintStats(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.AgentVersion) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.AgentVersion))) - i += copy(dAtA[i:], m.AgentVersion) - } - if m.ClientComputed { - dAtA[i] = 0x28 - i++ - if m.ClientComputed { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *ClientStatsPayload) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClientStatsPayload) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Hostname) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.Hostname))) - i += copy(dAtA[i:], m.Hostname) - } - if len(m.Env) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.Env))) - i += copy(dAtA[i:], m.Env) - } - if len(m.Version) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - } - if len(m.Stats) > 0 { - for _, msg := range m.Stats { - dAtA[i] = 0x22 - i++ - i = encodeVarintStats(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i 
+= n - } - } - if len(m.Lang) > 0 { - dAtA[i] = 0x2a - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.Lang))) - i += copy(dAtA[i:], m.Lang) - } - if len(m.TracerVersion) > 0 { - dAtA[i] = 0x32 - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.TracerVersion))) - i += copy(dAtA[i:], m.TracerVersion) - } - if len(m.RuntimeID) > 0 { - dAtA[i] = 0x3a - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.RuntimeID))) - i += copy(dAtA[i:], m.RuntimeID) - } - if m.Sequence != 0 { - dAtA[i] = 0x40 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.Sequence)) - } - if len(m.AgentAggregation) > 0 { - dAtA[i] = 0x4a - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.AgentAggregation))) - i += copy(dAtA[i:], m.AgentAggregation) - } - if len(m.Service) > 0 { - dAtA[i] = 0x52 - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.Service))) - i += copy(dAtA[i:], m.Service) - } - if len(m.ContainerID) > 0 { - dAtA[i] = 0x5a - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.ContainerID))) - i += copy(dAtA[i:], m.ContainerID) - } - if len(m.Tags) > 0 { - for _, s := range m.Tags { - dAtA[i] = 0x62 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *ClientStatsBucket) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClientStatsBucket) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Start != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.Start)) - } - if m.Duration != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.Duration)) - } - if len(m.Stats) > 0 { - for _, msg := range m.Stats { - dAtA[i] = 0x1a - i++ - i = encodeVarintStats(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } 
- i += n - } - } - if m.AgentTimeShift != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.AgentTimeShift)) - } - return i, nil -} - -func (m *ClientGroupedStats) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClientGroupedStats) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Service) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.Service))) - i += copy(dAtA[i:], m.Service) - } - if len(m.Name) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Resource) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - } - if m.HTTPStatusCode != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.HTTPStatusCode)) - } - if len(m.Type) > 0 { - dAtA[i] = 0x2a - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - } - if len(m.DBType) > 0 { - dAtA[i] = 0x32 - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.DBType))) - i += copy(dAtA[i:], m.DBType) - } - if m.Hits != 0 { - dAtA[i] = 0x38 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.Hits)) - } - if m.Errors != 0 { - dAtA[i] = 0x40 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.Errors)) - } - if m.Duration != 0 { - dAtA[i] = 0x48 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.Duration)) - } - if len(m.OkSummary) > 0 { - dAtA[i] = 0x52 - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.OkSummary))) - i += copy(dAtA[i:], m.OkSummary) - } - if len(m.ErrorSummary) > 0 { - dAtA[i] = 0x5a - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.ErrorSummary))) - i += copy(dAtA[i:], m.ErrorSummary) - } - if m.Synthetics { - dAtA[i] = 0x60 - i++ - if m.Synthetics { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ 
- } - if m.TopLevelHits != 0 { - dAtA[i] = 0x68 - i++ - i = encodeVarintStats(dAtA, i, uint64(m.TopLevelHits)) - } - if len(m.PeerService) > 0 { - dAtA[i] = 0x72 - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.PeerService))) - i += copy(dAtA[i:], m.PeerService) - } - if len(m.SpanKind) > 0 { - dAtA[i] = 0x7a - i++ - i = encodeVarintStats(dAtA, i, uint64(len(m.SpanKind))) - i += copy(dAtA[i:], m.SpanKind) - } - return i, nil -} - -func encodeVarintStats(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *StatsPayload) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.AgentHostname) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.AgentEnv) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - if len(m.Stats) > 0 { - for _, e := range m.Stats { - l = e.Size() - n += 1 + l + sovStats(uint64(l)) - } - } - l = len(m.AgentVersion) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - if m.ClientComputed { - n += 2 - } - return n -} - -func (m *ClientStatsPayload) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Hostname) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.Env) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - if len(m.Stats) > 0 { - for _, e := range m.Stats { - l = e.Size() - n += 1 + l + sovStats(uint64(l)) - } - } - l = len(m.Lang) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.TracerVersion) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.RuntimeID) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - if m.Sequence != 0 { - n += 1 + sovStats(uint64(m.Sequence)) - } - l = len(m.AgentAggregation) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.Service) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = 
len(m.ContainerID) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - if len(m.Tags) > 0 { - for _, s := range m.Tags { - l = len(s) - n += 1 + l + sovStats(uint64(l)) - } - } - return n -} - -func (m *ClientStatsBucket) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Start != 0 { - n += 1 + sovStats(uint64(m.Start)) - } - if m.Duration != 0 { - n += 1 + sovStats(uint64(m.Duration)) - } - if len(m.Stats) > 0 { - for _, e := range m.Stats { - l = e.Size() - n += 1 + l + sovStats(uint64(l)) - } - } - if m.AgentTimeShift != 0 { - n += 1 + sovStats(uint64(m.AgentTimeShift)) - } - return n -} - -func (m *ClientGroupedStats) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Service) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.Resource) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - if m.HTTPStatusCode != 0 { - n += 1 + sovStats(uint64(m.HTTPStatusCode)) - } - l = len(m.Type) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.DBType) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - if m.Hits != 0 { - n += 1 + sovStats(uint64(m.Hits)) - } - if m.Errors != 0 { - n += 1 + sovStats(uint64(m.Errors)) - } - if m.Duration != 0 { - n += 1 + sovStats(uint64(m.Duration)) - } - l = len(m.OkSummary) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.ErrorSummary) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - if m.Synthetics { - n += 2 - } - if m.TopLevelHits != 0 { - n += 1 + sovStats(uint64(m.TopLevelHits)) - } - l = len(m.PeerService) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - l = len(m.SpanKind) - if l > 0 { - n += 1 + l + sovStats(uint64(l)) - } - return n -} - -func sovStats(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozStats(x uint64) (n int) { - return sovStats(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m 
*StatsPayload) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatsPayload: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatsPayload: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentHostname", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AgentHostname = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentEnv", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AgentEnv = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stats = append(m.Stats, ClientStatsPayload{}) - if err := m.Stats[len(m.Stats)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AgentVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientComputed", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ClientComputed = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return 
ErrInvalidLengthStats - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClientStatsPayload) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClientStatsPayload: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClientStatsPayload: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hostname = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stats = append(m.Stats, ClientStatsBucket{}) - if err := m.Stats[len(m.Stats)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lang", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Lang = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TracerVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TracerVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RuntimeID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) - } - m.Sequence = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Sequence |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentAggregation", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AgentAggregation = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Service = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex 
:= iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tags = append(m.Tags, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClientStatsBucket) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClientStatsBucket: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClientStatsBucket: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) - } - m.Start = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Start |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) - } - m.Duration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Duration |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stats = append(m.Stats, ClientGroupedStats{}) - if err := m.Stats[len(m.Stats)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentTimeShift", wireType) - } - m.AgentTimeShift = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AgentTimeShift |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + 
skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClientGroupedStats) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClientGroupedStats: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClientGroupedStats: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Service = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if 
intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTPStatusCode", wireType) - } - m.HTTPStatusCode = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HTTPStatusCode |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = 
postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DBType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DBType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Hits", wireType) - } - m.Hits = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Hits |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType) - } - m.Errors = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Errors |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) - } - m.Duration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Duration |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OkSummary", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OkSummary = append(m.OkSummary[:0], dAtA[iNdEx:postIndex]...) - if m.OkSummary == nil { - m.OkSummary = []byte{} - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ErrorSummary", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ErrorSummary = append(m.ErrorSummary[:0], dAtA[iNdEx:postIndex]...) 
- if m.ErrorSummary == nil { - m.ErrorSummary = []byte{} - } - iNdEx = postIndex - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Synthetics", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Synthetics = bool(v != 0) - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TopLevelHits", wireType) - } - m.TopLevelHits = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TopLevelHits |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerService", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerService = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SpanKind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStats - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthStats - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStats - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SpanKind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipStats(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthStats - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipStats(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStats - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStats - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStats - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthStats - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthStats - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStats - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) 
<< shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipStats(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthStats - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthStats = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowStats = fmt.Errorf("proto: integer overflow") -) diff --git a/pkg/trace/pb/util.go b/pkg/trace/pb/util.go new file mode 100644 index 0000000000000..bb892573e64c8 --- /dev/null +++ b/pkg/trace/pb/util.go @@ -0,0 +1,22 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-2020 Datadog, Inc. 
+ +package pb + +import ( + "google.golang.org/protobuf/runtime/protoiface" +) + +func PbToStringSlice(s []protoiface.MessageV1) []string { + slice := []string{} + for _, s := range s { + if s == nil { + continue + } + slice = append(slice, s.String()) + } + + return slice +} diff --git a/pkg/trace/sampler/prioritysampler.go b/pkg/trace/sampler/prioritysampler.go index 905a45c45ce40..0f2bc99888041 100644 --- a/pkg/trace/sampler/prioritysampler.go +++ b/pkg/trace/sampler/prioritysampler.go @@ -21,8 +21,8 @@ package sampler import ( "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" - "github.com/DataDog/datadog-agent/pkg/trace/pb" ) const ( diff --git a/pkg/trace/sampler/prioritysampler_test.go b/pkg/trace/sampler/prioritysampler_test.go index a096101895661..158d06c857860 100644 --- a/pkg/trace/sampler/prioritysampler_test.go +++ b/pkg/trace/sampler/prioritysampler_test.go @@ -10,8 +10,8 @@ import ( "testing" "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/stretchr/testify/assert" "go.uber.org/atomic" ) diff --git a/pkg/trace/sampler/rare_sampler.go b/pkg/trace/sampler/rare_sampler.go index 3b898df360cf9..4e0712eddf670 100644 --- a/pkg/trace/sampler/rare_sampler.go +++ b/pkg/trace/sampler/rare_sampler.go @@ -12,9 +12,9 @@ import ( "go.uber.org/atomic" "golang.org/x/time/rate" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/metrics" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) diff --git a/pkg/trace/sampler/rare_sampler_test.go b/pkg/trace/sampler/rare_sampler_test.go index 25fa149f6ce06..33e3f5b9cc736 100644 --- a/pkg/trace/sampler/rare_sampler_test.go +++ 
b/pkg/trace/sampler/rare_sampler_test.go @@ -12,8 +12,8 @@ import ( "github.com/stretchr/testify/assert" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" - "github.com/DataDog/datadog-agent/pkg/trace/pb" ) func TestSpanSeenTTLExpiration(t *testing.T) { diff --git a/pkg/trace/sampler/sampler.go b/pkg/trace/sampler/sampler.go index 66eb643b41913..f8b1251d9fa82 100644 --- a/pkg/trace/sampler/sampler.go +++ b/pkg/trace/sampler/sampler.go @@ -9,7 +9,7 @@ package sampler import ( "math" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) diff --git a/pkg/trace/sampler/scoresampler.go b/pkg/trace/sampler/scoresampler.go index 29809acac82f5..3597c7c0bf5fb 100644 --- a/pkg/trace/sampler/scoresampler.go +++ b/pkg/trace/sampler/scoresampler.go @@ -9,8 +9,8 @@ import ( "sync" "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" - "github.com/DataDog/datadog-agent/pkg/trace/pb" ) const ( diff --git a/pkg/trace/sampler/scoresampler_test.go b/pkg/trace/sampler/scoresampler_test.go index af3e32bf72d04..f902c6901f734 100644 --- a/pkg/trace/sampler/scoresampler_test.go +++ b/pkg/trace/sampler/scoresampler_test.go @@ -15,8 +15,8 @@ import ( "github.com/stretchr/testify/assert" "go.uber.org/atomic" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" - "github.com/DataDog/datadog-agent/pkg/trace/pb" ) const defaultEnv = "testEnv" diff --git a/pkg/trace/sampler/signature.go b/pkg/trace/sampler/signature.go index 1916cc3b6c6c6..bca9a5f56ab96 100644 --- a/pkg/trace/sampler/signature.go +++ b/pkg/trace/sampler/signature.go @@ -8,7 +8,7 @@ package sampler import ( "sort" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb 
"github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) diff --git a/pkg/trace/sampler/signature_test.go b/pkg/trace/sampler/signature_test.go index ac5d01b4b150d..71c4e00a59131 100644 --- a/pkg/trace/sampler/signature_test.go +++ b/pkg/trace/sampler/signature_test.go @@ -9,7 +9,7 @@ import ( "hash/fnv" "testing" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" "github.com/stretchr/testify/assert" diff --git a/pkg/trace/sampler/spansampler_test.go b/pkg/trace/sampler/spansampler_test.go index 7fa72879ee5ec..6c9617dbc920d 100644 --- a/pkg/trace/sampler/spansampler_test.go +++ b/pkg/trace/sampler/spansampler_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) diff --git a/pkg/trace/stats/aggregation.go b/pkg/trace/stats/aggregation.go index 1e2bc0fd87f4e..47bb9716ef8a3 100644 --- a/pkg/trace/stats/aggregation.go +++ b/pkg/trace/stats/aggregation.go @@ -9,8 +9,8 @@ import ( "strconv" "strings" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/log" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) @@ -87,7 +87,7 @@ func NewAggregationFromSpan(s *pb.Span, origin string, aggKey PayloadAggregation } // NewAggregationFromGroup gets the Aggregation key of grouped stats. 
-func NewAggregationFromGroup(g pb.ClientGroupedStats) Aggregation { +func NewAggregationFromGroup(g *pb.ClientGroupedStats) Aggregation { return Aggregation{ BucketsAggregationKey: BucketsAggregationKey{ Resource: g.Resource, diff --git a/pkg/trace/stats/aggregation_test.go b/pkg/trace/stats/aggregation_test.go index 2775cf1985364..dc5856c628897 100644 --- a/pkg/trace/stats/aggregation_test.go +++ b/pkg/trace/stats/aggregation_test.go @@ -8,7 +8,7 @@ package stats import ( "testing" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/stretchr/testify/assert" ) diff --git a/pkg/trace/stats/client_stats_aggregator.go b/pkg/trace/stats/client_stats_aggregator.go index e46411ed276ef..f98c911507e6e 100644 --- a/pkg/trace/stats/client_stats_aggregator.go +++ b/pkg/trace/stats/client_stats_aggregator.go @@ -8,8 +8,8 @@ package stats import ( "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/watchdog" ) @@ -35,8 +35,8 @@ const ( // This and the aggregator timestamp alignment ensure that all counts will have at most one point per second per agent for a specific granularity. // While distributions are not tied to the agent. 
type ClientStatsAggregator struct { - In chan pb.ClientStatsPayload - out chan pb.StatsPayload + In chan *pb.ClientStatsPayload + out chan *pb.StatsPayload buckets map[int64]*bucket // buckets used to aggregate client stats flushTicker *time.Ticker @@ -51,10 +51,10 @@ type ClientStatsAggregator struct { } // NewClientStatsAggregator initializes a new aggregator ready to be started -func NewClientStatsAggregator(conf *config.AgentConfig, out chan pb.StatsPayload) *ClientStatsAggregator { +func NewClientStatsAggregator(conf *config.AgentConfig, out chan *pb.StatsPayload) *ClientStatsAggregator { c := &ClientStatsAggregator{ flushTicker: time.NewTicker(time.Second), - In: make(chan pb.ClientStatsPayload, 10), + In: make(chan *pb.ClientStatsPayload, 10), buckets: make(map[int64]*bucket, 20), out: out, agentEnv: conf.DefaultEnv, @@ -124,7 +124,7 @@ func (a *ClientStatsAggregator) getAggregationBucketTime(now, bs time.Time) (tim return alignAggTs(bs), false } -func (a *ClientStatsAggregator) add(now time.Time, p pb.ClientStatsPayload) { +func (a *ClientStatsAggregator) add(now time.Time, p *pb.ClientStatsPayload) { for _, clientBucket := range p.Stats { clientBucketStart := time.Unix(0, int64(clientBucket.Start)) ts, shifted := a.getAggregationBucketTime(now, clientBucketStart) @@ -137,16 +137,17 @@ func (a *ClientStatsAggregator) add(now time.Time, p pb.ClientStatsPayload) { b = &bucket{ts: ts} a.buckets[ts.Unix()] = b } - p.Stats = []pb.ClientStatsBucket{clientBucket} + p.Stats = []*pb.ClientStatsBucket{clientBucket} a.flush(b.add(p, a.peerSvcAggregation)) } } -func (a *ClientStatsAggregator) flush(p []pb.ClientStatsPayload) { +func (a *ClientStatsAggregator) flush(p []*pb.ClientStatsPayload) { if len(p) == 0 { return } - a.out <- pb.StatsPayload{ + + a.out <- &pb.StatsPayload{ Stats: p, AgentEnv: a.agentEnv, AgentHostname: a.agentHostname, @@ -167,7 +168,7 @@ func alignAggTs(t time.Time) time.Time { type bucket struct { // first is the first payload matching the 
bucket. If a second payload matches the bucket // this field will be empty - first pb.ClientStatsPayload + first *pb.ClientStatsPayload // ts is the timestamp attached to the payload ts time.Time // n counts the number of payloads matching the bucket @@ -176,26 +177,39 @@ type bucket struct { agg map[PayloadAggregationKey]map[BucketsAggregationKey]*aggregatedCounts } -func (b *bucket) add(p pb.ClientStatsPayload, enablePeerSvcAgg bool) []pb.ClientStatsPayload { +func (b *bucket) add(p *pb.ClientStatsPayload, enablePeerSvcAgg bool) []*pb.ClientStatsPayload { b.n++ if b.n == 1 { - b.first = p + b.first = &pb.ClientStatsPayload{ + Hostname: p.GetHostname(), + Env: p.GetEnv(), + Version: p.GetVersion(), + Stats: p.GetStats(), + Lang: p.GetLang(), + TracerVersion: p.GetTracerVersion(), + RuntimeID: p.GetRuntimeID(), + Sequence: p.GetSequence(), + AgentAggregation: p.GetAgentAggregation(), + Service: p.GetService(), + ContainerID: p.GetContainerID(), + Tags: p.GetTags(), + } return nil } // if it's the second payload we flush the first payload with counts trimmed if b.n == 2 { first := b.first - b.first = pb.ClientStatsPayload{} + b.first = &pb.ClientStatsPayload{} b.agg = make(map[PayloadAggregationKey]map[BucketsAggregationKey]*aggregatedCounts, 2) b.aggregateCounts(first, enablePeerSvcAgg) b.aggregateCounts(p, enablePeerSvcAgg) - return []pb.ClientStatsPayload{trimCounts(first), trimCounts(p)} + return []*pb.ClientStatsPayload{trimCounts(first), trimCounts(p)} } b.aggregateCounts(p, enablePeerSvcAgg) - return []pb.ClientStatsPayload{trimCounts(p)} + return []*pb.ClientStatsPayload{trimCounts(p)} } -func (b *bucket) aggregateCounts(p pb.ClientStatsPayload, enablePeerSvcAgg bool) { +func (b *bucket) aggregateCounts(p *pb.ClientStatsPayload, enablePeerSvcAgg bool) { payloadAggKey := newPayloadAggregationKey(p.Env, p.Hostname, p.Version, p.ContainerID) payloadAgg, ok := b.agg[payloadAggKey] if !ok { @@ -208,6 +222,9 @@ func (b *bucket) aggregateCounts(p 
pb.ClientStatsPayload, enablePeerSvcAgg bool) } for _, s := range p.Stats { for _, sb := range s.Stats { + if sb == nil { + continue + } aggKey := newBucketAggregationKey(sb, enablePeerSvcAgg) agg, ok := payloadAgg[aggKey] if !ok { @@ -221,19 +238,19 @@ func (b *bucket) aggregateCounts(p pb.ClientStatsPayload, enablePeerSvcAgg bool) } } -func (b *bucket) flush() []pb.ClientStatsPayload { +func (b *bucket) flush() []*pb.ClientStatsPayload { if b.n == 1 { - return []pb.ClientStatsPayload{b.first} + return []*pb.ClientStatsPayload{b.first} } return b.aggregationToPayloads() } -func (b *bucket) aggregationToPayloads() []pb.ClientStatsPayload { - res := make([]pb.ClientStatsPayload, 0, len(b.agg)) +func (b *bucket) aggregationToPayloads() []*pb.ClientStatsPayload { + res := make([]*pb.ClientStatsPayload, 0, len(b.agg)) for payloadKey, aggrCounts := range b.agg { - stats := make([]pb.ClientGroupedStats, 0, len(aggrCounts)) + stats := make([]*pb.ClientGroupedStats, 0, len(aggrCounts)) for aggrKey, counts := range aggrCounts { - stats = append(stats, pb.ClientGroupedStats{ + stats = append(stats, &pb.ClientGroupedStats{ Service: aggrKey.Service, PeerService: aggrKey.PeerService, Name: aggrKey.Name, @@ -247,13 +264,13 @@ func (b *bucket) aggregationToPayloads() []pb.ClientStatsPayload { Duration: counts.duration, }) } - clientBuckets := []pb.ClientStatsBucket{ + clientBuckets := []*pb.ClientStatsBucket{ { Start: uint64(b.ts.UnixNano()), Duration: uint64(clientBucketDuration.Nanoseconds()), Stats: stats, }} - res = append(res, pb.ClientStatsPayload{ + res = append(res, &pb.ClientStatsPayload{ Hostname: payloadKey.Hostname, Env: payloadKey.Env, Version: payloadKey.Version, @@ -268,7 +285,7 @@ func newPayloadAggregationKey(env, hostname, version, cid string) PayloadAggrega return PayloadAggregationKey{Env: env, Hostname: hostname, Version: version, ContainerID: cid} } -func newBucketAggregationKey(b pb.ClientGroupedStats, enablePeerSvcAgg bool) BucketsAggregationKey { +func 
newBucketAggregationKey(b *pb.ClientGroupedStats, enablePeerSvcAgg bool) BucketsAggregationKey { k := BucketsAggregationKey{ Service: b.Service, Name: b.Name, @@ -284,10 +301,13 @@ func newBucketAggregationKey(b pb.ClientGroupedStats, enablePeerSvcAgg bool) Buc return k } -func trimCounts(p pb.ClientStatsPayload) pb.ClientStatsPayload { +func trimCounts(p *pb.ClientStatsPayload) *pb.ClientStatsPayload { p.AgentAggregation = keyDistributions for _, s := range p.Stats { for i, b := range s.Stats { + if b == nil { + continue + } b.Hits = 0 b.Errors = 0 b.Duration = 0 diff --git a/pkg/trace/stats/client_stats_aggregator_test.go b/pkg/trace/stats/client_stats_aggregator_test.go index 746c36b4372a7..8da6115a6f414 100644 --- a/pkg/trace/stats/client_stats_aggregator_test.go +++ b/pkg/trace/stats/client_stats_aggregator_test.go @@ -11,7 +11,9 @@ import ( fuzz "github.com/google/gofuzz" "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/runtime/protoiface" + proto "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/pb" ) @@ -23,18 +25,18 @@ func newTestAggregator() *ClientStatsAggregator { DefaultEnv: "agentEnv", Hostname: "agentHostname", } - a := NewClientStatsAggregator(conf, make(chan pb.StatsPayload, 100)) + a := NewClientStatsAggregator(conf, make(chan *proto.StatsPayload, 100)) a.Start() a.flushTicker.Stop() return a } -func wrapPayload(p pb.ClientStatsPayload) pb.StatsPayload { - return wrapPayloads([]pb.ClientStatsPayload{p}) +func wrapPayload(p *proto.ClientStatsPayload) *proto.StatsPayload { + return wrapPayloads([]*proto.ClientStatsPayload{p}) } -func wrapPayloads(p []pb.ClientStatsPayload) pb.StatsPayload { - return pb.StatsPayload{ +func wrapPayloads(p []*proto.ClientStatsPayload) *proto.StatsPayload { + return &proto.StatsPayload{ AgentEnv: "agentEnv", AgentHostname: "agentHostname", ClientComputed: true, @@ -42,14 +44,14 @@ func 
wrapPayloads(p []pb.ClientStatsPayload) pb.StatsPayload { } } -func payloadWithCounts(ts time.Time, k BucketsAggregationKey, hits, errors, duration uint64) pb.ClientStatsPayload { - return pb.ClientStatsPayload{ +func payloadWithCounts(ts time.Time, k BucketsAggregationKey, hits, errors, duration uint64) *proto.ClientStatsPayload { + return &proto.ClientStatsPayload{ Env: "test-env", Version: "test-version", - Stats: []pb.ClientStatsBucket{ + Stats: []*proto.ClientStatsBucket{ { Start: uint64(ts.UnixNano()), - Stats: []pb.ClientGroupedStats{ + Stats: []*proto.ClientGroupedStats{ { Service: k.Service, PeerService: k.PeerService, @@ -69,32 +71,35 @@ func payloadWithCounts(ts time.Time, k BucketsAggregationKey, hits, errors, dura } } -func getTestStatsWithStart(start time.Time) pb.ClientStatsPayload { - b := pb.ClientStatsBucket{} - fuzzer.Fuzz(&b) +func getTestStatsWithStart(start time.Time) *proto.ClientStatsPayload { + b := &proto.ClientStatsBucket{} + fuzzer.Fuzz(b) b.Start = uint64(start.UnixNano()) - p := pb.ClientStatsPayload{} - fuzzer.Fuzz(&p) + p := &proto.ClientStatsPayload{} + fuzzer.Fuzz(p) p.Tags = nil - p.Stats = []pb.ClientStatsBucket{b} + p.Stats = []*proto.ClientStatsBucket{b} return p } -func assertDistribPayload(t *testing.T, withCounts, res pb.StatsPayload) { +func assertDistribPayload(t *testing.T, withCounts, res *proto.StatsPayload) { for j, p := range withCounts.Stats { withCounts.Stats[j].AgentAggregation = keyDistributions for _, s := range p.Stats { for i := range s.Stats { + if s.Stats[i] == nil { + continue + } s.Stats[i].Hits = 0 s.Stats[i].Errors = 0 s.Stats[i].Duration = 0 } } } - assert.Equal(t, withCounts, res) + assert.Equal(t, withCounts.String(), res.String()) } -func assertAggCountsPayload(t *testing.T, aggCounts pb.StatsPayload) { +func assertAggCountsPayload(t *testing.T, aggCounts *proto.StatsPayload) { for _, p := range aggCounts.Stats { assert.Empty(t, p.Lang) assert.Empty(t, p.TracerVersion) @@ -110,7 +115,7 @@ func 
assertAggCountsPayload(t *testing.T, aggCounts pb.StatsPayload) { } } -func agg2Counts(insertionTime time.Time, p pb.ClientStatsPayload) pb.ClientStatsPayload { +func agg2Counts(insertionTime time.Time, p *proto.ClientStatsPayload) *proto.ClientStatsPayload { p.Lang = "" p.TracerVersion = "" p.RuntimeID = "" @@ -118,18 +123,21 @@ func agg2Counts(insertionTime time.Time, p pb.ClientStatsPayload) pb.ClientStats p.AgentAggregation = "counts" p.Service = "" p.ContainerID = "" - for i, s := range p.Stats { - p.Stats[i].Start = uint64(alignAggTs(insertionTime).UnixNano()) - p.Stats[i].Duration = uint64(clientBucketDuration.Nanoseconds()) - p.Stats[i].AgentTimeShift = 0 - for j := range s.Stats { - s.Stats[j].DBType = "" - s.Stats[j].Hits *= 2 - s.Stats[j].Errors *= 2 - s.Stats[j].Duration *= 2 - s.Stats[j].TopLevelHits = 0 - s.Stats[j].OkSummary = nil - s.Stats[j].ErrorSummary = nil + for _, s := range p.Stats { + s.Start = uint64(alignAggTs(insertionTime).UnixNano()) + s.Duration = uint64(clientBucketDuration.Nanoseconds()) + s.AgentTimeShift = 0 + for _, stat := range s.Stats { + if stat == nil { + continue + } + stat.DBType = "" + stat.Hits *= 2 + stat.Errors *= 2 + stat.Duration *= 2 + stat.TopLevelHits = 0 + stat.OkSummary = nil + stat.ErrorSummary = nil } } return p @@ -148,7 +156,8 @@ func TestAggregatorFlushTime(t *testing.T) { a.flushOnTime(testTime.Add(oldestBucketStart - bucketDuration)) assert.Len(a.out, 0) a.flushOnTime(testTime.Add(oldestBucketStart)) - assert.Equal(<-a.out, wrapPayload(testPayload)) + s := <-a.out + assert.Equal(s.String(), wrapPayload(testPayload).String()) assert.Len(a.buckets, 0) } @@ -172,9 +181,10 @@ func TestMergeMany(t *testing.T) { assert.Len(a.out, 3) a.flushOnTime(payloadTime.Add(oldestBucketStart)) assert.Len(a.out, 4) - assertDistribPayload(t, wrapPayloads([]pb.ClientStatsPayload{merge1, merge2}), <-a.out) + assertDistribPayload(t, wrapPayloads([]*proto.ClientStatsPayload{merge1, merge2}), <-a.out) assertDistribPayload(t, 
wrapPayload(merge3), <-a.out) - assert.Equal(wrapPayload(other), <-a.out) + s := <-a.out + assert.Equal(wrapPayload(other).String(), s.String()) assertAggCountsPayload(t, <-a.out) assert.Len(a.buckets, 0) } @@ -224,7 +234,8 @@ func TestTimeShifts(t *testing.T) { assert.Len(a.out, 1) stats.Stats[0].AgentTimeShift = -tc.expectedShift.Nanoseconds() stats.Stats[0].Start -= uint64(tc.expectedShift.Nanoseconds()) - assert.Equal(wrapPayload(stats), <-a.out) + s := <-a.out + assert.Equal(wrapPayload(stats).String(), s.String()) }) } } @@ -244,11 +255,28 @@ func TestFuzzCountFields(t *testing.T) { assert.Len(a.out, 1) a.flushOnTime(payloadTime.Add(oldestBucketStart)) assert.Len(a.out, 2) - assertDistribPayload(t, wrapPayloads([]pb.ClientStatsPayload{deepCopy(merge1), deepCopy(merge1)}), <-a.out) + assertDistribPayload(t, wrapPayloads([]*proto.ClientStatsPayload{deepCopy(merge1), deepCopy(merge1)}), <-a.out) aggCounts := <-a.out expectedAggCounts := wrapPayload(agg2Counts(insertionTime, merge1)) + // map gives random orders post aggregation - assert.ElementsMatch(aggCounts.Stats[0].Stats[0].Stats, expectedAggCounts.Stats[0].Stats[0].Stats) + + actual := []protoiface.MessageV1{} + expected := []protoiface.MessageV1{} + for _, s := range expectedAggCounts.Stats[0].Stats[0].Stats { + if s == nil { + continue + } + actual = append(actual, s) + } + for _, s := range aggCounts.Stats[0].Stats[0].Stats { + if s == nil { + continue + } + expected = append(expected, s) + } + + assert.ElementsMatch(pb.PbToStringSlice(expected), pb.PbToStringSlice(actual)) aggCounts.Stats[0].Stats[0].Stats = nil expectedAggCounts.Stats[0].Stats[0].Stats = nil assert.Equal(expectedAggCounts, aggCounts) @@ -257,65 +285,148 @@ func TestFuzzCountFields(t *testing.T) { } func TestCountAggregation(t *testing.T) { + assert := assert.New(t) + type tt struct { + k BucketsAggregationKey + res *proto.ClientGroupedStats + name string + } + tts := []tt{ + { + BucketsAggregationKey{Service: "s"}, + 
&proto.ClientGroupedStats{Service: "s"}, + "service", + }, + { + BucketsAggregationKey{Name: "n"}, + &proto.ClientGroupedStats{Name: "n"}, + "name", + }, + { + BucketsAggregationKey{Resource: "r"}, + &proto.ClientGroupedStats{Resource: "r"}, + "resource", + }, + { + BucketsAggregationKey{Type: "t"}, + &proto.ClientGroupedStats{Type: "t"}, + "resource", + }, + { + BucketsAggregationKey{Synthetics: true}, + &proto.ClientGroupedStats{Synthetics: true}, + "synthetics", + }, + { + BucketsAggregationKey{StatusCode: 10}, + &proto.ClientGroupedStats{HTTPStatusCode: 10}, + "status", + }, + } + for _, tc := range tts { + t.Run(tc.name, func(t *testing.T) { + a := newTestAggregator() + testTime := time.Unix(time.Now().Unix(), 0) + + c1 := payloadWithCounts(testTime, tc.k, 11, 7, 100) + c2 := payloadWithCounts(testTime, tc.k, 27, 2, 300) + c3 := payloadWithCounts(testTime, tc.k, 5, 10, 3) + keyDefault := BucketsAggregationKey{} + cDefault := payloadWithCounts(testTime, keyDefault, 0, 2, 4) + + assert.Len(a.out, 0) + a.add(testTime, deepCopy(c1)) + a.add(testTime, deepCopy(c2)) + a.add(testTime, deepCopy(c3)) + a.add(testTime, deepCopy(cDefault)) + assert.Len(a.out, 3) + a.flushOnTime(testTime.Add(oldestBucketStart + time.Nanosecond)) + assert.Len(a.out, 4) + + assertDistribPayload(t, wrapPayloads([]*proto.ClientStatsPayload{c1, c2}), <-a.out) + assertDistribPayload(t, wrapPayload(c3), <-a.out) + assertDistribPayload(t, wrapPayload(cDefault), <-a.out) + aggCounts := <-a.out + assertAggCountsPayload(t, aggCounts) + + tc.res.Hits = 43 + tc.res.Errors = 19 + tc.res.Duration = 403 + assert.ElementsMatch(aggCounts.Stats[0].Stats[0].Stats, []*proto.ClientGroupedStats{ + tc.res, + // Additional grouped stat object that corresponds to the keyDefault/cDefault. + // We do not expect this to be aggregated with the non-default key in the test. 
+ { + Hits: 0, + Errors: 2, + Duration: 4, + }, + }) + assert.Len(a.buckets, 0) + }) + } +} + +func TestCountAggregationPeerService(t *testing.T) { assert := assert.New(t) type tt struct { k BucketsAggregationKey - res pb.ClientGroupedStats + res *proto.ClientGroupedStats name string enablePeerSvcAgg bool } tts := []tt{ { BucketsAggregationKey{Service: "s"}, - pb.ClientGroupedStats{Service: "s"}, + &proto.ClientGroupedStats{Service: "s"}, "service", false, }, { BucketsAggregationKey{Name: "n"}, - pb.ClientGroupedStats{Name: "n"}, + &proto.ClientGroupedStats{Name: "n"}, "name", false, }, { BucketsAggregationKey{Resource: "r"}, - pb.ClientGroupedStats{Resource: "r"}, + &proto.ClientGroupedStats{Resource: "r"}, "resource", false, }, { BucketsAggregationKey{Type: "t"}, - pb.ClientGroupedStats{Type: "t"}, + &proto.ClientGroupedStats{Type: "t"}, "resource", false, }, { BucketsAggregationKey{Synthetics: true}, - pb.ClientGroupedStats{Synthetics: true}, + &proto.ClientGroupedStats{Synthetics: true}, "synthetics", false, }, { BucketsAggregationKey{StatusCode: 10}, - pb.ClientGroupedStats{HTTPStatusCode: 10}, + &proto.ClientGroupedStats{HTTPStatusCode: 10}, "status", false, }, { BucketsAggregationKey{Service: "s", PeerService: "remote-service"}, - pb.ClientGroupedStats{Service: "s", PeerService: ""}, + &proto.ClientGroupedStats{Service: "s", PeerService: ""}, "peer.service disabled", false, }, { BucketsAggregationKey{Service: "s", PeerService: "remote-service"}, - pb.ClientGroupedStats{Service: "s", PeerService: "remote-service"}, + &proto.ClientGroupedStats{Service: "s", PeerService: "remote-service"}, "peer.service enabled", true, }, { BucketsAggregationKey{SpanKind: "client"}, - pb.ClientGroupedStats{SpanKind: "client"}, + &proto.ClientGroupedStats{SpanKind: "client"}, "span.kind", false, }, @@ -341,7 +452,7 @@ func TestCountAggregation(t *testing.T) { a.flushOnTime(testTime.Add(oldestBucketStart + time.Nanosecond)) assert.Len(a.out, 4) - assertDistribPayload(t, 
wrapPayloads([]pb.ClientStatsPayload{c1, c2}), <-a.out) + assertDistribPayload(t, wrapPayloads([]*proto.ClientStatsPayload{c1, c2}), <-a.out) assertDistribPayload(t, wrapPayload(c3), <-a.out) assertDistribPayload(t, wrapPayload(cDefault), <-a.out) aggCounts := <-a.out @@ -350,7 +461,7 @@ func TestCountAggregation(t *testing.T) { tc.res.Hits = 43 tc.res.Errors = 19 tc.res.Duration = 403 - assert.ElementsMatch(aggCounts.Stats[0].Stats[0].Stats, []pb.ClientGroupedStats{ + assert.ElementsMatch(aggCounts.Stats[0].Stats[0].Stats, []*proto.ClientGroupedStats{ tc.res, // Additional grouped stat object that corresponds to the keyDefault/cDefault. // We do not expect this to be aggregated with the non-default key in the test. @@ -368,41 +479,76 @@ func TestCountAggregation(t *testing.T) { func TestNewBucketAggregationKeyPeerService(t *testing.T) { t.Run("disabled", func(t *testing.T) { assert := assert.New(t) - r := newBucketAggregationKey(pb.ClientGroupedStats{Service: "a", PeerService: "remote-test"}, false) + r := newBucketAggregationKey(&proto.ClientGroupedStats{Service: "a", PeerService: "remote-test"}, false) assert.Equal(BucketsAggregationKey{Service: "a"}, r) }) t.Run("enabled", func(t *testing.T) { assert := assert.New(t) - r := newBucketAggregationKey(pb.ClientGroupedStats{Service: "a", PeerService: "remote-test"}, true) + r := newBucketAggregationKey(&proto.ClientGroupedStats{Service: "a", PeerService: "remote-test"}, true) assert.Equal(BucketsAggregationKey{Service: "a", PeerService: "remote-test"}, r) }) } -func deepCopy(p pb.ClientStatsPayload) pb.ClientStatsPayload { - new := p +func deepCopy(p *proto.ClientStatsPayload) *proto.ClientStatsPayload { + new := &proto.ClientStatsPayload{ + Hostname: p.GetHostname(), + Env: p.GetEnv(), + Version: p.GetVersion(), + Lang: p.GetLang(), + TracerVersion: p.GetTracerVersion(), + RuntimeID: p.GetRuntimeID(), + Sequence: p.GetSequence(), + AgentAggregation: p.GetAgentAggregation(), + Service: p.GetService(), + ContainerID: 
p.GetContainerID(), + Tags: p.GetTags(), + } new.Stats = deepCopyStatsBucket(p.Stats) return new } -func deepCopyStatsBucket(s []pb.ClientStatsBucket) []pb.ClientStatsBucket { +func deepCopyStatsBucket(s []*proto.ClientStatsBucket) []*proto.ClientStatsBucket { if s == nil { return nil } - new := make([]pb.ClientStatsBucket, len(s)) + new := make([]*proto.ClientStatsBucket, len(s)) for i, b := range s { - new[i] = b + new[i] = &proto.ClientStatsBucket{ + Start: b.GetStart(), + Duration: b.GetDuration(), + AgentTimeShift: b.GetAgentTimeShift(), + } new[i].Stats = deepCopyGroupedStats(b.Stats) } return new } -func deepCopyGroupedStats(s []pb.ClientGroupedStats) []pb.ClientGroupedStats { +func deepCopyGroupedStats(s []*proto.ClientGroupedStats) []*proto.ClientGroupedStats { if s == nil { return nil } - new := make([]pb.ClientGroupedStats, len(s)) + new := make([]*proto.ClientGroupedStats, len(s)) for i, b := range s { - new[i] = b + if b == nil { + new[i] = nil + continue + } + + new[i] = &proto.ClientGroupedStats{ + Service: b.GetService(), + Name: b.GetName(), + Resource: b.GetResource(), + HTTPStatusCode: b.GetHTTPStatusCode(), + Type: b.GetType(), + DBType: b.GetDBType(), + Hits: b.GetHits(), + Errors: b.GetErrors(), + Duration: b.GetDuration(), + Synthetics: b.GetSynthetics(), + TopLevelHits: b.GetTopLevelHits(), + PeerService: b.GetPeerService(), + SpanKind: b.GetSpanKind(), + } if b.OkSummary != nil { new[i].OkSummary = make([]byte, len(b.OkSummary)) copy(new[i].OkSummary, b.OkSummary) diff --git a/pkg/trace/stats/concentrator.go b/pkg/trace/stats/concentrator.go index d1db4089ca6fb..e59bfc6eca055 100644 --- a/pkg/trace/stats/concentrator.go +++ b/pkg/trace/stats/concentrator.go @@ -10,9 +10,9 @@ import ( "sync" "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/log" - "github.com/DataDog/datadog-agent/pkg/trace/pb" 
"github.com/DataDog/datadog-agent/pkg/trace/traceutil" "github.com/DataDog/datadog-agent/pkg/trace/watchdog" ) @@ -27,7 +27,7 @@ const defaultBufferLen = 2 // allowing to find the gold (stats) amongst the traces. type Concentrator struct { In chan Input - Out chan pb.StatsPayload + Out chan *pb.StatsPayload // bucket duration in nanoseconds bsize int64 @@ -51,7 +51,7 @@ type Concentrator struct { } // NewConcentrator initializes a new concentrator ready to be started -func NewConcentrator(conf *config.AgentConfig, out chan pb.StatsPayload, now time.Time) *Concentrator { +func NewConcentrator(conf *config.AgentConfig, out chan *pb.StatsPayload, now time.Time) *Concentrator { bsize := conf.BucketInterval.Nanoseconds() c := Concentrator{ bsize: bsize, @@ -207,12 +207,12 @@ func (c *Concentrator) addNow(pt *traceutil.ProcessedTrace, containerID string) // Flush deletes and returns complete statistic buckets. // The force boolean guarantees flushing all buckets if set to true. -func (c *Concentrator) Flush(force bool) pb.StatsPayload { +func (c *Concentrator) Flush(force bool) *pb.StatsPayload { return c.flushNow(time.Now().UnixNano(), force) } -func (c *Concentrator) flushNow(now int64, force bool) pb.StatsPayload { - m := make(map[PayloadAggregationKey][]pb.ClientStatsBucket) +func (c *Concentrator) flushNow(now int64, force bool) *pb.StatsPayload { + m := make(map[PayloadAggregationKey][]*pb.ClientStatsBucket) c.mu.Lock() for ts, srb := range c.buckets { @@ -241,9 +241,9 @@ func (c *Concentrator) flushNow(now int64, force bool) pb.StatsPayload { c.oldestTs = newOldestTs } c.mu.Unlock() - sb := make([]pb.ClientStatsPayload, 0, len(m)) + sb := make([]*pb.ClientStatsPayload, 0, len(m)) for k, s := range m { - p := pb.ClientStatsPayload{ + p := &pb.ClientStatsPayload{ Env: k.Env, Hostname: k.Hostname, ContainerID: k.ContainerID, @@ -252,7 +252,7 @@ func (c *Concentrator) flushNow(now int64, force bool) pb.StatsPayload { } sb = append(sb, p) } - return 
pb.StatsPayload{Stats: sb, AgentHostname: c.agentHostname, AgentEnv: c.agentEnv, AgentVersion: c.agentVersion} + return &pb.StatsPayload{Stats: sb, AgentHostname: c.agentHostname, AgentEnv: c.agentEnv, AgentVersion: c.agentVersion} } // alignTs returns the provided timestamp truncated to the bucket size. diff --git a/pkg/trace/stats/concentrator_test.go b/pkg/trace/stats/concentrator_test.go index 6aa6c1ae6de44..06255883788fc 100644 --- a/pkg/trace/stats/concentrator_test.go +++ b/pkg/trace/stats/concentrator_test.go @@ -11,8 +11,8 @@ import ( "testing" "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" @@ -27,7 +27,7 @@ var ( ) func NewTestConcentrator(now time.Time) *Concentrator { - statsChan := make(chan pb.StatsPayload) + statsChan := make(chan *pb.StatsPayload) cfg := config.AgentConfig{ BucketInterval: time.Duration(testBucketInterval), AgentVersion: "0.99.0", @@ -79,9 +79,9 @@ func spansToTraceChunk(spans []*pb.Span) *pb.TraceChunk { } // assertCountsEqual is a test utility function to assert expected == actual for count aggregations. 
-func assertCountsEqual(t *testing.T, expected []pb.ClientGroupedStats, actual []pb.ClientGroupedStats) { - expectedM := make(map[BucketsAggregationKey]pb.ClientGroupedStats) - actualM := make(map[BucketsAggregationKey]pb.ClientGroupedStats) +func assertCountsEqual(t *testing.T, expected []*pb.ClientGroupedStats, actual []*pb.ClientGroupedStats) { + expectedM := make(map[BucketsAggregationKey]*pb.ClientGroupedStats) + actualM := make(map[BucketsAggregationKey]*pb.ClientGroupedStats) for _, e := range expected { e.ErrorSummary = nil e.OkSummary = nil @@ -153,7 +153,7 @@ func TestConcentratorOldestTs(t *testing.T) { // First oldest bucket aggregates old past time buckets, so each count // should be an aggregated total across the spans. - expected := []pb.ClientGroupedStats{ + expected := []*pb.ClientGroupedStats{ { Service: "A1", Resource: "resource1", @@ -190,7 +190,7 @@ func TestConcentratorOldestTs(t *testing.T) { // First oldest bucket aggregates, it should have it all except the // last four spans that have offset of 0. - expected := []pb.ClientGroupedStats{ + expected := []*pb.ClientGroupedStats{ { Service: "A1", Resource: "resource1", @@ -210,7 +210,7 @@ func TestConcentratorOldestTs(t *testing.T) { } // Stats of the last four spans. 
- expected = []pb.ClientGroupedStats{ + expected = []*pb.ClientGroupedStats{ { Service: "A1", Resource: "resource1", @@ -317,9 +317,9 @@ func TestConcentratorStatsCounts(t *testing.T) { testSpan(now, 6, 0, 24, 0, "A1", "resource2", 0, nil), } - expectedCountValByKeyByTime := make(map[int64][]pb.ClientGroupedStats) + expectedCountValByKeyByTime := make(map[int64][]*pb.ClientGroupedStats) // 2-bucket old flush - expectedCountValByKeyByTime[alignedNow-2*testBucketInterval] = []pb.ClientGroupedStats{ + expectedCountValByKeyByTime[alignedNow-2*testBucketInterval] = []*pb.ClientGroupedStats{ { Service: "A1", Resource: "resource1", @@ -374,7 +374,7 @@ func TestConcentratorStatsCounts(t *testing.T) { }, } // 1-bucket old flush - expectedCountValByKeyByTime[alignedNow-testBucketInterval] = []pb.ClientGroupedStats{ + expectedCountValByKeyByTime[alignedNow-testBucketInterval] = []*pb.ClientGroupedStats{ { Service: "A1", Resource: "resource1", @@ -427,7 +427,7 @@ func TestConcentratorStatsCounts(t *testing.T) { }, } // last bucket to be flushed - expectedCountValByKeyByTime[alignedNow] = []pb.ClientGroupedStats{ + expectedCountValByKeyByTime[alignedNow] = []*pb.ClientGroupedStats{ { Service: "A1", Resource: "resource2", @@ -439,7 +439,7 @@ func TestConcentratorStatsCounts(t *testing.T) { Errors: 0, }, } - expectedCountValByKeyByTime[alignedNow+testBucketInterval] = []pb.ClientGroupedStats{} + expectedCountValByKeyByTime[alignedNow+testBucketInterval] = []*pb.ClientGroupedStats{} traceutil.ComputeTopLevel(spans) testTrace := toProcessedTrace(spans, "none", "") diff --git a/pkg/trace/stats/statsraw.go b/pkg/trace/stats/statsraw.go index d7a22691e43c2..a56531eef8eb2 100644 --- a/pkg/trace/stats/statsraw.go +++ b/pkg/trace/stats/statsraw.go @@ -8,8 +8,8 @@ package stats import ( "math/rand" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/log" - "github.com/DataDog/datadog-agent/pkg/trace/pb" 
"github.com/DataDog/sketches-go/ddsketch" "github.com/golang/protobuf/proto" @@ -48,18 +48,18 @@ func round(f float64) uint64 { return i } -func (s *groupedStats) export(a Aggregation) (pb.ClientGroupedStats, error) { +func (s *groupedStats) export(a Aggregation) (*pb.ClientGroupedStats, error) { msg := s.okDistribution.ToProto() okSummary, err := proto.Marshal(msg) if err != nil { - return pb.ClientGroupedStats{}, err + return &pb.ClientGroupedStats{}, err } msg = s.errDistribution.ToProto() errSummary, err := proto.Marshal(msg) if err != nil { - return pb.ClientGroupedStats{}, err + return &pb.ClientGroupedStats{}, err } - return pb.ClientGroupedStats{ + return &pb.ClientGroupedStats{ Service: a.Service, Name: a.Name, Resource: a.Resource, @@ -118,8 +118,8 @@ func NewRawBucket(ts, d uint64) *RawBucket { // Export transforms a RawBucket into a ClientStatsBucket, typically used // before communicating data to the API, as RawBucket is the internal // type while ClientStatsBucket is the public, shared one. 
-func (sb *RawBucket) Export() map[PayloadAggregationKey]pb.ClientStatsBucket { - m := make(map[PayloadAggregationKey]pb.ClientStatsBucket) +func (sb *RawBucket) Export() map[PayloadAggregationKey]*pb.ClientStatsBucket { + m := make(map[PayloadAggregationKey]*pb.ClientStatsBucket) for k, v := range sb.data { b, err := v.export(k) if err != nil { @@ -134,7 +134,7 @@ func (sb *RawBucket) Export() map[PayloadAggregationKey]pb.ClientStatsBucket { } s, ok := m[key] if !ok { - s = pb.ClientStatsBucket{ + s = &pb.ClientStatsBucket{ Start: sb.start, Duration: sb.duration, } diff --git a/pkg/trace/stats/statsraw_test.go b/pkg/trace/stats/statsraw_test.go index ad5db46be6d46..ea89a3ffc7c93 100644 --- a/pkg/trace/stats/statsraw_test.go +++ b/pkg/trace/stats/statsraw_test.go @@ -8,7 +8,7 @@ package stats import ( "testing" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/stretchr/testify/assert" ) diff --git a/pkg/trace/stats/weight.go b/pkg/trace/stats/weight.go index ee19fe9251fba..d28ca5e46eed6 100644 --- a/pkg/trace/stats/weight.go +++ b/pkg/trace/stats/weight.go @@ -5,9 +5,7 @@ package stats -import ( - "github.com/DataDog/datadog-agent/pkg/trace/pb" -) +import pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" // keySamplingRateGlobal is a metric key holding the global sampling rate. 
const keySamplingRateGlobal = "_sample_rate" diff --git a/pkg/trace/stats/weight_test.go b/pkg/trace/stats/weight_test.go index 6468300eb66bd..5a57441498e8a 100644 --- a/pkg/trace/stats/weight_test.go +++ b/pkg/trace/stats/weight_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" ) func fixedSpan() *pb.Span { diff --git a/pkg/trace/testutil/generate.go b/pkg/trace/testutil/generate.go index ea6e7afd872dd..741014c38f2e2 100644 --- a/pkg/trace/testutil/generate.go +++ b/pkg/trace/testutil/generate.go @@ -9,7 +9,7 @@ import ( "math/rand" "time" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" ) // SpanConfig defines the configuration for generating spans. diff --git a/pkg/trace/testutil/span.go b/pkg/trace/testutil/span.go index 2dadd727863d7..adfaf7328db0c 100644 --- a/pkg/trace/testutil/span.go +++ b/pkg/trace/testutil/span.go @@ -13,7 +13,7 @@ import ( "math/rand" "time" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" ) diff --git a/pkg/trace/testutil/stats.go b/pkg/trace/testutil/stats.go index 658988dd3cb99..d6cb57475f612 100644 --- a/pkg/trace/testutil/stats.go +++ b/pkg/trace/testutil/stats.go @@ -6,9 +6,9 @@ package testutil import ( + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/metrics" "github.com/DataDog/datadog-agent/pkg/trace/metrics/timing" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/stats" ) @@ -19,7 +19,7 @@ const ( ) // BucketWithSpans returns a stats bucket populated with spans stats -func BucketWithSpans(spans []*pb.Span) pb.ClientStatsBucket { +func BucketWithSpans(spans []*pb.Span) 
*pb.ClientStatsBucket { srb := stats.NewRawBucket(0, 1e9) aggKey := stats.PayloadAggregationKey{ Env: defaultEnv, @@ -36,14 +36,14 @@ func BucketWithSpans(spans []*pb.Span) pb.ClientStatsBucket { if len(buckets) != 1 { panic("All entries must have the same payload key.") } - for _, b := range srb.Export() { + for _, b := range buckets { return b } - return pb.ClientStatsBucket{} + return &pb.ClientStatsBucket{} } // RandomBucket returns a bucket made from n random spans, useful to run benchmarks and tests -func RandomBucket(n int) pb.ClientStatsBucket { +func RandomBucket(n int) *pb.ClientStatsBucket { spans := make([]*pb.Span, 0, n) for i := 0; i < n; i++ { spans = append(spans, RandomSpan()) @@ -53,12 +53,12 @@ func RandomBucket(n int) pb.ClientStatsBucket { } // StatsPayloadSample returns a populated client stats payload -func StatsPayloadSample() pb.ClientStatsPayload { - bucket := func(start, duration uint64) pb.ClientStatsBucket { - return pb.ClientStatsBucket{ +func StatsPayloadSample() *pb.ClientStatsPayload { + bucket := func(start, duration uint64) *pb.ClientStatsBucket { + return &pb.ClientStatsBucket{ Start: start, Duration: duration, - Stats: []pb.ClientGroupedStats{ + Stats: []*pb.ClientGroupedStats{ { Name: "name", Service: "service", @@ -70,11 +70,11 @@ func StatsPayloadSample() pb.ClientStatsPayload { }, } } - return pb.ClientStatsPayload{ + return &pb.ClientStatsPayload{ Hostname: "h", Env: "env", Version: "1.2", - Stats: []pb.ClientStatsBucket{ + Stats: []*pb.ClientStatsBucket{ bucket(1, 10), bucket(500, 100342), }, diff --git a/pkg/trace/testutil/trace.go b/pkg/trace/testutil/trace.go index 5c5f8b5889ca4..b5e3772e16229 100644 --- a/pkg/trace/testutil/trace.go +++ b/pkg/trace/testutil/trace.go @@ -8,7 +8,7 @@ package testutil import ( "math/rand" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/sampler" ) diff --git 
a/pkg/trace/traceutil/processed_trace.go b/pkg/trace/traceutil/processed_trace.go index e3558cd02133b..6295bc1d65075 100644 --- a/pkg/trace/traceutil/processed_trace.go +++ b/pkg/trace/traceutil/processed_trace.go @@ -5,9 +5,7 @@ package traceutil -import ( - "github.com/DataDog/datadog-agent/pkg/trace/pb" -) +import pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" // ProcessedTrace represents a trace being processed in the agent. type ProcessedTrace struct { diff --git a/pkg/trace/traceutil/processed_trace_test.go b/pkg/trace/traceutil/processed_trace_test.go index 9977ccb0cb691..a96f1473cb3b3 100644 --- a/pkg/trace/traceutil/processed_trace_test.go +++ b/pkg/trace/traceutil/processed_trace_test.go @@ -8,7 +8,7 @@ package traceutil import ( "testing" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/stretchr/testify/assert" ) diff --git a/pkg/trace/traceutil/span.go b/pkg/trace/traceutil/span.go index 8c7636baa635d..b3a00834f7b9d 100644 --- a/pkg/trace/traceutil/span.go +++ b/pkg/trace/traceutil/span.go @@ -10,7 +10,7 @@ import ( "github.com/tinylib/msgp/msgp" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" ) const ( diff --git a/pkg/trace/traceutil/span_test.go b/pkg/trace/traceutil/span_test.go index db5711651cd1c..3dc3b794c5216 100644 --- a/pkg/trace/traceutil/span_test.go +++ b/pkg/trace/traceutil/span_test.go @@ -9,7 +9,7 @@ import ( "math/rand" "testing" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/stretchr/testify/assert" ) diff --git a/pkg/trace/traceutil/trace.go b/pkg/trace/traceutil/trace.go index 3f2058377cb60..b5871ce9ffe0b 100644 --- a/pkg/trace/traceutil/trace.go +++ b/pkg/trace/traceutil/trace.go @@ -6,8 +6,8 @@ package traceutil import ( + pb 
"github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/log" - "github.com/DataDog/datadog-agent/pkg/trace/pb" ) const ( diff --git a/pkg/trace/traceutil/trace_test.go b/pkg/trace/traceutil/trace_test.go index 8112208ba0991..b5337fb8258f2 100644 --- a/pkg/trace/traceutil/trace_test.go +++ b/pkg/trace/traceutil/trace_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/trace/pb" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" ) func TestGetRootFromCompleteTrace(t *testing.T) { diff --git a/pkg/trace/writer/stats.go b/pkg/trace/writer/stats.go index f7e8571c98a65..32bdbfc9ad937 100644 --- a/pkg/trace/writer/stats.go +++ b/pkg/trace/writer/stats.go @@ -13,12 +13,12 @@ import ( "strings" "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/info" "github.com/DataDog/datadog-agent/pkg/trace/log" "github.com/DataDog/datadog-agent/pkg/trace/metrics" "github.com/DataDog/datadog-agent/pkg/trace/metrics/timing" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" "github.com/tinylib/msgp/msgp" @@ -40,7 +40,7 @@ const ( // StatsWriter ingests stats buckets and flushes them to the API. type StatsWriter struct { - in <-chan pb.StatsPayload + in <-chan *pb.StatsPayload senders []*sender stop chan struct{} stats *info.StatsWriterInfo @@ -48,14 +48,14 @@ type StatsWriter struct { // syncMode reports whether the writer should flush on its own or only when FlushSync is called syncMode bool - payloads []pb.StatsPayload // payloads buffered for sync mode + payloads []*pb.StatsPayload // payloads buffered for sync mode flushChan chan chan struct{} easylog *log.ThrottledLogger } // NewStatsWriter returns a new StatsWriter. It must be started using Run. 
-func NewStatsWriter(cfg *config.AgentConfig, in <-chan pb.StatsPayload, telemetryCollector telemetry.TelemetryCollector) *StatsWriter { +func NewStatsWriter(cfg *config.AgentConfig, in <-chan *pb.StatsPayload, telemetryCollector telemetry.TelemetryCollector) *StatsWriter { sw := &StatsWriter{ in: in, stats: &info.StatsWriterInfo{}, @@ -130,14 +130,14 @@ func (w *StatsWriter) Stop() { stopSenders(w.senders) } -func (w *StatsWriter) addStats(sp pb.StatsPayload) { +func (w *StatsWriter) addStats(sp *pb.StatsPayload) { defer timing.Since("datadog.trace_agent.stats_writer.encode_ms", time.Now()) payloads := w.buildPayloads(sp, maxEntriesPerPayload) w.payloads = append(w.payloads, payloads...) } // SendPayload sends a stats payload to the Datadog backend. -func (w *StatsWriter) SendPayload(p pb.StatsPayload) { +func (w *StatsWriter) SendPayload(p *pb.StatsPayload) { req := newPayload(map[string]string{ headerLanguages: strings.Join(info.Languages(), "|"), "Content-Type": "application/msgpack", @@ -158,11 +158,11 @@ func (w *StatsWriter) sendPayloads() { } func (w *StatsWriter) resetBuffer() { - w.payloads = make([]pb.StatsPayload, 0, len(w.payloads)) + w.payloads = make([]*pb.StatsPayload, 0, len(w.payloads)) } // encodePayload encodes the payload as Gzipped msgPack into w. 
-func encodePayload(w io.Writer, payload pb.StatsPayload) error { +func encodePayload(w io.Writer, payload *pb.StatsPayload) error { gz, err := gzip.NewWriterLevel(w, gzip.BestSpeed) if err != nil { return err @@ -172,15 +172,15 @@ func encodePayload(w io.Writer, payload pb.StatsPayload) error { log.Errorf("Error closing gzip stream when writing stats payload: %v", err) } }() - return msgp.Encode(gz, &payload) + return msgp.Encode(gz, payload) } // buildPayloads splits pb.ClientStatsPayload that have more than maxEntriesPerPayload // and then groups them into pb.StatsPayload with less than maxEntriesPerPayload -func (w *StatsWriter) buildPayloads(sp pb.StatsPayload, maxEntriesPerPayload int) []pb.StatsPayload { +func (w *StatsWriter) buildPayloads(sp *pb.StatsPayload, maxEntriesPerPayload int) []*pb.StatsPayload { split := splitPayloads(sp.Stats, maxEntriesPerPayload) - grouped := make([]pb.StatsPayload, 0, len(sp.Stats)) - current := pb.StatsPayload{ + grouped := make([]*pb.StatsPayload, 0, len(sp.Stats)) + current := &pb.StatsPayload{ AgentHostname: sp.AgentHostname, AgentEnv: sp.AgentEnv, AgentVersion: sp.AgentVersion, @@ -193,9 +193,14 @@ func (w *StatsWriter) buildPayloads(sp pb.StatsPayload, maxEntriesPerPayload int w.stats.ClientPayloads.Add(int64(len(current.Stats))) w.stats.StatsEntries.Add(int64(nbEntries)) grouped = append(grouped, current) - current.Stats = nil nbEntries = 0 nbBuckets = 0 + current = &pb.StatsPayload{ + AgentHostname: sp.AgentHostname, + AgentEnv: sp.AgentEnv, + AgentVersion: sp.AgentVersion, + ClientComputed: sp.ClientComputed, + } } for _, p := range split { if nbEntries+p.nbEntries > maxEntriesPerPayload { @@ -203,7 +208,7 @@ func (w *StatsWriter) buildPayloads(sp pb.StatsPayload, maxEntriesPerPayload int } nbEntries += p.nbEntries nbBuckets += len(p.Stats) - w.resolveContainerTags(&p.ClientStatsPayload) + w.resolveContainerTags(p.ClientStatsPayload) current.Stats = append(current.Stats, p.ClientStatsPayload) } if nbEntries > 0 { @@ 
-234,7 +239,7 @@ func (w *StatsWriter) resolveContainerTags(p *pb.ClientStatsPayload) { } } -func splitPayloads(payloads []pb.ClientStatsPayload, maxEntriesPerPayload int) []clientStatsPayload { +func splitPayloads(payloads []*pb.ClientStatsPayload, maxEntriesPerPayload int) []clientStatsPayload { split := make([]clientStatsPayload, 0, len(payloads)) for _, p := range payloads { split = append(split, splitPayload(p, maxEntriesPerPayload)...) @@ -245,7 +250,7 @@ func splitPayloads(payloads []pb.ClientStatsPayload, maxEntriesPerPayload int) [ type timeWindow struct{ start, duration uint64 } type clientStatsPayload struct { - pb.ClientStatsPayload + *pb.ClientStatsPayload nbEntries int // bucketIndexes maps from a timeWindow to a bucket in the ClientStatsPayload. // it allows quick checking of what bucket to add a payload to. @@ -253,7 +258,7 @@ type clientStatsPayload struct { } // splitPayload splits a stats payload to ensure that each stats payload has less than maxEntriesPerPayload entries. 
-func splitPayload(p pb.ClientStatsPayload, maxEntriesPerPayload int) []clientStatsPayload { +func splitPayload(p *pb.ClientStatsPayload, maxEntriesPerPayload int) []clientStatsPayload { if len(p.Stats) == 0 { return nil } @@ -277,7 +282,7 @@ func splitPayload(p pb.ClientStatsPayload, maxEntriesPerPayload int) []clientSta for i := 0; i < nbPayloads; i++ { payloads[i] = clientStatsPayload{ bucketIndexes: make(map[timeWindow]int, 1), - ClientStatsPayload: pb.ClientStatsPayload{ + ClientStatsPayload: &pb.ClientStatsPayload{ Hostname: p.Hostname, Env: p.Env, Version: p.Version, @@ -288,7 +293,7 @@ func splitPayload(p pb.ClientStatsPayload, maxEntriesPerPayload int) []clientSta Sequence: p.Sequence, AgentAggregation: p.AgentAggregation, ContainerID: p.ContainerID, - Stats: make([]pb.ClientStatsBucket, 0, maxEntriesPerPayload), + Stats: make([]*pb.ClientStatsBucket, 0, maxEntriesPerPayload), }, } } @@ -304,7 +309,7 @@ func splitPayload(p pb.ClientStatsPayload, maxEntriesPerPayload int) []clientSta if !ok { bi = len(payloads[j].Stats) payloads[j].bucketIndexes[tw] = bi - payloads[j].Stats = append(payloads[j].Stats, pb.ClientStatsBucket{Start: tw.start, Duration: tw.duration}) + payloads[j].Stats = append(payloads[j].Stats, &pb.ClientStatsBucket{Start: tw.start, Duration: tw.duration}) } // here, we can just append the group, because there are no duplicate groups in the original stats payloads sent to the writer. 
payloads[j].Stats[bi].Stats = append(payloads[j].Stats[bi].Stats, g) diff --git a/pkg/trace/writer/stats_test.go b/pkg/trace/writer/stats_test.go index ae5c86a450046..afe3f917e2ba9 100644 --- a/pkg/trace/writer/stats_test.go +++ b/pkg/trace/writer/stats_test.go @@ -14,9 +14,9 @@ import ( "strings" "testing" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/info" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/stats" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" "github.com/DataDog/datadog-agent/pkg/trace/testutil" @@ -30,14 +30,14 @@ const ( testEnv = "testing" ) -func assertPayload(assert *assert.Assertions, testSets []pb.StatsPayload, payloads []*payload) { +func assertPayload(assert *assert.Assertions, testSets []*pb.StatsPayload, payloads []*payload) { expectedHeaders := map[string]string{ "X-Datadog-Reported-Languages": strings.Join(info.Languages(), "|"), "Content-Type": "application/msgpack", "Content-Encoding": "gzip", "Dd-Api-Key": "123", } - var decoded []pb.StatsPayload + var decoded []*pb.StatsPayload for _, p := range payloads { var statsPayload pb.StatsPayload r, err := gzip.NewReader(p.body) @@ -47,14 +47,14 @@ func assertPayload(assert *assert.Assertions, testSets []pb.StatsPayload, payloa for k, v := range expectedHeaders { assert.Equal(v, p.headers[k]) } - decoded = append(decoded, statsPayload) + decoded = append(decoded, &statsPayload) } // Sorting payloads as the sender can alter their order. 
sort.Slice(decoded, func(i, j int) bool { return decoded[i].AgentEnv < decoded[j].AgentEnv }) for i, p := range decoded { - assert.Equal(testSets[i], p) + assert.Equal(testSets[i].String(), p.String()) } } @@ -64,15 +64,15 @@ func TestStatsWriter(t *testing.T) { sw, statsChannel, srv := testStatsWriter() go sw.Run() - testSets := []pb.StatsPayload{ + testSets := []*pb.StatsPayload{ { AgentHostname: "1", AgentEnv: "1", AgentVersion: "agent-version", - Stats: []pb.ClientStatsPayload{{ + Stats: []*pb.ClientStatsPayload{{ Hostname: testHostname, Env: testEnv, - Stats: []pb.ClientStatsBucket{ + Stats: []*pb.ClientStatsBucket{ testutil.RandomBucket(3), testutil.RandomBucket(3), testutil.RandomBucket(3), @@ -83,10 +83,10 @@ func TestStatsWriter(t *testing.T) { AgentHostname: "2", AgentEnv: "2", AgentVersion: "agent-version", - Stats: []pb.ClientStatsPayload{{ + Stats: []*pb.ClientStatsPayload{{ Hostname: testHostname, Env: testEnv, - Stats: []pb.ClientStatsBucket{ + Stats: []*pb.ClientStatsBucket{ testutil.RandomBucket(3), testutil.RandomBucket(3), testutil.RandomBucket(3), @@ -106,11 +106,11 @@ func TestStatsWriter(t *testing.T) { // This gives us a total of 45 entries. 3 per span, 5 // spans per stat bucket. Each buckets have the same // time window (start: 0, duration 1e9). 
- stats := pb.StatsPayload{ + stats := &pb.StatsPayload{ AgentHostname: "agenthost", AgentEnv: "agentenv", AgentVersion: "agent-version", - Stats: []pb.ClientStatsPayload{{ + Stats: []*pb.ClientStatsPayload{{ Hostname: testHostname, Env: testEnv, Version: "version", @@ -121,15 +121,27 @@ func TestStatsWriter(t *testing.T) { AgentAggregation: "aggregation", Service: "service", ContainerID: "container-id", - Stats: []pb.ClientStatsBucket{ + Stats: []*pb.ClientStatsBucket{ testutil.RandomBucket(5), testutil.RandomBucket(5), testutil.RandomBucket(5), }}, }, } - baseClientPayload := stats.Stats[0] - baseClientPayload.Stats = nil + + baseClientPayload := &pb.ClientStatsPayload{ + Hostname: stats.Stats[0].GetHostname(), + Env: stats.Stats[0].GetEnv(), + Version: stats.Stats[0].GetVersion(), + Lang: stats.Stats[0].GetLang(), + TracerVersion: stats.Stats[0].GetTracerVersion(), + RuntimeID: stats.Stats[0].GetRuntimeID(), + Sequence: stats.Stats[0].GetSequence(), + AgentAggregation: stats.Stats[0].GetAgentAggregation(), + Service: stats.Stats[0].GetService(), + ContainerID: stats.Stats[0].GetContainerID(), + } + expectedNbEntries := 15 expectedNbPayloads := int(math.Ceil(float64(expectedNbEntries) / 12)) // Compute our expected number of entries by payload @@ -139,7 +151,6 @@ func TestStatsWriter(t *testing.T) { } payloads := sw.buildPayloads(stats, 12) - assert.Equal(expectedNbPayloads, len(payloads)) for i := 0; i < expectedNbPayloads; i++ { assert.Equal(1, len(payloads[i].Stats)) @@ -147,9 +158,9 @@ func TestStatsWriter(t *testing.T) { assert.Equal(expectedNbEntriesByPayload[i], len(payloads[i].Stats[0].Stats[0].Stats)) actual := payloads[i].Stats[0] actual.Stats = nil - assert.Equal(baseClientPayload, actual) + assert.Equal(baseClientPayload.String(), actual.String()) } - assert.Equal(extractCounts([]pb.StatsPayload{stats}), extractCounts(payloads)) + assert.Equal(extractCounts([]*pb.StatsPayload{stats}), extractCounts(payloads)) for _, p := range payloads { 
assert.Equal("agentenv", p.AgentEnv) assert.Equal("agenthost", p.AgentHostname) @@ -165,17 +176,17 @@ func TestStatsWriter(t *testing.T) { // This gives us a tota of 45 entries. 3 per span, 5 spans per // stat bucket. Each buckets have the same time window (start: // 0, duration 1e9). - stats := pb.ClientStatsPayload{ + stats := &pb.ClientStatsPayload{ Hostname: testHostname, Env: testEnv, - Stats: []pb.ClientStatsBucket{ + Stats: []*pb.ClientStatsBucket{ testutil.RandomBucket(5), testutil.RandomBucket(5), testutil.RandomBucket(5), }, } - payloads := sw.buildPayloads(pb.StatsPayload{Stats: []pb.ClientStatsPayload{stats}}, 1337) + payloads := sw.buildPayloads(&pb.StatsPayload{Stats: []*pb.ClientStatsPayload{stats}}, 1337) assert.Equal(1, len(payloads)) s := payloads[0].Stats assert.Equal(3, len(s[0].Stats)) @@ -193,7 +204,7 @@ func TestStatsResetBuffer(t *testing.T) { runtime.ReadMemStats(&m) assert.Less(t, m.HeapInuse, uint64(50*1e6)) - bigPayload := pb.StatsPayload{ + bigPayload := &pb.StatsPayload{ AgentHostname: string(make([]byte, 50*1e6)), } @@ -214,22 +225,22 @@ func TestStatsSyncWriter(t *testing.T) { assert := assert.New(t) sw, statsChannel, srv := testStatsSyncWriter() go sw.Run() - testSets := []pb.StatsPayload{ + testSets := []*pb.StatsPayload{ { - Stats: []pb.ClientStatsPayload{{ + Stats: []*pb.ClientStatsPayload{{ Hostname: testHostname, Env: testEnv, - Stats: []pb.ClientStatsBucket{ + Stats: []*pb.ClientStatsBucket{ testutil.RandomBucket(3), testutil.RandomBucket(3), testutil.RandomBucket(3), }, }}}, { - Stats: []pb.ClientStatsPayload{{ + Stats: []*pb.ClientStatsPayload{{ Hostname: testHostname, Env: testEnv, - Stats: []pb.ClientStatsBucket{ + Stats: []*pb.ClientStatsBucket{ testutil.RandomBucket(3), testutil.RandomBucket(3), testutil.RandomBucket(3), @@ -248,22 +259,22 @@ func TestStatsSyncWriter(t *testing.T) { sw, statsChannel, srv := testStatsSyncWriter() go sw.Run() - testSets := []pb.StatsPayload{ + testSets := []*pb.StatsPayload{ { - Stats: 
[]pb.ClientStatsPayload{{ + Stats: []*pb.ClientStatsPayload{{ Hostname: testHostname, Env: testEnv, - Stats: []pb.ClientStatsBucket{ + Stats: []*pb.ClientStatsBucket{ testutil.RandomBucket(3), testutil.RandomBucket(3), testutil.RandomBucket(3), }, }}}, { - Stats: []pb.ClientStatsPayload{{ + Stats: []*pb.ClientStatsPayload{{ Hostname: testHostname, Env: testEnv, - Stats: []pb.ClientStatsBucket{ + Stats: []*pb.ClientStatsBucket{ testutil.RandomBucket(3), testutil.RandomBucket(3), testutil.RandomBucket(3), @@ -277,11 +288,11 @@ func TestStatsSyncWriter(t *testing.T) { }) } -func testStatsWriter() (*StatsWriter, chan pb.StatsPayload, *testServer) { +func testStatsWriter() (*StatsWriter, chan *pb.StatsPayload, *testServer) { srv := newTestServer() // We use a blocking channel to make sure that sends get received on the // other end. - in := make(chan pb.StatsPayload) + in := make(chan *pb.StatsPayload) cfg := &config.AgentConfig{ Endpoints: []*config.Endpoint{{Host: srv.URL, APIKey: "123"}}, StatsWriter: &config.WriterConfig{ConnectionLimit: 20, QueueSize: 20}, @@ -290,11 +301,11 @@ func testStatsWriter() (*StatsWriter, chan pb.StatsPayload, *testServer) { return NewStatsWriter(cfg, in, telemetry.NewNoopCollector()), in, srv } -func testStatsSyncWriter() (*StatsWriter, chan pb.StatsPayload, *testServer) { +func testStatsSyncWriter() (*StatsWriter, chan *pb.StatsPayload, *testServer) { srv := newTestServer() // We use a blocking channel to make sure that sends get received on the // other end. 
- in := make(chan pb.StatsPayload) + in := make(chan *pb.StatsPayload) cfg := &config.AgentConfig{ Endpoints: []*config.Endpoint{{Host: srv.URL, APIKey: "123"}}, StatsWriter: &config.WriterConfig{ConnectionLimit: 20, QueueSize: 20}, @@ -315,7 +326,7 @@ type counts struct { duration uint64 } -func getKey(b pb.ClientGroupedStats, start, duration uint64) key { +func getKey(b *pb.ClientGroupedStats, start, duration uint64) key { return key{ start: start, duration: duration, @@ -331,7 +342,7 @@ func getKey(b pb.ClientGroupedStats, start, duration uint64) key { } } -func extractCounts(stats []pb.StatsPayload) map[key]counts { +func extractCounts(stats []*pb.StatsPayload) map[key]counts { counts := make(map[key]counts) for _, s := range stats { for _, p := range s.Stats { diff --git a/pkg/trace/writer/trace.go b/pkg/trace/writer/trace.go index af14f5bdb8000..fc4a23aad2bd8 100644 --- a/pkg/trace/writer/trace.go +++ b/pkg/trace/writer/trace.go @@ -13,12 +13,12 @@ import ( "sync" "time" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/info" "github.com/DataDog/datadog-agent/pkg/trace/log" "github.com/DataDog/datadog-agent/pkg/trace/metrics" "github.com/DataDog/datadog-agent/pkg/trace/metrics/timing" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" ) diff --git a/pkg/trace/writer/trace_test.go b/pkg/trace/writer/trace_test.go index f44b7bb0d6deb..b1b6d8395e1c1 100644 --- a/pkg/trace/writer/trace_test.go +++ b/pkg/trace/writer/trace_test.go @@ -16,8 +16,8 @@ import ( "github.com/stretchr/testify/assert" "google.golang.org/protobuf/proto" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" - "github.com/DataDog/datadog-agent/pkg/trace/pb" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" 
"github.com/DataDog/datadog-agent/pkg/trace/testutil" ) diff --git a/pkg/util/backoff/backoff.go b/pkg/util/backoff/backoff.go index 2315c923b15a7..7396201f295fb 100644 --- a/pkg/util/backoff/backoff.go +++ b/pkg/util/backoff/backoff.go @@ -11,9 +11,9 @@ import ( "time" ) -// Policy contains parameters and logic necessary to implement an exponential backoff +// ExpBackoffPolicy contains parameters and logic necessary to implement an exponential backoff // strategy when handling errors. -type Policy struct { +type ExpBackoffPolicy struct { // MinBackoffFactor controls the overlap between consecutive retry interval ranges. When // set to `2`, there is a guarantee that there will be no overlap. The overlap // will asymptotically approach 50% the higher the value is set. @@ -36,21 +36,33 @@ type Policy struct { MaxErrors int } +// ConstantBackoffPolicy contains a constant backoff duration +type ConstantBackoffPolicy struct { + backoffTime time.Duration +} + const secondsFloat = float64(time.Second) func randomBetween(min, max float64) float64 { return rand.Float64()*(max-min) + min } -// NewPolicy constructs new Backoff object with given parameters -func NewPolicy(minBackoffFactor, baseBackoffTime, maxBackoffTime float64, recoveryInterval int, recoveryReset bool) Policy { +// NewConstantBackoffPolicy constructs new Backoff object with a given duration (used in serverless) +func NewConstantBackoffPolicy(backoffTime time.Duration) Policy { + return &ConstantBackoffPolicy{ + backoffTime, + } +} + +// NewExpBackoffPolicy constructs new Backoff object with given parameters +func NewExpBackoffPolicy(minBackoffFactor, baseBackoffTime, maxBackoffTime float64, recoveryInterval int, recoveryReset bool) Policy { maxErrors := int(math.Floor(math.Log2(maxBackoffTime/baseBackoffTime))) + 1 if recoveryReset { recoveryInterval = maxErrors } - return Policy{ + return &ExpBackoffPolicy{ MinBackoffFactor: minBackoffFactor, BaseBackoffTime: baseBackoffTime, MaxBackoffTime: 
maxBackoffTime, @@ -60,17 +72,17 @@ func NewPolicy(minBackoffFactor, baseBackoffTime, maxBackoffTime float64, recove } // GetBackoffDuration returns amount of time to sleep after numErrors error -func (b *Policy) GetBackoffDuration(numErrors int) time.Duration { +func (e *ExpBackoffPolicy) GetBackoffDuration(numErrors int) time.Duration { var backoffTime float64 if numErrors > 0 { - backoffTime = b.BaseBackoffTime * math.Pow(2, float64(numErrors)) + backoffTime = e.BaseBackoffTime * math.Pow(2, float64(numErrors)) - if backoffTime > b.MaxBackoffTime { - backoffTime = b.MaxBackoffTime + if backoffTime > e.MaxBackoffTime { + backoffTime = e.MaxBackoffTime } else { - min := backoffTime / b.MinBackoffFactor - max := math.Min(b.MaxBackoffTime, backoffTime) + min := backoffTime / e.MinBackoffFactor + max := math.Min(e.MaxBackoffTime, backoffTime) backoffTime = randomBetween(min, max) } } @@ -80,19 +92,36 @@ func (b *Policy) GetBackoffDuration(numErrors int) time.Duration { } // IncError increments the error counter up to MaxErrors -func (b *Policy) IncError(numErrors int) int { +func (e *ExpBackoffPolicy) IncError(numErrors int) int { numErrors++ - if numErrors > b.MaxErrors { - return b.MaxErrors + if numErrors > e.MaxErrors { + return e.MaxErrors } return numErrors } // DecError decrements the error counter down to zero at RecoveryInterval rate -func (b *Policy) DecError(numErrors int) int { - numErrors -= b.RecoveryInterval +func (e *ExpBackoffPolicy) DecError(numErrors int) int { + numErrors -= e.RecoveryInterval if numErrors < 0 { return 0 } return numErrors } + +// GetBackoffDuration returns amount of time to sleep after numErrors error +func (c *ConstantBackoffPolicy) GetBackoffDuration(numErrors int) time.Duration { + return c.backoffTime +} + +// IncError increments the error counter by one; a constant backoff policy imposes no upper cap +func (c *ConstantBackoffPolicy) IncError(numErrors int) int { + numErrors++ + return numErrors +} + +// DecError decrements the error counter by one; unlike ExpBackoffPolicy it does not clamp at zero +func (c *ConstantBackoffPolicy) DecError(numErrors int) int { + 
numErrors-- + return numErrors +} diff --git a/pkg/util/backoff/backoff_test.go b/pkg/util/backoff/backoff_test.go index 917ad4ea80fc6..e4c61f6694eb0 100644 --- a/pkg/util/backoff/backoff_test.go +++ b/pkg/util/backoff/backoff_test.go @@ -37,14 +37,14 @@ func TestRandomBetween(t *testing.T) { } func TestEmpty(t *testing.T) { - b := Policy{} + b := ExpBackoffPolicy{} assert.Equal(t, 0, b.IncError(0)) assert.Equal(t, 0, b.DecError(0)) assert.Equal(t, time.Duration(0), b.GetBackoffDuration(0)) } func TestBackoff(t *testing.T) { - b := NewPolicy(1, 1, 9, 2, false) + b := NewExpBackoffPolicy(1, 1, 9, 2, false) assert.Equal(t, 1, b.IncError(0)) assert.Equal(t, 2, b.IncError(1)) @@ -64,3 +64,17 @@ func TestBackoff(t *testing.T) { assert.Equal(t, 8*time.Second, b.GetBackoffDuration(3)) assert.Equal(t, 9*time.Second, b.GetBackoffDuration(4)) } + +func TestNewConstantBackoffPolicy(t *testing.T) { + testDuration := 10 * time.Second + b := NewConstantBackoffPolicy(testDuration) + + assert.Equal(t, 2, b.IncError(1)) + assert.Equal(t, 101, b.IncError(100)) + + assert.Equal(t, 0, b.DecError(1)) + assert.Equal(t, 99, b.DecError(100)) + + assert.Equal(t, testDuration, b.GetBackoffDuration(1)) + assert.Equal(t, testDuration, b.GetBackoffDuration(100)) +} diff --git a/pkg/util/backoff/policy.go b/pkg/util/backoff/policy.go new file mode 100644 index 0000000000000..a865dc309473a --- /dev/null +++ b/pkg/util/backoff/policy.go @@ -0,0 +1,18 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package backoff + +import "time" + +// Policy is the common interface for all backoff policies +type Policy interface { + // GetBackoffDuration returns the backoff duration for the given number of errors + GetBackoffDuration(numErrors int) time.Duration + // IncError increments the number of errors and returns the new value + IncError(numErrors int) int + // DecError decrements the number of errors and returns the new value + DecError(numErrors int) int +} diff --git a/pkg/util/grpc/agent_client.go b/pkg/util/grpc/agent_client.go index 3ed3a517619bc..407735f6ca82f 100644 --- a/pkg/util/grpc/agent_client.go +++ b/pkg/util/grpc/agent_client.go @@ -17,7 +17,7 @@ import ( "google.golang.org/grpc/credentials" "github.com/DataDog/datadog-agent/pkg/config" - pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/log" ) diff --git a/pkg/util/proto/tagger.go b/pkg/util/proto/tagger.go index 74c269ec41174..99abcbafcfd63 100644 --- a/pkg/util/proto/tagger.go +++ b/pkg/util/proto/tagger.go @@ -13,7 +13,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/tagger/collectors" "github.com/DataDog/datadog-agent/pkg/tagger/types" ) diff --git a/pkg/util/proto/workloadmeta.go b/pkg/util/proto/workloadmeta.go index 0d9d9f1249990..29ed6ad6a7bcb 100644 --- a/pkg/util/proto/workloadmeta.go +++ b/pkg/util/proto/workloadmeta.go @@ -9,7 +9,7 @@ import ( "fmt" "time" - pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/workloadmeta" ) diff --git a/pkg/util/proto/workloadmeta_test.go b/pkg/util/proto/workloadmeta_test.go index 0ef1818e7f1bf..3d9b0a3fe07d8 100644 --- 
a/pkg/util/proto/workloadmeta_test.go +++ b/pkg/util/proto/workloadmeta_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/assert" "google.golang.org/protobuf/proto" - pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/workloadmeta" ) diff --git a/pkg/workloadmeta/collectors/internal/remote/process_collector/process_collector.go b/pkg/workloadmeta/collectors/internal/remote/process_collector/process_collector.go index 7ab8bae6a14c5..e4045cc6f3d1d 100644 --- a/pkg/workloadmeta/collectors/internal/remote/process_collector/process_collector.go +++ b/pkg/workloadmeta/collectors/internal/remote/process_collector/process_collector.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" "github.com/DataDog/datadog-agent/pkg/util/flavor" grpcutil "github.com/DataDog/datadog-agent/pkg/util/grpc" "github.com/DataDog/datadog-agent/pkg/util/log" diff --git a/pkg/workloadmeta/collectors/internal/remote/process_collector/process_collector_test.go b/pkg/workloadmeta/collectors/internal/remote/process_collector/process_collector_test.go index d9e6f24128133..05621f14cbf2c 100644 --- a/pkg/workloadmeta/collectors/internal/remote/process_collector/process_collector_test.go +++ b/pkg/workloadmeta/collectors/internal/remote/process_collector/process_collector_test.go @@ -22,8 +22,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/api/security" "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" - pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" 
+ pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" "github.com/DataDog/datadog-agent/pkg/workloadmeta" "github.com/DataDog/datadog-agent/pkg/workloadmeta/collectors/internal/remote" "google.golang.org/grpc" @@ -32,7 +31,7 @@ import ( type mockServer struct { pbgo.UnimplementedProcessEntityStreamServer - responses []*pb.ProcessStreamResponse + responses []*pbgo.ProcessStreamResponse errorResponse bool // first response is an error currentResponse int @@ -72,21 +71,21 @@ func TestCollection(t *testing.T) { name string preEvents []workloadmeta.CollectorEvent - serverResponses []*pb.ProcessStreamResponse + serverResponses []*pbgo.ProcessStreamResponse expectedProcesses []*workloadmeta.Process errorResponse bool }{ { name: "initially empty", - serverResponses: []*pb.ProcessStreamResponse{ + serverResponses: []*pbgo.ProcessStreamResponse{ { EventID: 0, - SetEvents: []*pb.ProcessEventSet{ + SetEvents: []*pbgo.ProcessEventSet{ { Pid: 123, Nspid: 345, ContainerId: "cid", - Language: &pb.Language{Name: string(languagemodels.Java)}, + Language: &pbgo.Language{Name: string(languagemodels.Java)}, CreationTime: creationTime, }, }, @@ -108,27 +107,27 @@ func TestCollection(t *testing.T) { }, { name: "two response with set", - serverResponses: []*pb.ProcessStreamResponse{ + serverResponses: []*pbgo.ProcessStreamResponse{ { EventID: 0, - SetEvents: []*pb.ProcessEventSet{ + SetEvents: []*pbgo.ProcessEventSet{ { Pid: 123, Nspid: 345, ContainerId: "cid", - Language: &pb.Language{Name: string(languagemodels.Java)}, + Language: &pbgo.Language{Name: string(languagemodels.Java)}, CreationTime: creationTime, }, }, }, { EventID: 1, - SetEvents: []*pb.ProcessEventSet{ + SetEvents: []*pbgo.ProcessEventSet{ { Pid: 345, Nspid: 567, ContainerId: "cid", - Language: &pb.Language{Name: string(languagemodels.Java)}, + Language: &pbgo.Language{Name: string(languagemodels.Java)}, CreationTime: creationTime, }, }, @@ -160,22 +159,22 @@ func TestCollection(t *testing.T) { }, { 
name: "one set one unset", - serverResponses: []*pb.ProcessStreamResponse{ + serverResponses: []*pbgo.ProcessStreamResponse{ { EventID: 0, - SetEvents: []*pb.ProcessEventSet{ + SetEvents: []*pbgo.ProcessEventSet{ { Pid: 123, Nspid: 345, ContainerId: "cid", - Language: &pb.Language{Name: string(languagemodels.Java)}, + Language: &pbgo.Language{Name: string(languagemodels.Java)}, CreationTime: creationTime, }, }, }, { EventID: 1, - UnsetEvents: []*pb.ProcessEventUnset{ + UnsetEvents: []*pbgo.ProcessEventUnset{ { Pid: 123, }, @@ -202,15 +201,15 @@ func TestCollection(t *testing.T) { }, }, }, - serverResponses: []*pb.ProcessStreamResponse{ + serverResponses: []*pbgo.ProcessStreamResponse{ { EventID: 0, - SetEvents: []*pb.ProcessEventSet{ + SetEvents: []*pbgo.ProcessEventSet{ { Pid: 345, Nspid: 678, ContainerId: "cid", - Language: &pb.Language{Name: string(languagemodels.Java)}, + Language: &pbgo.Language{Name: string(languagemodels.Java)}, CreationTime: creationTime, }, }, diff --git a/pkg/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta.go b/pkg/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta.go index 24a4818c83417..0da9e6684c6cf 100644 --- a/pkg/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta.go +++ b/pkg/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta.go @@ -13,8 +13,7 @@ import ( "google.golang.org/grpc/grpclog" "github.com/DataDog/datadog-agent/pkg/config" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" - pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" grpcutil "github.com/DataDog/datadog-agent/pkg/util/grpc" protoutils "github.com/DataDog/datadog-agent/pkg/util/proto" "github.com/DataDog/datadog-agent/pkg/workloadmeta" @@ -43,7 +42,7 @@ func (c *client) StreamEntities(ctx context.Context, opts ...grpc.CallOption) (r } type stream struct { - cl pbgo.AgentSecure_WorkloadmetaStreamEntitiesClient + cl 
pb.AgentSecure_WorkloadmetaStreamEntitiesClient } func (s *stream) Recv() (interface{}, error) { diff --git a/pkg/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta_test.go b/pkg/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta_test.go index 36cd8d6a3ee7a..d54477afe74bf 100644 --- a/pkg/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta_test.go +++ b/pkg/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta_test.go @@ -17,7 +17,7 @@ import ( "github.com/stretchr/testify/require" "github.com/DataDog/datadog-agent/pkg/api/security" - "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/proto" "github.com/DataDog/datadog-agent/pkg/workloadmeta" "github.com/DataDog/datadog-agent/pkg/workloadmeta/collectors/internal/remote" diff --git a/pkg/workloadmeta/server/server.go b/pkg/workloadmeta/server/server.go index 1ee6dc7937085..8612602d480a2 100644 --- a/pkg/workloadmeta/server/server.go +++ b/pkg/workloadmeta/server/server.go @@ -8,7 +8,7 @@ package server import ( "time" - pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/grpc" "github.com/DataDog/datadog-agent/pkg/util/log" protoutils "github.com/DataDog/datadog-agent/pkg/util/proto" diff --git a/tasks/go.py b/tasks/go.py index 5f8557b4e8bee..004b53bb301cc 100644 --- a/tasks/go.py +++ b/tasks/go.py @@ -193,13 +193,71 @@ def generate_licenses(ctx, filename='LICENSE-3rdparty.csv', verbose=False): def generate_protobuf(ctx): """ Generates protobuf definitions in pkg/proto + + We must build the packages one at a time due to protoc-gen-go limitations """ + + # Key: path, Value: grpc_gateway, inject_tags + PROTO_PKGS = { + 'model/v1': (False, False), + 'remoteconfig': (False, False), + 'api/v1': (True, False), + 'trace': 
(False, True), + 'process': (False, False), + 'workloadmeta': (False, False), + } + + # maybe put this in a separate function + PKG_PLUGINS = { + 'trace': '--go-vtproto_out=', + } + + PKG_CLI_EXTRAS = { + 'trace': '--go-vtproto_opt=features=marshal+unmarshal+size', + } + + # protoc-go-inject-tag targets + inject_tag_targets = { + 'trace': ['span.pb.go', 'stats.pb.go', 'tracer_payload.pb.go', 'agent_payload.pb.go'], + } + + # msgp targets (file, io) + msgp_targets = { + 'trace': [ + ('trace.go', False), + ('span.pb.go', False), + ('stats.pb.go', True), + ('tracer_payload.pb.go', False), + ('agent_payload.pb.go', False), + ], + 'core': [('remoteconfig.pb.go', False)], + } + + # msgp patches key is `pkg` : (patch, destination) + # if `destination` is `None` diff will target inherent patch files + msgp_patches = { + 'trace': [ + ('0001-Customize-msgpack-parsing.patch', '-p4'), + ('0002-Make-nil-map-deserialization-retrocompatible.patch', '-p4'), + ('0003-pkg-trace-traceutil-credit-card-obfuscation-9213.patch', '-p4'), + ], + } + base = os.path.dirname(os.path.abspath(__file__)) repo_root = os.path.abspath(os.path.join(base, "..")) proto_root = os.path.join(repo_root, "pkg", "proto") + protodep_root = os.path.join(proto_root, "protodep") print(f"nuking old definitions at: {proto_root}") - file_list = glob.glob(os.path.join(proto_root, "pbgo", "*.go")) + file_list = glob.glob(os.path.join(proto_root, "pbgo", "*.pb.go")) + for file_path in file_list: + try: + os.remove(file_path) + except OSError: + print("Error while deleting file : ", file_path) + + # also cleanup gateway generated files + file_list = glob.glob(os.path.join(proto_root, "pbgo", "*.pb.gw.go")) for file_path in file_list: try: os.remove(file_path) @@ -210,13 +268,42 @@ def generate_protobuf(ctx): # protobuf defs print(f"generating protobuf code from: {proto_root}") - files = [] - for path in Path(os.path.join(proto_root, "datadog")).rglob('*.proto'): - files.append(path.as_posix()) + for pkg, 
(grpc_gateway, inject_tags) in PROTO_PKGS.items(): + files = [] + pkg_root = os.path.join(proto_root, "datadog", pkg).rstrip(os.sep) + pkg_root_level = pkg_root.count(os.sep) + for path in Path(pkg_root).rglob('*.proto'): + if path.as_posix().count(os.sep) == pkg_root_level + 1: + files.append(path.as_posix()) + + targets = ' '.join(files) + + # output_generator could potentially change for some packages + # so keep it in a variable for sanity. + output_generator = "--go_out=plugins=grpc:" + cli_extras = '' + ctx.run(f"protoc -I{proto_root} -I{protodep_root} {output_generator}{repo_root} {cli_extras} {targets}") + + if pkg in PKG_PLUGINS: + output_generator = PKG_PLUGINS[pkg] + + if pkg in PKG_CLI_EXTRAS: + cli_extras = PKG_CLI_EXTRAS[pkg] + + ctx.run(f"protoc -I{proto_root} -I{protodep_root} {output_generator}{repo_root} {cli_extras} {targets}") + + if inject_tags: + inject_path = os.path.join(proto_root, "pbgo", pkg) + # inject_tags logic + for target in inject_tag_targets[pkg]: + ctx.run(f"protoc-go-inject-tag -input={os.path.join(inject_path, target)}") + + if grpc_gateway: + # grpc-gateway logic + ctx.run( + f"protoc -I{proto_root} -I{protodep_root} --grpc-gateway_out=logtostderr=true:{repo_root} {targets}" + ) - ctx.run(f"protoc -I{proto_root} --go_out=plugins=grpc:{repo_root} {' '.join(files)}") - # grpc-gateway logic - ctx.run(f"protoc -I{proto_root} --grpc-gateway_out=logtostderr=true:{repo_root} {' '.join(files)}") # mockgen pbgo_dir = os.path.join(proto_root, "pbgo") mockgen_out = os.path.join(proto_root, "pbgo", "mocks") @@ -225,10 +312,22 @@ def generate_protobuf(ctx): except FileExistsError: print(f"{mockgen_out} folder already exists") - ctx.run(f"mockgen -source={pbgo_dir}/api.pb.go -destination={mockgen_out}/api_mockgen.pb.go") + # TODO: this should be parametrized + ctx.run(f"mockgen -source={pbgo_dir}/core/api.pb.go -destination={mockgen_out}/core/api_mockgen.pb.go") # generate messagepack marshallers - ctx.run("msgp -file pkg/proto/msgpgo/key.go 
-o=pkg/proto/msgpgo/key_gen.go") + for pkg, files in msgp_targets.items(): + for (src, io_gen) in files: + dst = os.path.splitext(os.path.basename(src))[0] # .go + dst = os.path.splitext(dst)[0] # .pb + ctx.run(f"msgp -file {pbgo_dir}/{pkg}/{src} -o={pbgo_dir}/{pkg}/{dst}_gen.go -io={io_gen}") + + # apply msgp patches + for pkg, patches in msgp_patches.items(): + for patch in patches: + patch_file = os.path.join(proto_root, "patches", patch[0]) + switches = patch[1] if patch[1] else '' + ctx.run(f"git apply {switches} --unsafe-paths --directory='{pbgo_dir}/{pkg}' {patch_file}") @task diff --git a/tasks/libs/copyright.py b/tasks/libs/copyright.py index 2ee40ad29fc9e..77a74b4af4a2b 100755 --- a/tasks/libs/copyright.py +++ b/tasks/libs/copyright.py @@ -27,13 +27,15 @@ PATH_EXCLUSION_REGEX = [ # These are auto-generated files but without headers to indicate it '/pkg/clusteragent/custommetrics/api/generated/', - '/pkg/proto/msgpgo/.*_gen(_test){,1}.go', '/pkg/process/events/model/.*_gen.go', + '/pkg/proto/msgpgo/.*_gen(_test){,1}.go', + '/pkg/proto/pbgo/core/.*_gen(_test){,1}.go', + '/pkg/proto/pbgo/trace/.*_gen(_test){,1}.go', '/pkg/remoteconfig/state/products/apmsampling/.*_gen(_test){,1}.go', '/pkg/security/security_profile/dump/activity_dump_easyjson.go', '/pkg/security/probe/custom_events_easyjson.go', '/pkg/security/serializers/serializers_easyjson.go', - '/pkg/trace/pb/.*_gen(_test){,1}.go', + '/pkg/security/probe/selftests/self_tests_easyjson.go', # These are files that we should not add our copyright to '/internal/patch/grpc-go-insecure/', '/internal/patch/logr/funcr/funcr(_test){,1}.go', diff --git a/tasks/pipeline.py b/tasks/pipeline.py index 53dac77677a71..d48d5aa0a94ed 100644 --- a/tasks/pipeline.py +++ b/tasks/pipeline.py @@ -741,16 +741,15 @@ def update_circleci_config(file_path, image_tag, test_version): """ Override variables in .gitlab-ci.yml file """ - image_name = "datadog/datadog-agent-runner-circle" + image_name = 
"datadog/agent-buildimages-circleci-runner" with open(file_path, "r") as circle: circle_ci = circle.read() - if test_version: - image_tag += "_test_only" - match = re.search(rf"{image_name}:(\w+)\n", circle_ci) + match = re.search(rf"{image_name}:([a-zA-Z0-9_-]+)\n", circle_ci) if not match: raise RuntimeError(f"Impossible to find the version of image {image_name} in circleci configuration file") + image = f"{image_name}_test_only" if test_version else image_name with open(file_path, "w") as circle: - circle.write(circle_ci.replace(match.group(1), image_tag)) + circle.write(circle_ci.replace(f"{image_name}:{match.group(1)}", f"{image}:{image_tag}")) def commit_and_push(ctx, branch_name=None): diff --git a/tasks/system_probe.py b/tasks/system_probe.py index 29ce66859b893..3fe45874c93d4 100644 --- a/tasks/system_probe.py +++ b/tasks/system_probe.py @@ -236,9 +236,10 @@ def ninja_network_ebpf_programs(nw, build_dir, co_re_build_dir): "tracer", "prebuilt/usm", "prebuilt/usm_events_test", + "prebuilt/shared-libraries", "prebuilt/conntrack", ] - network_co_re_programs = ["tracer", "co-re/tracer-fentry", "runtime/usm"] + network_co_re_programs = ["tracer", "co-re/tracer-fentry", "runtime/usm", "runtime/shared-libraries"] for prog in network_programs: infile = os.path.join(network_c_dir, f"{prog}.c") @@ -294,6 +295,7 @@ def ninja_runtime_compilation_files(nw, gobin): "pkg/collector/corechecks/ebpf/probe/oom_kill.go": "oom-kill", "pkg/collector/corechecks/ebpf/probe/tcp_queue_length.go": "tcp-queue-length", "pkg/network/usm/compile.go": "usm", + "pkg/network/usm/sharedlibraries/compile.go": "shared-libraries", "pkg/network/tracer/compile.go": "conntrack", "pkg/network/tracer/connection/kprobe/compile.go": "tracer", "pkg/network/tracer/offsetguess_test.go": "offsetguess-test", @@ -357,7 +359,6 @@ def ninja_cgo_type_files(nw, windows): "pkg/network/protocols/http/types.go": [ "pkg/network/ebpf/c/tracer/tracer.h", "pkg/network/ebpf/c/protocols/tls/tags-types.h", - 
"pkg/network/ebpf/c/protocols/tls/sowatcher-types.h", "pkg/network/ebpf/c/protocols/http/types.h", "pkg/network/ebpf/c/protocols/classification/defs.h", ], @@ -381,6 +382,9 @@ def ninja_cgo_type_files(nw, windows): "pkg/collector/corechecks/ebpf/probe/tcp_queue_length_kern_types.go": [ "pkg/collector/corechecks/ebpf/c/runtime/tcp-queue-length-kern-user.h", ], + "pkg/network/usm/sharedlibraries/types.go": [ + "pkg/network/ebpf/c/shared-libraries/types.h", + ], "pkg/collector/corechecks/ebpf/probe/ebpfcheck/c_types.go": [ "pkg/collector/corechecks/ebpf/c/runtime/ebpf-kern-user.h" ], diff --git a/tasks/test.py b/tasks/test.py index 722d2fd19e143..5c808c3ba3cf3 100644 --- a/tasks/test.py +++ b/tasks/test.py @@ -97,6 +97,7 @@ def environ(env): 'github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway', 'github.com/golang/protobuf/protoc-gen-go', 'github.com/golang/mock/mockgen', + 'github.com/planetscale/vtprotobuf/cmd/protoc-gen-go-vtproto', 'github.com/tinylib/msgp', ] diff --git a/tasks/unit-tests/pipeline_tests.py b/tasks/unit-tests/pipeline_tests.py index e170deec73eb0..3ab66e660f2d5 100644 --- a/tasks/unit-tests/pipeline_tests.py +++ b/tasks/unit-tests/pipeline_tests.py @@ -44,7 +44,7 @@ class TestUpdateGitlabCI(unittest.TestCase): erroneous_file = "tasks/unit-tests/testdata/erroneous_gitlab-ci.yml" def tearDown(self) -> None: - subprocess.run(f"git checkout -- {self.gitlabci_file}".split()) + subprocess.run(f"git checkout -- {self.gitlabci_file} {self.erroneous_file}".split()) return super().tearDown() def test_nominal(self): @@ -73,23 +73,25 @@ class TestUpdateCircleCI(unittest.TestCase): erroneous_file = "tasks/unit-tests/testdata/erroneous_circleci_config.yml" def tearDown(self) -> None: - subprocess.run(f"git checkout -- {self.circleci_file}".split()) + subprocess.run(f"git checkout -- {self.circleci_file} {self.erroneous_file}".split()) return super().tearDown() def test_nominal(self): pipeline.update_circleci_config(self.circleci_file, 
"1m4g3", test_version=True) with open(self.circleci_file, "r") as gl: circle_ci = yaml.safe_load(gl) - image = circle_ci['templates']['job_template']['docker'][0]['image'] - version = image.split(":")[-1] - self.assertEqual("1m4g3_test_only", version) + full_image = circle_ci['templates']['job_template']['docker'][0]['image'] + image, version = full_image.split(":") + self.assertTrue(image.endswith("_test_only")) + self.assertEqual("1m4g3", version) def test_update_no_test(self): pipeline.update_circleci_config(self.circleci_file, "1m4g3", test_version=False) with open(self.circleci_file, "r") as gl: circle_ci = yaml.safe_load(gl) - image = circle_ci['templates']['job_template']['docker'][0]['image'] - version = image.split(":")[-1] + full_image = circle_ci['templates']['job_template']['docker'][0]['image'] + image, version = full_image.split(":") + self.assertFalse(image.endswith("_test_only")) self.assertEqual("1m4g3", version) def test_raise(self): diff --git a/tasks/unit-tests/testdata/erroneous_circleci_config.yml b/tasks/unit-tests/testdata/erroneous_circleci_config.yml index edbd034a6f013..7a997e555eebc 100644 --- a/tasks/unit-tests/testdata/erroneous_circleci_config.yml +++ b/tasks/unit-tests/testdata/erroneous_circleci_config.yml @@ -15,7 +15,7 @@ experimental: templates: job_template: &job_template docker: - - image: datadog/datadog-agent-runner-fake:go1199 + - image: datadog/agent-buildimages-circleci-runner-fake:v17660676-da3ba55 environment: USE_SYSTEM_LIBS: "1" working_directory: /go/src/github.com/DataDog/datadog-agent diff --git a/tasks/unit-tests/testdata/fake_circleci_config.yml b/tasks/unit-tests/testdata/fake_circleci_config.yml index f15f6a3e5f18b..23ae4e652c969 100644 --- a/tasks/unit-tests/testdata/fake_circleci_config.yml +++ b/tasks/unit-tests/testdata/fake_circleci_config.yml @@ -15,7 +15,7 @@ experimental: templates: job_template: &job_template docker: - - image: datadog/datadog-agent-runner-circle:go1199 + - image: 
datadog/agent-buildimages-circleci-runner:v17660676-da3ba55 environment: USE_SYSTEM_LIBS: "1" working_directory: /go/src/github.com/DataDog/datadog-agent diff --git a/test/e2e/cws-tests/requirements.txt b/test/e2e/cws-tests/requirements.txt index 94942a94e60f1..2bd7c244a981d 100644 --- a/test/e2e/cws-tests/requirements.txt +++ b/test/e2e/cws-tests/requirements.txt @@ -5,3 +5,4 @@ docker==6.1.3 retry==0.9.2 emoji==2.6.0 requests==2.31.0 +jsonschema==3.2.0 \ No newline at end of file diff --git a/test/e2e/cws-tests/tests/lib/cws/schemas.py b/test/e2e/cws-tests/tests/lib/cws/schemas.py new file mode 100644 index 0000000000000..5a4492d6fb3e3 --- /dev/null +++ b/test/e2e/cws-tests/tests/lib/cws/schemas.py @@ -0,0 +1,25 @@ +import json +import os + +from jsonschema import Draft7Validator, RefResolver + + +class JsonSchemaValidator: + def __init__(self): + self.schema_directory = os.path.join(os.path.dirname(__file__), "../../../../../../pkg/security/tests/schemas") + self.schema_store = {} + for filename in os.listdir(self.schema_directory): + if filename.endswith('.json'): + with open(os.path.join(self.schema_directory, filename)) as file: + schema = json.load(file) + if "$id" in schema: + # Add each schema to the store using its 'id' as key + self.schema_store[f"/schemas/{schema['$id']}"] = schema + + # Create a resolver that uses the schema store for resolving references + self.resolver = RefResolver(base_uri='', referrer=None, store=self.schema_store) + + def validate_json_data(self, schema_filename, json_data): + # Validate the instance using the references + validator = Draft7Validator(self.schema_store[f"/schemas/{schema_filename}"], resolver=self.resolver) + validator.validate(json_data) diff --git a/test/e2e/cws-tests/tests/test_e2e_cws_docker.py b/test/e2e/cws-tests/tests/test_e2e_cws_docker.py index 0053f9bf60be5..d68c339ade283 100644 --- a/test/e2e/cws-tests/tests/test_e2e_cws_docker.py +++ b/test/e2e/cws-tests/tests/test_e2e_cws_docker.py @@ -10,6 +10,7 
@@ from lib.const import SECURITY_START_LOG, SYS_PROBE_START_LOG from lib.cws.app import App from lib.cws.policy import PolicyLoader +from lib.cws.schemas import JsonSchemaValidator from lib.docker import DockerHelper from lib.log import wait_agent_log from lib.stepper import Step @@ -133,6 +134,25 @@ def test_open_signal(self): self.app.check_policy_found(self, attributes, "file", "default.policy") self.app.check_for_ignored_policies(self, attributes) + with Step(msg="check self_tests", emoji=":test_tube:"): + rule_id = "self_test" + event = self.app.wait_app_log(f"rule_id:{rule_id}") + attributes = event["data"][0]["attributes"]["attributes"] + if "date" in attributes: + attributes["date"] = attributes["date"].strftime("%Y-%m-%dT%H:%M:%S") + + self.assertEqual(rule_id, attributes["agent"]["rule_id"], "unable to find rule_id tag attribute") + self.assertTrue( + "failed_tests" not in attributes, + f"failed tests: {attributes['failed_tests']}" if "failed_tests" in attributes else "success", + ) + + jsonSchemaValidator = JsonSchemaValidator() + jsonSchemaValidator.validate_json_data("self_test.json", attributes) + + with Step(msg="wait for host tags (3m)", emoji=":alarm_clock:"): + time.sleep(3 * 60) + with Step(msg="wait for datadog.security_agent.runtime.running metric", emoji="\N{beer mug}"): self.app.wait_for_metric("datadog.security_agent.runtime.running", host=socket.gethostname()) diff --git a/test/e2e/cws-tests/tests/test_e2e_cws_kubernetes.py b/test/e2e/cws-tests/tests/test_e2e_cws_kubernetes.py index 6ab396cefdeef..9e956564c1d87 100644 --- a/test/e2e/cws-tests/tests/test_e2e_cws_kubernetes.py +++ b/test/e2e/cws-tests/tests/test_e2e_cws_kubernetes.py @@ -11,6 +11,7 @@ from lib.const import SECURITY_START_LOG, SYS_PROBE_START_LOG from lib.cws.app import App from lib.cws.policy import PolicyLoader +from lib.cws.schemas import JsonSchemaValidator from lib.kubernetes import KubernetesHelper from lib.log import wait_agent_log from lib.stepper import Step @@ 
-157,6 +158,22 @@ def test_open_signal(self): self.app.check_policy_found(self, attributes, "file", "test.policy") self.app.check_for_ignored_policies(self, attributes) + with Step(msg="check self_tests", emoji=":test_tube:"): + rule_id = "self_test" + event = self.app.wait_app_log(f"rule_id:{rule_id}") + attributes = event["data"][0]["attributes"]["attributes"] + if "date" in attributes: + attributes["date"] = attributes["date"].strftime("%Y-%m-%dT%H:%M:%S") + + self.assertEqual(rule_id, attributes["agent"]["rule_id"], "unable to find rule_id tag attribute") + self.assertTrue( + "failed_tests" not in attributes, + f"failed tests: {attributes['failed_tests']}" if "failed_tests" in attributes else "success", + ) + + jsonSchemaValidator = JsonSchemaValidator() + jsonSchemaValidator.validate_json_data("self_test.json", attributes) + with Step(msg="wait for datadog.security_agent.runtime.running metric", emoji="\N{beer mug}"): self.app.wait_for_metric("datadog.security_agent.runtime.running", host=TestE2EKubernetes.hostname) diff --git a/test/new-e2e/utils/clients/ssh.go b/test/new-e2e/utils/clients/ssh.go index 6180110c866c4..79ef2e25bf721 100644 --- a/test/new-e2e/utils/clients/ssh.go +++ b/test/new-e2e/utils/clients/ssh.go @@ -17,7 +17,7 @@ import ( ) // GetSSHClient returns an ssh Client for the specified host -func GetSSHClient(user, host, privateKey string, retryInterval time.Duration, maxRetries uint64) (client *ssh.Client, session *ssh.Session, err error) { +func GetSSHClient(user, host string, privateKey []byte, retryInterval time.Duration, maxRetries uint64) (client *ssh.Client, session *ssh.Session, err error) { err = backoff.Retry(func() error { client, session, err = getSSHClient(user, host, privateKey) return err @@ -26,11 +26,11 @@ func GetSSHClient(user, host, privateKey string, retryInterval time.Duration, ma return } -func getSSHClient(user, host, privateKey string) (*ssh.Client, *ssh.Session, error) { +func getSSHClient(user, host string, privateKey 
[]byte) (*ssh.Client, *ssh.Session, error) { var auth ssh.AuthMethod - if privateKey != "" { - privateKeyAuth, err := ssh.ParsePrivateKey([]byte(privateKey)) + if privateKey != nil { + privateKeyAuth, err := ssh.ParsePrivateKey(privateKey) if err != nil { return nil, nil, err } diff --git a/test/new-e2e/utils/e2e/client/agent.go b/test/new-e2e/utils/e2e/client/agent.go index 2a5dafc4e90bf..704b65c42bf60 100644 --- a/test/new-e2e/utils/e2e/client/agent.go +++ b/test/new-e2e/utils/e2e/client/agent.go @@ -7,12 +7,15 @@ package client import ( "errors" + "os" "regexp" "testing" "time" + "github.com/DataDog/datadog-agent/test/new-e2e/runner" + "github.com/DataDog/datadog-agent/test/new-e2e/runner/parameters" "github.com/DataDog/test-infra-definitions/components/datadog/agent" - "github.com/DataDog/test-infra-definitions/components/os" + e2eOs "github.com/DataDog/test-infra-definitions/components/os" "github.com/cenkalti/backoff" ) @@ -24,7 +27,7 @@ var _ clientService[agent.ClientData] = (*Agent)(nil) type Agent struct { *UpResultDeserializer[agent.ClientData] *vmClient - os os.OS + os e2eOs.OS } // Create a new instance of Agent @@ -37,7 +40,21 @@ func NewAgent(installer *agent.Installer) *Agent { //lint:ignore U1000 Ignore unused function as this function is call using reflection func (agent *Agent) initService(t *testing.T, data *agent.ClientData) error { var err error - agent.vmClient, err = newVMClient(t, "", &data.Connection) + var privateSshKey []byte + + privateKeyPath, err := runner.GetProfile().ParamStore().GetWithDefault(parameters.PrivateKeyPath, "") + if err != nil { + return err + } + + if privateKeyPath != "" { + privateSshKey, err = os.ReadFile(privateKeyPath) + if err != nil { + return err + } + } + + agent.vmClient, err = newVMClient(t, privateSshKey, &data.Connection) return err } diff --git a/test/new-e2e/utils/e2e/client/vm.go b/test/new-e2e/utils/e2e/client/vm.go index 9879e888bce32..b01bc3ca7550f 100644 --- 
a/test/new-e2e/utils/e2e/client/vm.go +++ b/test/new-e2e/utils/e2e/client/vm.go @@ -7,8 +7,11 @@ package client import ( + "os" "testing" + "github.com/DataDog/datadog-agent/test/new-e2e/runner" + "github.com/DataDog/datadog-agent/test/new-e2e/runner/parameters" commonvm "github.com/DataDog/test-infra-definitions/components/vm" ) @@ -30,6 +33,20 @@ func NewVM(infraVM commonvm.VM) *VM { //lint:ignore U1000 Ignore unused function as this function is call using reflection func (vm *VM) initService(t *testing.T, data *commonvm.ClientData) error { var err error - vm.vmClient, err = newVMClient(t, "", &data.Connection) + var privateSshKey []byte + + privateKeyPath, err := runner.GetProfile().ParamStore().GetWithDefault(parameters.PrivateKeyPath, "") + if err != nil { + return err + } + + if privateKeyPath != "" { + privateSshKey, err = os.ReadFile(privateKeyPath) + if err != nil { + return err + } + } + + vm.vmClient, err = newVMClient(t, privateSshKey, &data.Connection) return err } diff --git a/test/new-e2e/utils/e2e/client/vm_client.go b/test/new-e2e/utils/e2e/client/vm_client.go index f8d4f60011be8..79967fdafbd68 100644 --- a/test/new-e2e/utils/e2e/client/vm_client.go +++ b/test/new-e2e/utils/e2e/client/vm_client.go @@ -21,7 +21,7 @@ type vmClient struct { t *testing.T } -func newVMClient(t *testing.T, sshKey string, connection *utils.Connection) (*vmClient, error) { +func newVMClient(t *testing.T, sshKey []byte, connection *utils.Connection) (*vmClient, error) { client, _, err := clients.GetSSHClient( connection.User, fmt.Sprintf("%s:%d", connection.Host, 22),