diff --git a/.github/codeql-config.yml b/.github/codeql-config.yml deleted file mode 100644 index 056a23ff72c..00000000000 --- a/.github/codeql-config.yml +++ /dev/null @@ -1,9 +0,0 @@ -query-filters: - - exclude: - id: go/incorrect-integer-conversion - - exclude: - id: go/reflected-xss - - exclude: - id: go/allocation-size-overflow - - exclude: - id: go/log-injection diff --git a/.github/dependabot.yml b/.github/dependabot.yml deleted file mode 100644 index 95024fb3c44..00000000000 --- a/.github/dependabot.yml +++ /dev/null @@ -1,34 +0,0 @@ -version: 2 -updates: - - package-ecosystem: "gomod" - directory: "/" # Location of package manifests - schedule: - interval: "daily" - open-pull-requests-limit: 5 - groups: - golang.org/x: - patterns: - - "golang.org/x/*" - ignore: - - dependency-name: "k8s.io/*" - - dependency-name: "sigs.k8s.io/*" - - dependency-name: "github.com/containernetworking/*" - - dependency-name: "github.com/k8snetworkplumbingwg/*" - update-types: ["version-update:semver-major", "version-update:semver-minor"] # ignore all except for patch updates - - dependency-name: "github.com/vmware/go-ipfix" - - dependency-name: "github.com/TomCodeLV/OVSDB-golang-lib" - - dependency-name: "github.com/aws/*" # updates are too frequent - - dependency-name: "antrea.io/ofnet" - - dependency-name: "antrea.io/libOpenflow" - - dependency-name: "github.com/ClickHouse/clickhouse-go/v2" # auto-upgrade involves dependency conflicts - - package-ecosystem: "github-actions" - # Workflow files stored in the default location of `.github/workflows` - directory: "/" - schedule: - interval: "daily" - open-pull-requests-limit: 5 - groups: - artifact-actions: - patterns: - - "actions/upload-artifact" - - "actions/download-artifact" diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml deleted file mode 100644 index e06ce32904a..00000000000 --- a/.github/workflows/benchmark.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: Go Benchmark Test - -on: 
- push: - branches: - - main - -jobs: - go-benchmark-checks: - name: GoBenchmark - runs-on: ubuntu-latest - steps: - - name: Check out code into the Go module directory - uses: actions/checkout@v4 - with: - fetch-depth: 0 - show-progress: false - - - name: Set up Go using version from go.mod - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - - name: Install benchci - run: curl -sfL https://raw.githubusercontent.com/antrea-io/benchci/main/install.sh | sudo sh -s -- -b /usr/local/bin - - - name: Run benchmark - run: benchci -config test/performance/benchmark.yml diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml deleted file mode 100644 index 71c86d2657b..00000000000 --- a/.github/workflows/build.yml +++ /dev/null @@ -1,162 +0,0 @@ -name: Build and push latest image if needed - -on: - pull_request: - branches: - - main - - release-* - - feature/* - push: - branches: - - main - - release-* - - feature/* - -jobs: - check-changes: - name: Check whether tests need to be run based on diff - runs-on: [ubuntu-latest] - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - show-progress: false - - uses: antrea-io/has-changes@v2 - id: check_diff - with: - paths-ignore: docs/* ci/jenkins/* *.md hack/.notableofcontents - outputs: - has_changes: ${{ steps.check_diff.outputs.has_changes }} - - build: - needs: check-changes - if: ${{ needs.check-changes.outputs.has_changes == 'yes' || github.event_name == 'push' }} - runs-on: [ubuntu-latest] - steps: - - uses: actions/checkout@v4 - with: - show-progress: false - - name: Build Antrea amd64 Docker image without pushing to registry - if: ${{ github.repository != 'antrea-io/antrea' || github.event_name != 'push' || github.ref != 'refs/heads/main' }} - run: | - ./hack/build-antrea-linux-all.sh --pull - - name: Build and push Antrea amd64 Docker image to registry - if: ${{ github.repository == 'antrea-io/antrea' && github.event_name == 'push' && github.ref == 'refs/heads/main' }} - env: - 
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} - DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} - run: | - echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin - ./hack/build-antrea-linux-all.sh --pull --push-base-images - docker tag antrea/antrea-ubuntu:latest antrea/antrea-ubuntu-amd64:latest - docker push antrea/antrea-ubuntu-amd64:latest - - name: Trigger Antrea arm builds and multi-arch manifest update - if: ${{ github.repository == 'antrea-io/antrea' && github.event_name == 'push' && github.ref == 'refs/heads/main' }} - uses: benc-uk/workflow-dispatch@v1 - with: - repo: vmware-tanzu/antrea-build-infra - ref: refs/heads/main - workflow: Build Antrea ARM images and push manifest - token: ${{ secrets.ANTREA_BUILD_INFRA_WORKFLOW_DISPATCH_PAT }} - inputs: ${{ format('{{ "antrea-repository":"antrea-io/antrea", "antrea-ref":"{0}", "docker-tag":"{1}" }}', github.ref, 'latest') }} - - build-ubi: - needs: check-changes - if: ${{ needs.check-changes.outputs.has_changes == 'yes' || github.event_name == 'push' }} - runs-on: [ubuntu-latest] - steps: - - uses: actions/checkout@v4 - with: - show-progress: false - - name: Build Antrea UBI8 Docker image without pushing to registry - if: ${{ github.repository != 'antrea-io/antrea' || github.event_name != 'push' || github.ref != 'refs/heads/main' }} - run: | - ./hack/build-antrea-linux-all.sh --pull --distro ubi - - name: Build and push Antrea UBI8 Docker image to registry - if: ${{ github.repository == 'antrea-io/antrea' && github.event_name == 'push' && github.ref == 'refs/heads/main' }} - env: - DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} - DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} - run: | - echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin - ./hack/build-antrea-linux-all.sh --pull --push-base-images --distro ubi - docker push antrea/antrea-ubi:latest - - build-scale: - needs: check-changes - if: ${{ needs.check-changes.outputs.has_changes == 'yes' || 
github.event_name == 'push' }} - runs-on: [ubuntu-latest] - steps: - - uses: actions/checkout@v4 - with: - show-progress: false - - name: Build Antrea Agent Simulator Docker image - run: make build-scale-simulator - - name: Push Antrea Agent Simulator Docker image to registry - if: ${{ github.repository == 'antrea-io/antrea' && github.event_name == 'push' && github.ref == 'refs/heads/main' }} - env: - DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} - DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} - run: | - echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin - docker push antrea/antrea-ubuntu-simulator:latest - - build-windows: - needs: check-changes - if: ${{ needs.check-changes.outputs.has_changes == 'yes' || github.event_name == 'push' }} - runs-on: [windows-2019] - steps: - - uses: actions/checkout@v4 - with: - show-progress: false - - name: Build Antrea Windows Docker image - run: make build-windows - - name: Push Antrea Windows Docker image to registry - if: ${{ github.repository == 'antrea-io/antrea' && github.event_name == 'push' && github.ref == 'refs/heads/main' }} - env: - DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} - DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} - run: | - echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin - docker push antrea/antrea-windows:latest - shell: bash - - build-antrea-mc-controller: - needs: check-changes - if: ${{ needs.check-changes.outputs.has_changes == 'yes' || github.event_name == 'push' }} - runs-on: [ubuntu-latest] - steps: - - uses: actions/checkout@v4 - with: - show-progress: false - - name: Build antrea-mc-controller Docker image - run: make build-antrea-mc-controller - - name: Push antrea-mc-controller Docker image to registry - if: ${{ github.repository == 'antrea-io/antrea' && github.event_name == 'push' && github.ref == 'refs/heads/main' }} - env: - DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} - DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} - run: 
| - echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin - docker push antrea/antrea-mc-controller:latest - - build-flow-aggregator: - needs: check-changes - if: ${{ needs.check-changes.outputs.has_changes == 'yes' || github.event_name == 'push' }} - runs-on: [ubuntu-latest] - steps: - - uses: actions/checkout@v4 - with: - show-progress: false - - name: Build flow-aggregator Docker image - run: make flow-aggregator-image - - name: Check flow-aggregator Docker image - run: docker run antrea/flow-aggregator --version - - name: Push flow-aggregator Docker image to registry - if: ${{ github.repository == 'antrea-io/antrea' && github.event_name == 'push' && github.ref == 'refs/heads/main' }} - env: - DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} - DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} - run: | - echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin - docker push antrea/flow-aggregator:latest diff --git a/.github/workflows/build_tag.yml b/.github/workflows/build_tag.yml deleted file mode 100644 index 2eddbccb246..00000000000 --- a/.github/workflows/build_tag.yml +++ /dev/null @@ -1,115 +0,0 @@ -name: Build and push a release image - -on: - push: - tags: - - v* - -jobs: - get-version: - runs-on: [ubuntu-latest] - outputs: - version: ${{ steps.get-version.outputs.version }} - steps: - - name: Extract version from Github ref - id: get-version - env: - TAG: ${{ github.ref }} - run: | - version=${TAG:10} - echo "version=$version" >> $GITHUB_OUTPUT - - build: - runs-on: [ubuntu-latest] - needs: get-version - steps: - - uses: actions/checkout@v4 - with: - show-progress: false - - name: Build and push Antrea Ubuntu amd64 Docker image to registry - env: - DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} - DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} - VERSION: ${{ needs.get-version.outputs.version }} - run: | - ./hack/build-antrea-linux-all.sh --pull - echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" 
--password-stdin - docker tag antrea/antrea-ubuntu:"${VERSION}" antrea/antrea-ubuntu-amd64:"${VERSION}" - docker push antrea/antrea-ubuntu-amd64:"${VERSION}" - - name: Trigger Antrea arm builds and multi-arch manifest update - uses: benc-uk/workflow-dispatch@v1 - with: - repo: vmware-tanzu/antrea-build-infra - ref: refs/heads/main - workflow: Build Antrea ARM images and push manifest - token: ${{ secrets.ANTREA_BUILD_INFRA_WORKFLOW_DISPATCH_PAT }} - inputs: ${{ format('{{ "antrea-repository":"antrea-io/antrea", "antrea-ref":"{0}", "docker-tag":"{1}" }}', github.ref, needs.get-version.outputs.version) }} - - build-ubi: - runs-on: [ubuntu-latest] - needs: get-version - steps: - - uses: actions/checkout@v4 - with: - show-progress: false - - name: Build and push Antrea UBI8 amd64 Docker image to registry - env: - DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} - DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} - VERSION: ${{ needs.get-version.outputs.version }} - run: | - ./hack/build-antrea-linux-all.sh --pull --distro ubi - echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin - docker push antrea/antrea-ubi:"${VERSION}" - - build-windows: - runs-on: [windows-2019] - needs: get-version - steps: - - uses: actions/checkout@v4 - with: - show-progress: false - - name: Build Antrea Windows Docker image and push to registry - env: - DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} - DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} - VERSION: ${{ needs.get-version.outputs.version }} - run: | - make build-windows - echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin - docker push antrea/antrea-windows:"${VERSION}" - shell: bash - - build-antrea-mc-controller: - runs-on: [ubuntu-latest] - needs: get-version - steps: - - uses: actions/checkout@v4 - with: - show-progress: false - - name: Build antrea-mc-controller Docker image and push to registry - env: - DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} - DOCKER_PASSWORD: ${{ 
secrets.DOCKER_PASSWORD }} - VERSION: ${{ needs.get-version.outputs.version }} - run: | - make build-antrea-mc-controller - echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin - docker push antrea/antrea-mc-controller:"${VERSION}" - - build-flow-aggregator: - runs-on: [ubuntu-latest] - needs: get-version - steps: - - uses: actions/checkout@v4 - with: - show-progress: false - - name: Build flow-aggregator Docker image and push to registry - env: - DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} - DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} - VERSION: ${{ needs.get-version.outputs.version }} - run: | - make flow-aggregator-image - echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin - docker push antrea/flow-aggregator:"${VERSION}" diff --git a/.github/workflows/cancel_workflows.yml b/.github/workflows/cancel_workflows.yml deleted file mode 100644 index efb8c4e27de..00000000000 --- a/.github/workflows/cancel_workflows.yml +++ /dev/null @@ -1,16 +0,0 @@ -name: Cancel Workflows -on: - workflow_run: - workflows: ["Go", "Golicense", "Kind", "Build and push latest image if needed", "Antrea upgrade"] - types: - - requested -jobs: - cancel: - name: Cancel workflows - runs-on: ubuntu-latest - if: ${{ github.event.workflow_run.event == 'pull_request' }} - steps: - - uses: styfle/cancel-workflow-action@0.12.0 - with: - all_but_latest: true - workflow_id: ${{ github.event.workflow.id }} diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml deleted file mode 100644 index b50a2aa1086..00000000000 --- a/.github/workflows/codeql.yml +++ /dev/null @@ -1,88 +0,0 @@ -name: "Golang Code Analysis" - -on: - push: - branches: [ "main", release-* ] - pull_request: - branches: [ "main" ] - -jobs: - analyze-on-linux: - name: Analyze on Linux - runs-on: ubuntu-latest - permissions: - actions: read - contents: read - security-events: write - - strategy: - fail-fast: false - matrix: - language: [ 'go' ] - - steps: - - 
name: Checkout repository - uses: actions/checkout@v4 - with: - show-progress: false - - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v3 - with: - languages: ${{ matrix.language }} - config-file: ./.github/codeql-config.yml - - - name: Set up Go using version from go.mod - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - - name: "Build Application" - run: | - make bin - cd multicluster - make build - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 - with: - category: "/language:${{matrix.language}}" - - analyze-on-windows: - name: Analyze on Windows - runs-on: windows-latest - permissions: - actions: read - contents: read - security-events: write - - strategy: - fail-fast: false - matrix: - language: [ 'go' ] - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - show-progress: false - - - name: Initialize CodeQL - uses: github/codeql-action/init@v3 - with: - languages: ${{ matrix.language }} - config-file: ./.github/codeql-config.yml - - - name: Set up Go using version from go.mod - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - - name: Build Antrea windows binaries - run: make windows-bin - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 - with: - category: "/language:${{matrix.language}}" diff --git a/.github/workflows/conformance.yml b/.github/workflows/conformance.yml deleted file mode 100644 index b6a1cdcbf6b..00000000000 --- a/.github/workflows/conformance.yml +++ /dev/null @@ -1,92 +0,0 @@ -name: Manually run upstream conformance test on Linux -run-name: Run ${{ inputs.test-suite }} test with Antrea ${{ inputs.antrea-version }} and K8s ${{ inputs.k8s-version }} - -on: - workflow_dispatch: - inputs: - antrea-version: - description: The Antrea version to test. It could be a SHA-1 value, a branch, or a tag (e.g. a7b012b, release-1.12, v1.12.0). The main branch will be used if empty. 
- required: false - antrea-values: - description: The Antrea Chart values. Multiple values can be separated with commas (e.g. key1=val1,key2=val2). Default configuration will be tested if empty. - required: false - k8s-version: - description: The K8s version (e.g. v1.27.1) to test. Kind's default K8s version will be used if empty. - required: false - test-suite: - description: The test suite to run. Check run-k8s-e2e-tests.sh for which test cases these values represent. - type: choice - options: - - whole-conformance - - conformance - - network-policy - - sig-network - - all - default: whole-conformance - required: true - always-upload-logs: - description: Always upload logs regardless of the test result. - type: boolean - default: false - - -jobs: - test: - name: Run tests - runs-on: [ubuntu-latest] - steps: - - name: Free disk space - # https://github.com/actions/virtual-environments/issues/709 - run: | - sudo apt-get clean - df -h - - uses: actions/checkout@v4 - with: - ref: ${{ inputs.antrea-version }} - fetch-depth: 0 - show-progress: false - - name: Check if it is a released version - id: check-release - run: | - if git show-ref --tags --verify --quiet refs/tags/${{ inputs.antrea-version }}; then - echo "released=true" >> $GITHUB_OUTPUT - else - echo "released=false" >> $GITHUB_OUTPUT - fi - - name: Build Antrea image if required - if: ${{ steps.check-release.outputs.released == 'false' }} - run: | - ./hack/build-antrea-linux-all.sh --pull - - name: Install Kind - run: | - KIND_VERSION=$(head -n1 ./ci/kind/version) - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin - - name: Create K8s cluster - run: | - # The command also loads local antrea/antrea-ubuntu:latest into Nodes if it exists. 
- ./ci/kind/kind-setup.sh create kind \ - --k8s-version "${{ inputs.k8s-version }}" - - name: Install Antrea - run: | - if [ ${{ steps.check-release.outputs.released }} == 'true' ]; then - helm repo add antrea https://charts.antrea.io - helm repo update - helm install --namespace kube-system antrea antrea/antrea --version "${{ inputs.antrea-version }}" \ - --set "${{ inputs.antrea-values }}" - else - helm install --namespace kube-system antrea ./build/charts/antrea \ - --set "${{ inputs.antrea-values }}" - fi - kubectl rollout status -n kube-system ds/antrea-agent --timeout=5m - - name: Run e2e tests - run: | - ./ci/run-k8s-e2e-tests.sh "--e2e-${{ inputs.test-suite }}" - - name: Upload test log - uses: actions/upload-artifact@v4 - if: ${{ failure() || inputs.always-upload-logs }} - with: - name: sonobuoy.tar.gz - path: "*_sonobuoy_*.tar.gz" - retention-days: 7 diff --git a/.github/workflows/dependabot.yml b/.github/workflows/dependabot.yml deleted file mode 100644 index ebe3f1f3971..00000000000 --- a/.github/workflows/dependabot.yml +++ /dev/null @@ -1,57 +0,0 @@ -name: Dependabot Workflow - -# This workflow commits to Dependabot branches to ensure that the corresponding -# PRs can satisfy all status checks. - -# WARNING: Combining pull_request_target workflow trigger with an explicit -# checkout of an untrusted PR is a dangerous practice that may lead to -# repository compromise. -# See https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ -# To prevent repository compromise, the workflow jobs must only execute on PRs -# opened by Dependabot and which are labelled correctly (note that these two -# checks are somewhat redundant since labelling PRs require write access to the -# repository). 
-# An alternative is to use the "two-workflow method" (see -# https://docs.github.com/en/code-security/supply-chain-security/keeping-your-dependencies-updated-automatically/automating-dependabot-with-github-actions#handling-push-events), -# but that is more tedious to configure and should not be required here. - -on: - pull_request_target: - types: [labeled, synchronize] - -permissions: - contents: write - -jobs: - # This job ensures that "go mod tidy" is run for all Go modules included in - # this repository. - tidy: - name: Go tidiness for Dependabot PR - # 'dependencies' and 'go' are the default labels used by Dependabot when updating Go dependencies - if: ${{ github.actor == 'dependabot[bot]' && contains(github.event.pull_request.labels.*.name, 'dependencies') && contains(github.event.pull_request.labels.*.name, 'go') }} - runs-on: [ubuntu-latest] - steps: - - uses: actions/checkout@v4 - with: - # Check out the pull request HEAD - ref: ${{ github.event.pull_request.head.sha }} - token: ${{ secrets.ANTREA_BOT_WRITE_PAT }} - show-progress: false - - name: Set up Go using version from go.mod - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - name: Run go mod tidy - # the checks above (Github actor and PR labels) ensure that a malicious - # actor cannot open a PR with a modified "tidy" Makefile target and - # execute arbitrary code with write access and access to secrets. In - # particular, someone would need write access to the repo to add the - # "dependencies" and "go" labels. 
- run: make tidy - - name: Commit changes - uses: stefanzweifel/git-auto-commit-action@v5 - with: - commit_message: Go tidiness for Dependabot PR - commit_options: '--no-verify' - file_pattern: '**/go.mod **/go.sum' - disable_globbing: false diff --git a/.github/workflows/docker_update_base_windows.yml b/.github/workflows/docker_update_base_windows.yml deleted file mode 100644 index e715d3c1f17..00000000000 --- a/.github/workflows/docker_update_base_windows.yml +++ /dev/null @@ -1,47 +0,0 @@ -# Anyone with write permissions to the antrea-io/antrea Github repository can -# trigger this workflow manually, but please check with a maintainer first. The -# workflow will build and push the antrea/base-windows image. -name: Manually update antrea/base-windows Docker image - -on: - workflow_dispatch: - inputs: - antrea-repository: - description: 'The Antrea repository to check-out; it will typically be a personal Antrea fork such as "/antrea"' - required: true - type: string - antrea-ref: - description: 'The Git ref to use when checking-out the Antrea repository, usually a branch or tag name' - required: true - type: string - push: - description: 'Whether to push built base images to the Docker registry' - required: false - default: true - type: boolean - -jobs: - build: - runs-on: windows-2019 - steps: - - name: Check-out code - uses: actions/checkout@v4 - with: - repository: ${{ github.event.inputs.antrea-repository }} - ref: ${{ github.event.inputs.antrea-ref }} - show-progress: false - - name: Login to DockerHub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - name: Build and push Docker images - if: ${{ github.event.inputs.push }} - run: | - ./hack/build-antrea-windows-all.sh --pull --push-base-images - shell: bash - - name: Build Docker images without pushing - if: ${{ !github.event.inputs.push }} - run: | - ./hack/build-antrea-windows-all.sh --pull - shell: bash diff --git 
a/.github/workflows/go.yml b/.github/workflows/go.yml deleted file mode 100644 index a5f5034cf96..00000000000 --- a/.github/workflows/go.yml +++ /dev/null @@ -1,270 +0,0 @@ -name: Go -on: - pull_request: - branches: - - main - - release-* - - feature/* - push: - branches: - - main - - release-* - - feature/* - -jobs: - check-changes: - name: Check whether tests need to be run based on diff - runs-on: [ubuntu-latest] - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - show-progress: false - - uses: antrea-io/has-changes@v2 - id: check_diff - with: - paths-ignore: docs/* ci/jenkins/* *.md hack/.notableofcontents - outputs: - has_changes: ${{ steps.check_diff.outputs.has_changes }} - - # test-unit-ubuntu and test-unit-windows are intentionally not merged into one job with os matrix, otherwise the job - # wouldn't be expanded if it's skipped and the report of the required check would be missing. - # See https://github.com/antrea-io/antrea/issues/3563. - test-unit-ubuntu: - needs: check-changes - if: ${{ needs.check-changes.outputs.has_changes == 'yes' }} - name: Unit test (ubuntu-latest) - runs-on: [ubuntu-latest] - steps: - - name: Check-out code - uses: actions/checkout@v4 - with: - show-progress: false - - name: Set up Go using version from go.mod - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - name: Run unit tests - run: make test-unit - - name: Codecov - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN }} - file: .coverage/coverage-unit.txt - flags: unit-tests - name: codecov-unit-test - fail_ci_if_error: ${{ github.event_name == 'push' }} - - test-unit-windows: - needs: check-changes - if: ${{ needs.check-changes.outputs.has_changes == 'yes' }} - name: Unit test (windows-2022) - runs-on: [windows-2022] - steps: - - name: Check-out code - uses: actions/checkout@v4 - with: - show-progress: false - - name: Set up Go using version from go.mod - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' 
- - name: Run unit tests - run: make test-unit - - name: Codecov - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN }} - file: .coverage/coverage-unit.txt - flags: unit-tests - name: codecov-unit-test - fail_ci_if_error: ${{ github.event_name == 'push' }} - - test-integration: - needs: check-changes - if: ${{ needs.check-changes.outputs.has_changes == 'yes' }} - name: Integration test - runs-on: [ubuntu-latest] - steps: - - name: Check-out code - uses: actions/checkout@v4 - with: - show-progress: false - - name: Set up Go using version from go.mod - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - name: Run integration tests - run: | - ./build/images/ovs/build.sh - NO_PULL=1 make docker-test-integration - - name: Run integration tests for multicluster - run: | - cd multicluster - make test-integration - - name: Codecov - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN }} - files: .coverage/coverage-integration.txt,multicluster/.coverage/coverage-integration.txt - flags: integration-tests - name: codecov-integration-test - fail_ci_if_error: ${{ github.event_name == 'push' }} - - # golangci-lint-ubuntu and golangci-lint-macos are intentionally not merged into one job with os matrix, otherwise the - # job wouldn't be expanded if it's skipped and the report of the required check would be missing. - # See https://github.com/antrea-io/antrea/issues/3563. 
- golangci-lint-ubuntu: - needs: check-changes - if: ${{ needs.check-changes.outputs.has_changes == 'yes' }} - name: Golangci-lint (ubuntu-latest) - runs-on: [ubuntu-latest] - steps: - - name: Check-out code - uses: actions/checkout@v4 - with: - show-progress: false - - name: Set up Go using version from go.mod - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - name: Run golangci-lint - run: make golangci - - golangci-lint-macos: - needs: check-changes - if: ${{ needs.check-changes.outputs.has_changes == 'yes' }} - name: Golangci-lint (macos-latest) - runs-on: [macos-latest] - steps: - - name: Check-out code - uses: actions/checkout@v4 - with: - show-progress: false - - name: Set up Go using version from go.mod - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - name: Run golangci-lint - run: make golangci - - bin: - needs: check-changes - if: ${{ needs.check-changes.outputs.has_changes == 'yes' }} - name: Build Antrea and antctl binaries - runs-on: [ubuntu-latest] - steps: - - name: Check-out code - uses: actions/checkout@v4 - with: - show-progress: false - - name: Set up Go using version from go.mod - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - name: Build Antrea binaries for amd64 - run: GOARCH=amd64 make bin - - name: Build Antrea binaries for arm64 - run: GOARCH=arm64 make bin - - name: Build Antrea binaries for arm - run: GOARCH=arm make bin - - name: Build antctl binaries - run: make antctl - - name: Build Multi-cluster binaries - run: | - cd multicluster - make bin - - windows-bin: - needs: check-changes - if: ${{ needs.check-changes.outputs.has_changes == 'yes' }} - name: Build Antrea Windows binaries - runs-on: [ubuntu-latest] - steps: - - name: Check-out code - uses: actions/checkout@v4 - with: - show-progress: false - - name: Set up Go using version from go.mod - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - name: Build Antrea windows binaries - run: make windows-bin - - 
tidy-codegen-manifest: - needs: check-changes - if: ${{ needs.check-changes.outputs.has_changes == 'yes' }} - name: Check tidy, code generation and manifest - runs-on: [ubuntu-latest] - steps: - - name: Check-out code - uses: actions/checkout@v4 - with: - show-progress: false - - name: Set up Go using version from go.mod - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - # tidy check need to be run before code generation which will regenerate codes. - - name: Check tidy - run: make test-tidy - - name: Check code generation - run: ./ci/check-codegen.sh - - name: Check manifest - run: ./ci/check-manifest.sh - - name: Check copyright - run: ./ci/check-copyright.sh - - verify: - name: Verify docs and spelling - runs-on: [ubuntu-latest] - steps: - - name: Check-out code - uses: actions/checkout@v4 - with: - show-progress: false - - name: Set up Go using version from go.mod - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - name: Run verify scripts - run: make verify - - name: Checking for broken Markdown links - if: ${{ github.event_name == 'pull_request' }} - uses: gaurav-nelson/github-action-markdown-link-check@v1 - with: - # Check modified files only for pull requests. Cronjob "Verify docs" takes care of checking all markdown files. - check-modified-files-only: yes - base-branch: ${{ github.base_ref }} - config-file: 'hack/.md_links_config.json' - - name: Markdownlint - run: | - sudo npm install -g markdownlint-cli@0.38.0 - make markdownlint - - name: Checking whether autogenerated Helm chart documentation is up-to-date - working-directory: build/charts/ - run: | - make helm-docs - DIFF=$(git diff .) 
- if [ -n "$DIFF" ]; then - echo "The Helm chart documentation is out-of-date; please run 'make helm-docs' in 'build/charts/' and commit the changes" - exit 1 - fi - - benchmark: - needs: check-changes - if: ${{ needs.check-changes.outputs.has_changes == 'yes' }} - name: Go benchmark test - runs-on: [ubuntu-latest] - steps: - - name: Check-out code - uses: actions/checkout@v4 - with: - show-progress: false - - name: Set up Go using version from go.mod - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - name: Run Go benchmark test - run: go test -run '^$' -bench . -benchtime 1x -timeout 10m -cpu 4 -v -benchmem ./pkg/... diff --git a/.github/workflows/golicense.yml b/.github/workflows/golicense.yml deleted file mode 100644 index c5a39fbe43d..00000000000 --- a/.github/workflows/golicense.yml +++ /dev/null @@ -1,70 +0,0 @@ -name: Golicense -on: - pull_request: - branches: - - main - - release-* - - feature/* - push: - branches: - - main - - release-* - - feature/* - release: - types: [published] - -jobs: - check-changes: - name: Check whether tests need to be run based on diff - runs-on: [ubuntu-latest] - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - show-progress: false - - uses: antrea-io/has-changes@v2 - id: check_diff - with: - paths-ignore: docs/* ci/jenkins/* *.md hack/.notableofcontents - outputs: - has_changes: ${{ steps.check_diff.outputs.has_changes }} - - golicense: - needs: check-changes - if: ${{ needs.check-changes.outputs.has_changes == 'yes' || github.event_name != 'pull_request' }} - runs-on: [ubuntu-latest] - steps: - - uses: actions/checkout@v4 - with: - show-progress: false - - name: Set up Go using version from go.mod - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - name: Cache licensing information for dependencies - uses: actions/cache@v3 - id: cache - env: - cache-name: cache-lichen-deps-licensing-info - with: - path: license-reports - key: ${{ runner.os }}-${{ env.cache-name }}-${{ 
hashFiles('**/go.sum', 'ci/golicense/**') }} - - run: mkdir antrea-bins - - name: Build assets - run: | - export VERSION="$(head VERSION)" - ./hack/release/prepare-assets.sh ./antrea-bins - - name: Build Linux binaries - run: BINDIR=./antrea-bins make bin - - name: Run lichen - if: steps.cache.outputs.cache-hit != 'true' - run: | - mkdir license-reports - ./ci/golicense/run.sh ./antrea-bins ./license-reports - - name: Upload licensing information - if: ${{ always() }} - uses: actions/upload-artifact@v4 - with: - name: licenses.deps - path: license-reports/ALL.deps.txt - retention-days: 90 # max value diff --git a/.github/workflows/kind.yml b/.github/workflows/kind.yml index 83e00fd3707..8812a711d07 100644 --- a/.github/workflows/kind.yml +++ b/.github/workflows/kind.yml @@ -13,25 +13,8 @@ on: jobs: - check-changes: - name: Check whether tests need to be run based on diff - runs-on: [ubuntu-latest] - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - show-progress: false - - uses: antrea-io/has-changes@v2 - id: check_diff - with: - paths-ignore: docs/* ci/jenkins/* *.md hack/.notableofcontents plugins/* - outputs: - has_changes: ${{ steps.check_diff.outputs.has_changes }} - build-antrea-coverage-image: name: Build Antrea image to be used for Kind e2e tests - needs: check-changes - if: ${{ needs.check-changes.outputs.has_changes == 'yes' }} runs-on: [ubuntu-latest] steps: - uses: actions/checkout@v4 @@ -49,155 +32,6 @@ jobs: path: antrea-ubuntu.tar retention-days: 1 # minimum value, in case artifact deletion by 'artifact-cleanup' job fails - build-flow-aggregator-coverage-image: - name: Build Flow Aggregator image to be used for Kind e2e tests - needs: check-changes - if: ${{ needs.check-changes.outputs.has_changes == 'yes' }} - runs-on: [ ubuntu-latest ] - steps: - - uses: actions/checkout@v4 - with: - show-progress: false - - run: make flow-aggregator-ubuntu-coverage - - name: Save Flow Aggregator image to tarball - run: docker save -o 
flow-aggregator.tar antrea/flow-aggregator-coverage:latest - - name: Upload Flow Aggregator image for subsequent jobs - uses: actions/upload-artifact@v4 - with: - name: flow-aggregator-cov - path: flow-aggregator.tar - retention-days: 1 # minimum value, in case artifact deletion by 'artifact-cleanup' job fails - - test-e2e-encap: - name: E2e tests on a Kind cluster on Linux - needs: [build-antrea-coverage-image] - runs-on: [ubuntu-latest] - steps: - - name: Free disk space - # https://github.com/actions/virtual-environments/issues/709 - run: | - sudo apt-get clean - df -h - - uses: actions/checkout@v4 - with: - show-progress: false - - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - name: Download Antrea image from previous job - uses: actions/download-artifact@v4 - with: - name: antrea-ubuntu-cov - - name: Load Antrea image - run: | - docker load -i antrea-ubuntu.tar - - name: Install Kind - run: | - KIND_VERSION=$(head -n1 ./ci/kind/version) - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin - - name: Run e2e tests - run: | - mkdir log - mkdir test-e2e-encap-coverage - ANTREA_LOG_DIR=$PWD/log ANTREA_COV_DIR=$PWD/test-e2e-encap-coverage ./ci/kind/test-e2e-kind.sh --encap-mode encap --coverage - - name: Tar coverage files - run: tar -czf test-e2e-encap-coverage.tar.gz test-e2e-encap-coverage - - name: Upload coverage for test-e2e-encap-coverage - uses: actions/upload-artifact@v4 - with: - name: test-e2e-encap-coverage - path: test-e2e-encap-coverage.tar.gz - retention-days: 30 - - name: Codecov - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN }} - file: '*.cov.out*' - flags: kind-e2e-tests - name: codecov-test-e2e-encap - directory: test-e2e-encap-coverage - fail_ci_if_error: ${{ github.event_name == 'push' }} - - name: Tar log files - if: ${{ failure() }} - run: tar -czf log.tar.gz log - - name: Upload 
test log - uses: actions/upload-artifact@v4 - if: ${{ failure() }} - with: - name: e2e-kind-encap.tar.gz - path: log.tar.gz - retention-days: 30 - - test-e2e-encap-non-default: - name: E2e tests on a Kind cluster on Linux with non default values (proxyAll=true, LoadBalancerMode=DSR, NodeIPAM=true) - needs: [build-antrea-coverage-image] - runs-on: [ubuntu-latest] - steps: - - name: Free disk space - # https://github.com/actions/virtual-environments/issues/709 - run: | - sudo apt-get clean - df -h - - uses: actions/checkout@v4 - with: - show-progress: false - - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - name: Download Antrea image from previous job - uses: actions/download-artifact@v4 - with: - name: antrea-ubuntu-cov - - name: Load Antrea image - run: | - docker load -i antrea-ubuntu.tar - - name: Install Kind - run: | - KIND_VERSION=$(head -n1 ./ci/kind/version) - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin - - name: Run e2e tests - run: | - mkdir log - mkdir test-e2e-encap-non-default-coverage - ANTREA_LOG_DIR=$PWD/log ANTREA_COV_DIR=$PWD/test-e2e-encap-non-default-coverage ./ci/kind/test-e2e-kind.sh \ - --coverage \ - --encap-mode encap \ - --proxy-all \ - --feature-gates LoadBalancerModeDSR=true \ - --load-balancer-mode dsr \ - --node-ipam - - name: Tar coverage files - run: tar -czf test-e2e-encap-non-default-coverage.tar.gz test-e2e-encap-non-default-coverage - - name: Upload coverage for test-e2e-encap-non-default-coverage - uses: actions/upload-artifact@v4 - with: - name: test-e2e-encap-non-default-coverage - path: test-e2e-encap-non-default-coverage.tar.gz - retention-days: 30 - - name: Codecov - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN }} - file: '*.cov.out*' - flags: kind-e2e-tests - name: codecov-test-e2e-encap-non-default - directory: test-e2e-encap-non-default-coverage - 
fail_ci_if_error: ${{ github.event_name == 'push' }} - - name: Tar log files - if: ${{ failure() }} - run: tar -czf log.tar.gz log - - name: Upload test log - uses: actions/upload-artifact@v4 - if: ${{ failure() }} - with: - name: e2e-kind-encap-non-default.tar.gz - path: log.tar.gz - retention-days: 30 - test-e2e-encap-all-features-enabled: name: E2e tests on a Kind cluster on Linux with all features enabled needs: [build-antrea-coverage-image] @@ -239,486 +73,13 @@ jobs: --node-ipam \ --multicast - name: Tar coverage files - run: tar -czf test-e2e-encap-all-features-enabled-coverage.tar.gz test-e2e-encap-all-features-enabled-coverage + run: tar -czf test-e2e-encap-all-features-enabled-coverage.tar.gz test-e2e-encap-all-features-enabled-coverage log - name: Upload coverage for test-e2e-encap-all-features-enabled-coverage uses: actions/upload-artifact@v4 with: name: test-e2e-encap-all-features-enabled-coverage path: test-e2e-encap-all-features-enabled-coverage.tar.gz retention-days: 30 - - name: Codecov - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN }} - file: '*.cov.out*' - flags: kind-e2e-tests - name: codecov-test-e2e-encap-all-features-enabled - directory: test-e2e-encap-all-features-enabled-coverage - fail_ci_if_error: ${{ github.event_name == 'push' }} - - name: Tar log files - if: ${{ failure() }} - run: tar -czf log.tar.gz log - - name: Upload test log - uses: actions/upload-artifact@v4 - if: ${{ failure() }} - with: - name: e2e-kind-encap-all-features-enabled.tar.gz - path: log.tar.gz - retention-days: 30 - - test-e2e-noencap: - name: E2e tests on a Kind cluster on Linux (noEncap) - needs: [build-antrea-coverage-image] - runs-on: [ubuntu-latest] - steps: - - name: Free disk space - # https://github.com/actions/virtual-environments/issues/709 - run: | - sudo apt-get clean - df -h - - uses: actions/checkout@v4 - with: - show-progress: false - - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - name: Download 
Antrea image from previous job - uses: actions/download-artifact@v4 - with: - name: antrea-ubuntu-cov - - name: Load Antrea image - run: | - docker load -i antrea-ubuntu.tar - - name: Install Kind - run: | - KIND_VERSION=$(head -n1 ./ci/kind/version) - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin - - name: Run e2e tests - run: | - mkdir log - mkdir test-e2e-noencap-coverage - ANTREA_LOG_DIR=$PWD/log ANTREA_COV_DIR=$PWD/test-e2e-noencap-coverage ./ci/kind/test-e2e-kind.sh --encap-mode noEncap --coverage --skip mode-irrelevant - - name: Tar coverage files - run: tar -czf test-e2e-noencap-coverage.tar.gz test-e2e-noencap-coverage - - name: Upload coverage for test-e2e-noencap-coverage - uses: actions/upload-artifact@v4 - with: - name: test-e2e-noencap-coverage - path: test-e2e-noencap-coverage.tar.gz - retention-days: 30 - - name: Codecov - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN }} - file: '*.cov.out*' - flags: kind-e2e-tests - name: codecov-test-e2e-noencap - directory: test-e2e-noencap-coverage - fail_ci_if_error: ${{ github.event_name == 'push' }} - - name: Tar log files - if: ${{ failure() }} - run: tar -czf log.tar.gz log - - name: Upload test log - uses: actions/upload-artifact@v4 - if: ${{ failure() }} - with: - name: e2e-kind-noencap.tar.gz - path: log.tar.gz - retention-days: 30 - - test-e2e-hybrid: - name: E2e tests on a Kind cluster on Linux (hybrid) - needs: [build-antrea-coverage-image] - runs-on: [ubuntu-latest] - steps: - - name: Free disk space - # https://github.com/actions/virtual-environments/issues/709 - run: | - sudo apt-get clean - df -h - - uses: actions/checkout@v4 - with: - show-progress: false - - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - name: Download Antrea image from previous job - uses: actions/download-artifact@v4 - with: - name: antrea-ubuntu-cov - - name: 
Load Antrea image - run: | - docker load -i antrea-ubuntu.tar - - name: Install Kind - run: | - KIND_VERSION=$(head -n1 ./ci/kind/version) - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin - - name: Run e2e tests - run: | - mkdir log - mkdir test-e2e-hybrid-coverage - ANTREA_LOG_DIR=$PWD/log ANTREA_COV_DIR=$PWD/test-e2e-hybrid-coverage ./ci/kind/test-e2e-kind.sh --encap-mode hybrid --coverage --skip mode-irrelevant - - name: Tar coverage files - run: tar -czf test-e2e-hybrid-coverage.tar.gz test-e2e-hybrid-coverage - - name: Upload coverage for test-e2e-hybrid-coverage - uses: actions/upload-artifact@v4 - with: - name: test-e2e-hybrid-coverage - path: test-e2e-hybrid-coverage.tar.gz - retention-days: 30 - - name: Codecov - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN }} - file: '*.cov.out*' - flags: kind-e2e-tests - name: codecov-test-e2e-hybrid - directory: test-e2e-hybrid-coverage - fail_ci_if_error: ${{ github.event_name == 'push' }} - - name: Tar log files - if: ${{ failure() }} - run: tar -czf log.tar.gz log - - name: Upload test log - uses: actions/upload-artifact@v4 - if: ${{ failure() }} - with: - name: e2e-kind-hybrid.tar.gz - path: log.tar.gz - retention-days: 30 - - test-e2e-flow-visibility: - name: E2e tests on a Kind cluster on Linux for Flow Visibility - needs: [build-antrea-coverage-image, build-flow-aggregator-coverage-image] - runs-on: [ubuntu-latest] - steps: - - name: Free disk space - # https://github.com/actions/virtual-environments/issues/709 - run: | - sudo apt-get clean - sudo rm -rf /usr/share/dotnet - sudo rm -rf /opt/ghc - sudo rm -rf "/usr/local/share/boost" - sudo rm -rf "$AGENT_TOOLSDIRECTORY" - sudo rm -rf "/usr/local/lib/android" - df -h - - uses: actions/checkout@v4 - with: - show-progress: false - - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - name: Download Antrea 
image from previous job - uses: actions/download-artifact@v4 - with: - name: antrea-ubuntu-cov - - name: Load Antrea image - run: | - docker load -i antrea-ubuntu.tar - - name: Download Flow Aggregator image from previous job - uses: actions/download-artifact@v4 - with: - name: flow-aggregator-cov - - name: Load Flow Aggregator image - run: | - docker load -i flow-aggregator.tar - - name: Install Kind - run: | - KIND_VERSION=$(head -n1 ./ci/kind/version) - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin - - name: Run e2e tests - run: | - mkdir log - mkdir test-e2e-fa-coverage - ANTREA_LOG_DIR=$PWD/log ANTREA_COV_DIR=$PWD/test-e2e-fa-coverage ./ci/kind/test-e2e-kind.sh --encap-mode encap --coverage --flow-visibility --ip-family dual - - name: Tar coverage files - run: tar -czf test-e2e-fa-coverage.tar.gz test-e2e-fa-coverage - - name: Upload coverage for test-e2e-fa-coverage - uses: actions/upload-artifact@v4 - with: - name: test-e2e-fa-coverage - path: test-e2e-fa-coverage.tar.gz - retention-days: 30 - - name: Codecov - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN }} - file: '*.cov.out*' - flags: kind-e2e-tests - name: codecov-test-e2e-fa - directory: test-e2e-fa-coverage - fail_ci_if_error: ${{ github.event_name == 'push' }} - - name: Tar log files - if: ${{ failure() }} - run: tar -czf log.tar.gz log - - name: Upload test log - uses: actions/upload-artifact@v4 - if: ${{ failure() }} - with: - name: e2e-kind-fa.tar.gz - path: log.tar.gz - retention-days: 30 - - test-network-policy-conformance-encap: - name: NetworkPolicy conformance tests on a Kind cluster on Linux - needs: [build-antrea-coverage-image] - runs-on: [ubuntu-latest] - steps: - - name: Free disk space - # https://github.com/actions/virtual-environments/issues/709 - run: | - sudo apt-get clean - df -h - - uses: actions/checkout@v4 - with: - show-progress: 
false - - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - name: Download Antrea image from previous job - uses: actions/download-artifact@v4 - with: - name: antrea-ubuntu-cov - - name: Load Antrea image - run: | - docker load -i antrea-ubuntu.tar - docker tag antrea/antrea-ubuntu-coverage:latest antrea/antrea-ubuntu:latest - - name: Install Kind - run: | - KIND_VERSION=$(head -n1 ./ci/kind/version) - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin - - name: Run NetworkPolicy conformance tests - run: | - mkdir log - ANTREA_LOG_DIR=$PWD/log ./ci/kind/test-netpol-v2-conformance-kind.sh - - name: Tar log files - if: ${{ failure() }} - run: tar -czf log.tar.gz log - - name: Upload test log - uses: actions/upload-artifact@v4 - if: ${{ failure() }} - with: - name: networkpolicy-conformance-kind-encap.tar.gz - path: log.tar.gz - retention-days: 30 - - test-upgrade-from-N-1: - name: Upgrade from Antrea version N-1 - needs: build-antrea-coverage-image - runs-on: [ubuntu-latest] - steps: - - name: Free disk space - # https://github.com/actions/virtual-environments/issues/709 - run: | - sudo apt-get clean - df -h - - uses: actions/checkout@v4 - with: - show-progress: false - - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - name: Download Antrea image from previous job - uses: actions/download-artifact@v4 - with: - name: antrea-ubuntu-cov - - name: Load Antrea image - run: | - docker load -i antrea-ubuntu.tar - docker tag antrea/antrea-ubuntu-coverage:latest antrea/antrea-ubuntu:latest - - name: Install Kind - run: | - KIND_VERSION=$(head -n1 ./ci/kind/version) - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin - - name: Run test - run: | - mkdir log - ANTREA_LOG_DIR=$PWD/log ./ci/kind/test-upgrade-antrea.sh 
--from-version-n-minus 1 - - name: Tar log files - if: ${{ failure() }} - run: tar -czf log.tar.gz log - - name: Upload test log - uses: actions/upload-artifact@v4 - if: ${{ failure() }} - with: - name: upgrade-from-antrea-version-n-1.tar.gz - path: log.tar.gz - retention-days: 30 - - test-upgrade-from-N-2: - name: Upgrade from Antrea version N-2 - needs: build-antrea-coverage-image - runs-on: [ubuntu-latest] - steps: - - name: Free disk space - # https://github.com/actions/virtual-environments/issues/709 - run: | - sudo apt-get clean - df -h - - uses: actions/checkout@v4 - with: - show-progress: false - - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - name: Download Antrea image from previous job - uses: actions/download-artifact@v4 - with: - name: antrea-ubuntu-cov - - name: Load Antrea image - run: | - docker load -i antrea-ubuntu.tar - docker tag antrea/antrea-ubuntu-coverage:latest antrea/antrea-ubuntu:latest - - name: Install Kind - run: | - KIND_VERSION=$(head -n1 ./ci/kind/version) - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin - - name: Run test - run: | - mkdir log - ANTREA_LOG_DIR=$PWD/log ./ci/kind/test-upgrade-antrea.sh --from-version-n-minus 2 - - name: Tar log files - if: ${{ failure() }} - run: tar -czf log.tar.gz log - - name: Upload test log - uses: actions/upload-artifact@v4 - if: ${{ failure() }} - with: - name: upgrade-from-antrea-version-n-2.tar.gz - path: log.tar.gz - retention-days: 30 - - test-compatible-N-1: - name: API compatible with client version N-1 - needs: build-antrea-coverage-image - runs-on: [ubuntu-latest] - steps: - - name: Free disk space - # https://github.com/actions/virtual-environments/issues/709 - run: | - sudo apt-get clean - df -h - - uses: actions/checkout@v4 - with: - show-progress: false - - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - name: Download Antrea 
image from previous job - uses: actions/download-artifact@v4 - with: - name: antrea-ubuntu-cov - - name: Load Antrea image - run: | - docker load -i antrea-ubuntu.tar - docker tag antrea/antrea-ubuntu-coverage:latest antrea/antrea-ubuntu:latest - - name: Install Kind - run: | - KIND_VERSION=$(head -n1 ./ci/kind/version) - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin - - name: Run test - run: | - mkdir log - ANTREA_LOG_DIR=$PWD/log ./ci/kind/test-upgrade-antrea.sh --from-version-n-minus 1 --controller-only - - name: Tar log files - if: ${{ failure() }} - run: tar -czf log.tar.gz log - - name: Upload test log - uses: actions/upload-artifact@v4 - if: ${{ failure() }} - with: - name: api-compatible-with-client-version-n-1.tar.gz - path: log.tar.gz - retention-days: 30 - - test-compatible-N-2: - name: API compatible with client version N-2 - needs: build-antrea-coverage-image - runs-on: [ubuntu-latest] - steps: - - name: Free disk space - # https://github.com/actions/virtual-environments/issues/709 - run: | - sudo apt-get clean - df -h - - uses: actions/checkout@v4 - with: - show-progress: false - - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - name: Download Antrea image from previous job - uses: actions/download-artifact@v4 - with: - name: antrea-ubuntu-cov - - name: Load Antrea image - run: | - docker load -i antrea-ubuntu.tar - docker tag antrea/antrea-ubuntu-coverage:latest antrea/antrea-ubuntu:latest - - name: Install Kind - run: | - KIND_VERSION=$(head -n1 ./ci/kind/version) - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin - - name: Run test - run: | - mkdir log - ANTREA_LOG_DIR=$PWD/log ./ci/kind/test-upgrade-antrea.sh --from-version-n-minus 2 --controller-only - - name: Tar log files - if: ${{ failure() }} - run: 
tar -czf log.tar.gz log - - name: Upload test log - uses: actions/upload-artifact@v4 - if: ${{ failure() }} - with: - name: api-compatible-with-client-version-n-2.tar.gz - path: log.tar.gz - retention-days: 30 - - validate-prometheus-metrics-doc: - name: Validate metrics in Prometheus document match running deployment's - needs: build-antrea-coverage-image - runs-on: [ubuntu-latest] - steps: - - name: Free disk space - # https://github.com/actions/virtual-environments/issues/709 - run: | - sudo apt-get clean - df -h - - uses: actions/checkout@v4 - with: - show-progress: false - - name: Download Antrea image from previous job - uses: actions/download-artifact@v4 - with: - name: antrea-ubuntu-cov - - name: Load Antrea image - run: | - docker load -i antrea-ubuntu.tar - docker tag antrea/antrea-ubuntu-coverage:latest antrea/antrea-ubuntu:latest - - name: Install Kind - run: | - KIND_VERSION=$(head -n1 ./ci/kind/version) - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin - - name: Validate document - run: | - ./ci/kind/validate-metrics-doc.sh # Runs after all other jobs in the workflow succeed and deletes Antrea Docker images uploaded as temporary # artifacts. It uses a third-party, MIT-licensed action (geekyeggo/delete-artifact). 
While Github @@ -728,19 +89,7 @@ jobs: name: Delete uploaded images needs: - build-antrea-coverage-image - - build-flow-aggregator-coverage-image - - test-e2e-encap - - test-e2e-encap-non-default - test-e2e-encap-all-features-enabled - - test-e2e-noencap - - test-e2e-hybrid - - test-upgrade-from-N-1 - - test-upgrade-from-N-2 - - test-compatible-N-1 - - test-compatible-N-2 - - validate-prometheus-metrics-doc - - test-e2e-flow-visibility - - test-network-policy-conformance-encap runs-on: [ubuntu-latest] steps: - name: Delete antrea-ubuntu-cov @@ -748,9 +97,3 @@ jobs: uses: geekyeggo/delete-artifact@v2 with: name: antrea-ubuntu-cov - - name: Delete flow-aggregator - if: ${{ needs.build-flow-aggregator-coverage-image.result == 'success' }} - uses: geekyeggo/delete-artifact@v2 - with: - name: flow-aggregator-cov - failOnError: false diff --git a/.github/workflows/lifecycle_management.yml b/.github/workflows/lifecycle_management.yml deleted file mode 100644 index f8d63b26a21..00000000000 --- a/.github/workflows/lifecycle_management.yml +++ /dev/null @@ -1,32 +0,0 @@ -name: "Issues and PRs lifecycle management" -on: - schedule: - # every day at midnight - - cron: "0 0 * * *" - -jobs: - stale: - if: github.repository == 'antrea-io/antrea' - runs-on: ubuntu-latest - steps: - - uses: actions/stale@v9 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove stale label or comment, or this will be closed in 90 days' - stale-pr-message: 'This PR is stale because it has been open 90 days with no activity. 
Remove stale label or comment, or this will be closed in 90 days' - stale-issue-label: 'lifecycle/stale' - stale-pr-label: 'lifecycle/stale' - days-before-stale: 90 - days-before-close: 90 - exempt-issue-labels: 'lifecycle/frozen' - exempt-pr-labels: 'lifecycle/frozen' - remove-stale-when-updated: true - debug-only: false - operations-per-run: 200 - skip: - if: github.repository != 'antrea-io/antrea' - runs-on: ubuntu-latest - steps: - - name: Skip - run: | - echo "Skipping lifecyle management because workflow cannot be run from fork" diff --git a/.github/workflows/netpol_cyclonus.yml b/.github/workflows/netpol_cyclonus.yml deleted file mode 100644 index 824fed5eb85..00000000000 --- a/.github/workflows/netpol_cyclonus.yml +++ /dev/null @@ -1,29 +0,0 @@ -name: Kind Netpol Cyclonus -on: - schedule: - # run once a day at midnight - - cron: '0 0 * * *' - - -jobs: - test-netpol-cyclonus: - name: Run Cyclonus network policy generator tests on Kind cluster - if: github.repository == 'antrea-io/antrea' - runs-on: [ubuntu-latest] - steps: - - name: Free disk space - # https://github.com/actions/virtual-environments/issues/709 - run: | - sudo apt-get clean - df -h - - uses: actions/checkout@v4 - - run: make - - name: Install Kind - run: | - KIND_VERSION=$(head -n1 ./ci/kind/version) - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64 - chmod +x ./kind - sudo mv kind /usr/local/bin - - name: Run cyclonus tests - working-directory: hack/netpol-generator - run: ./test-kind.sh diff --git a/.github/workflows/process_release.yml b/.github/workflows/process_release.yml deleted file mode 100644 index 5af80cadbd8..00000000000 --- a/.github/workflows/process_release.yml +++ /dev/null @@ -1,66 +0,0 @@ -name: Process new release - -on: - release: - types: [published] - -jobs: - upload-release-assets: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Set up Go using version from go.mod - uses: 
actions/setup-go@v5 - with: - go-version-file: 'go.mod' - # make sure the latest patch version is used - check-latest: true - - name: Build assets - env: - TAG: ${{ github.ref }} - PRERELEASE: ${{ github.event.release.prerelease }} - run: | - mkdir assets - VERSION="${TAG:10}" ./hack/release/prepare-assets.sh ./assets - - name: Upload all assets - uses: alexellis/upload-assets@0.4.0 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - asset_paths: '["./assets/*"]' - - update-website: - name: Trigger website update for release - needs: upload-release-assets - runs-on: ubuntu-latest - steps: - - id: get-version - env: - TAG: ${{ github.ref }} - run: | - version=${TAG:10} - echo "version=$version" >> $GITHUB_OUTPUT - - name: Update website source - uses: benc-uk/workflow-dispatch@v1 - with: - repo: antrea-io/website - ref: refs/heads/main - workflow: Update website source - token: ${{ secrets.ANTREA_WEBSITE_WORKFLOW_DISPATCH_PAT }} - inputs: ${{ format('{{ "antrea-repository":"antrea-io/antrea", "antrea-ref":"{0}" }}', steps.get-version.outputs.version) }} - - name: Update Helm index with Antrea archive - uses: benc-uk/workflow-dispatch@v1 - with: - repo: antrea-io/website - ref: refs/heads/main - workflow: Update Helm index - token: ${{ secrets.ANTREA_WEBSITE_WORKFLOW_DISPATCH_PAT }} - inputs: ${{ format('{{ "archive-url":"https://github.com/antrea-io/antrea/releases/download/{0}/antrea-chart.tgz" }}', steps.get-version.outputs.version) }} - - name: Update Helm index with Flow Aggregator archive - uses: benc-uk/workflow-dispatch@v1 - with: - repo: antrea-io/website - ref: refs/heads/main - workflow: Update Helm index - token: ${{ secrets.ANTREA_WEBSITE_WORKFLOW_DISPATCH_PAT }} - inputs: ${{ format('{{ "archive-url":"https://github.com/antrea-io/antrea/releases/download/{0}/flow-aggregator-chart.tgz" }}', steps.get-version.outputs.version) }} diff --git a/.github/workflows/trivy_scan.yml b/.github/workflows/trivy_scan.yml deleted file mode 100644 index 
342c56af164..00000000000 --- a/.github/workflows/trivy_scan.yml +++ /dev/null @@ -1,77 +0,0 @@ -name: Scan Antrea Docker image for vulnerabilities every day - -on: - schedule: - # every day at 10am - - cron: '0 10 * * *' - workflow_dispatch: - inputs: - # This is useful for testing an arbitrary released version of Antrea. - # If left unset, we will use the latest release (obtained using the Github API). - antrea-version: - description: 'The released Antrea version to scan' - type: string - required: false - -jobs: - build: - if: github.repository == 'antrea-io/antrea' - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Find greatest Antrea version - id: find-antrea-greatest-version - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - VERSION=${{ github.event.inputs.antrea-version }} - if [ -z "$VERSION" ]; then - VERSION=$(gh api /repos/antrea-io/antrea/releases/latest --jq '.tag_name') - fi - echo "antrea_version=$VERSION" >> $GITHUB_OUTPUT - - name: Pull Antrea Docker images - id: pull - run: | - docker pull antrea/antrea-ubuntu:latest - docker pull antrea/antrea-ubuntu:${{ steps.find-antrea-greatest-version.outputs.antrea_version }} - - name: Run Trivy vulnerability scanner on latest Antrea Docker image - if: ${{ always() && steps.pull.conclusion == 'success' }} - uses: aquasecurity/trivy-action@0.16.0 - # we cannot use .trivy.yml as we need to override some config parameters - # and that is not supported by aquasecurity/trivy-action - with: - scan-type: 'image' - image-ref: 'antrea/antrea-ubuntu:latest' - exit-code: '1' - ignore-unfixed: true - severity: 'CRITICAL,HIGH' - # whereabouts project doesn't upgrade dependencies frequently - skip-files: '/opt/cni/bin/whereabouts' - format: 'table' - output: 'trivy.latest.txt' - - name: Run Trivy vulnerability scanner on Antrea Docker image for latest released version - if: ${{ always() && steps.pull.conclusion == 'success' }} - uses: aquasecurity/trivy-action@0.16.0 - with: - scan-type: 
'image' - image-ref: 'antrea/antrea-ubuntu:${{ steps.find-antrea-greatest-version.outputs.antrea_version }}' - exit-code: '1' - ignore-unfixed: true - severity: 'CRITICAL,HIGH' - skip-files: '/opt/cni/bin/whereabouts' - format: 'table' - output: 'trivy.${{ steps.find-antrea-greatest-version.outputs.antrea_version }}.txt' - - name: Upload Trivy scan reports - if: ${{ always() && steps.pull.conclusion == 'success' }} - uses: actions/upload-artifact@v4 - with: - name: trivy-scan-reports - path: trivy.*.txt - retention-days: 90 # max value - skip: - if: github.repository != 'antrea-io/antrea' - runs-on: ubuntu-latest - steps: - - name: Skip - run: | - echo "Skipping image scan because workflow cannot be run from fork" diff --git a/.github/workflows/trivy_scan_before_release.yml b/.github/workflows/trivy_scan_before_release.yml deleted file mode 100644 index 8b9fafbb682..00000000000 --- a/.github/workflows/trivy_scan_before_release.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: Scan Antrea Docker image for vulnerabilities before release - -on: - pull_request: - branches: - - release-* - -jobs: - build: - if: startsWith(github.event.pull_request.title, 'Release ') - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Build Antrea Docker image - run: | - ./hack/build-antrea-linux-all.sh --pull - - name: Run Trivy vulnerability scanner on Antrea Docker image - uses: aquasecurity/trivy-action@0.16.0 - with: - scan-type: 'image' - image-ref: 'antrea/antrea-ubuntu:latest' - trivy-config: '.trivy.yml' diff --git a/.github/workflows/update_changelog.yml b/.github/workflows/update_changelog.yml deleted file mode 100644 index 0bd740a8adc..00000000000 --- a/.github/workflows/update_changelog.yml +++ /dev/null @@ -1,60 +0,0 @@ -name: Update CHANGELOG after release - -on: - push: - tags: - - v* - -jobs: - check-version: - runs-on: [ubuntu-latest] - outputs: - version: ${{ steps.get-version.outputs.version }} - steps: - - name: Extract version from Github ref - id: 
get-version - env: - TAG: ${{ github.ref }} - shell: bash - run: | - version=${TAG:10} - if [[ "$version" == *-* ]]; then - echo "$version is a release candidate or a pre-release" - exit 0 - fi - echo "version=$version" >> $GITHUB_OUTPUT - - pr-update-changelog: - runs-on: [ubuntu-latest] - needs: check-version - if: ${{ needs.check-version.outputs.version != '' }} - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - ref: main - - name: Cherry-pick changelog commit - env: - VERSION: ${{ needs.check-version.outputs.version }} - shell: bash - run: | - git config user.name github-actions - git config user.email github-actions@github.com - commit_hash=$(git log "$VERSION" --format="%H" --grep="Update CHANGELOG for $VERSION release") - if [[ -z "$commit_hash" ]]; then - echo "Cannot find commit" - exit 1 - fi - git cherry-pick "$commit_hash" - - name: Create Pull Request - uses: peter-evans/create-pull-request@v5 - with: - token: ${{ secrets.ANTREA_BOT_WRITE_PAT }} - delete-branch: true - title: "Update CHANGELOG for ${{ needs.check-version.outputs.version }} release" - body: | - PR was opened automatically from Github Actions - - name: Check outputs - run: | - echo "Pull Request Number - ${{ steps.cpr.outputs.pull-request-number }}" - echo "Pull Request URL - ${{ steps.cpr.outputs.pull-request-url }}" diff --git a/.github/workflows/verify_docs.yml b/.github/workflows/verify_docs.yml deleted file mode 100644 index 66366ab9b16..00000000000 --- a/.github/workflows/verify_docs.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: Verify docs - -on: - schedule: - # every day at 9am - - cron: '0 9 * * *' - -jobs: - verify: - name: Verify docs and spelling - if: github.repository == 'antrea-io/antrea' - runs-on: [ubuntu-latest] - steps: - - name: Check-out code - uses: actions/checkout@v4 - - name: Set up Go using version from go.mod - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - - name: Run verify scripts - run: make verify - - name: Checking for 
broken Markdown links for main branch - uses: gaurav-nelson/github-action-markdown-link-check@v1 - with: - folder-path: './docs' - file-path: './README.md, ./CHANGELOG.md, ./CONTRIBUTING.md, ./GOVERNANCE.md, ./MAINTAINERS.md, ./ROADMAP.md, ./SECURITY.md' - config-file: 'hack/.md_links_config.json' - - name: Markdownlint - run: | - sudo npm install -g markdownlint-cli@0.38.0 - make markdownlint diff --git a/.github/workflows/website.yml b/.github/workflows/website.yml deleted file mode 100644 index e004fcc5901..00000000000 --- a/.github/workflows/website.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: Update antrea.io website for main branch - -on: - push: - branches: - - main - paths: - - '**.md' - - 'docs/**' - - 'pkg/apis/**' - - 'hack/**' - -jobs: - update-website: - name: Trigger website update for main - if: github.repository == 'antrea-io/antrea' - runs-on: ubuntu-latest - steps: - - name: Update website source - uses: benc-uk/workflow-dispatch@v1 - with: - repo: antrea-io/website - ref: refs/heads/main - workflow: Update website source - token: ${{ secrets.ANTREA_WEBSITE_WORKFLOW_DISPATCH_PAT }} - inputs: ${{ format('{{ "antrea-repository":"antrea-io/antrea", "antrea-ref":"main" }}') }} diff --git a/build/charts/antrea/conf/antrea-agent.conf b/build/charts/antrea/conf/antrea-agent.conf index e93fe46c4fb..68936e4cf9d 100644 --- a/build/charts/antrea/conf/antrea-agent.conf +++ b/build/charts/antrea/conf/antrea-agent.conf @@ -79,6 +79,9 @@ featureGates: # Enable Egress traffic shaping. {{- include "featureGate" (dict "featureGates" .Values.featureGates "name" "EgressTrafficShaping" "default" false) }} +# Allow users to protect their Kubernetes Nodes. +{{- include "featureGate" (dict "featureGates" .Values.featureGates "name" "NodeNetworkPolicy" "default" false) }} + # Name of the OpenVSwitch bridge antrea-agent will create and use. # Make sure it doesn't conflict with your existing OpenVSwitch bridges. 
ovsBridge: {{ .Values.ovs.bridgeName | quote }} diff --git a/build/yamls/antrea-aks.yml b/build/yamls/antrea-aks.yml index 1ef2c3d9858..43a1fdfb0ae 100644 --- a/build/yamls/antrea-aks.yml +++ b/build/yamls/antrea-aks.yml @@ -5603,6 +5603,9 @@ data: # Enable Egress traffic shaping. # EgressTrafficShaping: false + # Allow users to protect their Kubernetes Nodes. + # NodeNetworkPolicy: false + # Name of the OpenVSwitch bridge antrea-agent will create and use. # Make sure it doesn't conflict with your existing OpenVSwitch bridges. ovsBridge: "br-int" @@ -6895,7 +6898,7 @@ spec: kubectl.kubernetes.io/default-container: antrea-agent # Automatically restart Pods with a RollingUpdate if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: e59e0431902646d46cba490279184fea2bdd3c8b486b5a7b1d3ece9a91614634 + checksum/config: 6ae9baebe4cc34fd846e8b760de31865cc0c3741057af862016a6e1e890fd314 labels: app: antrea component: antrea-agent @@ -7133,7 +7136,7 @@ spec: annotations: # Automatically restart Pod if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: e59e0431902646d46cba490279184fea2bdd3c8b486b5a7b1d3ece9a91614634 + checksum/config: 6ae9baebe4cc34fd846e8b760de31865cc0c3741057af862016a6e1e890fd314 labels: app: antrea component: antrea-controller diff --git a/build/yamls/antrea-eks.yml b/build/yamls/antrea-eks.yml index 93b1aabb4c0..c29ecaaca1e 100644 --- a/build/yamls/antrea-eks.yml +++ b/build/yamls/antrea-eks.yml @@ -5603,6 +5603,9 @@ data: # Enable Egress traffic shaping. # EgressTrafficShaping: false + # Allow users to protect their Kubernetes Nodes. + # NodeNetworkPolicy: false + # Name of the OpenVSwitch bridge antrea-agent will create and use. # Make sure it doesn't conflict with your existing OpenVSwitch bridges. 
ovsBridge: "br-int" @@ -6895,7 +6898,7 @@ spec: kubectl.kubernetes.io/default-container: antrea-agent # Automatically restart Pods with a RollingUpdate if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: e59e0431902646d46cba490279184fea2bdd3c8b486b5a7b1d3ece9a91614634 + checksum/config: 6ae9baebe4cc34fd846e8b760de31865cc0c3741057af862016a6e1e890fd314 labels: app: antrea component: antrea-agent @@ -7134,7 +7137,7 @@ spec: annotations: # Automatically restart Pod if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: e59e0431902646d46cba490279184fea2bdd3c8b486b5a7b1d3ece9a91614634 + checksum/config: 6ae9baebe4cc34fd846e8b760de31865cc0c3741057af862016a6e1e890fd314 labels: app: antrea component: antrea-controller diff --git a/build/yamls/antrea-gke.yml b/build/yamls/antrea-gke.yml index 3c6e6672e4d..f2e851855b5 100644 --- a/build/yamls/antrea-gke.yml +++ b/build/yamls/antrea-gke.yml @@ -5603,6 +5603,9 @@ data: # Enable Egress traffic shaping. # EgressTrafficShaping: false + # Allow users to protect their Kubernetes Nodes. + # NodeNetworkPolicy: false + # Name of the OpenVSwitch bridge antrea-agent will create and use. # Make sure it doesn't conflict with your existing OpenVSwitch bridges. 
ovsBridge: "br-int" @@ -6895,7 +6898,7 @@ spec: kubectl.kubernetes.io/default-container: antrea-agent # Automatically restart Pods with a RollingUpdate if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: 3b1758664de8044af1aa7454c64bd1a4911750e562e1ae9375c9c16a335a469d + checksum/config: 99ec0840193c5c4e9555308b4fd6c0919fbbabd72c64294da9f07593cfe23867 labels: app: antrea component: antrea-agent @@ -7131,7 +7134,7 @@ spec: annotations: # Automatically restart Pod if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: 3b1758664de8044af1aa7454c64bd1a4911750e562e1ae9375c9c16a335a469d + checksum/config: 99ec0840193c5c4e9555308b4fd6c0919fbbabd72c64294da9f07593cfe23867 labels: app: antrea component: antrea-controller diff --git a/build/yamls/antrea-ipsec.yml b/build/yamls/antrea-ipsec.yml index c9d98a1ad03..6875742176c 100644 --- a/build/yamls/antrea-ipsec.yml +++ b/build/yamls/antrea-ipsec.yml @@ -5616,6 +5616,9 @@ data: # Enable Egress traffic shaping. # EgressTrafficShaping: false + # Allow users to protect their Kubernetes Nodes. + # NodeNetworkPolicy: false + # Name of the OpenVSwitch bridge antrea-agent will create and use. # Make sure it doesn't conflict with your existing OpenVSwitch bridges. 
ovsBridge: "br-int" @@ -6908,7 +6911,7 @@ spec: kubectl.kubernetes.io/default-container: antrea-agent # Automatically restart Pods with a RollingUpdate if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: a34de3efa658ac40c9bde28e08832dd897259fdcf639beab9d4e47531d7da948 + checksum/config: bed76fae47a871cfea00357bd66a4487fa8c6e0fee3ef1f3b75a4ea753a694ef checksum/ipsec-secret: d0eb9c52d0cd4311b6d252a951126bf9bea27ec05590bed8a394f0f792dcb2a4 labels: app: antrea @@ -7190,7 +7193,7 @@ spec: annotations: # Automatically restart Pod if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: a34de3efa658ac40c9bde28e08832dd897259fdcf639beab9d4e47531d7da948 + checksum/config: bed76fae47a871cfea00357bd66a4487fa8c6e0fee3ef1f3b75a4ea753a694ef labels: app: antrea component: antrea-controller diff --git a/build/yamls/antrea.yml b/build/yamls/antrea.yml index be479357a08..7f78db469ec 100644 --- a/build/yamls/antrea.yml +++ b/build/yamls/antrea.yml @@ -5603,6 +5603,9 @@ data: # Enable Egress traffic shaping. # EgressTrafficShaping: false + # Allow users to protect their Kubernetes Nodes. + # NodeNetworkPolicy: false + # Name of the OpenVSwitch bridge antrea-agent will create and use. # Make sure it doesn't conflict with your existing OpenVSwitch bridges. 
ovsBridge: "br-int" @@ -6895,7 +6898,7 @@ spec: kubectl.kubernetes.io/default-container: antrea-agent # Automatically restart Pods with a RollingUpdate if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: aa947bf5c403412b9c8cfcbcc335659992f19bd428886e80f43bafa052bac1e6 + checksum/config: 074283b29ed959f0accc34c56898b51179bcf025c646a3bd3204856e662fc1ba labels: app: antrea component: antrea-agent @@ -7131,7 +7134,7 @@ spec: annotations: # Automatically restart Pod if the ConfigMap changes # See https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments - checksum/config: aa947bf5c403412b9c8cfcbcc335659992f19bd428886e80f43bafa052bac1e6 + checksum/config: 074283b29ed959f0accc34c56898b51179bcf025c646a3bd3204856e662fc1ba labels: app: antrea component: antrea-controller diff --git a/ci/kind/test-e2e-kind.sh b/ci/kind/test-e2e-kind.sh index a1a560d1778..0c71bccbe74 100755 --- a/ci/kind/test-e2e-kind.sh +++ b/ci/kind/test-e2e-kind.sh @@ -310,7 +310,7 @@ function run_test { if [ -n "$run" ]; then RUN_OPT="-run $run" fi - go test -v -timeout=$timeout $RUN_OPT antrea.io/antrea/test/e2e $flow_visibility_args -provider=kind --logs-export-dir=$ANTREA_LOG_DIR --skip-cases=$skiplist $coverage_args + go test -v -timeout=$timeout -run TestAntreaNodeNetworkPolicy $RUN_OPT antrea.io/antrea/test/e2e $flow_visibility_args -provider=kind --logs-export-dir=$ANTREA_LOG_DIR --skip-cases=$skiplist $coverage_args } if [[ "$mode" == "" ]] || [[ "$mode" == "encap" ]]; then diff --git a/cmd/antrea-agent/agent.go b/cmd/antrea-agent/agent.go index 44604492295..d9bc1f285da 100644 --- a/cmd/antrea-agent/agent.go +++ b/cmd/antrea-agent/agent.go @@ -140,6 +140,7 @@ func run(o *Options) error { enableAntreaIPAM := features.DefaultFeatureGate.Enabled(features.AntreaIPAM) enableBridgingMode := enableAntreaIPAM && o.config.EnableBridgingMode l7NetworkPolicyEnabled := 
features.DefaultFeatureGate.Enabled(features.L7NetworkPolicy) + nodeNetworkPolicyEnabled := features.DefaultFeatureGate.Enabled(features.NodeNetworkPolicy) enableMulticlusterGW := features.DefaultFeatureGate.Enabled(features.Multicluster) && o.config.Multicluster.EnableGateway enableMulticlusterNP := features.DefaultFeatureGate.Enabled(features.Multicluster) && o.config.Multicluster.EnableStretchedNetworkPolicy enableFlowExporter := features.DefaultFeatureGate.Enabled(features.FlowExporter) && o.config.FlowExporter.Enable @@ -219,7 +220,13 @@ func run(o *Options) error { egressConfig := &config.EgressConfig{ ExceptCIDRs: exceptCIDRs, } - routeClient, err := route.NewClient(networkConfig, o.config.NoSNAT, o.config.AntreaProxy.ProxyAll, connectUplinkToBridge, multicastEnabled, serviceCIDRProvider) + routeClient, err := route.NewClient(networkConfig, + o.config.NoSNAT, + o.config.AntreaProxy.ProxyAll, + connectUplinkToBridge, + nodeNetworkPolicyEnabled, + multicastEnabled, + serviceCIDRProvider) if err != nil { return fmt.Errorf("error creating route client: %v", err) } @@ -462,6 +469,7 @@ func run(o *Options) error { networkPolicyController, err := networkpolicy.NewNetworkPolicyController( antreaClientProvider, ofClient, + routeClient, ifaceStore, afero.NewOsFs(), nodeKey, @@ -471,6 +479,7 @@ func run(o *Options) error { groupIDUpdates, antreaPolicyEnabled, l7NetworkPolicyEnabled, + nodeNetworkPolicyEnabled, o.enableAntreaProxy, statusManagerEnabled, multicastEnabled, diff --git a/docs/antrea-network-policy.md b/docs/antrea-network-policy.md index 814220cfb41..f9e06694e28 100644 --- a/docs/antrea-network-policy.md +++ b/docs/antrea-network-policy.md @@ -20,6 +20,7 @@ - [ACNP for IGMP traffic](#acnp-for-igmp-traffic) - [ACNP for multicast egress traffic](#acnp-for-multicast-egress-traffic) - [ACNP for HTTP traffic](#acnp-for-http-traffic) + - [ACNP for Kubernetes Node traffic](#acnp-for-kubernetes-node-traffic) - [ACNP with log settings](#acnp-with-log-settings) - 
[Behavior of to and from selectors](#behavior-of-to-and-from-selectors) - [Key differences from K8s NetworkPolicy](#key-differences-from-k8s-networkpolicy) @@ -524,6 +525,56 @@ spec: Please refer to [Antrea Layer 7 NetworkPolicy](antrea-l7-network-policy.md) for extra information. +#### ACNP for Kubernetes Node traffic + +```yaml +apiVersion: crd.antrea.io/v1beta1 +kind: ClusterNetworkPolicy +metadata: + name: acnp-node-egress-traffic-drop +spec: + priority: 5 + tier: securityops + appliedTo: + - nodeSelector: # Select Nodes in the cluster with label, which means the policy will be applied to the + matchLabels: # selected Nodes. + kubernetes.io/os: linux + egress: + - action: Drop + to: + - ipBlock: + cidr: 192.168.1.0/24 + ports: + - protocol: TCP + port: 80 + name: dropHTTPTrafficToCIDR +``` + +```yaml +apiVersion: crd.antrea.io/v1beta1 +kind: ClusterNetworkPolicy +metadata: + name: acnp-node-ingress-traffic-drop +spec: + priority: 5 + tier: securityops + appliedTo: + - nodeSelector: # Select Nodes in the cluster with label, which means the policy will be applied to the + matchLabels: # selected Nodes. + kubernetes.io/os: linux + ingress: + - action: Drop + from: + - ipBlock: + cidr: 192.168.1.0/24 + ports: + - protocol: TCP + port: 22 + name: dropSSHTrafficFromCIDR +``` + +Please refer to [Antrea Node NetworkPolicy](antrea-node-network-policy.md) for more information. + #### ACNP with log settings ```yaml diff --git a/docs/antrea-node-network-policy.md b/docs/antrea-node-network-policy.md new file mode 100644 index 00000000000..a00f77e20b0 --- /dev/null +++ b/docs/antrea-node-network-policy.md @@ -0,0 +1,115 @@ +# Antrea Node NetworkPolicy + +## Table of Contents + + +- [Introduction](#introduction) +- [Prerequisites](#prerequisites) +- [Usage](#usage) +- [Limitations](#limitations) + + +## Introduction + +Node NetworkPolicy is designed to secure the network of Kubernetes Nodes. 
Starting with v1.15, Antrea introduces support +for Node NetworkPolicy, which provides the control over the network traffic in IP, transport protocol, and port grains. + +This guide demonstrates how to configure Node NetworkPolicy. + +## Prerequisites + +Node NetworkPolicy was introduced in v1.15 as an alpha feature and is disabled by default. A feature gate, `NodeNetworkPolicy`, +must be enabled in antrea-agent.conf in the `antrea-config` ConfigMap. An example configuration is as below: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: antrea-config + namespace: kube-system +data: + antrea-agent.conf: | + featureGates: + NodeNetworkPolicy: true +``` + +Alternatively, you can use the following helm installation command to configure the above options: + +```bash +helm install antrea antrea/antrea --namespace kube-system --set featureGates.NodeNetworkPolicy=true +``` + +## Usage + +Similar to layer 7 NetworkPolicy, there is no separate resource type for Node NetworkPolicy. It is one type of Antrea-native +policy applied to Kubernetes Nodes by specifying nodeSelector in the global `appliedTo` (not per-rule appliedTo). Other +fields remain the same as Antrea NetworkPolicy applied to Pods. 
+ +An example Node NetworkPolicy that blocks ingress traffic from Pods with label `app=client` to Nodes with label +`kubernetes.io/hostname: k8s-node-control-plane`: + +```yaml +apiVersion: crd.antrea.io/v1beta1 +kind: ClusterNetworkPolicy +metadata: + name: ingress-drop-pod-to-node +spec: + priority: 5 + tier: application + appliedTo: + - nodeSelector: + matchLabels: + kubernetes.io/hostname: k8s-node-control-plane + ingress: + - name: drop-80 + action: Drop + from: + - podSelector: + matchLabels: + app: client + ports: + - protocol: TCP + port: 80 +``` + +An example Node NetworkPolicy that blocks egress traffic from Nodes with the label `kubernetes.io/hostname: k8s-node-control-plane` +to Nodes with the label `kubernetes.io/hostname: k8s-node-worker-1` and some IP blocks: + +```yaml +apiVersion: crd.antrea.io/v1beta1 +kind: ClusterNetworkPolicy +metadata: + name: egress-drop-node-to-node +spec: + priority: 5 + tier: application + appliedTo: + - nodeSelector: + matchLabels: + kubernetes.io/hostname: k8s-node-control-plane + egress: + - name: drop-22 + action: Drop + to: + - nodeSelector: + matchLabels: + kubernetes.io/hostname: k8s-node-worker-1 + - ipBlock: + cidr: 192.168.77.0/24 + ports: + - protocol: TCP + port: 22 +``` + +## Limitations + +- This feature is currently only supported for Nodes running Linux. +- The policies applied to Nodes can be only specified in global `appliedTo` field, not in per-rule `appliedTo`, a `Group` + or a `ClusterGroup`. +- Policies applied to Nodes can be only specified in `ClusterNetworkPolicy`, not in `NetworkPolicy`. +- Policies applied to Nodes cannot be applied to Pods at the same time. +- FQDN in Node NetworkPolicy is not supported at this moment. +- Layer 7 NetworkPolicy in Node NetworkPolicy is not supported at this moment. +- With misconfiguration, it is possible to block traffic between Nodes and the API server, causing the Node to be unresponsive + or blocking all traffic to/from the cluster. 
Please exercise caution when configuring Node NetworkPolicy. +- For egress traffic, the action `Reject` defaults to `Drop`. diff --git a/docs/feature-gates.md b/docs/feature-gates.md index bec2c838fc1..b77a9e5a604 100644 --- a/docs/feature-gates.md +++ b/docs/feature-gates.md @@ -56,6 +56,7 @@ edit the Agent configuration in the | `L7NetworkPolicy` | Agent + Controller | `false` | Alpha | v1.10 | N/A | N/A | Yes | | | `AdminNetworkPolicy` | Controller | `false` | Alpha | v1.13 | N/A | N/A | Yes | | | `EgressTrafficShaping` | Agent | `false` | Alpha | v1.14 | N/A | N/A | Yes | OVS meters should be supported | +| `NodeNetworkPolicy` | Agent | `false` | Alpha | v1.15 | N/A | N/A | Yes | | ## Description and Requirements of Features @@ -404,6 +405,14 @@ this [document](antrea-l7-network-policy.md#prerequisites) for more information The `AdminNetworkPolicy` API (which currently includes the AdminNetworkPolicy and BaselineAdminNetworkPolicy objects) complements the Antrea-native policies and help cluster administrators to set security postures in a portable manner. +### NodeNetworkPolicy + +`NodeNetworkPolicy` allow users to protect their Kubernetes Nodes. + +#### Requirements for this Feature + +This feature is only supported for Linux Nodes at the moment. 
+ ### EgressTrafficShaping The `EgressTrafficShaping` feature gate of Antrea Agent enables traffic shaping of Egress, which could limit the diff --git a/multicluster/test/e2e/antreapolicy_test.go b/multicluster/test/e2e/antreapolicy_test.go index be911409807..e35d6b8e5a0 100644 --- a/multicluster/test/e2e/antreapolicy_test.go +++ b/multicluster/test/e2e/antreapolicy_test.go @@ -73,7 +73,7 @@ func initializeForPolicyTest(t *testing.T, data *MCTestData) { k8sUtils, err := antreae2e.NewKubernetesUtils(&d) failOnError(err, t) if clusterName != leaderCluster { - _, err = k8sUtils.Bootstrap(perClusterNamespaces, perNamespacePods, true) + _, err = k8sUtils.Bootstrap(perClusterNamespaces, perNamespacePods, true, nil, nil) failOnError(err, t) } clusterK8sUtilsMap[clusterName] = k8sUtils diff --git a/pkg/agent/config/node_config.go b/pkg/agent/config/node_config.go index ebba7f3da9e..5a4ab531579 100644 --- a/pkg/agent/config/node_config.go +++ b/pkg/agent/config/node_config.go @@ -50,6 +50,11 @@ const ( L7NetworkPolicyReturnPortName = "antrea-l7-tap1" ) +const ( + NodeNetworkPolicyIngressRulesChain = "ANTREA-POL-INGRESS-RULES" + NodeNetworkPolicyEgressRulesChain = "ANTREA-POL-EGRESS-RULES" +) + var ( // VirtualServiceIPv4 or VirtualServiceIPv6 is used in the following scenarios: // - The IP is used to perform SNAT for packets of Service sourced from Antrea gateway and destined for external diff --git a/pkg/agent/controller/networkpolicy/cache.go b/pkg/agent/controller/networkpolicy/cache.go index cf8f886e8de..ac1d1c3f408 100644 --- a/pkg/agent/controller/networkpolicy/cache.go +++ b/pkg/agent/controller/networkpolicy/cache.go @@ -182,6 +182,15 @@ func (r *CompletedRule) isIGMPEgressPolicyRule() bool { return false } +func (r *CompletedRule) isNodeNetworkPolicyRule() bool { + for _, m := range r.TargetMembers { + if m.Node != nil { + return true + } + } + return false +} + // ruleCache caches Antrea AddressGroups, AppliedToGroups and NetworkPolicies, // can construct complete 
rules that can be used by reconciler to enforce. type ruleCache struct { diff --git a/pkg/agent/controller/networkpolicy/networkpolicy_controller.go b/pkg/agent/controller/networkpolicy/networkpolicy_controller.go index 9ce8e0b4343..0c0a4764420 100644 --- a/pkg/agent/controller/networkpolicy/networkpolicy_controller.go +++ b/pkg/agent/controller/networkpolicy/networkpolicy_controller.go @@ -41,6 +41,7 @@ import ( "antrea.io/antrea/pkg/agent/interfacestore" "antrea.io/antrea/pkg/agent/openflow" proxytypes "antrea.io/antrea/pkg/agent/proxy/types" + "antrea.io/antrea/pkg/agent/route" "antrea.io/antrea/pkg/agent/types" "antrea.io/antrea/pkg/apis/controlplane/install" "antrea.io/antrea/pkg/apis/controlplane/v1beta2" @@ -90,7 +91,7 @@ type packetInAction func(*ofctrl.PacketIn) error // Controller is responsible for watching Antrea AddressGroups, AppliedToGroups, // and NetworkPolicies, feeding them to ruleCache, getting dirty rules from -// ruleCache, invoking reconciler to reconcile them. +// ruleCache, invoking reconcilers to reconcile them. // // a.Feed AddressGroups,AppliedToGroups // and NetworkPolicies @@ -101,8 +102,9 @@ type packetInAction func(*ofctrl.PacketIn) error type Controller struct { // antreaPolicyEnabled indicates whether Antrea NetworkPolicy and // ClusterNetworkPolicy are enabled. - antreaPolicyEnabled bool - l7NetworkPolicyEnabled bool + antreaPolicyEnabled bool + l7NetworkPolicyEnabled bool + nodeNetworkPolicyEnabled bool // antreaProxyEnabled indicates whether Antrea proxy is enabled. antreaProxyEnabled bool // statusManagerEnabled indicates whether a statusManager is configured. @@ -123,9 +125,12 @@ type Controller struct { queue workqueue.RateLimitingInterface // ruleCache maintains the desired state of NetworkPolicy rules. 
ruleCache *ruleCache - // reconciler provides interfaces to reconcile the desired state of + // podReconciler provides interfaces to reconcile the desired state of // NetworkPolicy rules with the actual state of Openflow entries. - reconciler Reconciler + podReconciler Reconciler + // nodeReconciler provides interfaces to reconcile the desired state of + // NetworkPolicy rules with the actual state of iptables entries. + nodeReconciler Reconciler // l7RuleReconciler provides interfaces to reconcile the desired state of // NetworkPolicy rules which have L7 rules with the actual state of Suricata rules. l7RuleReconciler L7RuleReconciler @@ -164,6 +169,7 @@ type Controller struct { // NewNetworkPolicyController returns a new *Controller. func NewNetworkPolicyController(antreaClientGetter agent.AntreaClientProvider, ofClient openflow.Client, + routeClient route.Interface, ifaceStore interfacestore.InterfaceStore, fs afero.Fs, nodeName string, @@ -173,6 +179,7 @@ func NewNetworkPolicyController(antreaClientGetter agent.AntreaClientProvider, groupIDUpdates <-chan string, antreaPolicyEnabled bool, l7NetworkPolicyEnabled bool, + nodeNetworkPolicyEnabled bool, antreaProxyEnabled bool, statusManagerEnabled bool, multicastEnabled bool, @@ -187,19 +194,20 @@ func NewNetworkPolicyController(antreaClientGetter agent.AntreaClientProvider, podNetworkWait *utilwait.Group) (*Controller, error) { idAllocator := newIDAllocator(asyncRuleDeleteInterval, dnsInterceptRuleID) c := &Controller{ - antreaClientProvider: antreaClientGetter, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "networkpolicyrule"), - ofClient: ofClient, - nodeType: nodeType, - antreaPolicyEnabled: antreaPolicyEnabled, - l7NetworkPolicyEnabled: l7NetworkPolicyEnabled, - antreaProxyEnabled: antreaProxyEnabled, - statusManagerEnabled: statusManagerEnabled, - multicastEnabled: multicastEnabled, - gwPort: gwPort, - tunPort: tunPort, - nodeConfig: 
nodeConfig, - podNetworkWait: podNetworkWait.Increment(), + antreaClientProvider: antreaClientGetter, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "networkpolicyrule"), + ofClient: ofClient, + nodeType: nodeType, + antreaPolicyEnabled: antreaPolicyEnabled, + l7NetworkPolicyEnabled: l7NetworkPolicyEnabled, + nodeNetworkPolicyEnabled: nodeNetworkPolicyEnabled, + antreaProxyEnabled: antreaProxyEnabled, + statusManagerEnabled: statusManagerEnabled, + multicastEnabled: multicastEnabled, + gwPort: gwPort, + tunPort: tunPort, + nodeConfig: nodeConfig, + podNetworkWait: podNetworkWait.Increment(), } if l7NetworkPolicyEnabled { @@ -217,8 +225,12 @@ func NewNetworkPolicyController(antreaClientGetter agent.AntreaClientProvider, c.ofClient.RegisterPacketInHandler(uint8(openflow.PacketInCategoryDNS), c.fqdnController) } } - c.reconciler = newReconciler(ofClient, ifaceStore, idAllocator, c.fqdnController, groupCounters, + c.podReconciler = newPodReconciler(ofClient, ifaceStore, idAllocator, c.fqdnController, groupCounters, v4Enabled, v6Enabled, antreaPolicyEnabled, multicastEnabled) + + if c.nodeNetworkPolicyEnabled { + c.nodeReconciler = newNodeReconciler(routeClient, v4Enabled, v6Enabled) + } c.ruleCache = newRuleCache(c.enqueueRule, podUpdateSubscriber, externalEntityUpdateSubscriber, groupIDUpdates, nodeType) serializer := protobuf.NewSerializer(scheme, scheme) @@ -289,7 +301,7 @@ func NewNetworkPolicyController(antreaClientGetter agent.AntreaClientProvider, klog.ErrorS(err, "Failed to store the NetworkPolicy to file", "policyName", policy.SourceRef.ToString()) } c.ruleCache.AddNetworkPolicy(policy) - klog.InfoS("NetworkPolicy applied to Pods on this Node", "policyName", policy.SourceRef.ToString()) + klog.InfoS("NetworkPolicy applied to Pods on this Node or the Node itself", "policyName", policy.SourceRef.ToString()) return nil }, UpdateFunc: func(obj runtime.Object) error { @@ -556,7 +568,7 @@ 
func (c *Controller) GetNetworkPolicyByRuleFlowID(ruleFlowID uint32) *v1beta2.Ne } func (c *Controller) GetRuleByFlowID(ruleFlowID uint32) *types.PolicyRule { - rule, exists, err := c.reconciler.GetRuleByFlowID(ruleFlowID) + rule, exists, err := c.podReconciler.GetRuleByFlowID(ruleFlowID) if err != nil { klog.Errorf("Error when getting network policy by rule flow ID: %v", err) return nil @@ -623,7 +635,7 @@ func (c *Controller) Run(stopCh <-chan struct{}) { } klog.Infof("Starting IDAllocator worker to maintain the async rule cache") - go c.reconciler.RunIDAllocatorWorker(stopCh) + go c.podReconciler.RunIDAllocatorWorker(stopCh) if c.statusManagerEnabled { go c.statusManager.Run(stopCh) @@ -733,9 +745,15 @@ func (c *Controller) syncRule(key string) error { rule, effective, realizable := c.ruleCache.GetCompletedRule(key) if !effective { klog.V(2).InfoS("Rule was not effective, removing it", "ruleID", key) - if err := c.reconciler.Forget(key); err != nil { + // Uncertain whether this rule applies to Node or Pod, but it's safe to delete it redundantly. + if err := c.podReconciler.Forget(key); err != nil { return err } + if c.nodeNetworkPolicyEnabled { + if err := c.nodeReconciler.Forget(key); err != nil { + return err + } + } if c.statusManagerEnabled { // We don't know whether this is a rule owned by Antrea Policy, but // harmless to delete it. @@ -758,6 +776,11 @@ func (c *Controller) syncRule(key string) error { return nil } + var isNodeNetworkPolicy bool + if c.nodeNetworkPolicyEnabled { + isNodeNetworkPolicy = rule.isNodeNetworkPolicyRule() + } + if c.l7NetworkPolicyEnabled && len(rule.L7Protocols) != 0 { // Allocate VLAN ID for the L7 rule. vlanID := c.l7VlanIDAllocator.allocate(key) @@ -768,12 +791,17 @@ func (c *Controller) syncRule(key string) error { } } - err := c.reconciler.Reconcile(rule) - if c.fqdnController != nil { - // No matter whether the rule reconciliation succeeds or not, fqdnController - // needs to be notified of the status. 
- klog.V(2).InfoS("Rule realization was done", "ruleID", key) - c.fqdnController.notifyRuleUpdate(key, err) + var err error + if isNodeNetworkPolicy { + err = c.nodeReconciler.Reconcile(rule) + } else { + err = c.podReconciler.Reconcile(rule) + if c.fqdnController != nil { + // No matter whether the rule reconciliation succeeds or not, fqdnController + // needs to be notified of the status. + klog.V(2).InfoS("Rule realization was done", "ruleID", key) + c.fqdnController.notifyRuleUpdate(key, err) + } } if err != nil { return err @@ -793,7 +821,7 @@ func (c *Controller) syncRules(keys []string) error { klog.V(4).Infof("Finished syncing all rules before bookmark event (%v)", time.Since(startTime)) }() - var allRules []*CompletedRule + var allPodRules, allNodeRules []*CompletedRule for _, key := range keys { rule, effective, realizable := c.ruleCache.GetCompletedRule(key) // It's normal that a rule is not effective on this Node but abnormal that it is not realizable after watchers @@ -803,7 +831,11 @@ func (c *Controller) syncRules(keys []string) error { } else if !realizable { klog.Errorf("Rule %s is effective but not realizable", key) } else { - if c.l7NetworkPolicyEnabled && len(rule.L7Protocols) != 0 { + var isNodeNetworkPolicy bool + if c.nodeNetworkPolicyEnabled { + isNodeNetworkPolicy = rule.isNodeNetworkPolicyRule() + } + if c.l7NetworkPolicyEnabled && len(rule.L7Protocols) != 0 && !isNodeNetworkPolicy { // Allocate VLAN ID for the L7 rule. 
vlanID := c.l7VlanIDAllocator.allocate(key) rule.L7RuleVlanID = &vlanID @@ -812,14 +844,28 @@ func (c *Controller) syncRules(keys []string) error { return err } } - allRules = append(allRules, rule) + if isNodeNetworkPolicy { + allNodeRules = append(allNodeRules, rule) + } else { + allPodRules = append(allPodRules, rule) + } + } + } + if c.nodeNetworkPolicyEnabled { + if err := c.nodeReconciler.BatchReconcile(allNodeRules); err != nil { + return err } } - if err := c.reconciler.BatchReconcile(allRules); err != nil { + if err := c.podReconciler.BatchReconcile(allPodRules); err != nil { return err } if c.statusManagerEnabled { - for _, rule := range allRules { + for _, rule := range allPodRules { + if v1beta2.IsSourceAntreaNativePolicy(rule.SourceRef) { + c.statusManager.SetRuleRealization(rule.ID, rule.PolicyUID) + } + } + for _, rule := range allNodeRules { if v1beta2.IsSourceAntreaNativePolicy(rule.SourceRef) { c.statusManager.SetRuleRealization(rule.ID, rule.PolicyUID) } diff --git a/pkg/agent/controller/networkpolicy/networkpolicy_controller_test.go b/pkg/agent/controller/networkpolicy/networkpolicy_controller_test.go index 58df524a48c..82012854bee 100644 --- a/pkg/agent/controller/networkpolicy/networkpolicy_controller_test.go +++ b/pkg/agent/controller/networkpolicy/networkpolicy_controller_test.go @@ -78,6 +78,7 @@ func newTestController() (*Controller, *fake.Clientset, *mockReconciler) { groupCounters := []proxytypes.GroupCounter{proxytypes.NewGroupCounter(groupIDAllocator, ch2)} fs := afero.NewMemMapFs() controller, _ := NewNetworkPolicyController(&antreaClientGetter{clientset}, + nil, nil, nil, fs, @@ -88,6 +89,7 @@ func newTestController() (*Controller, *fake.Clientset, *mockReconciler) { ch2, true, true, + false, true, true, false, @@ -102,7 +104,7 @@ func newTestController() (*Controller, *fake.Clientset, *mockReconciler) { &config.NodeConfig{}, wait.NewGroup()) reconciler := newMockReconciler() - controller.reconciler = reconciler + 
controller.podReconciler = reconciler controller.auditLogger = nil return controller, clientset, reconciler } diff --git a/pkg/agent/controller/networkpolicy/node_reconciler_linux.go b/pkg/agent/controller/networkpolicy/node_reconciler_linux.go new file mode 100644 index 00000000000..c99ea6b67cb --- /dev/null +++ b/pkg/agent/controller/networkpolicy/node_reconciler_linux.go @@ -0,0 +1,709 @@ +//go:build linux +// +build linux + +// Copyright 2024 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package networkpolicy + +import ( + "fmt" + "net" + "sort" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" + utilnet "k8s.io/utils/net" + + "antrea.io/antrea/pkg/agent/config" + "antrea.io/antrea/pkg/agent/route" + "antrea.io/antrea/pkg/agent/types" + "antrea.io/antrea/pkg/agent/util/iptables" + "antrea.io/antrea/pkg/apis/controlplane/v1beta2" + secv1beta1 "antrea.io/antrea/pkg/apis/crd/v1beta1" + "antrea.io/antrea/pkg/util/ip" +) + +const ( + prefix = "ANTREA-POL" +) + +/* +NodeNetworkPolicy datapath implementation using iptables/ip6tables involves four components: +1. Core iptables rule: + - Added to ANTREA-POL-INGRESS-RULES (ingress) or ANTREA-POL-EGRESS-RULES (egress). + - Matches an ipset created for the NodeNetworkPolicy rule as source (ingress) or destination (egress) when there are + multiple IP addresses; if there is only one address, matches the address directly. 
+ - Targets an action (the rule with single service) or a service chain created for the NodeNetworkPolicy rule (the rule
+ with multiple services).
+2. Service iptables chain:
+ - Created for the NodeNetworkPolicy rule to integrate service iptables rules if a rule has multiple services.
+3. Service iptables rules:
+ - Added to the service chain created for the NodeNetworkPolicy rule.
+ - Constructed from the services of the NodeNetworkPolicy rule.
+4. From/To ipset:
+ - Created for the NodeNetworkPolicy rule, containing all source IP addresses (ingress) or destination IP addresses (egress).
+
+Assuming four ingress NodeNetworkPolicy rules with IDs 1111, 2222, 3333 and 4444 prioritized in descending order.
+Core iptables rules are organized by priority in ANTREA-POL-INGRESS-RULES like the following.
+
+If the rule has multiple source IP addresses to match, then an ipset will be created for it. The name of the ipset consists
+of prefix "ANTREA-POL", rule ID and IP protocol version.
+
+If the rule has multiple services, an iptables chain and related rules will be created for it. The name of the chain consists
+of prefix "ANTREA-POL" and rule ID.
+
+```
+:ANTREA-POL-INGRESS-RULES
+-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-1111-4 src -j ANTREA-POL-1111 -m comment --comment "Antrea: for rule 1111, policy AntreaClusterNetworkPolicy:name1"
+-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-2222-4 src -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule 2222, policy AntreaClusterNetworkPolicy:name2"
+-A ANTREA-POL-INGRESS-RULES -s 3.3.3.3/32 -j ANTREA-POL-3333 -m comment --comment "Antrea: for rule 3333, policy AntreaClusterNetworkPolicy:name3"
+-A ANTREA-POL-INGRESS-RULES -s 4.4.4.4/32 -p tcp --dport 80 -j ACCEPT -m comment --comment "Antrea: for rule 4444, policy AntreaClusterNetworkPolicy:name4"
+```
+
+For the first rule, it has multiple services and multiple source IP addresses to match, so there will be service iptables chain
+and service iptables rules and ipset created for it.
+
+The iptables chain is like the following:
+
+```
+:ANTREA-POL-1111
+-A ANTREA-POL-1111 -j ACCEPT -p tcp --dport 80
+-A ANTREA-POL-1111 -j ACCEPT -p tcp --dport 443
+```
+
+The ipset is like the following:
+
+```
+Name: ANTREA-POL-1111-4
+Type: hash:net
+Revision: 6
+Header: family inet hashsize 1024 maxelem 65536
+Size in memory: 472
+References: 1
+Number of entries: 2
+Members:
+1.1.1.1
+1.1.1.2
+```
+
+For the second rule, it has only one service, so there will be no service iptables chain and service iptables rules created
+for it. The core rule will match the service and target the action directly. The rule has multiple source IP addresses to
+match, so there will be an ipset `ANTREA-POL-2222-4` created for it.
+
+For the third rule, it has multiple services to match, so there will be service iptables chain and service iptables rules
+created for it. The rule has only one source IP address to match, so there will be no ipset created for it and just match
+the source IP address directly.
+ +For the fourth rule, it has only one service and one source IP address to match, so there will be no service iptables chain +and service iptables rules created for it. The core rule will match the service and source IP address and target the action +directly. +*/ + +// coreIPTRule is a struct to cache the core iptables rules to guarantee the order of iptables rules. +type coreIPTRule struct { + ruleID string + priority *types.Priority + ruleStr string +} + +// coreIPTChain caches the sorted iptables rules and for a chain. +type coreIPTChain struct { + rules []*coreIPTRule + sync.Mutex +} + +func newIPTChain() *coreIPTChain { + return &coreIPTChain{} +} + +// nodePolicyLastRealized is the struct cached by nodeReconciler. It's used to track the actual state of iptables rules +// and chains we have enforced, so that we can know how to reconcile a rule when it's updated/removed. +type nodePolicyLastRealized struct { + // ipsets tracks the last realized ipset names used in core iptables rules. It cannot coexist with ipNets. + ipsets map[iptables.Protocol]string + // ipNets tracks the last realized ipNet used in core iptables rules. It cannot coexist with ipsets. + ipNets map[iptables.Protocol]string + // serviceIPTChain tracks the last realized service iptables chain if multipleServices is true. + serviceIPTChain string + // coreIPTChain tracks the last realized iptables chain where the core iptables rule is installed. + coreIPTChain string +} + +func newNodePolicyLastRealized() *nodePolicyLastRealized { + return &nodePolicyLastRealized{ + ipsets: make(map[iptables.Protocol]string), + ipNets: make(map[iptables.Protocol]string), + } +} + +type nodeReconciler struct { + ipProtocols []iptables.Protocol + routeClient route.Interface + cachedCoreIPTChains map[string]*coreIPTChain + // lastRealizeds caches the last realized rules. It's a mapping from ruleID to *nodePolicyLastRealized. 
+ lastRealizeds sync.Map +} + +func newNodeReconciler(routeClient route.Interface, ipv4Enabled, ipv6Enabled bool) *nodeReconciler { + var ipProtocols []iptables.Protocol + cachedCoreIPTChains := make(map[string]*coreIPTChain) + if ipv4Enabled { + ipProtocols = append(ipProtocols, iptables.ProtocolIPv4) + cachedCoreIPTChains[genCacheCategory(config.NodeNetworkPolicyIngressRulesChain, false)] = newIPTChain() + cachedCoreIPTChains[genCacheCategory(config.NodeNetworkPolicyEgressRulesChain, false)] = newIPTChain() + } + if ipv6Enabled { + ipProtocols = append(ipProtocols, iptables.ProtocolIPv6) + cachedCoreIPTChains[genCacheCategory(config.NodeNetworkPolicyIngressRulesChain, true)] = newIPTChain() + cachedCoreIPTChains[genCacheCategory(config.NodeNetworkPolicyEgressRulesChain, true)] = newIPTChain() + } + return &nodeReconciler{ + ipProtocols: ipProtocols, + routeClient: routeClient, + cachedCoreIPTChains: cachedCoreIPTChains, + } +} + +// Reconcile checks whether the provided rule have been enforced or not, and invoke the add or update method accordingly. 
+func (r *nodeReconciler) Reconcile(rule *CompletedRule) error { + klog.InfoS("Reconciling Node NetworkPolicy rule", "rule", rule.ID, "policy", rule.SourceRef.ToString()) + + value, exists := r.lastRealizeds.Load(rule.ID) + var err error + if !exists { + err = r.add(rule) + } else { + err = r.update(value.(*nodePolicyLastRealized), rule) + } + return err +} + +func (r *nodeReconciler) RunIDAllocatorWorker(stopCh <-chan struct{}) { + +} + +func (r *nodeReconciler) BatchReconcile(rules []*CompletedRule) error { + var rulesToInstall []*CompletedRule + for _, rule := range rules { + if _, exists := r.lastRealizeds.Load(rule.ID); exists { + klog.ErrorS(nil, "Rule should not have been realized yet: initialization phase", "rule", rule.ID) + } else { + rulesToInstall = append(rulesToInstall, rule) + } + } + if err := r.batchAdd(rulesToInstall); err != nil { + return err + } + return nil +} + +func (r *nodeReconciler) batchAdd(rules []*CompletedRule) error { + lastRealizeds := make(map[string]*nodePolicyLastRealized) + serviceIPTChains := make(map[iptables.Protocol][]string) + serviceIPTRules := make(map[iptables.Protocol][][]string) + ingressCoreIPTRules := make(map[iptables.Protocol][]*coreIPTRule) + egressCoreIPTRules := make(map[iptables.Protocol][]*coreIPTRule) + + for _, rule := range rules { + iptRules, lastRealized := r.computeIPTRules(rule) + ruleID := rule.ID + for ipProtocol, iptRule := range iptRules { + // Sync all ipsets. + if iptRule.IPSet != "" { + if err := r.routeClient.AddOrUpdateNodeNetworkPolicyIPSet(iptRule.IPSet, iptRule.IPSetMembers, iptRule.IsIPv6); err != nil { + return err + } + } + // Collect all service iptables rules and chains. + if iptRule.ServiceIPTChain != "" { + serviceIPTChains[ipProtocol] = append(serviceIPTChains[ipProtocol], iptRule.ServiceIPTChain) + serviceIPTRules[ipProtocol] = append(serviceIPTRules[ipProtocol], iptRule.ServiceIPTRules) + } + + // Collect all core iptables rules. 
+ coreIPTRule := &coreIPTRule{ruleID, iptRule.Priority, iptRule.CoreIPTRule} + if rule.Direction == v1beta2.DirectionIn { + ingressCoreIPTRules[ipProtocol] = append(ingressCoreIPTRules[ipProtocol], coreIPTRule) + } else { + egressCoreIPTRules[ipProtocol] = append(egressCoreIPTRules[ipProtocol], coreIPTRule) + } + } + lastRealizeds[ruleID] = lastRealized + } + for _, ipProtocol := range r.ipProtocols { + isIPv6 := iptables.IsIPv6Protocol(ipProtocol) + if err := r.routeClient.AddOrUpdateNodeNetworkPolicyIPTables(serviceIPTChains[ipProtocol], serviceIPTRules[ipProtocol], isIPv6); err != nil { + return err + } + if err := r.addOrUpdateCoreIPTRules(ingressCoreIPTRules[ipProtocol], config.NodeNetworkPolicyIngressRulesChain, isIPv6, false); err != nil { + return err + } + if err := r.addOrUpdateCoreIPTRules(egressCoreIPTRules[ipProtocol], config.NodeNetworkPolicyEgressRulesChain, isIPv6, false); err != nil { + return err + } + } + + for ruleID, lastRealized := range lastRealizeds { + r.lastRealizeds.Store(ruleID, lastRealized) + } + return nil +} + +func (r *nodeReconciler) Forget(ruleID string) error { + klog.InfoS("Forgetting rule", "rule", ruleID) + + value, exists := r.lastRealizeds.Load(ruleID) + if !exists { + return nil + } + + lastRealized := value.(*nodePolicyLastRealized) + coreIPTChain := lastRealized.coreIPTChain + + for _, ipProtocol := range r.ipProtocols { + isIPv6 := iptables.IsIPv6Protocol(ipProtocol) + if err := r.deleteCoreIPRule(ruleID, coreIPTChain, isIPv6); err != nil { + return err + } + if lastRealized.ipsets[ipProtocol] != "" { + if err := r.routeClient.DeleteNodeNetworkPolicyIPSet(lastRealized.ipsets[ipProtocol], isIPv6); err != nil { + return err + } + } + if lastRealized.serviceIPTChain != "" { + if err := r.routeClient.DeleteNodeNetworkPolicyIPTables([]string{lastRealized.serviceIPTChain}, isIPv6); err != nil { + return err + } + } + } + + r.lastRealizeds.Delete(ruleID) + return nil +} + +func (r *nodeReconciler) GetRuleByFlowID(ruleFlowID 
uint32) (*types.PolicyRule, bool, error) { + return nil, false, nil +} + +func (r *nodeReconciler) computeIPTRules(rule *CompletedRule) (map[iptables.Protocol]*types.NodePolicyRule, *nodePolicyLastRealized) { + ruleID := rule.ID + lastRealized := newNodePolicyLastRealized() + priority := genPriority(rule) + + var serviceIPTChain, serviceIPTRuleTarget, coreIPTRuleTarget string + var service *v1beta2.Service + if len(rule.Services) > 1 { + // If a rule has multiple services, create a chain to install iptables rules for these services, with the target + // of the services determined by the rule's action. The core iptables rule should target the chain. + serviceIPTChain = genServiceIPTRuleChain(ruleID) + serviceIPTRuleTarget = ruleActionToIPTTarget(rule.Action) + coreIPTRuleTarget = serviceIPTChain + lastRealized.serviceIPTChain = serviceIPTChain + } else { + // If a rule has no or single service, the target is determined by the rule's action, as there is no need to create + // a chain for a single-service iptables rule. + coreIPTRuleTarget = ruleActionToIPTTarget(rule.Action) + // If a rule has single service, the core iptables rule directly incorporates the service. + if len(rule.Services) == 1 { + service = &rule.Services[0] + } + } + coreIPTChain := getCoreIPTChain(rule) + coreIPTRuleComment := genCoreIPTRuleComment(ruleID, rule.SourceRef.ToString()) + lastRealized.coreIPTChain = coreIPTChain + + nodePolicyRules := make(map[iptables.Protocol]*types.NodePolicyRule) + for _, ipProtocol := range r.ipProtocols { + isIPv6 := iptables.IsIPv6Protocol(ipProtocol) + + var serviceIPTRules []string + if serviceIPTChain != "" { + serviceIPTRules = buildServiceIPTRules(ipProtocol, rule.Services, serviceIPTChain, serviceIPTRuleTarget) + } + + ipNets := getIPNetsFromRule(rule, isIPv6) + var ipNet string + var ipset string + if ipNets.Len() > 1 { + // If a rule matches multiple source or destination ipNets, create an ipset and use it in core iptables rule. 
+ ipset = genIPSetName(ruleID, isIPv6) + lastRealized.ipsets[ipProtocol] = ipset + } else if ipNets.Len() == 1 { + // If a rule matches single source or destination, use it in core iptables rule directly. + ipNet, _ = ipNets.PopAny() + lastRealized.ipNets[ipProtocol] = ipNet + } + + coreIPTRule := buildCoreIPTRule(ipProtocol, + coreIPTChain, + ipset, + ipNet, + coreIPTRuleTarget, + coreIPTRuleComment, + service, + rule.Direction == v1beta2.DirectionIn) + + nodePolicyRules[ipProtocol] = &types.NodePolicyRule{ + IPSet: ipset, + IPSetMembers: ipNets, + IPNet: ipNet, + Priority: priority, + ServiceIPTChain: serviceIPTChain, + ServiceIPTRules: serviceIPTRules, + CoreIPTChain: coreIPTChain, + CoreIPTRule: coreIPTRule, + IsIPv6: isIPv6, + } + } + + return nodePolicyRules, lastRealized +} + +func (r *nodeReconciler) add(rule *CompletedRule) error { + klog.V(2).InfoS("Adding new rule", "rule", rule) + ruleID := rule.ID + iptRules, lastRealized := r.computeIPTRules(rule) + for _, iptRule := range iptRules { + if iptRule.IPSet != "" { + if err := r.routeClient.AddOrUpdateNodeNetworkPolicyIPSet(iptRule.IPSet, iptRule.IPSetMembers, iptRule.IsIPv6); err != nil { + return err + } + } + if iptRule.ServiceIPTChain != "" { + if err := r.routeClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{iptRule.ServiceIPTChain}, [][]string{iptRule.ServiceIPTRules}, iptRule.IsIPv6); err != nil { + return err + } + } + if err := r.addOrUpdateCoreIPTRules([]*coreIPTRule{{ruleID, iptRule.Priority, iptRule.CoreIPTRule}}, iptRule.CoreIPTChain, iptRule.IsIPv6, false); err != nil { + return err + } + } + r.lastRealizeds.Store(ruleID, lastRealized) + return nil +} + +func (r *nodeReconciler) update(lastRealized *nodePolicyLastRealized, newRule *CompletedRule) error { + klog.V(2).InfoS("Updating existing rule", "rule", newRule) + ruleID := newRule.ID + newIPTRules, newLastRealized := r.computeIPTRules(newRule) + + for _, ipProtocol := range r.ipProtocols { + iptRule := newIPTRules[ipProtocol] + + 
prevIPNet := lastRealized.ipNets[ipProtocol] + ipNet := newLastRealized.ipNets[ipProtocol] + prevIPSet := lastRealized.ipsets[ipProtocol] + ipset := newLastRealized.ipsets[ipProtocol] + + if ipset != "" { + if err := r.routeClient.AddOrUpdateNodeNetworkPolicyIPSet(iptRule.IPSet, iptRule.IPSetMembers, iptRule.IsIPv6); err != nil { + return err + } + } + if lastRealized.ipsets[ipProtocol] != "" && ipset == "" { + if err := r.routeClient.DeleteNodeNetworkPolicyIPSet(lastRealized.ipsets[ipProtocol], iptRule.IsIPv6); err != nil { + return err + } + } + if prevIPSet != ipset || prevIPNet != ipNet { + if err := r.addOrUpdateCoreIPTRules([]*coreIPTRule{{ruleID, iptRule.Priority, iptRule.CoreIPTRule}}, iptRule.CoreIPTChain, iptRule.IsIPv6, true); err != nil { + return err + } + } + } + + r.lastRealizeds.Store(ruleID, newLastRealized) + return nil +} + +func (r *nodeReconciler) addOrUpdateCoreIPTRules(iptRules []*coreIPTRule, iptChain string, isIPv6 bool, isUpdate bool) error { + if len(iptRules) == 0 { + return nil + } + + cachedCoreIPTChain := r.getCachedCoreIPTChain(iptChain, isIPv6) + cachedCoreIPTChain.Lock() + defer cachedCoreIPTChain.Unlock() + + cachedIPTRules := cachedCoreIPTChain.rules + if isUpdate { + // Build a map to store the mapping of rule ID to rule to add. + iptRulesToUpdate := make(map[string]*coreIPTRule) + for _, iptRule := range iptRules { + iptRulesToUpdate[iptRule.ruleID] = iptRule + } + // Iterate every existing rules. If an existing rule is in the iptRulesToUpdate map, replace it with the new rule. + for index, iptRule := range cachedIPTRules { + if _, exists := iptRulesToUpdate[iptRule.ruleID]; exists { + cachedIPTRules[index] = iptRulesToUpdate[iptRule.ruleID] + } + } + } else { + // If these are new rules, append the new rules then sort all rules. + cachedIPTRules = append(cachedIPTRules, iptRules...) 
+ sort.Slice(cachedIPTRules, func(i, j int) bool { + return !cachedIPTRules[i].priority.Less(*cachedIPTRules[j].priority) + }) + } + + // Get all the sorted iptables rules and synchronize them. + var iptRuleStrs []string + for _, r := range cachedIPTRules { + iptRuleStrs = append(iptRuleStrs, r.ruleStr) + } + if err := r.routeClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{iptChain}, [][]string{iptRuleStrs}, isIPv6); err != nil { + return err + } + + // cache the new iptables rules. + cachedCoreIPTChain.rules = cachedIPTRules + return nil +} + +func (r *nodeReconciler) deleteCoreIPRule(ruleID string, iptChain string, isIPv6 bool) error { + cachedCoreIPTChain := r.getCachedCoreIPTChain(iptChain, isIPv6) + cachedCoreIPTChain.Lock() + defer cachedCoreIPTChain.Unlock() + + // Get all cached iptables rules, then delete the rule with the given ruleID. + cachedIPTRules := cachedCoreIPTChain.rules + var indexToDelete int + var indexExists bool + for i := 0; i < len(cachedIPTRules); i++ { + if cachedIPTRules[i].ruleID == ruleID { + indexToDelete = i + indexExists = true + break + } + } + if !indexExists { + return nil + } + + cachedIPTRules = append(cachedIPTRules[:indexToDelete], cachedIPTRules[indexToDelete+1:]...) + + // Get all the sorted iptables rules and synchronize them. + var iptRuleStrs []string + for _, r := range cachedIPTRules { + iptRuleStrs = append(iptRuleStrs, r.ruleStr) + } + if err := r.routeClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{iptChain}, [][]string{iptRuleStrs}, isIPv6); err != nil { + return err + } + + // cache the new iptables rules. + cachedCoreIPTChain.rules = cachedIPTRules + return nil +} + +func (r *nodeReconciler) getCachedCoreIPTChain(iptChain string, isIPv6 bool) *coreIPTChain { + // There are 4 categories of cached core iptables rules: + // - For IPv4, iptables rules installed in chain ANTREA-INGRESS-RULES for ingress rules. + // - For IPv6, ip6tables rules installed in chain ANTREA-INGRESS-RULES for ingress rules. 
+ // - For IPv4, iptables rules installed in chain ANTREA-EGRESS-RULES for egress rules. + // - For IPv6, ip6tables rules installed in chain ANTREA-EGRESS-RULES for egress rules. + categoryKey := genCacheCategory(iptChain, isIPv6) + return r.cachedCoreIPTChains[categoryKey] +} + +func groupMembersToIPNets(groups v1beta2.GroupMemberSet, isIPv6 bool) sets.Set[string] { + ipNets := sets.New[string]() + suffix := "/32" + if isIPv6 { + suffix = "/128" + } + for _, member := range groups { + for _, ip := range member.IPs { + ipAddr := net.IP(ip) + if isIPv6 == utilnet.IsIPv6(ipAddr) { + ipNets.Insert(ipAddr.String() + suffix) + } + } + } + return ipNets +} + +func ipBlocksToIPNets(ipBlocks []v1beta2.IPBlock, isIPv6 bool) sets.Set[string] { + ipNets := sets.New[string]() + for _, b := range ipBlocks { + blockCIDR := ip.IPNetToNetIPNet(&b.CIDR) + if isIPv6 != utilnet.IsIPv6CIDR(blockCIDR) { + continue + } + exceptIPNets := make([]*net.IPNet, 0, len(b.Except)) + for i := range b.Except { + c := b.Except[i] + except := ip.IPNetToNetIPNet(&c) + exceptIPNets = append(exceptIPNets, except) + } + diffCIDRs, err := ip.DiffFromCIDRs(blockCIDR, exceptIPNets) + if err != nil { + klog.ErrorS(err, "Error when computing effective CIDRs by removing except IPNets from IPBlock") + continue + } + for _, d := range diffCIDRs { + ipNets.Insert(d.String()) + } + } + return ipNets +} + +func getIPNetsFromRule(rule *CompletedRule, isIPv6 bool) sets.Set[string] { + var set sets.Set[string] + if rule.Direction == v1beta2.DirectionIn { + set = groupMembersToIPNets(rule.FromAddresses, isIPv6) + set = set.Union(ipBlocksToIPNets(rule.From.IPBlocks, isIPv6)) + } else { + set = groupMembersToIPNets(rule.ToAddresses, isIPv6) + set = set.Union(ipBlocksToIPNets(rule.To.IPBlocks, isIPv6)) + } + if set.Has("0.0.0.0/0") || set.Has("::/0") { + return nil + } + return set +} + +func getCoreIPTChain(rule *CompletedRule) string { + if rule.Direction == v1beta2.DirectionIn { + return 
config.NodeNetworkPolicyIngressRulesChain + } + return config.NodeNetworkPolicyEgressRulesChain +} + +func buildCoreIPTRule(ipProtocol iptables.Protocol, + iptChain string, + ipset string, + ipNet string, + iptRuleTarget string, + iptRuleComment string, + service *v1beta2.Service, + isIngress bool) string { + builder := iptables.NewRuleBuilder(iptChain) + if isIngress { + if ipset != "" { + builder = builder.MatchIPSetSrc(ipset) + } else if ipNet != "" { + builder = builder.MatchCIDRSrc(ipNet) + } + } else { + if ipset != "" { + builder = builder.MatchIPSetDst(ipset) + } else if ipNet != "" { + builder = builder.MatchCIDRDst(ipNet) + } + } + if service != nil { + transProtocol := getServiceTransProtocol(service.Protocol) + switch transProtocol { + case "tcp": + fallthrough + case "udp": + fallthrough + case "sctp": + builder = builder.MatchTransProtocol(transProtocol). + MatchSrcPort(service.SrcPort, service.SrcEndPort). + MatchDstPort(service.Port, service.EndPort) + case "icmp": + builder = builder.MatchICMP(service.ICMPType, service.ICMPCode, ipProtocol) + } + } + return builder.SetTarget(iptRuleTarget). + SetComment(iptRuleComment). + Done(). + GetRule() +} + +func buildServiceIPTRules(ipProtocol iptables.Protocol, services []v1beta2.Service, iptChain string, iptRuleTarget string) []string { + var rules []string + builder := iptables.NewRuleBuilder(iptChain) + for _, svc := range services { + copiedBuilder := builder.CopyBuilder() + transProtocol := getServiceTransProtocol(svc.Protocol) + switch transProtocol { + case "tcp": + fallthrough + case "udp": + fallthrough + case "sctp": + copiedBuilder = copiedBuilder.MatchTransProtocol(transProtocol). + MatchSrcPort(svc.SrcPort, svc.SrcEndPort). + MatchDstPort(svc.Port, svc.EndPort) + case "icmp": + copiedBuilder = copiedBuilder.MatchICMP(svc.ICMPType, svc.ICMPCode, ipProtocol) + } + rules = append(rules, copiedBuilder.SetTarget(iptRuleTarget). + Done(). 
+ GetRule()) + } + return rules +} + +func genServiceIPTRuleChain(ruleID string) string { + return fmt.Sprintf("%s-%s", prefix, strings.ToUpper(ruleID)) +} + +func genIPSetName(ruleID string, isIPv6 bool) string { + suffix := "4" + if isIPv6 { + suffix = "6" + } + return fmt.Sprintf("%s-%s-%s", prefix, strings.ToUpper(ruleID), suffix) +} + +func ruleActionToIPTTarget(ruleAction *secv1beta1.RuleAction) string { + var target string + switch *ruleAction { + case secv1beta1.RuleActionDrop: + target = iptables.DropTarget + case secv1beta1.RuleActionReject: + target = iptables.RejectTarget + case secv1beta1.RuleActionAllow: + target = iptables.AcceptTarget + default: + klog.InfoS("Unknown rule action", "action", ruleAction) + } + return target +} + +func getServiceTransProtocol(protocol *v1beta2.Protocol) string { + if protocol == nil { + return "tcp" + } + return strings.ToLower(string(*protocol)) +} + +func genPriority(rule *CompletedRule) *types.Priority { + if rule == nil { + return nil + } + return &types.Priority{ + TierPriority: *rule.TierPriority, + PolicyPriority: *rule.PolicyPriority, + RulePriority: rule.Priority, + } +} + +func genCoreIPTRuleComment(ruleID, policyName string) string { + return fmt.Sprintf("Antrea: for rule %s, policy %s", ruleID, policyName) +} + +func genCacheCategory(chain string, isIPv6 bool) string { + if isIPv6 { + return fmt.Sprintf("%s_6", chain) + } + return fmt.Sprintf("%s_4", chain) +} diff --git a/pkg/agent/controller/networkpolicy/node_reconciler_linux_test.go b/pkg/agent/controller/networkpolicy/node_reconciler_linux_test.go new file mode 100644 index 00000000000..703a41dfacf --- /dev/null +++ b/pkg/agent/controller/networkpolicy/node_reconciler_linux_test.go @@ -0,0 +1,1053 @@ +//go:build linux +// +build linux + +// Copyright 2024 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package networkpolicy + +import ( + "net" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" + "k8s.io/apimachinery/pkg/util/sets" + + routetest "antrea.io/antrea/pkg/agent/route/testing" + "antrea.io/antrea/pkg/apis/controlplane/v1beta2" + secv1beta1 "antrea.io/antrea/pkg/apis/crd/v1beta1" +) + +var ( + ruleActionAllow = secv1beta1.RuleActionAllow + + ipv4Net1 = newCIDR("192.168.1.0/24") + ipv6Net1 = newCIDR("fec0::192:168:1:0/124") + ipv4Net2 = newCIDR("192.168.1.128/25") + ipv6Net2 = newCIDR("fec0::192:168:1:1/125") + ipBlocks = v1beta2.NetworkPolicyPeer{ + IPBlocks: []v1beta2.IPBlock{ + { + CIDR: v1beta2.IPNet{IP: v1beta2.IPAddress(ipv4Net1.IP), PrefixLength: 24}, + Except: []v1beta2.IPNet{ + {IP: v1beta2.IPAddress(ipv4Net2.IP), PrefixLength: 25}, + }, + }, + { + CIDR: v1beta2.IPNet{IP: v1beta2.IPAddress(ipv6Net1.IP), PrefixLength: 124}, + Except: []v1beta2.IPNet{ + {IP: v1beta2.IPAddress(ipv6Net2.IP), PrefixLength: 125}, + }, + }, + }, + } + ipBlocksToMatchAny = v1beta2.NetworkPolicyPeer{ + IPBlocks: []v1beta2.IPBlock{ + { + CIDR: v1beta2.IPNet{IP: v1beta2.IPAddress(net.IPv4zero), PrefixLength: 0}, + }, + { + CIDR: v1beta2.IPNet{IP: v1beta2.IPAddress(net.IPv4zero), PrefixLength: 0}, + }, + }, + } + + policyPriority1 = float64(1) + tierPriority1 = int32(1) + tierPriority2 = int32(2) + + ingressRuleID1 = "ingressRule1" + ingressRuleID2 = "ingressRule2" + ingressRuleID3 = "ingressRule3" + egressRuleID1 = "egressRule1" + egressRuleID2 = "egressRule2" + ingressRule1 = &CompletedRule{ + rule: &rule{ + ID: 
ingressRuleID1, + Name: "rule-01", + PolicyName: "ingress-policy", + From: ipBlocks, + Direction: v1beta2.DirectionIn, + Services: []v1beta2.Service{serviceTCP80, serviceTCP443}, + Action: &ruleActionAllow, + Priority: 1, + PolicyPriority: &policyPriority1, + TierPriority: &tierPriority1, + SourceRef: &cnp1, + }, + FromAddresses: dualAddressGroup1, + ToAddresses: nil, + } + ingressRule2 = &CompletedRule{ + rule: &rule{ + ID: ingressRuleID2, + Name: "rule-02", + PolicyName: "ingress-policy", + Direction: v1beta2.DirectionIn, + Services: []v1beta2.Service{serviceTCP443}, + Action: &ruleActionAllow, + Priority: 2, + PolicyPriority: &policyPriority1, + TierPriority: &tierPriority2, + SourceRef: &cnp1, + }, + FromAddresses: dualAddressGroup1, + ToAddresses: nil, + } + ingressRule3 = &CompletedRule{ + rule: &rule{ + ID: ingressRuleID3, + Name: "rule-03", + PolicyName: "ingress-policy", + From: ipBlocksToMatchAny, + Direction: v1beta2.DirectionIn, + Services: []v1beta2.Service{serviceTCP8080}, + Action: &ruleActionAllow, + Priority: 3, + PolicyPriority: &policyPriority1, + TierPriority: &tierPriority2, + SourceRef: &cnp1, + }, + FromAddresses: nil, + ToAddresses: nil, + } + updatedIngressRule3WithOneFromAddress = &CompletedRule{ + rule: &rule{ + ID: ingressRuleID3, + Name: "rule-03", + PolicyName: "ingress-policy", + Direction: v1beta2.DirectionIn, + Services: []v1beta2.Service{serviceTCP8080}, + Action: &ruleActionAllow, + Priority: 3, + PolicyPriority: &policyPriority1, + TierPriority: &tierPriority2, + SourceRef: &cnp1, + }, + FromAddresses: addressGroup1, + ToAddresses: nil, + } + updatedIngressRule3WithAnotherFromAddress = &CompletedRule{ + rule: &rule{ + ID: ingressRuleID3, + Name: "rule-03", + PolicyName: "ingress-policy", + Direction: v1beta2.DirectionIn, + Services: []v1beta2.Service{serviceTCP8080}, + Action: &ruleActionAllow, + Priority: 3, + PolicyPriority: &policyPriority1, + TierPriority: &tierPriority2, + SourceRef: &cnp1, + }, + FromAddresses: 
addressGroup2, + ToAddresses: nil, + } + updatedIngressRule3WithMultipleFromAddresses = &CompletedRule{ + rule: &rule{ + ID: ingressRuleID3, + Name: "rule-03", + PolicyName: "ingress-policy", + Direction: v1beta2.DirectionIn, + Services: []v1beta2.Service{serviceTCP8080}, + Action: &ruleActionAllow, + Priority: 3, + PolicyPriority: &policyPriority1, + TierPriority: &tierPriority2, + SourceRef: &cnp1, + }, + FromAddresses: addressGroup2.Union(addressGroup1), + ToAddresses: nil, + } + updatedIngressRule3WithOtherMultipleFromAddresses = &CompletedRule{ + rule: &rule{ + ID: ingressRuleID3, + Name: "rule-03", + PolicyName: "ingress-policy", + Direction: v1beta2.DirectionIn, + Services: []v1beta2.Service{serviceTCP8080}, + Action: &ruleActionAllow, + Priority: 3, + PolicyPriority: &policyPriority1, + TierPriority: &tierPriority2, + SourceRef: &cnp1, + }, + FromAddresses: addressGroup2.Union(v1beta2.NewGroupMemberSet(newAddressGroupMember("1.1.1.3"))), + ToAddresses: nil, + } + egressRule1 = &CompletedRule{ + rule: &rule{ + ID: egressRuleID1, + Name: "rule-01", + PolicyName: "egress-policy", + Direction: v1beta2.DirectionOut, + Services: []v1beta2.Service{serviceTCP80, serviceTCP443}, + Action: &ruleActionAllow, + Priority: 1, + PolicyPriority: &policyPriority1, + TierPriority: &tierPriority1, + SourceRef: &cnp1, + }, + ToAddresses: dualAddressGroup1, + FromAddresses: nil, + } + egressRule2 = &CompletedRule{ + rule: &rule{ + ID: egressRuleID2, + Name: "rule-02", + PolicyName: "egress-policy", + Direction: v1beta2.DirectionOut, + Services: []v1beta2.Service{serviceTCP443}, + Action: &ruleActionAllow, + Priority: 2, + PolicyPriority: &policyPriority1, + TierPriority: &tierPriority2, + SourceRef: &cnp1, + }, + ToAddresses: dualAddressGroup1, + FromAddresses: nil, + } +) + +func newTestNodeReconciler(mockRouteClient *routetest.MockInterface, ipv4Enabled, ipv6Enabled bool) *nodeReconciler { + return newNodeReconciler(mockRouteClient, ipv4Enabled, ipv6Enabled) +} + +func 
TestNodeReconcilerReconcileAndForget(t *testing.T) { + tests := []struct { + name string + rulesToAdd []*CompletedRule + rulesToForget []string + ipv4Enabled bool + ipv6Enabled bool + expectedCalls func(mockRouteClient *routetest.MockInterfaceMockRecorder) + }{ + { + name: "IPv4, add an ingress rule, then forget it", + ipv4Enabled: true, + ipv6Enabled: false, + expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) { + serviceRules := [][]string{ + { + "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 80 -j ACCEPT", + "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 443 -j ACCEPT", + }, + } + coreRules := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", sets.New[string]("1.1.1.1/32", "192.168.1.0/25"), false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, serviceRules, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules, false).Times(1) + mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", false) + mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, [][]string{nil}, false).Times(1) + }, + rulesToAdd: []*CompletedRule{ + ingressRule1, + }, + rulesToForget: []string{ + ingressRuleID1, + }, + }, + { + name: "IPv6, add an egress rule, then forget it", + ipv4Enabled: false, + ipv6Enabled: true, + expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) { + serviceRules := [][]string{ + { + "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 80 -j ACCEPT", + "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 443 -j ACCEPT", + }, + } 
+ coreRules := [][]string{ + { + `-A ANTREA-POL-EGRESS-RULES -d 2002:1a23:fb44::1/128 -j ANTREA-POL-EGRESSRULE1 -m comment --comment "Antrea: for rule egressRule1, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-EGRESSRULE1"}, serviceRules, true).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-EGRESS-RULES"}, coreRules, true).Times(1) + mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-EGRESSRULE1"}, true).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-EGRESS-RULES"}, [][]string{nil}, true).Times(1) + }, + rulesToAdd: []*CompletedRule{ + egressRule1, + }, + rulesToForget: []string{ + egressRuleID1, + }, + }, + { + name: "Dualstack, add an ingress rule, then forget it", + ipv4Enabled: true, + ipv6Enabled: true, + expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) { + serviceRules := [][]string{ + { + "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 80 -j ACCEPT", + "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 443 -j ACCEPT", + }, + } + coreRulesIPv4 := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + coreRulesIPv6 := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-6 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", sets.New[string]("1.1.1.1/32", "192.168.1.0/25"), false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, serviceRules, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, 
coreRulesIPv4, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-6", sets.New[string]("2002:1a23:fb44::1/128", "fec0::192:168:1:8/125"), true).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, serviceRules, true).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRulesIPv6, true).Times(1) + mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", false) + mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, [][]string{nil}, false).Times(1) + mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-6", true) + mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, true).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, [][]string{nil}, true).Times(1) + }, + rulesToAdd: []*CompletedRule{ + ingressRule1, + }, + rulesToForget: []string{ + ingressRuleID1, + }, + }, + { + name: "IPv4, add multiple ingress rules whose priorities are in ascending order, then forget some", + ipv4Enabled: true, + ipv6Enabled: false, + expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) { + serviceRules1 := [][]string{ + { + "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 80 -j ACCEPT", + "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 443 -j ACCEPT", + }, + } + coreRules1 := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + coreRules2 := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule 
ingressRule1, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + coreRules3 := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-INGRESS-RULES -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule3, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + coreRulesDeleted3 := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + coreRulesDelete2 := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", sets.New[string]("1.1.1.1/32", "192.168.1.0/25"), false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, serviceRules1, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules1, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules2, false).Times(1) + 
mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules3, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRulesDeleted3, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRulesDelete2, false).Times(1) + }, + rulesToAdd: []*CompletedRule{ + ingressRule1, + ingressRule2, + ingressRule3, + }, + rulesToForget: []string{ + ingressRuleID3, + ingressRuleID2, + }, + }, + { + name: "IPv4, add multiple ingress rules whose priorities are in descending order, then forget some", + ipv4Enabled: true, + ipv6Enabled: false, + expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) { + coreRules3 := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule3, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + coreRules2 := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-INGRESS-RULES -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule3, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + serviceRules1 := [][]string{ + { + "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 80 -j ACCEPT", + "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 443 -j ACCEPT", + }, + } + coreRules1 := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-INGRESS-RULES -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for 
rule ingressRule3, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + coreRulesDelete3 := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + coreRulesDelete1 := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules3, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules2, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", sets.New[string]("1.1.1.1/32", "192.168.1.0/25"), false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, serviceRules1, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules1, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRulesDelete3, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRulesDelete1, false).Times(1) + mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, false).Times(1) + mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", false).Times(1) + }, + rulesToAdd: []*CompletedRule{ + ingressRule3, + ingressRule2, + ingressRule1, + }, + rulesToForget: []string{ + ingressRuleID3, + ingressRuleID1, + }, + }, + { + name: "IPv4, add multiple ingress rules 
whose priorities are in random order, then forget some", + ipv4Enabled: true, + ipv6Enabled: false, + expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) { + coreRules2 := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + serviceRules1 := [][]string{ + { + "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 80 -j ACCEPT", + "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 443 -j ACCEPT", + }, + } + coreRules1 := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + coreRules3 := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-INGRESS-RULES -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule3, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + coreRulesDelete2 := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-INGRESS-RULES -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule3, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + coreRulesDelete1 := [][]string{ + { + `-A 
ANTREA-POL-INGRESS-RULES -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule3, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules2, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", sets.New[string]("1.1.1.1/32", "192.168.1.0/25"), false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, serviceRules1, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules1, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules3, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRulesDelete2, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRulesDelete1, false).Times(1) + mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, false).Times(1) + mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", false).Times(1) + }, + rulesToAdd: []*CompletedRule{ + ingressRule2, + ingressRule1, + ingressRule3, + }, + rulesToForget: []string{ + ingressRuleID2, + ingressRuleID1, + }, + }, + { + name: "IPv4, add an ingress rule, then update it several times, forget it finally", + ipv4Enabled: true, + ipv6Enabled: false, + expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) { + coreRules1 := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule3, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + coreRules2 := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule3, policy 
AntreaClusterNetworkPolicy:name1"`, + }, + } + coreRules3 := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.2/32 -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule3, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + coreRules4 := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE3-4 src -p tcp --dport 8080 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule3, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + coreRules6 := coreRules2 + coreRules7 := coreRules1 + coreRules8 := coreRules4 + coreRules9 := coreRules1 + + s1 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules1, false).Times(1) + s2 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules2, false).Times(1) + s3 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules3, false).Times(1) + s41 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE3-4", sets.New[string]("1.1.1.1/32", "1.1.1.2/32"), false).Times(1) + s42 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules4, false).Times(1) + s5 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE3-4", sets.New[string]("1.1.1.2/32", "1.1.1.3/32"), false).Times(1) + s61 := mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE3-4", false).Times(1) + s62 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules6, false).Times(1) + s7 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules7, false).Times(1) + s81 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE3-4", sets.New[string]("1.1.1.1/32", "1.1.1.2/32"), false).Times(1) + s82 := 
mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules8, false).Times(1) + s91 := mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE3-4", false).Times(1) + s92 := mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, coreRules9, false).Times(1) + s2.After(s1) + s3.After(s2) + s41.After(s3) + s42.After(s3) + s5.After(s42) + s5.After(s42) + s61.After(s5) + s62.After(s5) + s7.After(s62) + s81.After(s7) + s82.After(s7) + s91.After(s82) + s92.After(s82) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESS-RULES"}, [][]string{nil}, false).Times(1) + }, + rulesToAdd: []*CompletedRule{ + ingressRule3, + updatedIngressRule3WithOneFromAddress, + updatedIngressRule3WithAnotherFromAddress, + updatedIngressRule3WithMultipleFromAddresses, + updatedIngressRule3WithOtherMultipleFromAddresses, + updatedIngressRule3WithOneFromAddress, + ingressRule3, + updatedIngressRule3WithMultipleFromAddresses, + ingressRule3, + }, + rulesToForget: []string{ + ingressRuleID3, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := gomock.NewController(t) + mockRouteClient := routetest.NewMockInterface(controller) + r := newTestNodeReconciler(mockRouteClient, tt.ipv4Enabled, tt.ipv6Enabled) + + tt.expectedCalls(mockRouteClient.EXPECT()) + for _, rule := range tt.rulesToAdd { + assert.NoError(t, r.Reconcile(rule)) + } + for _, rule := range tt.rulesToForget { + assert.NoError(t, r.Forget(rule)) + } + }) + } +} + +func TestNodeReconcilerBatchReconcileAndForget(t *testing.T) { + tests := []struct { + name string + ipv4Enabled bool + ipv6Enabled bool + rulesToAdd []*CompletedRule + rulesToForget []string + expectedCalls func(mockRouteClient *routetest.MockInterfaceMockRecorder) + }{ + { + name: "IPv4, add ingress rules in batch, then forget one", + ipv4Enabled: true, + rulesToAdd: []*CompletedRule{ + ingressRule1, + ingressRule2, + 
}, + rulesToForget: []string{ + ingressRuleID1, + }, + expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) { + coreChains := []string{ + "ANTREA-POL-INGRESS-RULES", + } + coreRules := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + svcChains := []string{ + "ANTREA-POL-INGRESSRULE1", + } + svcRules := [][]string{ + { + "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 80 -j ACCEPT", + "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 443 -j ACCEPT", + }, + } + updatedCoreRules := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", sets.New[string]("1.1.1.1/32", "192.168.1.0/25"), false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(svcChains, svcRules, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, coreRules, false).Times(1) + + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, updatedCoreRules, false).Times(1) + mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", false).Times(1) + mockRouteClient.DeleteNodeNetworkPolicyIPTables(svcChains, false).Times(1) + }, + }, + { + name: "IPv6, add ingress rules in batch, then forget one", + ipv6Enabled: true, + rulesToAdd: []*CompletedRule{ + ingressRule1, + ingressRule2, + }, + rulesToForget: []string{ + ingressRuleID2, + }, + expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) { + coreChains := []string{ + "ANTREA-POL-INGRESS-RULES", + } 
+ coreRules := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-6 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-INGRESS-RULES -s 2002:1a23:fb44::1/128 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + svcChains := []string{ + "ANTREA-POL-INGRESSRULE1", + } + svcRules := [][]string{ + { + "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 80 -j ACCEPT", + "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 443 -j ACCEPT", + }, + } + updatedCoreRules := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-6 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-6", sets.New[string]("2002:1a23:fb44::1/128", "fec0::192:168:1:8/125"), true).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(svcChains, svcRules, true).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, coreRules, true).Times(1) + + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, updatedCoreRules, true).Times(1) + }, + }, + { + name: "dualstack, add ingress rules in batch, then forget one", + ipv4Enabled: true, + ipv6Enabled: true, + rulesToAdd: []*CompletedRule{ + ingressRule1, + ingressRule2, + }, + rulesToForget: []string{ + ingressRuleID1, + }, + expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) { + coreChains := []string{ + "ANTREA-POL-INGRESS-RULES", + } + ipv4CoreRules := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-INGRESS-RULES -s 
1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + ipv6CoreRules := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-6 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-INGRESS-RULES -s 2002:1a23:fb44::1/128 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + svcChains := []string{ + "ANTREA-POL-INGRESSRULE1", + } + ipv4SvcRules := [][]string{ + { + "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 80 -j ACCEPT", + "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 443 -j ACCEPT", + }, + } + ipv6SvcRules := ipv4SvcRules + updatedIPv4CoreRules := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + updatedIPv6CoreRules := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -s 2002:1a23:fb44::1/128 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", sets.New[string]("1.1.1.1/32", "192.168.1.0/25"), false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(svcChains, ipv4SvcRules, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, ipv4CoreRules, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-6", sets.New[string]("2002:1a23:fb44::1/128", "fec0::192:168:1:8/125"), true).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(svcChains, ipv6SvcRules, true).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, ipv6CoreRules, true).Times(1) + + 
mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, updatedIPv4CoreRules, false).Times(1) + mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", false).Times(1) + mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, updatedIPv6CoreRules, true).Times(1) + mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-6", true).Times(1) + mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, true).Times(1) + }, + }, + { + name: "IPv4, add egress rules in batch, then forget one", + ipv4Enabled: true, + rulesToAdd: []*CompletedRule{ + egressRule1, + egressRule2, + }, + rulesToForget: []string{ + egressRuleID1, + }, + expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) { + coreChains := []string{ + "ANTREA-POL-EGRESS-RULES", + } + coreRules := [][]string{ + { + `-A ANTREA-POL-EGRESS-RULES -d 1.1.1.1/32 -j ANTREA-POL-EGRESSRULE1 -m comment --comment "Antrea: for rule egressRule1, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-EGRESS-RULES -d 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + svcChains := []string{ + "ANTREA-POL-EGRESSRULE1", + } + svcRules := [][]string{ + { + "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 80 -j ACCEPT", + "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 443 -j ACCEPT", + }, + } + updatedCoreRules := [][]string{ + { + `-A ANTREA-POL-EGRESS-RULES -d 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(svcChains, svcRules, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, coreRules, false).Times(1) + + 
mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, updatedCoreRules, false).Times(1) + mockRouteClient.DeleteNodeNetworkPolicyIPTables(svcChains, false).Times(1) + }, + }, + { + name: "IPv6, add egress rules in batch, then forget one", + ipv6Enabled: true, + rulesToAdd: []*CompletedRule{ + egressRule1, + egressRule2, + }, + rulesToForget: []string{ + egressRuleID1, + }, + expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) { + coreChains := []string{ + "ANTREA-POL-EGRESS-RULES", + } + coreRules := [][]string{ + { + `-A ANTREA-POL-EGRESS-RULES -d 2002:1a23:fb44::1/128 -j ANTREA-POL-EGRESSRULE1 -m comment --comment "Antrea: for rule egressRule1, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-EGRESS-RULES -d 2002:1a23:fb44::1/128 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + svcChains := []string{ + "ANTREA-POL-EGRESSRULE1", + } + svcRules := [][]string{ + { + "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 80 -j ACCEPT", + "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 443 -j ACCEPT", + }, + } + updatedCoreRules := [][]string{ + { + `-A ANTREA-POL-EGRESS-RULES -d 2002:1a23:fb44::1/128 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(svcChains, svcRules, true).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, coreRules, true).Times(1) + + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, updatedCoreRules, true).Times(1) + mockRouteClient.DeleteNodeNetworkPolicyIPTables(svcChains, true).Times(1) + }, + }, + { + name: "dualstack, only add egress rules, then forget one", + ipv4Enabled: true, + ipv6Enabled: true, + rulesToAdd: []*CompletedRule{ + egressRule1, + egressRule2, + }, + rulesToForget: []string{ + egressRuleID1, + }, + expectedCalls: func(mockRouteClient 
*routetest.MockInterfaceMockRecorder) { + coreChains := []string{ + "ANTREA-POL-EGRESS-RULES", + } + ipv4CoreRules := [][]string{ + { + `-A ANTREA-POL-EGRESS-RULES -d 1.1.1.1/32 -j ANTREA-POL-EGRESSRULE1 -m comment --comment "Antrea: for rule egressRule1, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-EGRESS-RULES -d 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + ipv6CoreRules := [][]string{ + { + `-A ANTREA-POL-EGRESS-RULES -d 2002:1a23:fb44::1/128 -j ANTREA-POL-EGRESSRULE1 -m comment --comment "Antrea: for rule egressRule1, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-EGRESS-RULES -d 2002:1a23:fb44::1/128 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + svcChains := []string{ + "ANTREA-POL-EGRESSRULE1", + } + ipv4SvcRules := [][]string{ + { + "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 80 -j ACCEPT", + "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 443 -j ACCEPT", + }, + } + ipv6SvcRules := ipv4SvcRules + updatedIPv4CoreRules := [][]string{ + { + `-A ANTREA-POL-EGRESS-RULES -d 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + updatedIPv6CoreRules := [][]string{ + { + `-A ANTREA-POL-EGRESS-RULES -d 2002:1a23:fb44::1/128 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(svcChains, ipv4SvcRules, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, ipv4CoreRules, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(svcChains, ipv6SvcRules, true).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, ipv6CoreRules, true).Times(1) + + 
mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, updatedIPv4CoreRules, false).Times(1) + mockRouteClient.DeleteNodeNetworkPolicyIPTables(svcChains, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(coreChains, updatedIPv6CoreRules, true).Times(1) + mockRouteClient.DeleteNodeNetworkPolicyIPTables(svcChains, true).Times(1) + }, + }, + { + name: "IPv4, add ingress and egress rules in batch, then forget some rules", + ipv4Enabled: true, + rulesToAdd: []*CompletedRule{ + ingressRule1, + ingressRule2, + egressRule1, + egressRule2, + }, + rulesToForget: []string{ + ingressRuleID1, + egressRuleID1, + }, + expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) { + svcChains := []string{ + "ANTREA-POL-INGRESSRULE1", + "ANTREA-POL-EGRESSRULE1", + } + svcRules := [][]string{ + { + "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 80 -j ACCEPT", + "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 443 -j ACCEPT", + }, + { + "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 80 -j ACCEPT", + "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 443 -j ACCEPT", + }, + } + ingressCoreChains := []string{"ANTREA-POL-INGRESS-RULES"} + egressCoreChains := []string{"ANTREA-POL-EGRESS-RULES"} + ingressCoreRules := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-4 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + egressCoreRules := [][]string{ + { + `-A ANTREA-POL-EGRESS-RULES -d 1.1.1.1/32 -j ANTREA-POL-EGRESSRULE1 -m comment --comment "Antrea: for rule egressRule1, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-EGRESS-RULES -d 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`, 
+ }, + } + updatedIngressCoreRules := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -s 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + updatedEgressCoreRules := [][]string{ + { + `-A ANTREA-POL-EGRESS-RULES -d 1.1.1.1/32 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", sets.New[string]("1.1.1.1/32", "192.168.1.0/25"), false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(svcChains, svcRules, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(ingressCoreChains, ingressCoreRules, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(egressCoreChains, egressCoreRules, false).Times(1) + + mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-4", false).Times(1) + mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, false).Times(1) + mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-EGRESSRULE1"}, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(ingressCoreChains, updatedIngressCoreRules, false).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(egressCoreChains, updatedEgressCoreRules, false).Times(1) + }, + }, + { + name: "IPv6, add ingress and egress rules in batch, then forget some rules", + ipv6Enabled: true, + rulesToAdd: []*CompletedRule{ + ingressRule1, + ingressRule2, + egressRule1, + egressRule2, + }, + rulesToForget: []string{ + ingressRuleID1, + egressRuleID1, + }, + expectedCalls: func(mockRouteClient *routetest.MockInterfaceMockRecorder) { + svcChains := []string{ + "ANTREA-POL-INGRESSRULE1", + "ANTREA-POL-EGRESSRULE1", + } + svcRules := [][]string{ + { + "-A ANTREA-POL-INGRESSRULE1 -p tcp --dport 80 -j ACCEPT", + "-A ANTREA-POL-INGRESSRULE1 
-p tcp --dport 443 -j ACCEPT", + }, + { + "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 80 -j ACCEPT", + "-A ANTREA-POL-EGRESSRULE1 -p tcp --dport 443 -j ACCEPT", + }, + } + ingressCoreChains := []string{"ANTREA-POL-INGRESS-RULES"} + egressCoreChains := []string{"ANTREA-POL-EGRESS-RULES"} + ingressCoreRules := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -m set --match-set ANTREA-POL-INGRESSRULE1-6 src -j ANTREA-POL-INGRESSRULE1 -m comment --comment "Antrea: for rule ingressRule1, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-INGRESS-RULES -s 2002:1a23:fb44::1/128 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + egressCoreRules := [][]string{ + { + `-A ANTREA-POL-EGRESS-RULES -d 2002:1a23:fb44::1/128 -j ANTREA-POL-EGRESSRULE1 -m comment --comment "Antrea: for rule egressRule1, policy AntreaClusterNetworkPolicy:name1"`, + `-A ANTREA-POL-EGRESS-RULES -d 2002:1a23:fb44::1/128 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + updatedIngressCoreRules := [][]string{ + { + `-A ANTREA-POL-INGRESS-RULES -s 2002:1a23:fb44::1/128 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule ingressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + updatedEgressCoreRules := [][]string{ + { + `-A ANTREA-POL-EGRESS-RULES -d 2002:1a23:fb44::1/128 -p tcp --dport 443 -j ACCEPT -m comment --comment "Antrea: for rule egressRule2, policy AntreaClusterNetworkPolicy:name1"`, + }, + } + + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-6", sets.New[string]("2002:1a23:fb44::1/128", "fec0::192:168:1:8/125"), true).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(svcChains, svcRules, true).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(ingressCoreChains, ingressCoreRules, true).Times(1) + 
mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(egressCoreChains, egressCoreRules, true).Times(1) + + mockRouteClient.DeleteNodeNetworkPolicyIPSet("ANTREA-POL-INGRESSRULE1-6", true).Times(1) + mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-INGRESSRULE1"}, true).Times(1) + mockRouteClient.DeleteNodeNetworkPolicyIPTables([]string{"ANTREA-POL-EGRESSRULE1"}, true).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(ingressCoreChains, updatedIngressCoreRules, true).Times(1) + mockRouteClient.AddOrUpdateNodeNetworkPolicyIPTables(egressCoreChains, updatedEgressCoreRules, true).Times(1) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := gomock.NewController(t) + mockRouteClient := routetest.NewMockInterface(controller) + r := newTestNodeReconciler(mockRouteClient, tt.ipv4Enabled, tt.ipv6Enabled) + + tt.expectedCalls(mockRouteClient.EXPECT()) + assert.NoError(t, r.BatchReconcile(tt.rulesToAdd)) + + for _, ruleID := range tt.rulesToForget { + assert.NoError(t, r.Forget(ruleID)) + } + }) + } +} diff --git a/pkg/agent/controller/networkpolicy/node_reconciler_unsupported.go b/pkg/agent/controller/networkpolicy/node_reconciler_unsupported.go new file mode 100644 index 00000000000..deac59eeb57 --- /dev/null +++ b/pkg/agent/controller/networkpolicy/node_reconciler_unsupported.go @@ -0,0 +1,49 @@ +//go:build !linux +// +build !linux + +// Copyright 2024 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package networkpolicy + +import ( + "antrea.io/antrea/pkg/agent/route" + "antrea.io/antrea/pkg/agent/types" +) + +type nodeReconciler struct{} + +func newNodeReconciler(routeClient route.Interface, ipv4Enabled, ipv6Enabled bool) *nodeReconciler { + return &nodeReconciler{} +} + +func (r *nodeReconciler) Reconcile(rule *CompletedRule) error { + return nil +} + +func (r *nodeReconciler) BatchReconcile(rules []*CompletedRule) error { + return nil +} + +func (r *nodeReconciler) Forget(ruleID string) error { + return nil +} + +func (r *nodeReconciler) GetRuleByFlowID(ruleID uint32) (*types.PolicyRule, bool, error) { + return nil, false, nil +} + +func (r *nodeReconciler) RunIDAllocatorWorker(stopCh <-chan struct{}) { + +} diff --git a/pkg/agent/controller/networkpolicy/reconciler.go b/pkg/agent/controller/networkpolicy/pod_reconciler.go similarity index 93% rename from pkg/agent/controller/networkpolicy/reconciler.go rename to pkg/agent/controller/networkpolicy/pod_reconciler.go index a20771bc4e4..1bf64e4a84e 100644 --- a/pkg/agent/controller/networkpolicy/reconciler.go +++ b/pkg/agent/controller/networkpolicy/pod_reconciler.go @@ -98,10 +98,10 @@ func normalizeServices(services []v1beta2.Service) servicesKey { return servicesKey(b.String()) } -// lastRealized is the struct cached by reconciler. It's used to track the +// podPolicyLastRealized is the struct cached by podReconciler. It's used to track the // actual state of rules we have enforced, so that we can know how to reconcile // a rule when it's updated/removed. -// It includes the last version of CompletedRule the reconciler has realized +// It includes the last version of CompletedRule the podReconciler has realized // and the related runtime information including the ofIDs, the Openflow ports // or the IP addresses of the target Pods got from the InterfaceStore. 
// @@ -142,7 +142,7 @@ func normalizeServices(services []v1beta2.Service) servicesKey { // while Pod C will have another Openflow rule as it resolves "http" to 8080. // In the implementation, we group Pods by their resolved services value so Pod A and B // can be mapped to same group. -type lastRealized struct { +type podPolicyLastRealized struct { // ofIDs identifies Openflow rules in Openflow implementation. // It's a map of servicesKey to Openflow rule ID. ofIDs map[servicesKey]uint32 @@ -175,8 +175,8 @@ type lastRealized struct { groupAddresses sets.Set[string] } -func newLastRealized(rule *CompletedRule) *lastRealized { - return &lastRealized{ +func newPodPolicyLastRealized(rule *CompletedRule) *podPolicyLastRealized { + return &podPolicyLastRealized{ ofIDs: map[servicesKey]uint32{}, CompletedRule: rule, podOFPorts: map[servicesKey]sets.Set[int32]{}, @@ -194,11 +194,11 @@ type tablePriorityAssigner struct { mutex sync.RWMutex } -// reconciler implements Reconciler. +// podReconciler implements Reconciler. // Note that although its Reconcile and Forget methods are thread-safe, it's // assumed each rule can only be processed by a single client at any given // time. Different rules can be processed in parallel. -type reconciler struct { +type podReconciler struct { // ofClient is the Openflow interface. ofClient openflow.Client @@ -206,7 +206,7 @@ type reconciler struct { ifaceStore interfacestore.InterfaceStore // lastRealizeds caches the last realized rules. - // It's a mapping from ruleID to *lastRealized. + // It's a mapping from ruleID to *podPolicyLastRealized. lastRealizeds sync.Map // idAllocator provides interfaces to allocateForRule and release uint32 id. @@ -220,11 +220,11 @@ type reconciler struct { ipv6Enabled bool // fqdnController manages dns cache of FQDN rules. 
It provides interfaces for the - // reconciler to register FQDN policy rules and query the IP addresses corresponded + // podReconciler to register FQDN policy rules and query the IP addresses corresponded // to a FQDN. fqdnController *fqdnController - // groupCounters is a list of GroupCounter for v4 and v6 env. reconciler uses these + // groupCounters is a list of GroupCounter for v4 and v6 env. podReconciler uses these // GroupCounters to get the groupIDs of a specific Service. groupCounters []proxytypes.GroupCounter @@ -232,8 +232,8 @@ type reconciler struct { multicastEnabled bool } -// newReconciler returns a new *reconciler. -func newReconciler(ofClient openflow.Client, +// newPodReconciler returns a new *podReconciler. +func newPodReconciler(ofClient openflow.Client, ifaceStore interfacestore.InterfaceStore, idAllocator *idAllocator, fqdnController *fqdnController, @@ -242,7 +242,7 @@ func newReconciler(ofClient openflow.Client, v6Enabled bool, antreaPolicyEnabled bool, multicastEnabled bool, -) *reconciler { +) *podReconciler { priorityAssigners := map[uint8]*tablePriorityAssigner{} if antreaPolicyEnabled { for _, table := range openflow.GetAntreaPolicyBaselineTierTables() { @@ -268,7 +268,7 @@ func newReconciler(ofClient openflow.Client, } } } - reconciler := &reconciler{ + reconciler := &podReconciler{ ofClient: ofClient, ifaceStore: ifaceStore, lastRealizeds: sync.Map{}, @@ -288,14 +288,14 @@ func newReconciler(ofClient openflow.Client, // RunIDAllocatorWorker runs the worker that deletes the rules from the cache in // idAllocator. -func (r *reconciler) RunIDAllocatorWorker(stopCh <-chan struct{}) { +func (r *podReconciler) RunIDAllocatorWorker(stopCh <-chan struct{}) { r.idAllocator.runWorker(stopCh) } -// Reconcile checks whether the provided rule have been enforced or not, and +// Reconcile checks whether the provided rule has been enforced or not, and // invoke the add or update method accordingly. 
-func (r *reconciler) Reconcile(rule *CompletedRule) error { - klog.InfoS("Reconciling NetworkPolicy rule", "rule", rule.ID, "policy", rule.SourceRef.ToString()) +func (r *podReconciler) Reconcile(rule *CompletedRule) error { + klog.InfoS("Reconciling Pod NetworkPolicy rule", "rule", rule.ID, "policy", rule.SourceRef.ToString()) var err error var ofPriority *uint16 @@ -319,7 +319,7 @@ func (r *reconciler) Reconcile(rule *CompletedRule) error { if !exists { ofRuleInstallErr = r.add(rule, ofPriority, ruleTable) } else { - ofRuleInstallErr = r.update(value.(*lastRealized), rule, ofPriority, ruleTable) + ofRuleInstallErr = r.update(value.(*podPolicyLastRealized), rule, ofPriority, ruleTable) } if ofRuleInstallErr != nil && ofPriority != nil && !registeredBefore { priorityAssigner.assigner.release(*ofPriority) @@ -327,7 +327,7 @@ func (r *reconciler) Reconcile(rule *CompletedRule) error { return ofRuleInstallErr } -func (r *reconciler) getRuleType(rule *CompletedRule) ruleType { +func (r *podReconciler) getRuleType(rule *CompletedRule) ruleType { if !r.multicastEnabled { return unicast } @@ -349,7 +349,7 @@ func (r *reconciler) getRuleType(rule *CompletedRule) ruleType { // getOFRuleTable retrieves the OpenFlow table to install the CompletedRule. // The decision is made based on whether the rule is created for an ACNP/ANNP, and // the Tier of that NetworkPolicy. -func (r *reconciler) getOFRuleTable(rule *CompletedRule) uint8 { +func (r *podReconciler) getOFRuleTable(rule *CompletedRule) uint8 { rType := r.getRuleType(rule) var ruleTables []*openflow.Table var tableID uint8 @@ -388,7 +388,7 @@ func (r *reconciler) getOFRuleTable(rule *CompletedRule) uint8 { // getOFPriority retrieves the OFPriority for the input CompletedRule to be installed, // and re-arranges installed priorities on OVS if necessary. 
-func (r *reconciler) getOFPriority(rule *CompletedRule, tableID uint8, pa *tablePriorityAssigner) (*uint16, bool, error) { +func (r *podReconciler) getOFPriority(rule *CompletedRule, tableID uint8, pa *tablePriorityAssigner) (*uint16, bool, error) { // IGMP Egress policy is enforced in userspace via packet-in message, there won't be OpenFlow // rules created for such rules. Therefore, assigning priority is not required. if !rule.isAntreaNetworkPolicyRule() || rule.isIGMPEgressPolicyRule() { @@ -431,7 +431,7 @@ func (r *reconciler) getOFPriority(rule *CompletedRule, tableID uint8, pa *table // BatchReconcile reconciles the desired state of the provided CompletedRules // with the actual state of Openflow entries in batch. It should only be invoked // if all rules are newly added without last realized status. -func (r *reconciler) BatchReconcile(rules []*CompletedRule) error { +func (r *podReconciler) BatchReconcile(rules []*CompletedRule) error { var rulesToInstall []*CompletedRule var priorities []*uint16 prioritiesByTable := map[uint8][]*uint16{} @@ -471,7 +471,7 @@ func (r *reconciler) BatchReconcile(rules []*CompletedRule) error { // registerOFPriorities constructs a Priority type for each CompletedRule in the input list, // and registers those Priorities with appropriate tablePriorityAssigner based on Tier. -func (r *reconciler) registerOFPriorities(rules []*CompletedRule) error { +func (r *podReconciler) registerOFPriorities(rules []*CompletedRule) error { prioritiesToRegister := map[uint8][]types.Priority{} for _, rule := range rules { // IGMP Egress policy is enforced in userspace via packet-in message, there won't be OpenFlow @@ -495,7 +495,7 @@ func (r *reconciler) registerOFPriorities(rules []*CompletedRule) error { } // add converts CompletedRule to PolicyRule(s) and invokes installOFRule to install them. 
-func (r *reconciler) add(rule *CompletedRule, ofPriority *uint16, table uint8) error { +func (r *podReconciler) add(rule *CompletedRule, ofPriority *uint16, table uint8) error { klog.V(2).InfoS("Adding new rule", "rule", rule) ofRuleByServicesMap, lastRealized := r.computeOFRulesForAdd(rule, ofPriority, table) for svcKey, ofRule := range ofRuleByServicesMap { @@ -517,9 +517,9 @@ func (r *reconciler) add(rule *CompletedRule, ofPriority *uint16, table uint8) e return nil } -func (r *reconciler) computeOFRulesForAdd(rule *CompletedRule, ofPriority *uint16, table uint8) ( - map[servicesKey]*types.PolicyRule, *lastRealized) { - lastRealized := newLastRealized(rule) +func (r *podReconciler) computeOFRulesForAdd(rule *CompletedRule, ofPriority *uint16, table uint8) ( + map[servicesKey]*types.PolicyRule, *podPolicyLastRealized) { + lastRealized := newPodPolicyLastRealized(rule) // TODO: Handle the case that the following processing fails or partially succeeds. r.lastRealizeds.Store(rule.ID, lastRealized) @@ -561,7 +561,7 @@ func (r *reconciler) computeOFRulesForAdd(rule *CompletedRule, ofPriority *uint1 svcGroupIDs := r.getSvcGroupIDs(members) toAddresses = svcGroupIDsToOFAddresses(svcGroupIDs) // If rule is applied to Services, there will be only one svcKey, which is "", in - // membersByServicesMap. So lastRealized.serviceGroupIDs won't be overwritten in + // membersByServicesMap. So podPolicyLastRealized.serviceGroupIDs won't be overwritten in // this for-loop. lastRealized.serviceGroupIDs = svcGroupIDs } else { @@ -672,8 +672,8 @@ func (r *reconciler) computeOFRulesForAdd(rule *CompletedRule, ofPriority *uint1 } // batchAdd converts CompletedRules to PolicyRules and invokes BatchInstallPolicyRuleFlows to install them. 
-func (r *reconciler) batchAdd(rules []*CompletedRule, ofPriorities []*uint16) error { - lastRealizeds := make([]*lastRealized, len(rules)) +func (r *podReconciler) batchAdd(rules []*CompletedRule, ofPriorities []*uint16) error { + lastRealizeds := make([]*podPolicyLastRealized, len(rules)) ofIDUpdateMaps := make([]map[servicesKey]uint32, len(rules)) var allOFRules []*types.PolicyRule @@ -711,7 +711,7 @@ func (r *reconciler) batchAdd(rules []*CompletedRule, ofPriorities []*uint16) er // update calculates the difference of Addresses between oldRule and newRule, // and invokes Openflow client's methods to reconcile them. -func (r *reconciler) update(lastRealized *lastRealized, newRule *CompletedRule, ofPriority *uint16, table uint8) error { +func (r *podReconciler) update(lastRealized *podPolicyLastRealized, newRule *CompletedRule, ofPriority *uint16, table uint8) error { klog.V(2).InfoS("Updating existing rule", "rule", newRule) // staleOFIDs tracks servicesKey that are no long needed. // Firstly fill it with the last realized ofIDs. @@ -871,7 +871,7 @@ func (r *reconciler) update(lastRealized *lastRealized, newRule *CompletedRule, LogLabel: newRule.LogLabel, } // If the PolicyRule for the original services doesn't exist and IPBlocks is present, it means the - // reconciler hasn't installed flows for IPBlocks, then it must be added to the new PolicyRule. + // podReconciler hasn't installed flows for IPBlocks, then it must be added to the new PolicyRule. if svcKey == originalSvcKey && len(newRule.To.IPBlocks) > 0 { to := ipBlocksToOFAddresses(newRule.To.IPBlocks, r.ipv4Enabled, r.ipv6Enabled, false) ofRule.To = append(ofRule.To, to...) 
@@ -943,7 +943,7 @@ func (r *reconciler) update(lastRealized *lastRealized, newRule *CompletedRule, return nil } -func (r *reconciler) installOFRule(ofRule *types.PolicyRule) error { +func (r *podReconciler) installOFRule(ofRule *types.PolicyRule) error { klog.V(2).InfoS("Installing ofRule", "id", ofRule.FlowID, "direction", ofRule.Direction, "from", len(ofRule.From), "to", len(ofRule.To), "service", len(ofRule.Service)) if err := r.ofClient.InstallPolicyRuleFlows(ofRule); err != nil { r.idAllocator.forgetRule(ofRule.FlowID) @@ -952,7 +952,7 @@ func (r *reconciler) installOFRule(ofRule *types.PolicyRule) error { return nil } -func (r *reconciler) updateOFRule(ofID uint32, addedFrom []types.Address, addedTo []types.Address, deletedFrom []types.Address, deletedTo []types.Address, priority *uint16, enableLogging, isMCNPRule bool) error { +func (r *podReconciler) updateOFRule(ofID uint32, addedFrom []types.Address, addedTo []types.Address, deletedFrom []types.Address, deletedTo []types.Address, priority *uint16, enableLogging, isMCNPRule bool) error { klog.V(2).InfoS("Updating ofRule", "id", ofID, "addedFrom", len(addedFrom), "addedTo", len(addedTo), "deletedFrom", len(deletedFrom), "deletedTo", len(deletedTo)) // TODO: This might be unnecessarily complex and hard for error handling, consider revising the Openflow interfaces. if len(addedFrom) > 0 { @@ -978,7 +978,7 @@ func (r *reconciler) updateOFRule(ofID uint32, addedFrom []types.Address, addedT return nil } -func (r *reconciler) uninstallOFRule(ofID uint32, table uint8) error { +func (r *podReconciler) uninstallOFRule(ofID uint32, table uint8) error { klog.V(2).InfoS("Uninstalling ofRule", "id", ofID) stalePriorities, err := r.ofClient.UninstallPolicyRuleFlows(ofID) if err != nil { @@ -1003,7 +1003,7 @@ func (r *reconciler) uninstallOFRule(ofID uint32, table uint8) error { // Forget invokes UninstallPolicyRuleFlows to uninstall Openflow entries // associated with the provided ruleID if it was enforced before. 
-func (r *reconciler) Forget(ruleID string) error { +func (r *podReconciler) Forget(ruleID string) error { klog.InfoS("Forgetting rule", "rule", ruleID) value, exists := r.lastRealizeds.Load(ruleID) @@ -1012,7 +1012,7 @@ func (r *reconciler) Forget(ruleID string) error { return nil } - lastRealized := value.(*lastRealized) + lastRealized := value.(*podPolicyLastRealized) table := r.getOFRuleTable(lastRealized.CompletedRule) priorityAssigner, exists := r.priorityAssigners[table] if exists { @@ -1033,7 +1033,7 @@ func (r *reconciler) Forget(ruleID string) error { return nil } -func (r *reconciler) isIGMPRule(rule *CompletedRule) bool { +func (r *podReconciler) isIGMPRule(rule *CompletedRule) bool { isIGMP := false if len(rule.Services) > 0 && (rule.Services[0].Protocol != nil) && (*rule.Services[0].Protocol == v1beta2.ProtocolIGMP) { @@ -1042,11 +1042,11 @@ func (r *reconciler) isIGMPRule(rule *CompletedRule) bool { return isIGMP } -func (r *reconciler) GetRuleByFlowID(ruleFlowID uint32) (*types.PolicyRule, bool, error) { +func (r *podReconciler) GetRuleByFlowID(ruleFlowID uint32) (*types.PolicyRule, bool, error) { return r.idAllocator.getRuleFromAsyncCache(ruleFlowID) } -func (r *reconciler) getOFPorts(members v1beta2.GroupMemberSet) sets.Set[int32] { +func (r *podReconciler) getOFPorts(members v1beta2.GroupMemberSet) sets.Set[int32] { ofPorts := sets.New[int32]() for _, m := range members { var entityName, ns string @@ -1071,7 +1071,7 @@ func (r *reconciler) getOFPorts(members v1beta2.GroupMemberSet) sets.Set[int32] return ofPorts } -func (r *reconciler) getIPs(members v1beta2.GroupMemberSet) sets.Set[string] { +func (r *podReconciler) getIPs(members v1beta2.GroupMemberSet) sets.Set[string] { ips := sets.New[string]() for _, m := range members { var entityName, ns string @@ -1100,7 +1100,7 @@ func (r *reconciler) getIPs(members v1beta2.GroupMemberSet) sets.Set[string] { return ips } -func (r *reconciler) getSvcGroupIDs(members v1beta2.GroupMemberSet) 
sets.Set[int64] { +func (r *podReconciler) getSvcGroupIDs(members v1beta2.GroupMemberSet) sets.Set[int64] { var svcRefs []v1beta2.ServiceReference for _, m := range members { if m.Service != nil { @@ -1162,7 +1162,7 @@ func ofPortsToOFAddresses(ofPorts sets.Set[int32]) []types.Address { return addresses } -func (r *reconciler) svcRefsToGroupIDs(svcRefs []v1beta2.ServiceReference) sets.Set[int64] { +func (r *podReconciler) svcRefsToGroupIDs(svcRefs []v1beta2.ServiceReference) sets.Set[int64] { groupIDs := sets.New[int64]() for _, svcRef := range svcRefs { for _, groupCounter := range r.groupCounters { diff --git a/pkg/agent/controller/networkpolicy/reconciler_test.go b/pkg/agent/controller/networkpolicy/pod_reconciler_test.go similarity index 98% rename from pkg/agent/controller/networkpolicy/reconciler_test.go rename to pkg/agent/controller/networkpolicy/pod_reconciler_test.go index 0b6cbc58f30..ec31137a918 100644 --- a/pkg/agent/controller/networkpolicy/reconciler_test.go +++ b/pkg/agent/controller/networkpolicy/pod_reconciler_test.go @@ -107,12 +107,12 @@ func newCIDR(cidrStr string) *net.IPNet { return tmpIPNet } -func newTestReconciler(t *testing.T, controller *gomock.Controller, ifaceStore interfacestore.InterfaceStore, ofClient *openflowtest.MockClient, v4Enabled, v6Enabled bool) *reconciler { +func newTestReconciler(t *testing.T, controller *gomock.Controller, ifaceStore interfacestore.InterfaceStore, ofClient *openflowtest.MockClient, v4Enabled, v6Enabled bool) *podReconciler { f, _ := newMockFQDNController(t, controller, nil) ch := make(chan string, 100) groupIDAllocator := openflow.NewGroupAllocator() groupCounters := []proxytypes.GroupCounter{proxytypes.NewGroupCounter(groupIDAllocator, ch)} - r := newReconciler(ofClient, ifaceStore, newIDAllocator(testAsyncDeleteInterval), f, groupCounters, v4Enabled, v6Enabled, true, false) + r := newPodReconciler(ofClient, ifaceStore, newIDAllocator(testAsyncDeleteInterval), f, groupCounters, v4Enabled, v6Enabled, 
true, false) return r } @@ -120,14 +120,14 @@ func TestReconcilerForget(t *testing.T) { prepareMockTables() tests := []struct { name string - lastRealizeds map[string]*lastRealized + lastRealizeds map[string]*podPolicyLastRealized args string expectedOFRuleIDs []uint32 wantErr bool }{ { "unknown-rule", - map[string]*lastRealized{ + map[string]*podPolicyLastRealized{ "foo": { ofIDs: map[servicesKey]uint32{servicesKey1: 8}, CompletedRule: &CompletedRule{ @@ -141,7 +141,7 @@ func TestReconcilerForget(t *testing.T) { }, { "known-single-ofrule", - map[string]*lastRealized{ + map[string]*podPolicyLastRealized{ "foo": { ofIDs: map[servicesKey]uint32{servicesKey1: 8}, CompletedRule: &CompletedRule{ @@ -155,7 +155,7 @@ func TestReconcilerForget(t *testing.T) { }, { "known-multiple-ofrule", - map[string]*lastRealized{ + map[string]*podPolicyLastRealized{ "foo": { ofIDs: map[servicesKey]uint32{servicesKey1: 8, servicesKey2: 9}, CompletedRule: &CompletedRule{ @@ -169,7 +169,7 @@ func TestReconcilerForget(t *testing.T) { }, { "known-multiple-ofrule-cnp", - map[string]*lastRealized{ + map[string]*podPolicyLastRealized{ "foo": { ofIDs: map[servicesKey]uint32{servicesKey1: 8, servicesKey2: 9}, CompletedRule: &CompletedRule{ @@ -864,7 +864,7 @@ func TestReconcilerReconcileServiceRelatedRule(t *testing.T) { } } -// TestReconcileWithTransientError ensures the reconciler can reconcile a rule properly after the first attempt meets +// TestReconcileWithTransientError ensures the podReconciler can reconcile a rule properly after the first attempt meets // transient error. // The input rule is an egress rule with named port, applying to 3 Pods and 1 IPBlock. The first 2 Pods have different // port numbers for the named port and the 3rd Pod cannot resolve it. 
@@ -922,10 +922,10 @@ func TestReconcileWithTransientError(t *testing.T) { mockOFClient.EXPECT().InstallPolicyRuleFlows(gomock.Any()).Return(transientError).Times(1) err := r.Reconcile(egressRule) assert.Error(t, err) - // Ensure the openflow ID is not persistent in lastRealized and is released to idAllocator upon error. + // Ensure the openflow ID is not persistent in podPolicyLastRealized and is released to idAllocator upon error. value, exists := r.lastRealizeds.Load(egressRule.ID) assert.True(t, exists) - assert.Empty(t, value.(*lastRealized).ofIDs) + assert.Empty(t, value.(*podPolicyLastRealized).ofIDs) assert.Equal(t, 1, r.idAllocator.deleteQueue.Len()) // Make the second call success. @@ -961,10 +961,10 @@ func TestReconcileWithTransientError(t *testing.T) { } err = r.Reconcile(egressRule) assert.NoError(t, err) - // Ensure the openflow IDs are persistent in lastRealized and are not released to idAllocator upon success. + // Ensure the openflow IDs are persistent in podPolicyLastRealized and are not released to idAllocator upon success. value, exists = r.lastRealizeds.Load(egressRule.ID) assert.True(t, exists) - assert.Len(t, value.(*lastRealized).ofIDs, 3) + assert.Len(t, value.(*podPolicyLastRealized).ofIDs, 3) // Ensure the number of released IDs doesn't change. assert.Equal(t, 1, r.idAllocator.deleteQueue.Len()) @@ -1075,7 +1075,7 @@ func TestReconcilerBatchReconcile(t *testing.T) { r := newTestReconciler(t, controller, ifaceStore, mockOFClient, true, true) if tt.numInstalledRules > 0 { // BatchInstall should skip rules already installed - r.lastRealizeds.Store(tt.args[0].ID, newLastRealized(tt.args[0])) + r.lastRealizeds.Store(tt.args[0].ID, newPodPolicyLastRealized(tt.args[0])) } // TODO: mock idAllocator and priorityAssigner mockOFClient.EXPECT().BatchInstallPolicyRuleFlows(gomock.Any()). 
diff --git a/pkg/agent/route/interfaces.go b/pkg/agent/route/interfaces.go index 30b86097bc3..a214386d59a 100644 --- a/pkg/agent/route/interfaces.go +++ b/pkg/agent/route/interfaces.go @@ -18,6 +18,8 @@ import ( "net" "time" + "k8s.io/apimachinery/pkg/util/sets" + "antrea.io/antrea/pkg/agent/config" binding "antrea.io/antrea/pkg/ovs/openflow" ) @@ -90,4 +92,16 @@ type Interface interface { // ClearConntrackEntryForService deletes a conntrack entry for a Service connection. ClearConntrackEntryForService(svcIP net.IP, svcPort uint16, endpointIP net.IP, protocol binding.Protocol) error + + // AddOrUpdateNodeNetworkPolicyIPSet adds or updates ipset created for NodeNetworkPolicy. + AddOrUpdateNodeNetworkPolicyIPSet(ipsetName string, ipsetEntries sets.Set[string], isIPv6 bool) error + + // DeleteNodeNetworkPolicyIPSet deletes ipset created for NodeNetworkPolicy. + DeleteNodeNetworkPolicyIPSet(ipsetName string, isIPv6 bool) error + + // AddOrUpdateNodeNetworkPolicyIPTables adds or updates iptables chains and rules within the chains for NodeNetworkPolicy. + AddOrUpdateNodeNetworkPolicyIPTables(iptablesChains []string, iptablesRules [][]string, isIPv6 bool) error + + // DeleteNodeNetworkPolicyIPTables deletes iptables chains and rules within the chains for NodeNetworkPolicy. 
+ DeleteNodeNetworkPolicyIPTables(iptablesChains []string, isIPv6 bool) error } diff --git a/pkg/agent/route/route_linux.go b/pkg/agent/route/route_linux.go index e4c6c4dfcec..2c168058684 100644 --- a/pkg/agent/route/route_linux.go +++ b/pkg/agent/route/route_linux.go @@ -72,11 +72,15 @@ const ( antreaForwardChain = "ANTREA-FORWARD" antreaPreRoutingChain = "ANTREA-PREROUTING" antreaPostRoutingChain = "ANTREA-POSTROUTING" + antreaInputChain = "ANTREA-INPUT" antreaOutputChain = "ANTREA-OUTPUT" antreaMangleChain = "ANTREA-MANGLE" serviceIPv4CIDRKey = "serviceIPv4CIDRKey" serviceIPv6CIDRKey = "serviceIPv6CIDRKey" + + privilegedNodeNetworkPolicyIngressRulesChain = "ANTREA-POL-PRI-INGRESS-RULES" + privilegedNodeNetworkPolicyEgressRulesChain = "ANTREA-POL-PRI-EGRESS-RULES" ) // Client implements Interface. @@ -107,11 +111,12 @@ type Client struct { // markToSNATIP caches marks to SNAT IPs. It's used in Egress feature. markToSNATIP sync.Map // iptablesInitialized is used to notify when iptables initialization is done. - iptablesInitialized chan struct{} - proxyAll bool - connectUplinkToBridge bool - multicastEnabled bool - isCloudEKS bool + iptablesInitialized chan struct{} + proxyAll bool + connectUplinkToBridge bool + multicastEnabled bool + isCloudEKS bool + nodeNetworkPolicyEnabled bool // serviceRoutes caches ip routes about Services. serviceRoutes sync.Map // serviceNeighbors caches neighbors. @@ -126,20 +131,44 @@ type Client struct { clusterNodeIP6s sync.Map // The latest calculated Service CIDRs can be got from serviceCIDRProvider. serviceCIDRProvider servicecidr.Interface + // nodeNetworkPolicyIPSetsIPv4 caches all existing IPv4 ipsets for NodeNetworkPolicy. + nodeNetworkPolicyIPSetsIPv4 sync.Map + // nodeNetworkPolicyIPSetsIPv6 caches all existing IPv6 ipsets for NodeNetworkPolicy. + nodeNetworkPolicyIPSetsIPv6 sync.Map + // nodeNetworkPolicyIPSetsIPv4 caches all existing IPv4 iptables chains and rules for NodeNetworkPolicy. 
+ nodeNetworkPolicyIPTablesIPv4 sync.Map + // nodeNetworkPolicyIPSetsIPv6 caches all existing IPv4 iptables chains and rules for NodeNetworkPolicy. + nodeNetworkPolicyIPTablesIPv6 sync.Map + // fixedNodeNetworkPolicyIPTablesIPv4 stores the IPv4 iptables rules that should be created before adding + // NodeNetworkPolicy rules. They should be also deleted after all NodeNetworkPolicy rules are removed. + fixedNodeNetworkPolicyIPTablesIPv4 []string + // fixedNodeNetworkPolicyIPTablesIPv6 stores the IPv6 iptables rules that should be created before adding + // NodeNetworkPolicy rules. They should be also deleted after all NodeNetworkPolicy rules are removed. + fixedNodeNetworkPolicyIPTablesIPv6 []string + // fixedNodeNetworkPolicyChains stores the iptables chains that should be created before adding NodeNetworkPolicy + // rules. They should be also deleted after all NodeNetworkPolicy rules are removed. + fixedNodeNetworkPolicyChains []string } // NewClient returns a route client. -func NewClient(networkConfig *config.NetworkConfig, noSNAT, proxyAll, connectUplinkToBridge, multicastEnabled bool, serviceCIDRProvider servicecidr.Interface) (*Client, error) { +func NewClient(networkConfig *config.NetworkConfig, + noSNAT bool, + proxyAll bool, + connectUplinkToBridge bool, + nodeNetworkPolicyEnabled bool, + multicastEnabled bool, + serviceCIDRProvider servicecidr.Interface) (*Client, error) { return &Client{ - networkConfig: networkConfig, - noSNAT: noSNAT, - proxyAll: proxyAll, - multicastEnabled: multicastEnabled, - connectUplinkToBridge: connectUplinkToBridge, - ipset: ipset.NewClient(), - netlink: &netlink.Handle{}, - isCloudEKS: env.IsCloudEKS(), - serviceCIDRProvider: serviceCIDRProvider, + networkConfig: networkConfig, + noSNAT: noSNAT, + proxyAll: proxyAll, + multicastEnabled: multicastEnabled, + connectUplinkToBridge: connectUplinkToBridge, + nodeNetworkPolicyEnabled: nodeNetworkPolicyEnabled, + ipset: ipset.NewClient(), + netlink: &netlink.Handle{}, + isCloudEKS: 
env.IsCloudEKS(), + serviceCIDRProvider: serviceCIDRProvider, }, nil } @@ -200,6 +229,10 @@ func (c *Client) Initialize(nodeConfig *config.NodeConfig, done func()) error { return fmt.Errorf("failed to initialize Service IP routes: %v", err) } } + // Build privileged iptables rules for NodeNetworkPolicy. + if c.nodeNetworkPolicyEnabled { + c.initNodeNetworkPolicy() + } return nil } @@ -396,6 +429,33 @@ func (c *Client) syncIPSet() error { }) } + c.nodeNetworkPolicyIPSetsIPv4.Range(func(key, value any) bool { + ipsetName := key.(string) + ipsetEntries := value.(sets.Set[string]) + if err := c.ipset.CreateIPSet(ipsetName, ipset.HashNet, false); err != nil { + return false + } + for ipsetEntry := range ipsetEntries { + if err := c.ipset.AddEntry(ipsetName, ipsetEntry); err != nil { + return false + } + } + return true + }) + c.nodeNetworkPolicyIPSetsIPv6.Range(func(key, value any) bool { + ipsetName := key.(string) + ipsetEntries := value.(sets.Set[string]) + if err := c.ipset.CreateIPSet(ipsetName, ipset.HashNet, true); err != nil { + return false + } + for ipsetEntry := range ipsetEntries { + if err := c.ipset.AddEntry(ipsetName, ipsetEntry); err != nil { + return false + } + } + return true + }) + return nil } @@ -482,18 +542,19 @@ func (c *Client) writeEKSNATRules(iptablesData *bytes.Buffer) { }...) } +// Create the antrea managed chains and link them to built-in chains. +// We cannot use iptables-restore for these jump rules because there +// are non antrea managed rules in built-in chains. +type jumpRule struct { + table string + srcChain string + dstChain string + comment string +} + // syncIPTables ensure that the iptables infrastructure we use is set up. // It's idempotent and can safely be called on every startup. func (c *Client) syncIPTables() error { - // Create the antrea managed chains and link them to built-in chains. - // We cannot use iptables-restore for these jump rules because there - // are non antrea managed rules in built-in chains. 
- type jumpRule struct { - table string - srcChain string - dstChain string - comment string - } jumpRules := []jumpRule{ {iptables.RawTable, iptables.PreRoutingChain, antreaPreRoutingChain, "Antrea: jump to Antrea prerouting rules"}, {iptables.RawTable, iptables.OutputChain, antreaOutputChain, "Antrea: jump to Antrea output rules"}, @@ -508,6 +569,10 @@ func (c *Client) syncIPTables() error { if c.proxyAll { jumpRules = append(jumpRules, jumpRule{iptables.NATTable, iptables.OutputChain, antreaOutputChain, "Antrea: jump to Antrea output rules"}) } + if c.nodeNetworkPolicyEnabled { + jumpRules = append(jumpRules, jumpRule{iptables.FilterTable, iptables.InputChain, antreaInputChain, "Antrea: jump to Antrea input rules"}) + jumpRules = append(jumpRules, jumpRule{iptables.FilterTable, iptables.OutputChain, antreaOutputChain, "Antrea: jump to Antrea output rules"}) + } for _, rule := range jumpRules { if err := c.iptables.EnsureChain(iptables.ProtocolDual, rule.table, rule.dstChain); err != nil { return err @@ -531,6 +596,21 @@ func (c *Client) syncIPTables() error { return true }) + nodeNetworkPolicyIPTablesIPv4 := map[string][]string{} + nodeNetworkPolicyIPTablesIPv6 := map[string][]string{} + c.nodeNetworkPolicyIPTablesIPv4.Range(func(key, value interface{}) bool { + chain := key.(string) + rules := value.([]string) + nodeNetworkPolicyIPTablesIPv4[chain] = rules + return true + }) + c.nodeNetworkPolicyIPTablesIPv6.Range(func(key, value interface{}) bool { + chain := key.(string) + rules := value.([]string) + nodeNetworkPolicyIPTablesIPv6[chain] = rules + return true + }) + // Use iptables-restore to configure IPv4 settings. 
if c.networkConfig.IPv4Enabled { iptablesData := c.restoreIptablesData(c.nodeConfig.PodIPv4CIDR, @@ -541,6 +621,8 @@ func (c *Client) syncIPTables() error { config.VirtualNodePortDNATIPv4, config.VirtualServiceIPv4, snatMarkToIPv4, + nodeNetworkPolicyIPTablesIPv4, + c.fixedNodeNetworkPolicyIPTablesIPv4, false) // Setting --noflush to keep the previous contents (i.e. non antrea managed chains) of the tables. @@ -559,6 +641,8 @@ func (c *Client) syncIPTables() error { config.VirtualNodePortDNATIPv6, config.VirtualServiceIPv6, snatMarkToIPv6, + nodeNetworkPolicyIPTablesIPv6, + c.fixedNodeNetworkPolicyIPTablesIPv6, true) // Setting --noflush to keep the previous contents (i.e. non antrea managed chains) of the tables. if err := c.iptables.Restore(iptablesData.String(), false, true); err != nil { @@ -577,6 +661,8 @@ func (c *Client) restoreIptablesData(podCIDR *net.IPNet, nodePortDNATVirtualIP, serviceVirtualIP net.IP, snatMarkToIP map[uint32]net.IP, + nodeNetWorkPolicyIPTables map[string][]string, + fixedNodeNetWorkPolicyIPTables []string, isIPv6 bool) *bytes.Buffer { // Create required rules in the antrea chains. // Use iptables-restore as it flushes the involved chains and creates the desired rules @@ -623,7 +709,7 @@ func (c *Client) restoreIptablesData(podCIDR *net.IPNet, "-m", "comment", "--comment", `"Antrea: drop Pod multicast traffic forwarded via underlay network"`, "-m", "set", "--match-set", clusterNodeIPSet, "src", "-d", types.McastCIDR.String(), - "-j", iptables.DROPTarget, + "-j", iptables.DropTarget, }...) } } @@ -665,6 +751,17 @@ func (c *Client) restoreIptablesData(podCIDR *net.IPNet, writeLine(iptablesData, "*filter") writeLine(iptablesData, iptables.MakeChainLine(antreaForwardChain)) + + for _, chain := range c.fixedNodeNetworkPolicyChains { + writeLine(iptablesData, iptables.MakeChainLine(chain)) + } + for chain := range nodeNetWorkPolicyIPTables { + // Skip these two chains since they are included in fixedNodeNetworkPolicyChains. 
+ if chain == config.NodeNetworkPolicyIngressRulesChain || chain == config.NodeNetworkPolicyEgressRulesChain { + continue + } + writeLine(iptablesData, iptables.MakeChainLine(chain)) + } writeLine(iptablesData, []string{ "-A", antreaForwardChain, "-m", "comment", "--comment", `"Antrea: accept packets from local Pods"`, @@ -694,6 +791,14 @@ func (c *Client) restoreIptablesData(podCIDR *net.IPNet, "-j", iptables.AcceptTarget, }...) } + for _, rule := range fixedNodeNetWorkPolicyIPTables { + writeLine(iptablesData, rule) + } + for _, rules := range nodeNetWorkPolicyIPTables { + for _, rule := range rules { + writeLine(iptablesData, rule) + } + } writeLine(iptablesData, "COMMIT") writeLine(iptablesData, "*nat") @@ -863,6 +968,97 @@ func (c *Client) initServiceIPRoutes() error { return nil } +func (c *Client) initNodeNetworkPolicy() { + c.fixedNodeNetworkPolicyChains = []string{ + antreaInputChain, + antreaOutputChain, + privilegedNodeNetworkPolicyIngressRulesChain, + privilegedNodeNetworkPolicyEgressRulesChain, + config.NodeNetworkPolicyIngressRulesChain, + config.NodeNetworkPolicyEgressRulesChain, + } + c.buildFixedNodeNetworkPolicyIPTablesRules(c.networkConfig.IPv4Enabled, c.networkConfig.IPv6Enabled) +} + +func (c *Client) buildFixedNodeNetworkPolicyIPTablesRules(ipv4Enabled, ipv6Enabled bool) { + var ipProtocols []iptables.Protocol + if ipv4Enabled { + ipProtocols = append(ipProtocols, iptables.ProtocolIPv4) + } + if ipv6Enabled { + ipProtocols = append(ipProtocols, iptables.ProtocolIPv6) + } + + antreaInputChainRules := []string{ + iptables.NewRuleBuilder(antreaInputChain). + SetComment("Antrea: jump to privileged ingress NodeNetworkPolicy rules"). + SetTarget(privilegedNodeNetworkPolicyIngressRulesChain). + Done(). + GetRule(), + iptables.NewRuleBuilder(antreaInputChain). + SetComment("Antrea: jump to ingress NodeNetworkPolicy rules"). + SetTarget(config.NodeNetworkPolicyIngressRulesChain). + Done(). 
+ GetRule(), + } + antreaOutputChainRules := []string{ + iptables.NewRuleBuilder(antreaOutputChain). + SetComment("Antrea: jump to privileged egress NodeNetworkPolicy rules"). + SetTarget(privilegedNodeNetworkPolicyEgressRulesChain). + Done(). + GetRule(), + iptables.NewRuleBuilder(antreaOutputChain). + SetComment("Antrea: jump to egress NodeNetworkPolicy rules"). + SetTarget(config.NodeNetworkPolicyEgressRulesChain). + Done(). + GetRule(), + } + privilegedIngressChainRules := []string{ + iptables.NewRuleBuilder(privilegedNodeNetworkPolicyIngressRulesChain). + MatchEstablishedOrRelated(). + SetComment("Antrea: allow ingress established or related packets"). + SetTarget(iptables.AcceptTarget). + Done(). + GetRule(), + iptables.NewRuleBuilder(privilegedNodeNetworkPolicyIngressRulesChain). + MatchInputInterface("lo"). + SetComment("Antrea: allow ingress packets from loopback"). + SetTarget(iptables.AcceptTarget). + Done(). + GetRule(), + } + privilegedEgressChainRules := []string{ + iptables.NewRuleBuilder(privilegedNodeNetworkPolicyEgressRulesChain). + MatchEstablishedOrRelated(). + SetComment("Antrea: allow egress established or related packets"). + SetTarget(iptables.AcceptTarget). + Done(). + GetRule(), + iptables.NewRuleBuilder(privilegedNodeNetworkPolicyEgressRulesChain). + MatchOutputInterface("lo"). + SetComment("Antrea: allow egress packets to loopback"). + SetTarget(iptables.AcceptTarget). + Done(). + GetRule(), + } + for _, ipProtocol := range ipProtocols { + if ipProtocol == iptables.ProtocolIPv6 { + c.fixedNodeNetworkPolicyIPTablesIPv6 = append(c.fixedNodeNetworkPolicyIPTablesIPv6, antreaInputChainRules...) + c.fixedNodeNetworkPolicyIPTablesIPv6 = append(c.fixedNodeNetworkPolicyIPTablesIPv6, privilegedIngressChainRules...) + + c.fixedNodeNetworkPolicyIPTablesIPv6 = append(c.fixedNodeNetworkPolicyIPTablesIPv6, antreaOutputChainRules...) + c.fixedNodeNetworkPolicyIPTablesIPv6 = append(c.fixedNodeNetworkPolicyIPTablesIPv6, privilegedEgressChainRules...) 
+ } + if ipProtocol == iptables.ProtocolIPv4 { + c.fixedNodeNetworkPolicyIPTablesIPv4 = append(c.fixedNodeNetworkPolicyIPTablesIPv4, antreaInputChainRules...) + c.fixedNodeNetworkPolicyIPTablesIPv4 = append(c.fixedNodeNetworkPolicyIPTablesIPv4, privilegedIngressChainRules...) + + c.fixedNodeNetworkPolicyIPTablesIPv4 = append(c.fixedNodeNetworkPolicyIPTablesIPv4, antreaOutputChainRules...) + c.fixedNodeNetworkPolicyIPTablesIPv4 = append(c.fixedNodeNetworkPolicyIPTablesIPv4, privilegedEgressChainRules...) + } + } +} + // Reconcile removes orphaned podCIDRs from ipset and removes routes to orphaned podCIDRs // based on the desired podCIDRs. func (c *Client) Reconcile(podCIDRs []string) error { @@ -1700,3 +1896,102 @@ func generateNeigh(ip net.IP, linkIndex int) *netlink.Neigh { HardwareAddr: globalVMAC, } } + +func (c *Client) AddOrUpdateNodeNetworkPolicyIPSet(ipsetName string, ipsetEntries sets.Set[string], isIPv6 bool) error { + var prevIPSetEntries sets.Set[string] + if isIPv6 { + if value, ok := c.nodeNetworkPolicyIPSetsIPv6.Load(ipsetName); ok { + prevIPSetEntries = value.(sets.Set[string]) + } + } else { + if value, ok := c.nodeNetworkPolicyIPSetsIPv4.Load(ipsetName); ok { + prevIPSetEntries = value.(sets.Set[string]) + } + } + ipsetEntriesToAdd := ipsetEntries.Difference(prevIPSetEntries) + ipsetEntriesToDelete := prevIPSetEntries.Difference(ipsetEntries) + + if err := c.ipset.CreateIPSet(ipsetName, ipset.HashNet, isIPv6); err != nil { + return err + } + for ipsetEntry := range ipsetEntriesToAdd { + if err := c.ipset.AddEntry(ipsetName, ipsetEntry); err != nil { + return err + } + } + for ipsetEntry := range ipsetEntriesToDelete { + if err := c.ipset.DelEntry(ipsetName, ipsetEntry); err != nil { + return err + } + } + + if isIPv6 { + c.nodeNetworkPolicyIPSetsIPv6.Store(ipsetName, ipsetEntries) + } else { + c.nodeNetworkPolicyIPSetsIPv4.Store(ipsetName, ipsetEntries) + } + return nil +} + +func (c *Client) DeleteNodeNetworkPolicyIPSet(ipsetName string, isIPv6 
bool) error { + if err := c.ipset.DestroyIPSet(ipsetName); err != nil { + return err + } + if isIPv6 { + c.nodeNetworkPolicyIPSetsIPv6.Delete(ipsetName) + } else { + c.nodeNetworkPolicyIPSetsIPv4.Delete(ipsetName) + } + return nil +} + +func (c *Client) AddOrUpdateNodeNetworkPolicyIPTables(iptablesChains []string, iptablesRules [][]string, isIPv6 bool) error { + iptablesData := bytes.NewBuffer(nil) + + writeLine(iptablesData, "*filter") + for _, iptablesChain := range iptablesChains { + writeLine(iptablesData, iptables.MakeChainLine(iptablesChain)) + } + for _, rules := range iptablesRules { + for _, rule := range rules { + writeLine(iptablesData, rule) + } + } + writeLine(iptablesData, "COMMIT") + + if err := c.iptables.Restore(iptablesData.String(), false, isIPv6); err != nil { + return err + } + + for index, iptablesChain := range iptablesChains { + if isIPv6 { + c.nodeNetworkPolicyIPTablesIPv6.Store(iptablesChain, iptablesRules[index]) + } else { + c.nodeNetworkPolicyIPTablesIPv4.Store(iptablesChain, iptablesRules[index]) + } + } + return nil +} + +func (c *Client) DeleteNodeNetworkPolicyIPTables(iptablesChains []string, isIPv6 bool) error { + ipProtocol := iptables.ProtocolIPv4 + if isIPv6 { + ipProtocol = iptables.ProtocolIPv6 + } + + for _, iptablesChain := range iptablesChains { + if err := c.iptables.DeleteChain(ipProtocol, iptables.FilterTable, iptablesChain); err != nil { + return err + } + } + + for _, iptablesChain := range iptablesChains { + if isIPv6 { + c.nodeNetworkPolicyIPTablesIPv6.Delete(iptablesChain) + } else { + c.nodeNetworkPolicyIPTablesIPv4.Delete(iptablesChain) + } + } + + return nil +} diff --git a/pkg/agent/route/route_linux_test.go b/pkg/agent/route/route_linux_test.go index e9f14ade813..4d7b9bae901 100644 --- a/pkg/agent/route/route_linux_test.go +++ b/pkg/agent/route/route_linux_test.go @@ -23,6 +23,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/vishvananda/netlink" "go.uber.org/mock/gomock" + 
"k8s.io/apimachinery/pkg/util/sets" "antrea.io/antrea/pkg/agent/config" servicecidrtest "antrea.io/antrea/pkg/agent/servicecidr/testing" @@ -32,13 +33,32 @@ import ( "antrea.io/antrea/pkg/agent/util/iptables" iptablestest "antrea.io/antrea/pkg/agent/util/iptables/testing" netlinktest "antrea.io/antrea/pkg/agent/util/netlink/testing" + agentconfig "antrea.io/antrea/pkg/config/agent" "antrea.io/antrea/pkg/ovs/openflow" "antrea.io/antrea/pkg/ovs/ovsconfig" "antrea.io/antrea/pkg/util/ip" ) var ( - nodeConfig = &config.NodeConfig{GatewayConfig: &config.GatewayConfig{LinkIndex: 10}} + nodeConfig = &config.NodeConfig{GatewayConfig: &config.GatewayConfig{LinkIndex: 10}} + nodeNetworkPolicyConfig = &agentconfig.NodeNetworkPolicyConfig{ + PrivilegedRules: []agentconfig.PrivilegedRule{ + { + Direction: "ingress", + Protocol: "tcp", + IPFamilies: "ipv4", + CIDR: "192.168.1.0/24", + Ports: []string{"22"}, + Description: "allow ingress IPv4 SSH traffic", + }, + { + Direction: "egress", + Protocol: "", + Ports: []string{"53"}, + Description: "allow egress DNS traffic", + }, + }, + } externalIPv4Addr1 = "1.1.1.1" externalIPv4Addr2 = "1.1.1.2" @@ -235,22 +255,26 @@ func TestSyncIPSet(t *testing.T) { func TestSyncIPTables(t *testing.T) { tests := []struct { - name string - isCloudEKS bool - proxyAll bool - multicastEnabled bool - connectUplinkToBridge bool - networkConfig *config.NetworkConfig - nodeConfig *config.NodeConfig - nodePortsIPv4 []string - nodePortsIPv6 []string - markToSNATIP map[uint32]string - expectedCalls func(iptables *iptablestest.MockInterfaceMockRecorder) + name string + isCloudEKS bool + proxyAll bool + multicastEnabled bool + connectUplinkToBridge bool + nodeNetworkPolicyEnabled bool + nodeNetworkPolicyConfig *agentconfig.NodeNetworkPolicyConfig + networkConfig *config.NetworkConfig + nodeConfig *config.NodeConfig + nodePortsIPv4 []string + nodePortsIPv6 []string + markToSNATIP map[uint32]string + expectedCalls func(iptables 
*iptablestest.MockInterfaceMockRecorder) }{ { - name: "encap,egress=true,multicastEnabled=true,proxyAll=true", - proxyAll: true, - multicastEnabled: true, + name: "encap,egress=true,multicastEnabled=true,proxyAll=true,nodeNetworkPolicy=true", + proxyAll: true, + multicastEnabled: true, + nodeNetworkPolicyEnabled: true, + nodeNetworkPolicyConfig: nodeNetworkPolicyConfig, networkConfig: &config.NetworkConfig{ TrafficEncapMode: config.TrafficEncapModeEncap, TunnelType: ovsconfig.GeneveTunnel, @@ -285,6 +309,10 @@ func TestSyncIPTables(t *testing.T) { mockIPTables.AppendRule(iptables.ProtocolDual, iptables.NATTable, iptables.PreRoutingChain, []string{"-j", antreaPreRoutingChain, "-m", "comment", "--comment", "Antrea: jump to Antrea prerouting rules"}) mockIPTables.EnsureChain(iptables.ProtocolDual, iptables.NATTable, antreaOutputChain) mockIPTables.AppendRule(iptables.ProtocolDual, iptables.NATTable, iptables.OutputChain, []string{"-j", antreaOutputChain, "-m", "comment", "--comment", "Antrea: jump to Antrea output rules"}) + mockIPTables.EnsureChain(iptables.ProtocolDual, iptables.FilterTable, antreaInputChain) + mockIPTables.AppendRule(iptables.ProtocolDual, iptables.FilterTable, iptables.InputChain, []string{"-j", antreaInputChain, "-m", "comment", "--comment", "Antrea: jump to Antrea input rules"}) + mockIPTables.EnsureChain(iptables.ProtocolDual, iptables.FilterTable, antreaOutputChain) + mockIPTables.AppendRule(iptables.ProtocolDual, iptables.FilterTable, iptables.OutputChain, []string{"-j", antreaOutputChain, "-m", "comment", "--comment", "Antrea: jump to Antrea output rules"}) mockIPTables.Restore(`*raw :ANTREA-PREROUTING - [0:0] :ANTREA-OUTPUT - [0:0] @@ -299,8 +327,23 @@ COMMIT COMMIT *filter :ANTREA-FORWARD - [0:0] +:ANTREA-INPUT - [0:0] +:ANTREA-OUTPUT - [0:0] +:ANTREA-POL-PRI-INGRESS-RULES - [0:0] +:ANTREA-POL-PRI-EGRESS-RULES - [0:0] +:ANTREA-POL-INGRESS-RULES - [0:0] +:ANTREA-POL-EGRESS-RULES - [0:0] -A ANTREA-FORWARD -m comment --comment "Antrea: accept 
packets from local Pods" -i antrea-gw0 -j ACCEPT -A ANTREA-FORWARD -m comment --comment "Antrea: accept packets to local Pods" -o antrea-gw0 -j ACCEPT +-A ANTREA-INPUT -m comment --comment "Antrea: jump to privileged ingress NodeNetworkPolicy rules" -j ANTREA-POL-PRI-INGRESS-RULES +-A ANTREA-INPUT -m comment --comment "Antrea: jump to ingress NodeNetworkPolicy rules" -j ANTREA-POL-INGRESS-RULES +-A ANTREA-POL-PRI-INGRESS-RULES -m conntrack --ctstate ESTABLISHED,RELATED -m comment --comment "Antrea: allow ingress established or related packets" -j ACCEPT +-A ANTREA-POL-PRI-INGRESS-RULES -i lo -m comment --comment "Antrea: allow ingress packets from loopback" -j ACCEPT +-A ANTREA-OUTPUT -m comment --comment "Antrea: jump to privileged egress NodeNetworkPolicy rules" -j ANTREA-POL-PRI-EGRESS-RULES +-A ANTREA-OUTPUT -m comment --comment "Antrea: jump to egress NodeNetworkPolicy rules" -j ANTREA-POL-EGRESS-RULES +-A ANTREA-POL-PRI-EGRESS-RULES -m conntrack --ctstate ESTABLISHED,RELATED -m comment --comment "Antrea: allow egress established or related packets" -j ACCEPT +-A ANTREA-POL-PRI-EGRESS-RULES -o lo -m comment --comment "Antrea: allow egress packets to loopback" -j ACCEPT +-A ANTREA-POL-INGRESS-RULES -j ACCEPT -m comment --comment "mock rule" COMMIT *nat :ANTREA-PREROUTING - [0:0] @@ -328,8 +371,23 @@ COMMIT COMMIT *filter :ANTREA-FORWARD - [0:0] +:ANTREA-INPUT - [0:0] +:ANTREA-OUTPUT - [0:0] +:ANTREA-POL-PRI-INGRESS-RULES - [0:0] +:ANTREA-POL-PRI-EGRESS-RULES - [0:0] +:ANTREA-POL-INGRESS-RULES - [0:0] +:ANTREA-POL-EGRESS-RULES - [0:0] -A ANTREA-FORWARD -m comment --comment "Antrea: accept packets from local Pods" -i antrea-gw0 -j ACCEPT -A ANTREA-FORWARD -m comment --comment "Antrea: accept packets to local Pods" -o antrea-gw0 -j ACCEPT +-A ANTREA-INPUT -m comment --comment "Antrea: jump to privileged ingress NodeNetworkPolicy rules" -j ANTREA-POL-PRI-INGRESS-RULES +-A ANTREA-INPUT -m comment --comment "Antrea: jump to ingress NodeNetworkPolicy rules" -j 
ANTREA-POL-INGRESS-RULES +-A ANTREA-POL-PRI-INGRESS-RULES -m conntrack --ctstate ESTABLISHED,RELATED -m comment --comment "Antrea: allow ingress established or related packets" -j ACCEPT +-A ANTREA-POL-PRI-INGRESS-RULES -i lo -m comment --comment "Antrea: allow ingress packets from loopback" -j ACCEPT +-A ANTREA-OUTPUT -m comment --comment "Antrea: jump to privileged egress NodeNetworkPolicy rules" -j ANTREA-POL-PRI-EGRESS-RULES +-A ANTREA-OUTPUT -m comment --comment "Antrea: jump to egress NodeNetworkPolicy rules" -j ANTREA-POL-EGRESS-RULES +-A ANTREA-POL-PRI-EGRESS-RULES -m conntrack --ctstate ESTABLISHED,RELATED -m comment --comment "Antrea: allow egress established or related packets" -j ACCEPT +-A ANTREA-POL-PRI-EGRESS-RULES -o lo -m comment --comment "Antrea: allow egress packets to loopback" -j ACCEPT +-A ANTREA-POL-INGRESS-RULES -j ACCEPT -m comment --comment "mock rule" COMMIT *nat :ANTREA-PREROUTING - [0:0] @@ -485,17 +543,24 @@ COMMIT ctrl := gomock.NewController(t) mockIPTables := iptablestest.NewMockInterface(ctrl) c := &Client{iptables: mockIPTables, - networkConfig: tt.networkConfig, - nodeConfig: tt.nodeConfig, - proxyAll: tt.proxyAll, - isCloudEKS: tt.isCloudEKS, - multicastEnabled: tt.multicastEnabled, - connectUplinkToBridge: tt.connectUplinkToBridge, - markToSNATIP: sync.Map{}, + networkConfig: tt.networkConfig, + nodeConfig: tt.nodeConfig, + proxyAll: tt.proxyAll, + isCloudEKS: tt.isCloudEKS, + multicastEnabled: tt.multicastEnabled, + connectUplinkToBridge: tt.connectUplinkToBridge, + nodeNetworkPolicyEnabled: tt.nodeNetworkPolicyEnabled, } for mark, snatIP := range tt.markToSNATIP { c.markToSNATIP.Store(mark, net.ParseIP(snatIP)) } + if tt.nodeNetworkPolicyEnabled { + c.initNodeNetworkPolicy() + c.nodeNetworkPolicyIPTablesIPv4.Store(config.NodeNetworkPolicyIngressRulesChain, []string{ + `-A ANTREA-POL-INGRESS-RULES -j ACCEPT -m comment --comment "mock rule"`}) + c.nodeNetworkPolicyIPTablesIPv6.Store(config.NodeNetworkPolicyIngressRulesChain, 
[]string{ + `-A ANTREA-POL-INGRESS-RULES -j ACCEPT -m comment --comment "mock rule"`}) + } tt.expectedCalls(mockIPTables.EXPECT()) assert.NoError(t, c.syncIPTables()) }) @@ -1721,3 +1786,225 @@ func TestAddAndDeleteNodeIP(t *testing.T) { }) } } + +func TestAddAndDeleteNodeNetworkPolicyIPSet(t *testing.T) { + ipv4SetName := "TEST-IPSET-4" + ipv4Net1 := "1.1.1.1/32" + ipv4Net2 := "2.2.2.2/32" + ipv4Net3 := "3.3.3.3/32" + ipv6SetName := "TEST-IPSET-6" + ipv6Net1 := "fec0::1111/128" + ipv6Net2 := "fec0::2222/128" + ipv6Net3 := "fec0::3333/128" + + tests := []struct { + name string + ipsetName string + prevIPSetEntries sets.Set[string] + curIPSetEntries sets.Set[string] + isIPv6 bool + expectedCalls func(mockIPSet *ipsettest.MockInterfaceMockRecorder) + }{ + { + name: "IPv4, add an ipset and delete it", + ipsetName: ipv4SetName, + curIPSetEntries: sets.New[string](ipv4Net1, ipv4Net3), + isIPv6: false, + expectedCalls: func(mockIPSet *ipsettest.MockInterfaceMockRecorder) { + mockIPSet.CreateIPSet(ipv4SetName, ipset.HashNet, false).Times(1) + mockIPSet.AddEntry(ipv4SetName, ipv4Net1).Times(1) + mockIPSet.AddEntry(ipv4SetName, ipv4Net3).Times(1) + mockIPSet.DestroyIPSet(ipv4SetName).Times(1) + }, + }, + { + name: "IPv4, update an ipset and delete it", + ipsetName: ipv4SetName, + prevIPSetEntries: sets.New[string](ipv4Net1, ipv4Net2), + curIPSetEntries: sets.New[string](ipv4Net1, ipv4Net3), + isIPv6: false, + expectedCalls: func(mockIPSet *ipsettest.MockInterfaceMockRecorder) { + mockIPSet.CreateIPSet(ipv4SetName, ipset.HashNet, false).Times(1) + mockIPSet.AddEntry(ipv4SetName, ipv4Net3).Times(1) + mockIPSet.DelEntry(ipv4SetName, ipv4Net2).Times(1) + mockIPSet.DestroyIPSet(ipv4SetName).Times(1) + }, + }, + { + name: "IPv6, add an ipset and delete it", + ipsetName: ipv6SetName, + curIPSetEntries: sets.New[string](ipv6Net1, ipv6Net3), + isIPv6: true, + expectedCalls: func(mockIPSet *ipsettest.MockInterfaceMockRecorder) { + mockIPSet.CreateIPSet(ipv6SetName, ipset.HashNet, 
true).Times(1) + mockIPSet.AddEntry(ipv6SetName, ipv6Net1).Times(1) + mockIPSet.AddEntry(ipv6SetName, ipv6Net3).Times(1) + mockIPSet.DestroyIPSet(ipv6SetName).Times(1) + }, + }, + { + name: "IPv6, update an ipset and delete it", + ipsetName: ipv6SetName, + prevIPSetEntries: sets.New[string](ipv6Net1, ipv6Net2), + curIPSetEntries: sets.New[string](ipv6Net1, ipv6Net3), + isIPv6: true, + expectedCalls: func(mockIPSet *ipsettest.MockInterfaceMockRecorder) { + mockIPSet.CreateIPSet(ipv6SetName, ipset.HashNet, true).Times(1) + mockIPSet.AddEntry(ipv6SetName, ipv6Net3).Times(1) + mockIPSet.DelEntry(ipv6SetName, ipv6Net2).Times(1) + mockIPSet.DestroyIPSet(ipv6SetName).Times(1) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + mockIPSet := ipsettest.NewMockInterface(ctrl) + c := &Client{ipset: mockIPSet} + tt.expectedCalls(mockIPSet.EXPECT()) + + if tt.prevIPSetEntries != nil { + if tt.isIPv6 { + c.nodeNetworkPolicyIPSetsIPv6.Store(tt.ipsetName, tt.prevIPSetEntries) + } else { + c.nodeNetworkPolicyIPSetsIPv4.Store(tt.ipsetName, tt.prevIPSetEntries) + } + } + + assert.NoError(t, c.AddOrUpdateNodeNetworkPolicyIPSet(tt.ipsetName, tt.curIPSetEntries, tt.isIPv6)) + var exists bool + if tt.isIPv6 { + _, exists = c.nodeNetworkPolicyIPSetsIPv6.Load(tt.ipsetName) + } else { + _, exists = c.nodeNetworkPolicyIPSetsIPv4.Load(tt.ipsetName) + } + assert.True(t, exists) + + assert.NoError(t, c.DeleteNodeNetworkPolicyIPSet(tt.ipsetName, tt.isIPv6)) + if tt.isIPv6 { + _, exists = c.nodeNetworkPolicyIPSetsIPv6.Load(tt.ipsetName) + } else { + _, exists = c.nodeNetworkPolicyIPSetsIPv4.Load(tt.ipsetName) + } + assert.False(t, exists) + }) + } +} + +func TestAddAndDeleteNodeNetworkPolicyIPTables(t *testing.T) { + ingressChain := config.NodeNetworkPolicyIngressRulesChain + ingressRules := []string{ + "-A ANTREA-POL-INGRESS-RULES -p tcp --dport 80 -j ACCEPT", + } + svcChain := "ANTREA-POL-12619C0214FB0845" + svcRules := []string{ + 
"-A ANTREA-POL-12619C0214FB0845 -p tcp --dport 80 -j ACCEPT", + "-A ANTREA-POL-12619C0214FB0845 -p tcp --dport 443 -j ACCEPT", + } + + tests := []struct { + name string + isIPv6 bool + expectedCalls func(mockIPTables *iptablestest.MockInterfaceMockRecorder) + expectedRules map[string][]string + }{ + { + name: "IPv4", + isIPv6: false, + expectedCalls: func(mockIPTables *iptablestest.MockInterfaceMockRecorder) { + mockIPTables.Restore(`*filter +:ANTREA-POL-INGRESS-RULES - [0:0] +-A ANTREA-POL-INGRESS-RULES -p tcp --dport 80 -j ACCEPT +COMMIT +`, false, false) + mockIPTables.Restore(`*filter +:ANTREA-POL-12619C0214FB0845 - [0:0] +-A ANTREA-POL-12619C0214FB0845 -p tcp --dport 80 -j ACCEPT +-A ANTREA-POL-12619C0214FB0845 -p tcp --dport 443 -j ACCEPT +COMMIT +`, false, false) + mockIPTables.DeleteChain(iptables.ProtocolIPv4, iptables.FilterTable, svcChain).Times(1) + mockIPTables.Restore(`*filter +:ANTREA-POL-INGRESS-RULES - [0:0] +COMMIT +`, false, false) + }, + }, + + { + name: "IPv6", + isIPv6: true, + expectedCalls: func(mockIPTables *iptablestest.MockInterfaceMockRecorder) { + mockIPTables.Restore(`*filter +:ANTREA-POL-INGRESS-RULES - [0:0] +-A ANTREA-POL-INGRESS-RULES -p tcp --dport 80 -j ACCEPT +COMMIT +`, false, true) + mockIPTables.Restore(`*filter +:ANTREA-POL-12619C0214FB0845 - [0:0] +-A ANTREA-POL-12619C0214FB0845 -p tcp --dport 80 -j ACCEPT +-A ANTREA-POL-12619C0214FB0845 -p tcp --dport 443 -j ACCEPT +COMMIT +`, false, true) + mockIPTables.DeleteChain(iptables.ProtocolIPv6, iptables.FilterTable, svcChain).Times(1) + mockIPTables.Restore(`*filter +:ANTREA-POL-INGRESS-RULES - [0:0] +COMMIT +`, false, true) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + mockIPTables := iptablestest.NewMockInterface(ctrl) + c := &Client{iptables: mockIPTables, + networkConfig: &config.NetworkConfig{ + IPv4Enabled: true, + IPv6Enabled: true, + }, + } + c.initNodeNetworkPolicy() + + 
tt.expectedCalls(mockIPTables.EXPECT()) + + assert.NoError(t, c.AddOrUpdateNodeNetworkPolicyIPTables([]string{ingressChain}, [][]string{ingressRules}, tt.isIPv6)) + var gotRules any + var exists bool + if tt.isIPv6 { + gotRules, exists = c.nodeNetworkPolicyIPTablesIPv6.Load(ingressChain) + } else { + gotRules, exists = c.nodeNetworkPolicyIPTablesIPv4.Load(ingressChain) + } + assert.True(t, exists) + assert.EqualValues(t, ingressRules, gotRules) + + assert.NoError(t, c.AddOrUpdateNodeNetworkPolicyIPTables([]string{svcChain}, [][]string{svcRules}, tt.isIPv6)) + if tt.isIPv6 { + gotRules, exists = c.nodeNetworkPolicyIPTablesIPv6.Load(svcChain) + } else { + gotRules, exists = c.nodeNetworkPolicyIPTablesIPv4.Load(svcChain) + } + assert.True(t, exists) + assert.EqualValues(t, svcRules, gotRules) + + assert.NoError(t, c.DeleteNodeNetworkPolicyIPTables([]string{svcChain}, tt.isIPv6)) + if tt.isIPv6 { + _, exists = c.nodeNetworkPolicyIPTablesIPv6.Load(svcChain) + } else { + _, exists = c.nodeNetworkPolicyIPTablesIPv4.Load(svcChain) + } + assert.False(t, exists) + + assert.NoError(t, c.AddOrUpdateNodeNetworkPolicyIPTables([]string{ingressChain}, [][]string{nil}, tt.isIPv6)) + if tt.isIPv6 { + gotRules, exists = c.nodeNetworkPolicyIPTablesIPv6.Load(ingressChain) + } else { + gotRules, exists = c.nodeNetworkPolicyIPTablesIPv4.Load(ingressChain) + } + assert.True(t, exists) + assert.EqualValues(t, []string(nil), gotRules) + }) + } +} diff --git a/pkg/agent/route/route_windows.go b/pkg/agent/route/route_windows.go index 7a67d6475b7..487df061708 100644 --- a/pkg/agent/route/route_windows.go +++ b/pkg/agent/route/route_windows.go @@ -71,7 +71,13 @@ type Client struct { } // NewClient returns a route client. 
-func NewClient(networkConfig *config.NetworkConfig, noSNAT, proxyAll, connectUplinkToBridge, multicastEnabled bool, serviceCIDRProvider servicecidr.Interface) (*Client, error) { +func NewClient(networkConfig *config.NetworkConfig, + noSNAT bool, + proxyAll bool, + connectUplinkToBridge bool, + nodeNetworkPolicyEnabled bool, + multicastEnabled bool, + serviceCIDRProvider servicecidr.Interface) (*Client, error) { return &Client{ networkConfig: networkConfig, nodeRoutes: &sync.Map{}, @@ -573,3 +579,19 @@ func (c *Client) DeleteRouteForLink(dstCIDR *net.IPNet, linkIndex int) error { func (c *Client) ClearConntrackEntryForService(svcIP net.IP, svcPort uint16, endpointIP net.IP, protocol binding.Protocol) error { return errors.New("ClearConntrackEntryForService is not implemented on Windows") } + +func (c *Client) AddOrUpdateNodeNetworkPolicyIPSet(ipsetName string, ipsetEntries sets.Set[string], isIPv6 bool) error { + return errors.New("AddOrUpdateNodeNetworkPolicyIPSet is not implemented on Windows") +} + +func (c *Client) DeleteNodeNetworkPolicyIPSet(ipsetName string, isIPv6 bool) error { + return errors.New("DeleteNodeNetworkPolicyIPSet is not implemented on Windows") +} + +func (c *Client) AddOrUpdateNodeNetworkPolicyIPTables(iptablesChains []string, iptablesRules [][]string, isIPv6 bool) error { + return errors.New("AddOrUpdateNodeNetworkPolicyIPTables is not implemented on Windows") +} + +func (c *Client) DeleteNodeNetworkPolicyIPTables(iptablesChains []string, isIPv6 bool) error { + return errors.New("DeleteNodeNetworkPolicyIPTables is not implemented on Windows") +} diff --git a/pkg/agent/route/route_windows_test.go b/pkg/agent/route/route_windows_test.go index c29afd469dc..516e019838b 100644 --- a/pkg/agent/route/route_windows_test.go +++ b/pkg/agent/route/route_windows_test.go @@ -61,7 +61,7 @@ func TestRouteOperation(t *testing.T) { gwIP2 := net.ParseIP("192.168.3.1") _, destCIDR2, _ := net.ParseCIDR(dest2) - client, err := NewClient(&config.NetworkConfig{}, 
true, false, false, false, nil) + client, err := NewClient(&config.NetworkConfig{}, true, false, false, false, false, nil) require.Nil(t, err) called := false diff --git a/pkg/agent/route/testing/mock_route.go b/pkg/agent/route/testing/mock_route.go index c6a3e39921f..2f1999fd0e5 100644 --- a/pkg/agent/route/testing/mock_route.go +++ b/pkg/agent/route/testing/mock_route.go @@ -30,6 +30,7 @@ import ( config "antrea.io/antrea/pkg/agent/config" openflow "antrea.io/antrea/pkg/ovs/openflow" gomock "go.uber.org/mock/gomock" + sets "k8s.io/apimachinery/pkg/util/sets" ) // MockInterface is a mock of Interface interface. @@ -97,6 +98,34 @@ func (mr *MockInterfaceMockRecorder) AddNodePort(arg0, arg1, arg2 any) *gomock.C return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddNodePort", reflect.TypeOf((*MockInterface)(nil).AddNodePort), arg0, arg1, arg2) } +// AddOrUpdateNodeNetworkPolicyIPSet mocks base method. +func (m *MockInterface) AddOrUpdateNodeNetworkPolicyIPSet(arg0 string, arg1 sets.Set[string], arg2 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddOrUpdateNodeNetworkPolicyIPSet", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddOrUpdateNodeNetworkPolicyIPSet indicates an expected call of AddOrUpdateNodeNetworkPolicyIPSet. +func (mr *MockInterfaceMockRecorder) AddOrUpdateNodeNetworkPolicyIPSet(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddOrUpdateNodeNetworkPolicyIPSet", reflect.TypeOf((*MockInterface)(nil).AddOrUpdateNodeNetworkPolicyIPSet), arg0, arg1, arg2) +} + +// AddOrUpdateNodeNetworkPolicyIPTables mocks base method. 
+func (m *MockInterface) AddOrUpdateNodeNetworkPolicyIPTables(arg0 []string, arg1 [][]string, arg2 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddOrUpdateNodeNetworkPolicyIPTables", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddOrUpdateNodeNetworkPolicyIPTables indicates an expected call of AddOrUpdateNodeNetworkPolicyIPTables. +func (mr *MockInterfaceMockRecorder) AddOrUpdateNodeNetworkPolicyIPTables(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddOrUpdateNodeNetworkPolicyIPTables", reflect.TypeOf((*MockInterface)(nil).AddOrUpdateNodeNetworkPolicyIPTables), arg0, arg1, arg2) +} + // AddRouteForLink mocks base method. func (m *MockInterface) AddRouteForLink(arg0 *net.IPNet, arg1 int) error { m.ctrl.T.Helper() @@ -181,6 +210,34 @@ func (mr *MockInterfaceMockRecorder) DeleteLocalAntreaFlexibleIPAMPodRule(arg0 a return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteLocalAntreaFlexibleIPAMPodRule", reflect.TypeOf((*MockInterface)(nil).DeleteLocalAntreaFlexibleIPAMPodRule), arg0) } +// DeleteNodeNetworkPolicyIPSet mocks base method. +func (m *MockInterface) DeleteNodeNetworkPolicyIPSet(arg0 string, arg1 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteNodeNetworkPolicyIPSet", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteNodeNetworkPolicyIPSet indicates an expected call of DeleteNodeNetworkPolicyIPSet. +func (mr *MockInterfaceMockRecorder) DeleteNodeNetworkPolicyIPSet(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNodeNetworkPolicyIPSet", reflect.TypeOf((*MockInterface)(nil).DeleteNodeNetworkPolicyIPSet), arg0, arg1) +} + +// DeleteNodeNetworkPolicyIPTables mocks base method. 
+func (m *MockInterface) DeleteNodeNetworkPolicyIPTables(arg0 []string, arg1 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteNodeNetworkPolicyIPTables", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteNodeNetworkPolicyIPTables indicates an expected call of DeleteNodeNetworkPolicyIPTables. +func (mr *MockInterfaceMockRecorder) DeleteNodeNetworkPolicyIPTables(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNodeNetworkPolicyIPTables", reflect.TypeOf((*MockInterface)(nil).DeleteNodeNetworkPolicyIPTables), arg0, arg1) +} + // DeleteNodePort mocks base method. func (m *MockInterface) DeleteNodePort(arg0 []net.IP, arg1 uint16, arg2 openflow.Protocol) error { m.ctrl.T.Helper() diff --git a/pkg/agent/types/networkpolicy.go b/pkg/agent/types/networkpolicy.go index 7722941cfcb..22aa1c4a68c 100644 --- a/pkg/agent/types/networkpolicy.go +++ b/pkg/agent/types/networkpolicy.go @@ -15,6 +15,8 @@ package types import ( + "k8s.io/apimachinery/pkg/util/sets" + "antrea.io/antrea/pkg/apis/controlplane/v1beta2" secv1beta1 "antrea.io/antrea/pkg/apis/crd/v1beta1" binding "antrea.io/antrea/pkg/ovs/openflow" @@ -75,6 +77,18 @@ type Address interface { GetValue() interface{} } +type NodePolicyRule struct { + IPSet string + IPSetMembers sets.Set[string] + IPNet string + Priority *Priority + ServiceIPTChain string + ServiceIPTRules []string + CoreIPTChain string + CoreIPTRule string + IsIPv6 bool +} + // PolicyRule groups configurations to set up conjunctive match for egress/ingress policy rules. 
type PolicyRule struct { Direction v1beta2.Direction diff --git a/pkg/agent/util/iptables/builder.go b/pkg/agent/util/iptables/builder.go new file mode 100644 index 00000000000..31946c989ad --- /dev/null +++ b/pkg/agent/util/iptables/builder.go @@ -0,0 +1,212 @@ +//go:build !windows +// +build !windows + +// Copyright 2024 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package iptables + +import ( + "fmt" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/util/intstr" +) + +type iptablesRule struct { + chain string + + specs *strings.Builder +} + +type iptablesRuleBuilder struct { + iptablesRule +} + +func NewRuleBuilder(chain string) IPTablesRuleBuilder { + builder := &iptablesRuleBuilder{ + iptablesRule{ + chain: chain, + specs: &strings.Builder{}, + }, + } + return builder +} + +func (b *iptablesRuleBuilder) writeSpec(spec string) { + b.specs.WriteString(spec) + b.specs.WriteByte(' ') +} + +func (b *iptablesRuleBuilder) MatchCIDRSrc(cidr string) IPTablesRuleBuilder { + if cidr == "" || cidr == "0.0.0.0/0" || cidr == "::/0" { + return b + } + matchStr := fmt.Sprintf("-s %s", cidr) + b.writeSpec(matchStr) + return b +} + +func (b *iptablesRuleBuilder) MatchCIDRDst(cidr string) IPTablesRuleBuilder { + if cidr == "" || cidr == "0.0.0.0/0" || cidr == "::/0" { + return b + } + matchStr := fmt.Sprintf("-d %s", cidr) + b.writeSpec(matchStr) + return b +} + +func (b *iptablesRuleBuilder) MatchIPSetSrc(ipset string) IPTablesRuleBuilder { + if 
ipset == "" { + return b + } + matchStr := fmt.Sprintf("-m set --match-set %s src", ipset) + b.writeSpec(matchStr) + return b +} + +func (b *iptablesRuleBuilder) MatchIPSetDst(ipset string) IPTablesRuleBuilder { + if ipset == "" { + return b + } + matchStr := fmt.Sprintf("-m set --match-set %s dst", ipset) + b.writeSpec(matchStr) + return b +} + +func (b *iptablesRuleBuilder) MatchTransProtocol(protocol string) IPTablesRuleBuilder { + if protocol == "" { + return b + } + matchStr := fmt.Sprintf("-p %s", protocol) + b.writeSpec(matchStr) + return b +} + +func (b *iptablesRuleBuilder) MatchDstPort(port *intstr.IntOrString, endPort *int32) IPTablesRuleBuilder { + if port == nil { + return b + } + var matchStr string + if endPort != nil { + matchStr = fmt.Sprintf("--dport %s:%d", port.String(), *endPort) + } else { + matchStr = fmt.Sprintf("--dport %s", port.String()) + } + b.writeSpec(matchStr) + return b +} + +func (b *iptablesRuleBuilder) MatchSrcPort(port, endPort *int32) IPTablesRuleBuilder { + if port == nil { + return b + } + var matchStr string + if endPort != nil { + matchStr = fmt.Sprintf("--sport %d:%d", *port, *endPort) + } else { + matchStr = fmt.Sprintf("--sport %d", *port) + } + b.writeSpec(matchStr) + return b +} + +func (b *iptablesRuleBuilder) MatchICMP(icmpType, icmpCode *int32, ipProtocol Protocol) IPTablesRuleBuilder { + parts := []string{"-p"} + icmpTypeStr := "icmp" + if ipProtocol != ProtocolIPv4 { + icmpTypeStr = "icmpv6" + } + parts = append(parts, icmpTypeStr) + + if icmpType != nil { + icmpTypeFlag := "--icmp-type" + if ipProtocol != ProtocolIPv4 { + icmpTypeFlag = "--icmpv6-type" + } + + if icmpCode != nil { + parts = append(parts, icmpTypeFlag, fmt.Sprintf("%d/%d", *icmpType, *icmpCode)) + } else { + parts = append(parts, icmpTypeFlag, strconv.Itoa(int(*icmpType))) + } + } + b.writeSpec(strings.Join(parts, " ")) + return b +} + +func (b *iptablesRuleBuilder) MatchEstablishedOrRelated() IPTablesRuleBuilder { + b.writeSpec("-m conntrack 
--ctstate ESTABLISHED,RELATED") + return b +} + +func (b *iptablesRuleBuilder) MatchInputInterface(interfaceName string) IPTablesRuleBuilder { + if interfaceName == "" { + return b + } + specStr := fmt.Sprintf("-i %s", interfaceName) + b.writeSpec(specStr) + return b +} + +func (b *iptablesRuleBuilder) MatchOutputInterface(interfaceName string) IPTablesRuleBuilder { + if interfaceName == "" { + return b + } + specStr := fmt.Sprintf("-o %s", interfaceName) + b.writeSpec(specStr) + return b +} + +func (b *iptablesRuleBuilder) SetTarget(target string) IPTablesRuleBuilder { + if target == "" { + return b + } + targetStr := fmt.Sprintf("-j %s", target) + b.writeSpec(targetStr) + return b +} + +func (b *iptablesRuleBuilder) SetComment(comment string) IPTablesRuleBuilder { + if comment == "" { + return b + } + + commentStr := fmt.Sprintf("-m comment --comment \"%s\"", comment) + b.writeSpec(commentStr) + return b +} + +func (b *iptablesRuleBuilder) CopyBuilder() IPTablesRuleBuilder { + var copiedSpec strings.Builder + copiedSpec.Grow(b.specs.Len()) + copiedSpec.WriteString(b.specs.String()) + builder := &iptablesRuleBuilder{ + iptablesRule{ + chain: b.chain, + specs: &copiedSpec, + }, + } + return builder +} + +func (b *iptablesRuleBuilder) Done() IPTablesRule { + return &b.iptablesRule +} + +func (e *iptablesRule) GetRule() string { + ruleStr := fmt.Sprintf("-A %s %s", e.chain, e.specs.String()) + return ruleStr[:len(ruleStr)-1] +} diff --git a/pkg/agent/util/iptables/builder_test.go b/pkg/agent/util/iptables/builder_test.go new file mode 100644 index 00000000000..c3da571a9a9 --- /dev/null +++ b/pkg/agent/util/iptables/builder_test.go @@ -0,0 +1,135 @@ +//go:build !windows +// +build !windows + +// Copyright 2024 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package iptables + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/util/intstr" +) + +var ( + ipsetAlfa = "alfa" + ipsetBravo = "bravo" + eth0 = "eth0" + eth1 = "eth1" + port8080 = &intstr.IntOrString{Type: intstr.Int, IntVal: 8080} + port137 = &intstr.IntOrString{Type: intstr.Int, IntVal: 137} + port139 = int32(139) + port40000 = int32(40000) + port50000 = int32(50000) + icmpType0 = int32(0) + icmpCode0 = int32(0) + cidr = "192.168.1.0/24" +) + +func TestBuilders(t *testing.T) { + testCases := []struct { + name string + chain string + buildFunc func(IPTablesRuleBuilder) IPTablesRule + expected string + }{ + { + name: "Accept TCP destination 8080 in FORWARD", + chain: ForwardChain, + buildFunc: func(builder IPTablesRuleBuilder) IPTablesRule { + return builder.MatchIPSetSrc(ipsetAlfa). + MatchIPSetDst(ipsetBravo). + MatchInputInterface(eth0). + MatchTransProtocol(ProtocolTCP). + MatchDstPort(port8080, nil). + MatchCIDRSrc(cidr). + SetComment("Accept TCP 8080"). + SetTarget(AcceptTarget). + Done() + }, + expected: `-A FORWARD -m set --match-set alfa src -m set --match-set bravo dst -i eth0 -p tcp --dport 8080 -s 192.168.1.0/24 -m comment --comment "Accept TCP 8080" -j ACCEPT`, + }, + { + name: "Drop UDP destination 137-139 in INPUT", + chain: "INPUT", + buildFunc: func(builder IPTablesRuleBuilder) IPTablesRule { + return builder.MatchIPSetSrc(ipsetAlfa). + MatchInputInterface(eth0). + MatchTransProtocol(ProtocolUDP). + MatchDstPort(port137, &port139). + MatchCIDRDst(cidr). 
+ SetComment("Drop UDP 137-139"). + SetTarget(DropTarget). + Done() + }, + expected: `-A INPUT -m set --match-set alfa src -i eth0 -p udp --dport 137:139 -d 192.168.1.0/24 -m comment --comment "Drop UDP 137-139" -j DROP`, + }, + { + name: "Reject SCTP source 40000-50000 in OUTPUT", + chain: OutputChain, + buildFunc: func(builder IPTablesRuleBuilder) IPTablesRule { + return builder.MatchOutputInterface(eth1). + MatchTransProtocol(ProtocolSCTP). + MatchSrcPort(&port40000, &port50000). + SetComment("Drop SCTP 40000-50000"). + SetTarget(DropTarget). + Done() + }, + expected: `-A OUTPUT -o eth1 -p sctp --sport 40000:50000 -m comment --comment "Drop SCTP 40000-50000" -j DROP`, + }, + { + name: "Accept ICMP IPv4", + chain: ForwardChain, + buildFunc: func(builder IPTablesRuleBuilder) IPTablesRule { + return builder.MatchInputInterface(eth0). + MatchICMP(&icmpType0, &icmpCode0, ProtocolIPv4). + SetTarget(AcceptTarget). + Done() + }, + expected: `-A FORWARD -i eth0 -p icmp --icmp-type 0/0 -j ACCEPT`, + }, + { + name: "Accept ICMP IPv6", + chain: ForwardChain, + buildFunc: func(builder IPTablesRuleBuilder) IPTablesRule { + return builder.MatchInputInterface(eth0). + MatchICMP(&icmpType0, nil, ProtocolIPv6). + SetTarget(AcceptTarget). + Done() + }, + expected: `-A FORWARD -i eth0 -p icmpv6 --icmpv6-type 0 -j ACCEPT`, + }, + { + name: "Accept packets of established TCP connections", + chain: InputChain, + buildFunc: func(builder IPTablesRuleBuilder) IPTablesRule { + return builder.MatchTransProtocol(ProtocolTCP). + MatchEstablishedOrRelated(). + SetTarget(AcceptTarget). 
+ Done() + }, + expected: `-A INPUT -p tcp -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT`, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + builder := NewRuleBuilder(tc.chain) + rule := tc.buildFunc(builder) + assert.Equal(t, tc.expected, rule.GetRule()) + }) + } +} diff --git a/pkg/agent/util/iptables/iptables.go b/pkg/agent/util/iptables/iptables.go index 9514c16008d..d436f80afbd 100644 --- a/pkg/agent/util/iptables/iptables.go +++ b/pkg/agent/util/iptables/iptables.go @@ -26,6 +26,7 @@ import ( "github.com/blang/semver" "github.com/coreos/go-iptables/iptables" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/klog/v2" ) @@ -36,7 +37,7 @@ const ( RawTable = "raw" AcceptTarget = "ACCEPT" - DROPTarget = "DROP" + DropTarget = "DROP" MasqueradeTarget = "MASQUERADE" MarkTarget = "MARK" ReturnTarget = "RETURN" @@ -44,8 +45,10 @@ const ( NoTrackTarget = "NOTRACK" SNATTarget = "SNAT" DNATTarget = "DNAT" + RejectTarget = "REJECT" PreRoutingChain = "PREROUTING" + InputChain = "INPUT" ForwardChain = "FORWARD" PostRoutingChain = "POSTROUTING" OutputChain = "OUTPUT" @@ -71,6 +74,14 @@ const ( ProtocolIPv6 ) +const ( + ProtocolTCP = "tcp" + ProtocolUDP = "udp" + ProtocolSCTP = "sctp" + ProtocolICMP = "icmp" + ProtocolICMPv6 = "icmp6" +) + // https://netfilter.org/projects/iptables/files/changes-iptables-1.6.2.txt: // iptables-restore: support acquiring the lock. 
var restoreWaitSupportedMinVersion = semver.Version{Major: 1, Minor: 6, Patch: 2} @@ -95,6 +106,28 @@ type Interface interface { Save() ([]byte, error) } +type IPTablesRuleBuilder interface { + MatchCIDRSrc(cidr string) IPTablesRuleBuilder + MatchCIDRDst(cidr string) IPTablesRuleBuilder + MatchIPSetSrc(ipset string) IPTablesRuleBuilder + MatchIPSetDst(ipset string) IPTablesRuleBuilder + MatchTransProtocol(protocol string) IPTablesRuleBuilder + MatchDstPort(port *intstr.IntOrString, endPort *int32) IPTablesRuleBuilder + MatchSrcPort(port, endPort *int32) IPTablesRuleBuilder + MatchICMP(icmpType, icmpCode *int32, ipProtocol Protocol) IPTablesRuleBuilder + MatchEstablishedOrRelated() IPTablesRuleBuilder + MatchInputInterface(interfaceName string) IPTablesRuleBuilder + MatchOutputInterface(interfaceName string) IPTablesRuleBuilder + SetTarget(target string) IPTablesRuleBuilder + SetComment(comment string) IPTablesRuleBuilder + CopyBuilder() IPTablesRuleBuilder + Done() IPTablesRule +} + +type IPTablesRule interface { + GetRule() string +} + type Client struct { ipts map[Protocol]*iptables.IPTables // restoreWaitSupported indicates whether iptables-restore (or ip6tables-restore) supports --wait flag. 
@@ -352,3 +385,7 @@ func (c *Client) Save() ([]byte, error) { func MakeChainLine(chain string) string { return fmt.Sprintf(":%s - [0:0]", chain) } + +func IsIPv6Protocol(protocol Protocol) bool { + return protocol == ProtocolIPv6 +} diff --git a/pkg/apiserver/handlers/featuregates/handler_test.go b/pkg/apiserver/handlers/featuregates/handler_test.go index e7a3d9c1f76..35be12220c0 100644 --- a/pkg/apiserver/handlers/featuregates/handler_test.go +++ b/pkg/apiserver/handlers/featuregates/handler_test.go @@ -66,6 +66,7 @@ func Test_getGatesResponse(t *testing.T) { {Component: "agent", Name: "Multicast", Status: multicastStatus, Version: "BETA"}, {Component: "agent", Name: "Multicluster", Status: "Disabled", Version: "ALPHA"}, {Component: "agent", Name: "NetworkPolicyStats", Status: "Enabled", Version: "BETA"}, + {Component: "agent", Name: "NodeNetworkPolicy", Status: "Disabled", Version: "ALPHA"}, {Component: "agent", Name: "NodePortLocal", Status: "Enabled", Version: "GA"}, {Component: "agent", Name: "SecondaryNetwork", Status: "Disabled", Version: "ALPHA"}, {Component: "agent", Name: "ServiceExternalIP", Status: "Disabled", Version: "ALPHA"}, @@ -198,6 +199,7 @@ func Test_getControllerGatesResponse(t *testing.T) { {Component: "controller", Name: "Multicluster", Status: "Disabled", Version: "ALPHA"}, {Component: "controller", Name: "NetworkPolicyStats", Status: "Enabled", Version: "BETA"}, {Component: "controller", Name: "NodeIPAM", Status: "Enabled", Version: "BETA"}, + {Component: "controller", Name: "NodeNetworkPolicy", Status: "Disabled", Version: "ALPHA"}, {Component: "controller", Name: "ServiceExternalIP", Status: "Disabled", Version: "ALPHA"}, {Component: "controller", Name: "SupportBundleCollection", Status: "Disabled", Version: "ALPHA"}, {Component: "controller", Name: "Traceflow", Status: "Enabled", Version: "BETA"}, diff --git a/pkg/config/agent/config.go b/pkg/config/agent/config.go index ed57bd00db1..f8d8694d77a 100644 --- a/pkg/config/agent/config.go 
+++ b/pkg/config/agent/config.go @@ -205,6 +205,8 @@ type AgentConfig struct { // second(pps) and the burst size will be automatically set to twice the rate. // When the rate and burst size are exceeded, new packets will be dropped. PacketInRate int `yaml:"packetInRate,omitempty"` + // NodeNetworkPolicy includes the privileged ingress and egress rules for NodeNetworkPolicy. + NodeNetworkPolicy NodeNetworkPolicyConfig `yaml:"nodeNetworkPolicy,omitempty"` } type AntreaProxyConfig struct { @@ -404,3 +406,23 @@ type OVSBridgeConfig struct { // only a single physical interface is supported. PhysicalInterfaces []string `yaml:"physicalInterfaces,omitempty"` } + +type PrivilegedRule struct { + // The direction value can be "ingress" or "egress". + Direction string `yaml:"direction,omitempty"` + // The IP families of the rule. Supported values are "ipv4", "ipv6" and "" (both). + IPFamilies string `yaml:"ipFamily,omitempty"` + // The protocol which traffic must match. Supported values are "tcp", "udp", "" (both). + Protocol string `yaml:"protocol,omitempty"` + // CIDR marks the destination CIDR for egress and source CIDR for ingress. It can be "" which means allow all addresses. + CIDR string `yaml:"cidr,omitempty"` + // The destination port list of the given protocol. It can be nil which means allow all ports. + Ports []string `yaml:"ports,omitempty"` + // Description is the explanation of the rule. + Description string `yaml:"description,omitempty"` +} + +// NodeNetworkPolicyConfig includes the privileged rules. +type NodeNetworkPolicyConfig struct { + PrivilegedRules []PrivilegedRule `yaml:"privilegedRules,omitempty"` +} diff --git a/pkg/features/antrea_features.go b/pkg/features/antrea_features.go index e2b5cb801c8..620d6ed1d2f 100644 --- a/pkg/features/antrea_features.go +++ b/pkg/features/antrea_features.go @@ -146,6 +146,10 @@ const ( // alpha: v1.14 // Enable Egress traffic shaping. 
EgressTrafficShaping featuregate.Feature = "EgressTrafficShaping" + + // alpha: v1.15 + // Allow users to protect their Kubernetes Nodes. + NodeNetworkPolicy featuregate.Feature = "NodeNetworkPolicy" ) var ( @@ -184,6 +188,7 @@ var ( LoadBalancerModeDSR: {Default: false, PreRelease: featuregate.Alpha}, AdminNetworkPolicy: {Default: false, PreRelease: featuregate.Alpha}, EgressTrafficShaping: {Default: false, PreRelease: featuregate.Alpha}, + NodeNetworkPolicy: {Default: false, PreRelease: featuregate.Alpha}, } // AgentGates consists of all known feature gates for the Antrea Agent. @@ -211,6 +216,7 @@ var ( Traceflow, TrafficControl, EgressTrafficShaping, + NodeNetworkPolicy, ) // ControllerGates consists of all known feature gates for the Antrea Controller. @@ -229,6 +235,7 @@ var ( ServiceExternalIP, SupportBundleCollection, Traceflow, + NodeNetworkPolicy, ) // UnsupportedFeaturesOnWindows records the features not supported on @@ -255,6 +262,7 @@ var ( LoadBalancerModeDSR: {}, CleanupStaleUDPSvcConntrack: {}, EgressTrafficShaping: {}, + NodeNetworkPolicy: {}, } // supportedFeaturesOnExternalNode records the features supported on an external // Node. Antrea Agent checks the enabled features if it is running on an diff --git a/test/e2e/antreaipam_anp_test.go b/test/e2e/antreaipam_anp_test.go index e5c0ef8ce02..b6fd4a162d6 100644 --- a/test/e2e/antreaipam_anp_test.go +++ b/test/e2e/antreaipam_anp_test.go @@ -44,7 +44,6 @@ func initializeAntreaIPAM(t *testing.T, data *TestData) { // It should be empty every time when "initializeAntreaIPAM" is performed, otherwise there will be unexpected // results. 
allPods = []Pod{} - podsByNamespace = make(map[string][]Pod) for _, ns := range antreaIPAMNamespaces { namespaces[ns] = ns @@ -53,7 +52,6 @@ func initializeAntreaIPAM(t *testing.T, data *TestData) { for _, podName := range pods { for _, ns := range namespaces { allPods = append(allPods, NewPod(ns, podName)) - podsByNamespace[ns] = append(podsByNamespace[ns], NewPod(ns, podName)) } } @@ -61,9 +59,9 @@ func initializeAntreaIPAM(t *testing.T, data *TestData) { // k8sUtils is a global var k8sUtils, err = NewKubernetesUtils(data) failOnError(err, t) - _, err = k8sUtils.Bootstrap(regularNamespaces, pods, true) + _, err = k8sUtils.Bootstrap(regularNamespaces, pods, true, nil, nil) failOnError(err, t) - ips, err := k8sUtils.Bootstrap(namespaces, pods, false) + ips, err := k8sUtils.Bootstrap(namespaces, pods, false, nil, nil) failOnError(err, t) podIPs = ips } @@ -195,18 +193,18 @@ func testAntreaIPAMACNP(t *testing.T, protocol e2eutils.AntreaPolicyProtocol, ac SetAppliedToGroup([]e2eutils.ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "c"}}}) if isIngress { builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, - nil, nil, false, nil, ruleAction, "", "", nil) + nil, nil, nil, nil, false, nil, ruleAction, "", "", nil) builder2.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, - nil, nil, false, nil, ruleAction, "", "", nil) + nil, nil, nil, nil, false, nil, ruleAction, "", "", nil) builder3.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, - nil, nil, false, nil, ruleAction, "", "", nil) + nil, nil, nil, nil, false, nil, ruleAction, "", "", nil) } else { builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, - nil, nil, false, nil, ruleAction, "", "", nil) + nil, nil, nil, nil, false, nil, ruleAction, "", "", nil) builder2.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, - 
nil, nil, false, nil, ruleAction, "", "", nil) + nil, nil, nil, nil, false, nil, ruleAction, "", "", nil) builder3.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, - nil, nil, false, nil, ruleAction, "", "", nil) + nil, nil, nil, nil, false, nil, ruleAction, "", "", nil) } reachability := NewReachability(allPods, action) diff --git a/test/e2e/antreapolicy_test.go b/test/e2e/antreapolicy_test.go index 27c5ae0cb28..ef3d78ee252 100644 --- a/test/e2e/antreapolicy_test.go +++ b/test/e2e/antreapolicy_test.go @@ -47,13 +47,13 @@ import ( // common for all tests. var ( allPods []Pod - podsByNamespace map[string][]Pod k8sUtils *KubernetesUtils allTestList []*TestCase pods []string namespaces map[string]string podIPs map[string][]string p80, p81, p8080, p8081, p8082, p8085, p6443 int32 + nodes map[string]string ) const ( @@ -125,12 +125,10 @@ func initialize(t *testing.T, data *TestData) { // It should be empty every time when "initialize" is performed, otherwise there will be unexpected // results. allPods = []Pod{} - podsByNamespace = make(map[string][]Pod) for _, podName := range pods { for _, ns := range namespaces { allPods = append(allPods, NewPod(ns, podName)) - podsByNamespace[ns] = append(podsByNamespace[ns], NewPod(ns, podName)) } } skipIfAntreaPolicyDisabled(t) @@ -139,7 +137,7 @@ func initialize(t *testing.T, data *TestData) { // k8sUtils is a global var k8sUtils, err = NewKubernetesUtils(data) failOnError(err, t) - ips, err := k8sUtils.Bootstrap(namespaces, pods, true) + ips, err := k8sUtils.Bootstrap(namespaces, pods, true, nil, nil) failOnError(err, t) podIPs = ips } @@ -242,14 +240,14 @@ func testUpdateValidationInvalidACNP(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}). 
SetPriority(1.0) builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, - nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + nil, nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) acnp := builder.Get() if _, err := k8sUtils.CreateOrUpdateACNP(acnp); err != nil { failOnError(fmt.Errorf("create ACNP acnp-applied-to-update failed: %v", err), t) } builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "c"}, nil, - nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "b"}}}, crdv1beta1.RuleActionAllow, "", "", nil) + nil, nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "b"}}}, crdv1beta1.RuleActionAllow, "", "", nil) acnp = builder.Get() if _, err := k8sUtils.CreateOrUpdateACNP(acnp); err == nil { // Above update of ACNP must fail as it is an invalid spec. @@ -407,8 +405,8 @@ func testACNPAllowXBtoA(t *testing.T) { builder = builder.SetName("acnp-allow-xb-to-a"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, - nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) reachability := NewReachability(allPods, Dropped) reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Connected) @@ -445,22 +443,22 @@ func testACNPSourcePort(t *testing.T) { builder = builder.SetName("acnp-source-port"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddIngressForSrcPort(ProtocolTCP, nil, nil, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder.AddIngressForSrcPort(ProtocolTCP, nil, nil, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-source-port"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder2.AddIngressForSrcPort(ProtocolTCP, &p80, nil, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder2.AddIngressForSrcPort(ProtocolTCP, &p80, nil, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder3 := &ClusterNetworkPolicySpecBuilder{} builder3 = builder3.SetName("acnp-source-port"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder3.AddIngressForSrcPort(ProtocolTCP, &p80, &p81, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder3.AddIngressForSrcPort(ProtocolTCP, &p80, &p81, &portStart, &portEnd, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) @@ -512,8 +510,8 @@ func testACNPAllowXBtoYA(t *testing.T) { builder = builder.SetName("acnp-allow-xb-to-ya"). SetPriority(2.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["y"]}}}) - builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, - nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) reachability := NewReachability(allPods, Dropped) reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Connected) @@ -544,15 +542,15 @@ func testACNPPriorityOverrideDefaultDeny(t *testing.T) { builder1 = builder1.SetName("acnp-priority2"). SetPriority(2). 
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-priority1"). SetPriority(1). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) // Ingress from ns:z to x/a will be dropped since acnp-priority1 has higher precedence. reachabilityBothACNP := NewReachability(allPods, Dropped) @@ -595,10 +593,10 @@ func testACNPAllowNoDefaultIsolation(t *testing.T, protocol AntreaPolicyProtocol builder = builder.SetName("acnp-allow-x-ingress-y-egress-z"). SetPriority(1.1). 
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder.AddIngress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]}, - nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) - builder.AddEgress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder.AddIngress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder.AddEgress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) reachability := NewReachability(allPods, Connected) testStep := []*TestStep{ @@ -632,8 +630,8 @@ func testACNPDropEgress(t *testing.T, protocol AntreaPolicyProtocol) { builder = builder.SetName("acnp-deny-a-to-z-egress"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) @@ -665,7 +663,7 @@ func testACNPDropIngressInSelectedNamespace(t *testing.T) { builder = builder.SetName("acnp-deny-ingress-to-x"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "drop-all-ingress", nil) reachability := NewReachability(allPods, Connected) @@ -696,8 +694,8 @@ func testACNPNoEffectOnOtherProtocols(t *testing.T) { builder = builder.SetName("acnp-deny-a-to-z-ingress"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability1 := NewReachability(allPods, Connected) reachability1.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) @@ -749,8 +747,8 @@ func testACNPAppliedToDenyXBtoCGWithYA(t *testing.T) { builder = builder.SetName("acnp-deny-cg-with-ya-from-xb"). SetPriority(2.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cgName}}) - builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) @@ -787,7 +785,7 @@ func testACNPIngressRuleDenyCGWithXBtoYA(t *testing.T) { SetPriority(2.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["y"]}}}) builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) + nil, nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) reachability := NewReachability(allPods, Connected) reachability.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["y"]+"/a"), Dropped) @@ -818,8 +816,8 @@ func testACNPAppliedToRuleCGWithPodsAToNsZ(t *testing.T) { builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-deny-cg-with-a-to-z"). 
SetPriority(1.0) - builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, []ACNPAppliedToSpec{{Group: cgName}}, crdv1beta1.RuleActionDrop, "", "", nil) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, []ACNPAppliedToSpec{{Group: cgName}}, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) @@ -853,8 +851,8 @@ func testACNPEgressRulePodsAToCGWithNsZ(t *testing.T) { builder = builder.SetName("acnp-deny-a-to-cg-with-z-egress"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) reachability := NewReachability(allPods, Connected) reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) @@ -890,8 +888,8 @@ func testACNPClusterGroupUpdateAppliedTo(t *testing.T) { builder = builder.SetName("acnp-deny-cg-with-a-to-z-egress"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cgName}}) - builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) @@ -941,8 +939,8 @@ func testACNPClusterGroupUpdate(t *testing.T) { builder = builder.SetName("acnp-deny-a-to-cg-with-z-egress"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) reachability := NewReachability(allPods, Connected) reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) @@ -991,8 +989,8 @@ func testACNPClusterGroupAppliedToPodAdd(t *testing.T, data *TestData) { builder = builder.SetName("acnp-deny-cg-with-zj-to-xj-egress"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cgName}}) - builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "j"}, map[string]string{"ns": namespaces["x"]}, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "j"}, nil, map[string]string{"ns": namespaces["x"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) cp := []*CustomProbe{ { SourcePod: CustomPod{ @@ -1039,8 +1037,8 @@ func testACNPClusterGroupRefRulePodAdd(t *testing.T, data *TestData) { NSSelector: map[string]string{"ns": namespaces["x"]}, }, }) - builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) cp := []*CustomProbe{ { SourcePod: CustomPod{ @@ -1115,10 +1113,10 @@ func testACNPClusterGroupRefRuleIPBlocks(t *testing.T) { NSSelector: map[string]string{"ns": namespaces["y"]}, }, }) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName2, "", nil) + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName2, "", nil) reachability := NewReachability(allPods, Connected) reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) @@ -1718,8 +1716,8 @@ func 
testBaselineNamespaceIsolation(t *testing.T) { SetTier("baseline"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, []metav1.LabelSelectorRequirement{nsExpOtherThanX}, false, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + nil, nil, []metav1.LabelSelectorRequirement{nsExpOtherThanX}, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) // create a K8s NetworkPolicy for Pods in namespace x to allow ingress traffic from Pods in the same namespace, @@ -1762,7 +1760,7 @@ func testBaselineNamespaceIsolation(t *testing.T) { time.Sleep(networkPolicyDelay) } -// testACNPPriorityOverride tests priority overriding in three Policies. Those three Policies are applied in a specific order to +// testACNPPriorityOverride tests priority overriding in three ACNPs. Those three ACNPs are applied in a specific order to // test priority reassignment, and each controls a smaller set of traffic patterns as priority increases. func testACNPPriorityOverride(t *testing.T) { builder1 := &ClusterNetworkPolicySpecBuilder{} @@ -1770,24 +1768,24 @@ func testACNPPriorityOverride(t *testing.T) { SetPriority(1.001). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) // Highest priority. Drops traffic from z/b to x/a. 
- builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-priority2"). SetPriority(1.002). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) // Medium priority. Allows traffic from z to x/a. - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder3 := &ClusterNetworkPolicySpecBuilder{} builder3 = builder3.SetName("acnp-priority3"). SetPriority(1.003). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) // Lowest priority. Drops traffic from z to x. 
- builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachabilityTwoACNPs := NewReachability(allPods, Connected) reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped) @@ -1836,8 +1834,8 @@ func testACNPPriorityOverride(t *testing.T) { executeTests(t, testCase) } -// testACNPTierOverride tests tier priority overriding in three Policies. -// Each ACNP controls a smaller set of traffic patterns as tier priority increases. +// testACNPTierOverride tests tier priority overriding in three ACNPs. Each ACNP controls a smaller set of traffic patterns +// as tier priority increases. func testACNPTierOverride(t *testing.T) { builder1 := &ClusterNetworkPolicySpecBuilder{} builder1 = builder1.SetName("acnp-tier-emergency"). @@ -1845,8 +1843,8 @@ func testACNPTierOverride(t *testing.T) { SetPriority(100). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) // Highest priority tier. Drops traffic from z/b to x/a. - builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-tier-securityops"). @@ -1854,8 +1852,8 @@ func testACNPTierOverride(t *testing.T) { SetPriority(10). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) // Medium priority tier. Allows traffic from z to x/a. - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder3 := &ClusterNetworkPolicySpecBuilder{} builder3 = builder3.SetName("acnp-tier-application"). @@ -1863,8 +1861,8 @@ func testACNPTierOverride(t *testing.T) { SetPriority(1). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) // Lowest priority tier. Drops traffic from z to x. - builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachabilityTwoACNPs := NewReachability(allPods, Connected) reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped) @@ -1912,8 +1910,8 @@ func testACNPTierOverride(t *testing.T) { executeTests(t, testCase) } -// testACNPTierOverride tests tier priority overriding in three Policies with custom created tiers. -// Each ACNP controls a smaller set of traffic patterns as tier priority increases. +// testACNPTierOverride tests tier priority overriding in three ACNPs with custom created tiers. Each ACNP controls a +// smaller set of traffic patterns as tier priority increases. 
func testACNPCustomTiers(t *testing.T) { k8sUtils.DeleteTier("high-priority") k8sUtils.DeleteTier("low-priority") @@ -1929,8 +1927,8 @@ func testACNPCustomTiers(t *testing.T) { SetPriority(100). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) // Medium priority tier. Allows traffic from z to x/a. - builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-tier-low"). @@ -1938,8 +1936,8 @@ func testACNPCustomTiers(t *testing.T) { SetPriority(1). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) // Lowest priority tier. Drops traffic from z to x. - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachabilityTwoACNPs := NewReachability(allPods, Connected) reachabilityTwoACNPs.Expect(Pod(namespaces["z"]+"/a"), Pod(namespaces["x"]+"/b"), Dropped) @@ -1963,7 +1961,7 @@ func testACNPCustomTiers(t *testing.T) { {"ACNP Custom Tier priority", testStepTwoACNP}, } executeTests(t, testCase) - // Cleanup customed tiers. ACNPs created in those tiers need to be deleted first. + // Cleanup customized tiers. ACNPs created in those tiers need to be deleted first. 
failOnError(k8sUtils.CleanACNPs(), t) failOnError(k8sUtils.DeleteTier("high-priority"), t) failOnError(k8sUtils.DeleteTier("low-priority"), t) @@ -1977,8 +1975,8 @@ func testACNPPriorityConflictingRule(t *testing.T) { builder1 = builder1.SetName("acnp-drop"). SetPriority(1). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-allow"). @@ -1986,8 +1984,8 @@ func testACNPPriorityConflictingRule(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) // The following ingress rule will take no effect as it is exactly the same as ingress rule of cnp-drop, // but cnp-allow has lower priority. - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) reachabilityBothACNP := NewReachability(allPods, Connected) reachabilityBothACNP.ExpectEgressToNamespace(Pod(namespaces["z"]+"/a"), namespaces["x"], Dropped) @@ -2010,30 +2008,30 @@ func testACNPPriorityConflictingRule(t *testing.T) { executeTests(t, testCase) } -// testACNPPriorityConflictingRule tests that if there are two rules in the cluster that conflicts with -// each other, the rule with higher precedence will prevail. 
+// testACNPRulePriority tests that if there are two rules in the cluster that conflicts with each other, the rule with +// higher precedence will prevail. func testACNPRulePriority(t *testing.T) { builder1 := &ClusterNetworkPolicySpecBuilder{} // acnp-deny will apply to all pods in namespace x builder1 = builder1.SetName("acnp-deny"). SetPriority(5). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]}, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) // This rule should take no effect as it will be overridden by the first rule of cnp-allow - builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} // acnp-allow will also apply to all pods in namespace x builder2 = builder2.SetName("acnp-allow"). SetPriority(5). 
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) - // This rule should take no effect as it will be overridden by the first rule of cnp-drop - builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]}, - nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + // This rule should take no effect as it will be overridden by the first rule of cnp-deny + builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) // Only egress from pods in namespace x to namespace y should be denied reachabilityBothACNP := NewReachability(allPods, Connected) @@ -2063,8 +2061,8 @@ func testACNPPortRange(t *testing.T) { builder = builder.SetName("acnp-deny-a-to-z-egress-port-range"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddEgress(ProtocolTCP, &p8080, nil, &p8082, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "acnp-port-range", nil) + builder.AddEgress(ProtocolTCP, &p8080, nil, &p8082, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "acnp-port-range", nil) reachability := NewReachability(allPods, Connected) reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) @@ -2095,8 +2093,8 @@ func testACNPRejectEgress(t *testing.T) { builder = builder.SetName("acnp-reject-a-to-z-egress"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) reachability := NewReachability(allPods, Connected) reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Rejected) @@ -2120,14 +2118,14 @@ func testACNPRejectEgress(t *testing.T) { executeTests(t, testCase) } -// testACNPRejectIngress tests that an ACNP is able to reject egress traffic from pods labelled A to namespace Z. +// testACNPRejectIngress tests that an ACNP is able to reject ingress traffic from pods labelled A to namespace Z. func testACNPRejectIngress(t *testing.T, protocol AntreaPolicyProtocol) { builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-reject-a-from-z-ingress"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) reachability := NewReachability(allPods, Connected) reachability.ExpectIngressFromNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Rejected) @@ -2184,10 +2182,10 @@ func testRejectServiceTraffic(t *testing.T, data *TestData, clientNamespace, ser builder1 = builder1.SetName("acnp-reject-egress-svc-traffic"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": "agnhost-client"}}}) - builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, svc1.Spec.Selector, nil, - nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) - builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, svc2.Spec.Selector, nil, - nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, svc1.Spec.Selector, nil, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, svc2.Spec.Selector, nil, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) acnpEgress := builder1.Get() k8sUtils.CreateOrUpdateACNP(acnpEgress) @@ -2211,8 +2209,8 @@ func testRejectServiceTraffic(t *testing.T, data *TestData, clientNamespace, ser builder2 = builder2.SetName("acnp-reject-ingress-svc-traffic"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: svc1.Spec.Selector}, {PodSelector: svc2.Spec.Selector}}) - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": "agnhost-client"}, nil, - nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": "agnhost-client"}, nil, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) acnpIngress := builder2.Get() k8sUtils.CreateOrUpdateACNP(acnpIngress) @@ -2302,10 +2300,10 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData, clientNamespace, ser builder1 := &ClusterNetworkPolicySpecBuilder{} builder1 = builder1.SetName("acnp-reject-ingress-double-dir"). SetPriority(1.0) - builder1.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, - nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}, crdv1beta1.RuleActionReject, "", "", nil) - builder1.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, - nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}, crdv1beta1.RuleActionReject, "", "", nil) + builder1.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, nil, + nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}, crdv1beta1.RuleActionReject, "", "", nil) + builder1.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, nil, + nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}, crdv1beta1.RuleActionReject, "", "", nil) runTestsWithACNP(builder1.Get(), testcases) @@ -2313,10 +2311,10 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData, 
clientNamespace, ser builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-reject-egress-double-dir"). SetPriority(1.0) - builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, - nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}, crdv1beta1.RuleActionReject, "", "", nil) - builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, - nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}, crdv1beta1.RuleActionReject, "", "", nil) + builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, nil, + nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}, crdv1beta1.RuleActionReject, "", "", nil) + builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, nil, + nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}, crdv1beta1.RuleActionReject, "", "", nil) runTestsWithACNP(builder2.Get(), testcases) @@ -2325,10 +2323,10 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData, clientNamespace, ser builder3 = builder3.SetName("acnp-reject-server-double-dir"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}) - builder3.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, - nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) - builder3.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, - nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder3.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder3.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) runTestsWithACNP(builder3.Get(), testcases) @@ -2337,10 +2335,10 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData, clientNamespace, ser builder4 = builder4.SetName("acnp-reject-client-double-dir"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}) - builder4.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, - nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) - builder4.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, - nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder4.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder4.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) runTestsWithACNP(builder4.Get(), testcases) } @@ -2623,8 +2621,8 @@ func testAuditLoggingBasic(t *testing.T, data *TestData) { builder = builder.SetName(npName). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", ruleName, nil) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", ruleName, nil) builder.AddEgressLogging(logLabel) npRef := fmt.Sprintf("AntreaClusterNetworkPolicy:%s", npName) @@ -2820,10 +2818,10 @@ func testAppliedToPerRule(t *testing.T) { cnpATGrp2 := ACNPAppliedToSpec{ PodSelector: map[string]string{"pod": "b"}, NSSelector: map[string]string{"ns": namespaces["y"]}, PodSelectorMatchExp: nil, NSSelectorMatchExp: nil} - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, - nil, nil, false, []ACNPAppliedToSpec{cnpATGrp1}, crdv1beta1.RuleActionDrop, "", "", nil) - builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["z"]}, - nil, nil, false, []ACNPAppliedToSpec{cnpATGrp2}, crdv1beta1.RuleActionDrop, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, + nil, nil, nil, false, []ACNPAppliedToSpec{cnpATGrp1}, crdv1beta1.RuleActionDrop, "", "", nil) + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["z"]}, + nil, nil, nil, false, []ACNPAppliedToSpec{cnpATGrp2}, crdv1beta1.RuleActionDrop, "", "", nil) reachability2 := NewReachability(allPods, Connected) reachability2.Expect(Pod(namespaces["x"]+"/b"), Pod(namespaces["x"]+"/a"), Dropped) @@ 
-2861,7 +2859,7 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("cnp-cg-svc-ref").SetPriority(1.0).SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cg1Name}}) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cg2Name, "", nil) // Pods backing svc1 (label pod=a) in Namespace x should not allow ingress from Pods backing svc2 (label pod=b) in Namespace y. @@ -2914,8 +2912,8 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) builderUpdated := &ClusterNetworkPolicySpecBuilder{} builderUpdated = builderUpdated.SetName("cnp-cg-svc-ref").SetPriority(1.0) builderUpdated.SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) - builderUpdated.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["y"]}, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builderUpdated.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["y"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) // Pod x/a should not allow ingress from y/b per the updated ACNP spec. testStep3 := &TestStep{ @@ -2956,7 +2954,7 @@ func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) { builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("cnp-nested-cg").SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["z"]}}}). 
- AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgNestedName, "", nil) // Pods in Namespace z should not allow traffic from Pods backing svc1 (label pod=a) in Namespace x. @@ -3066,8 +3064,8 @@ func testACNPNestedIPBlockClusterGroupCreateAndUpdate(t *testing.T) { NSSelector: map[string]string{"ns": namespaces["y"]}, }, }) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgParentName, "", nil) + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgParentName, "", nil) reachability := NewReachability(allPods, Connected) reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) @@ -3115,9 +3113,9 @@ func testACNPNamespaceIsolation(t *testing.T) { SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{}}}) // deny ingress traffic except from own namespace, which is always allowed. - builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, true, nil, crdv1beta1.RuleActionAllow, "", "", nil) - builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Dropped) @@ -3136,9 +3134,9 @@ func testACNPNamespaceIsolation(t *testing.T) { builder2 = builder2.SetName("test-acnp-ns-isolation-applied-to-per-rule"). SetTier("baseline"). 
SetPriority(1.0) - builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, true, []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}, crdv1beta1.RuleActionAllow, "", "", nil) - builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, + builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, false, []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}, crdv1beta1.RuleActionDrop, "", "", nil) reachability2 := NewReachability(allPods, Connected) @@ -3171,9 +3169,9 @@ func testACNPStrictNamespacesIsolation(t *testing.T) { SetTier("securityops"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{}}}) - builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, true, nil, crdv1beta1.RuleActionPass, "", "", nil) - builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) // deny ingress traffic except from own namespace, which is delegated to Namespace owners (who can create K8s // NetworkPolicies to regulate intra-Namespace traffic) @@ -3531,7 +3529,7 @@ func testServiceAccountSelector(t *testing.T, data *TestData) { builder = builder.SetName("acnp-service-account"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": serverName}}}) - builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", sa) acnp := builder.Get() @@ -3745,10 +3743,10 @@ func testACNPICMPSupport(t *testing.T, data *TestData) { builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("test-acnp-icmp"). SetPriority(1.0).SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}) - builder.AddEgress(ProtocolICMP, nil, nil, nil, &icmpType, &icmpCode, nil, nil, nil, map[string]string{"antrea-e2e": server0Name}, nil, - nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) - builder.AddEgress(ProtocolICMP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": server1Name}, nil, - nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + builder.AddEgress(ProtocolICMP, nil, nil, nil, &icmpType, &icmpCode, nil, nil, nil, map[string]string{"antrea-e2e": server0Name}, nil, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder.AddEgress(ProtocolICMP, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": server1Name}, nil, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) testcases := []podToAddrTestStep{} if clusterInfo.podV4NetworkCIDR != "" { @@ -3848,8 +3846,8 @@ func testACNPNodePortServiceSupport(t *testing.T, data *TestData, serverNamespac }, }, }) - builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, &cidr, nil, nil, - nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, &cidr, nil, nil, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) acnp, err := 
k8sUtils.CreateOrUpdateACNP(builder.Get()) failOnError(err, t) @@ -3940,8 +3938,8 @@ func testACNPIGMPQuery(t *testing.T, data *TestData, acnpName, caseName, groupAd // create acnp with ingress rule for IGMP query igmpType := crdv1alpha1.IGMPQuery - builder.AddIngress(ProtocolIGMP, nil, nil, nil, nil, nil, &igmpType, &queryGroupAddress, nil, nil, nil, - nil, nil, false, nil, action, "", "", nil) + builder.AddIngress(ProtocolIGMP, nil, nil, nil, nil, nil, &igmpType, &queryGroupAddress, nil, nil, nil, nil, + nil, nil, nil, false, nil, action, "", "", nil) acnp := builder.Get() _, err = k8sUtils.CreateOrUpdateACNP(acnp) defer data.crdClient.CrdV1beta1().ClusterNetworkPolicies().Delete(context.TODO(), acnp.Name, metav1.DeleteOptions{}) @@ -4021,8 +4019,8 @@ func testACNPMulticastEgress(t *testing.T, data *TestData, acnpName, caseName, g builder = builder.SetName(acnpName).SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": label}}}) cidr := mc.group.String() + "/32" - builder.AddEgress(ProtocolUDP, nil, nil, nil, nil, nil, nil, nil, &cidr, nil, nil, - nil, nil, false, nil, action, "", "", nil) + builder.AddEgress(ProtocolUDP, nil, nil, nil, nil, nil, nil, nil, &cidr, nil, nil, nil, + nil, nil, nil, false, nil, action, "", "", nil) acnp := builder.Get() _, err = k8sUtils.CreateOrUpdateACNP(acnp) if err != nil { @@ -4489,8 +4487,8 @@ func TestAntreaPolicyStatus(t *testing.T) { acnpBuilder = acnpBuilder.SetName("acnp-applied-to-two-nodes"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}) - acnpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": namespaces["x"]}, - nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + acnpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, nil, map[string]string{"ns": namespaces["x"]}, + nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) acnp := acnpBuilder.Get() log.Debugf("creating ACNP %v", acnp.Name) _, err = data.crdClient.CrdV1beta1().ClusterNetworkPolicies().Create(context.TODO(), acnp, metav1.CreateOptions{}) diff --git a/test/e2e/k8s_util.go b/test/e2e/k8s_util.go index f7a117b9255..06cca769de6 100644 --- a/test/e2e/k8s_util.go +++ b/test/e2e/k8s_util.go @@ -487,7 +487,12 @@ func (data *TestData) CreateOrUpdateNamespace(n string, labels map[string]string } // CreateOrUpdateDeployment is a convenience function for idempotent setup of deployments -func (data *TestData) CreateOrUpdateDeployment(ns, deploymentName string, replicas int32, labels map[string]string) (*appsv1.Deployment, error) { +func (data *TestData) CreateOrUpdateDeployment(ns string, + deploymentName string, + replicas int32, + labels map[string]string, + nodeName string, + hostNetwork bool) (*appsv1.Deployment, error) { zero := int64(0) log.Infof("Creating/updating Deployment '%s/%s'", ns, deploymentName) makeContainerSpec := func(port int32, protocol v1.Protocol) v1.Container { @@ -535,6 +540,8 @@ func (data *TestData) CreateOrUpdateDeployment(ns, deploymentName string, replic Namespace: ns, }, Spec: v1.PodSpec{ + NodeName: nodeName, + HostNetwork: hostNetwork, TerminationGracePeriodSeconds: &zero, Containers: []v1.Container{ makeContainerSpec(80, "ALL"), @@ -1054,6 +1061,7 @@ func (k *KubernetesUtils) waitForHTTPServers(allPods []Pod) error { serversAreReady := func() bool { reachability := 
NewReachability(allPods, Connected) + k.Validate(allPods, reachability, []int32{80, 81, 8080, 8081, 8082, 8083, 8084, 8085}, utils.ProtocolTCP) if _, wrong, _ := reachability.Summary(); wrong != 0 { return false @@ -1160,18 +1168,26 @@ func (k *KubernetesUtils) ValidateRemoteCluster(remoteCluster *KubernetesUtils, } } -func (k *KubernetesUtils) Bootstrap(namespaces map[string]string, pods []string, createNamespaces bool) (map[string][]string, error) { - for _, ns := range namespaces { +func (k *KubernetesUtils) Bootstrap(namespaces map[string]string, pods []string, createNamespaces bool, nodeNames map[string]string, hostNetworks map[string]bool) (map[string][]string, error) { + for key, ns := range namespaces { if createNamespaces { _, err := k.CreateOrUpdateNamespace(ns, map[string]string{"ns": ns}) if err != nil { return nil, fmt.Errorf("unable to create/update ns %s: %w", ns, err) } } + var nodeName string + var hostNetwork bool + if nodeNames != nil { + nodeName = nodeNames[key] + } + if hostNetworks != nil { + hostNetwork = hostNetworks[key] + } for _, pod := range pods { log.Infof("Creating/updating Pod '%s/%s'", ns, pod) deployment := ns + pod - _, err := k.CreateOrUpdateDeployment(ns, deployment, 1, map[string]string{"pod": pod, "app": pod}) + _, err := k.CreateOrUpdateDeployment(ns, deployment, 1, map[string]string{"pod": pod, "app": pod}, nodeName, hostNetwork) if err != nil { return nil, fmt.Errorf("unable to create/update Deployment '%s/%s': %w", ns, pod, err) } diff --git a/test/e2e/nodenetworkpolicy_test.go b/test/e2e/nodenetworkpolicy_test.go new file mode 100644 index 00000000000..ddb71534b02 --- /dev/null +++ b/test/e2e/nodenetworkpolicy_test.go @@ -0,0 +1,938 @@ +// Copyright 2024 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e + +import ( + "fmt" + "strings" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + crdv1beta1 "antrea.io/antrea/pkg/apis/crd/v1beta1" + "antrea.io/antrea/pkg/features" + . "antrea.io/antrea/test/e2e/utils" +) + +const labelNodeHostname = "kubernetes.io/hostname" + +func initializeAntreaNodeNetworkPolicy(t *testing.T, data *TestData, toHostNetworkPod bool) { + p80 = 80 + p81 = 81 + p8080 = 8080 + p8081 = 8081 + p8082 = 8082 + p8085 = 8085 + pods = []string{"a"} + suffix := randName("") + namespaces = make(map[string]string) + namespaces["x"] = "x-" + suffix + namespaces["y"] = "y-" + suffix + nodes = make(map[string]string) + nodes["x"] = controlPlaneNodeName() + nodes["y"] = workerNodeName(1) + hostNetworks := make(map[string]bool) + hostNetworks["x"] = true + if toHostNetworkPod { + hostNetworks["y"] = true + } else { + hostNetworks["y"] = false + namespaces["z"] = "z-" + suffix + nodes["z"] = workerNodeName(1) + hostNetworks["z"] = false + } + allPods = []Pod{} + + for _, podName := range pods { + for _, ns := range namespaces { + allPods = append(allPods, NewPod(ns, podName)) + } + } + + var err error + // k8sUtils is a global var + k8sUtils, err = NewKubernetesUtils(data) + failOnError(err, t) + ips, err := k8sUtils.Bootstrap(namespaces, pods, true, nodes, hostNetworks) + failOnError(err, t) + podIPs = ips +} + +func skipIfNodeNetworkPolicyDisabled(tb testing.TB) { + skipIfFeatureDisabled(tb, features.NodeNetworkPolicy, true, false) +} + +func TestAntreaNodeNetworkPolicy(t *testing.T) { + 
skipIfAntreaPolicyDisabled(t) + skipIfNodeNetworkPolicyDisabled(t) + skipIfHasWindowsNodes(t) + skipIfNumNodesLessThan(t, 2) + + data, err := setupTest(t) + if err != nil { + t.Fatalf("Error when setting up test: %v", err) + } + defer teardownTest(t, data) + + initializeAntreaNodeNetworkPolicy(t, data, true) + + t.Run("Case=ACNPAllowNoDefaultIsolationTCP", func(t *testing.T) { testNodeACNPAllowNoDefaultIsolation(t, ProtocolTCP) }) + t.Run("Case=ACNPAllowNoDefaultIsolationUDP", func(t *testing.T) { testNodeACNPAllowNoDefaultIsolation(t, ProtocolUDP) }) + t.Run("Case=ACNPAllowNoDefaultIsolationSCTP", func(t *testing.T) { testNodeACNPAllowNoDefaultIsolation(t, ProtocolSCTP) }) + t.Run("Case=ACNPDropEgress", func(t *testing.T) { testNodeACNPDropEgress(t, ProtocolTCP) }) + t.Run("Case=ACNPDropEgressUDP", func(t *testing.T) { testNodeACNPDropEgress(t, ProtocolUDP) }) + t.Run("Case=ACNPDropEgressSCTP", func(t *testing.T) { testNodeACNPDropEgress(t, ProtocolSCTP) }) + t.Run("Case=ACNPDropIngress", func(t *testing.T) { testNodeACNPDropIngress(t, ProtocolTCP) }) + t.Run("Case=ACNPDropIngressUDP", func(t *testing.T) { testNodeACNPDropIngress(t, ProtocolUDP) }) + t.Run("Case=ACNPDropIngressSCTP", func(t *testing.T) { testNodeACNPDropIngress(t, ProtocolSCTP) }) + t.Run("Case=ACNPPortRange", func(t *testing.T) { testNodeACNPPortRange(t) }) + t.Run("Case=ACNPSourcePort", func(t *testing.T) { testNodeACNPSourcePort(t) }) + t.Run("Case=ACNPRejectEgress", func(t *testing.T) { testNodeACNPRejectEgress(t, ProtocolTCP) }) + t.Run("Case=ACNPRejectEgressUDP", func(t *testing.T) { testNodeACNPRejectEgress(t, ProtocolUDP) }) + t.Run("Case=ACNPRejectEgressSCTP", func(t *testing.T) { testNodeACNPRejectEgress(t, ProtocolSCTP) }) + t.Run("Case=ACNPRejectIngress", func(t *testing.T) { testNodeACNPRejectIngress(t, ProtocolTCP) }) + t.Run("Case=ACNPRejectIngressUDP", func(t *testing.T) { testNodeACNPRejectIngress(t, ProtocolUDP) }) + t.Run("Case=ACNPNoEffectOnOtherProtocols", func(t *testing.T) { 
testNodeACNPNoEffectOnOtherProtocols(t) }) + t.Run("Case=ACNPPriorityOverride", func(t *testing.T) { testNodeACNPPriorityOverride(t) }) + t.Run("Case=ACNPTierOverride", func(t *testing.T) { testNodeACNPTierOverride(t) }) + t.Run("Case=ACNPCustomTiers", func(t *testing.T) { testNodeACNPCustomTiers(t) }) + t.Run("Case=ACNPPriorityConflictingRule", func(t *testing.T) { testNodeACNPPriorityConflictingRule(t) }) + + k8sUtils.Cleanup(namespaces) + + initializeAntreaNodeNetworkPolicy(t, data, false) + + t.Run("Case=ACNPNamespaceIsolation", func(t *testing.T) { testNodeACNPNamespaceIsolation(t) }) + t.Run("Case=ACNPClusterGroupUpdate", func(t *testing.T) { testNodeACNPClusterGroupUpdate(t) }) + t.Run("Case=ACNPClusterGroupRefRuleIPBlocks", func(t *testing.T) { testNodeACNPClusterGroupRefRuleIPBlocks(t) }) + t.Run("Case=ACNPNestedClusterGroup", func(t *testing.T) { testNodeACNPNestedClusterGroupCreateAndUpdate(t, data) }) + t.Run("Case=ACNPNestedIPBlockClusterGroup", func(t *testing.T) { testNodeACNPNestedIPBlockClusterGroupCreateAndUpdate(t) }) + + k8sUtils.Cleanup(namespaces) +} + +// testNodeACNPAllowNoDefaultIsolation tests that no default isolation rules are created for ACNPs applied to Node. +func testNodeACNPAllowNoDefaultIsolation(t *testing.T, protocol AntreaPolicyProtocol) { + if protocol == ProtocolSCTP { + // SCTP testing is failing on our IPv6 CI testbeds at the moment. This seems to be + // related to an issue with ESX networking for SCTPv6 traffic when the Pods are on + // different Node VMs which are themselves on different ESX hosts. We are + // investigating the issue and disabling the tests for IPv6 clusters in the + // meantime. + skipIfIPv6Cluster(t) + } + builder1 := &ClusterNetworkPolicySpecBuilder{} + builder1 = builder1.SetName("acnp-allow-x-from-y-ingress"). + SetPriority(1.1). 
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + builder1.AddIngress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + + builder2 := &ClusterNetworkPolicySpecBuilder{} + builder2 = builder2.SetName("acnp-allow-x-to-y-egress"). + SetPriority(1.1). + SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + builder2.AddEgress(protocol, &p81, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + + reachability := NewReachability(allPods, Connected) + testStep := []*TestStep{ + { + "Port 81", + reachability, + []metav1.Object{builder1.Get(), builder2.Get()}, + []int32{81}, + protocol, + 0, + nil, + }, + } + testCase := []*TestCase{ + {"ACNP Allow No Default Isolation", testStep}, + } + executeTests(t, testCase) +} + +// testNodeACNPDropEgress tests that an ACNP applied to Node is able to drop egress traffic from Node x to Node y. +func testNodeACNPDropEgress(t *testing.T, protocol AntreaPolicyProtocol) { + if protocol == ProtocolSCTP { + // SCTP testing is failing on our IPv6 CI testbeds at the moment. This seems to be + // related to an issue with ESX networking for SCTPv6 traffic when the Pods are on + // different Node VMs which are themselves on different ESX hosts. We are + // investigating the issue and disabling the tests for IPv6 clusters in the + // meantime. 
+ skipIfIPv6Cluster(t) + } + if protocol == ProtocolUDP { + // For UDP, when action `Drop` is specified in an egress rule, we got the unexpected message like the follows: + // UNKNOWN: write udp 172.18.0.3:58150->172.18.0.2:80: write: operation not permitted + t.Skip("Skipping test as dropping UDP egress traffic in doesn't return the expected stdout or stderr message") + } + builder := &ClusterNetworkPolicySpecBuilder{} + builder = builder.SetName("acnp-drop-x-to-y-egress"). + SetPriority(1.0). + SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + + reachability := NewReachability(allPods, Connected) + reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) + testStep := []*TestStep{ + { + "Port 80", + reachability, + []metav1.Object{builder.Get()}, + []int32{80}, + protocol, + 0, + nil, + }, + } + testCase := []*TestCase{ + {"ACNP Drop Egress From Node:x to Node:y", testStep}, + } + executeTests(t, testCase) +} + +// testNodeACNPDropIngress tests that an ACNP applied to Node is able to drop ingress traffic from Node y to Node x. +func testNodeACNPDropIngress(t *testing.T, protocol AntreaPolicyProtocol) { + if protocol == ProtocolSCTP { + // SCTP testing is failing on our IPv6 CI testbeds at the moment. This seems to be + // related to an issue with ESX networking for SCTPv6 traffic when the Pods are on + // different Node VMs which are themselves on different ESX hosts. We are + // investigating the issue and disabling the tests for IPv6 clusters in the + // meantime. + skipIfIPv6Cluster(t) + } + builder := &ClusterNetworkPolicySpecBuilder{} + builder = builder.SetName("acnp-drop-x-from-y-ingress"). + SetPriority(1.0). 
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + + reachability := NewReachability(allPods, Connected) + reachability.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) + testStep := []*TestStep{ + { + "Port 80", + reachability, + []metav1.Object{builder.Get()}, + []int32{80}, + protocol, + 0, + nil, + }, + } + testCase := []*TestCase{ + {"ACNP Drop Ingress From Node:y to Node:x", testStep}, + } + executeTests(t, testCase) +} + +// testACNPPortRange tests the port range in an ACNP applied to Node can work. +func testNodeACNPPortRange(t *testing.T) { + builder := &ClusterNetworkPolicySpecBuilder{} + builder = builder.SetName("acnp-drop-x-to-y-egress-port-range"). + SetPriority(1.0). + SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + builder.AddEgress(ProtocolTCP, &p8080, nil, &p8082, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "acnp-port-range", nil) + + reachability := NewReachability(allPods, Connected) + reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) + testSteps := []*TestStep{ + { + fmt.Sprintf("ACNP Drop Ports 8080:8082"), + reachability, + []metav1.Object{builder.Get()}, + []int32{8080, 8081, 8082}, + ProtocolTCP, + 0, + nil, + }, + } + + testCase := []*TestCase{ + {"ACNP Drop Egress From Node:x to Node:y with a portRange", testSteps}, + } + executeTests(t, testCase) +} + +// testNodeACNPSourcePort tests ACNP applied to Node source port filtering. 
The agnhost image used in E2E tests uses +// ephemeral ports to initiate TCP connections, which should be 32768–60999 by default (https://en.wikipedia.org/wiki/Ephemeral_port). +// This test retrieves the port range from the client Pod and uses it in sourcePort and sourceEndPort of an ACNP rule to +// verify that packets can be matched by source port. +func testNodeACNPSourcePort(t *testing.T) { + portStart, portEnd, err := k8sUtils.getTCPv4SourcePortRangeFromPod(namespaces["x"], "a") + failOnError(err, t) + builder := &ClusterNetworkPolicySpecBuilder{} + builder = builder.SetName("acnp-source-port"). + SetPriority(1.0). + SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + builder.AddIngressForSrcPort(ProtocolTCP, nil, nil, &portStart, &portEnd, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + + builder2 := &ClusterNetworkPolicySpecBuilder{} + builder2 = builder2.SetName("acnp-source-port"). + SetPriority(1.0). + SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + builder2.AddIngressForSrcPort(ProtocolTCP, &p80, nil, &portStart, &portEnd, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + + builder3 := &ClusterNetworkPolicySpecBuilder{} + builder3 = builder3.SetName("acnp-source-port"). + SetPriority(1.0). 
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + builder3.AddIngressForSrcPort(ProtocolTCP, &p80, &p81, &portStart, &portEnd, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + + reachability := NewReachability(allPods, Connected) + reachability.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) + // After adding the dst port constraint of port 80, traffic on port 81 should not be affected. + updatedReachability := NewReachability(allPods, Connected) + + testSteps := []*TestStep{ + { + "Port 80", + reachability, + []metav1.Object{builder.Get()}, + []int32{80}, + ProtocolTCP, + 0, + nil, + }, + { + "Port 81", + updatedReachability, + []metav1.Object{builder2.Get()}, + []int32{81}, + ProtocolTCP, + 0, + nil, + }, + { + "Port range 80-81", + reachability, + []metav1.Object{builder3.Get()}, + []int32{80, 81}, + ProtocolTCP, + 0, + nil, + }, + } + testCase := []*TestCase{ + {"ACNP Drop Node:y to Node:x based on source port", testSteps}, + } + executeTests(t, testCase) +} + +// testNodeACNPRejectEgress tests that an ACNP applied to Node drops egress traffic from Node x to Node y when the action +// is REJECT, due to that when using the iptables action REJECT in the OUTPUT chain of the filter table, it defaults to +// the DROP action as an ICMP error message cannot be sent from a host to itself. +func testNodeACNPRejectEgress(t *testing.T, protocol AntreaPolicyProtocol) { + if protocol == ProtocolSCTP { + // SCTP testing is failing on our IPv6 CI testbeds at the moment. This seems to be + // related to an issue with ESX networking for SCTPv6 traffic when the Pods are on + // different Node VMs which are themselves on different ESX hosts. We are + // investigating the issue and disabling the tests for IPv6 clusters in the + // meantime. 
+ skipIfIPv6Cluster(t) + } + if protocol == ProtocolUDP { + // For UDP, when action `Reject` is specified in an egress rule, we got the unexpected message like the follows: + // UNKNOWN: write udp 172.18.0.3:58150->172.18.0.2:80: write: operation not permitted + t.Skip("Skipping test as dropping UDP egress traffic in doesn't return the expected stdout or stderr message") + } + + builder := &ClusterNetworkPolicySpecBuilder{} + builder = builder.SetName("acnp-reject-x-to-y-egress"). + SetPriority(1.0). + SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + + reachability := NewReachability(allPods, Connected) + + expectedResult := Rejected + // For SCTP, when the `Rejected` is specified in an egress rule, it behaves identical to `Dropped`. + if protocol == ProtocolSCTP { + expectedResult = Dropped + } + + reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), expectedResult) + testStep := []*TestStep{ + { + "Port 80", + reachability, + []metav1.Object{builder.Get()}, + []int32{80}, + protocol, + 0, + nil, + }, + } + testCase := []*TestCase{ + {"ACNP Reject Egress From Node:x to Node:y", testStep}, + } + executeTests(t, testCase) +} + +// testNodeACNPRejectIngress tests that an ACNP applied Node to is able to reject ingress traffic from Node y to Node x. +func testNodeACNPRejectIngress(t *testing.T, protocol AntreaPolicyProtocol) { + builder := &ClusterNetworkPolicySpecBuilder{} + builder = builder.SetName("acnp-reject-x-from-y-ingress"). + SetPriority(1.0). 
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionReject, "", "", nil) + + reachability := NewReachability(allPods, Connected) + reachability.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Rejected) + testStep := []*TestStep{ + { + "Port 80", + reachability, + []metav1.Object{builder.Get()}, + []int32{80}, + protocol, + 0, + nil, + }, + } + testCase := []*TestCase{ + {"ACNP Reject ingress from Node:y to Node:x", testStep}, + } + executeTests(t, testCase) +} + +// testNodeACNPNoEffectOnOtherProtocols tests that an ACNP applied Node which drops TCP traffic won't affect other protocols (e.g. UDP). +func testNodeACNPNoEffectOnOtherProtocols(t *testing.T) { + builder := &ClusterNetworkPolicySpecBuilder{} + builder = builder.SetName("acnp-drop-x-from-y-ingress"). + SetPriority(1.0). 
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + + reachability1 := NewReachability(allPods, Connected) + reachability1.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) + + reachability2 := NewReachability(allPods, Connected) + + testStep := []*TestStep{ + { + "Port 80", + reachability1, + []metav1.Object{builder.Get()}, + []int32{80}, + ProtocolTCP, + 0, + nil, + }, + { + "Port 80", + reachability2, + []metav1.Object{builder.Get()}, + []int32{80}, + ProtocolUDP, + 0, + nil, + }, + } + testCase := []*TestCase{ + {"ACNP Drop Ingress From Node:y to Node:x TCP Not UDP", testStep}, + } + executeTests(t, testCase) +} + +// testNodeACNPPriorityOverride tests priority overriding in three ACNPs applied to Node. Those three ACNPs are synced in +// a specific order to test priority reassignment, and each controls a smaller set of traffic patterns as priority increases. +func testNodeACNPPriorityOverride(t *testing.T) { + builder1 := &ClusterNetworkPolicySpecBuilder{} + builder1 = builder1.SetName("acnp-priority1"). + SetPriority(1.001). + SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + // Highest priority. Drops traffic from y to x. + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + + builder2 := &ClusterNetworkPolicySpecBuilder{} + builder2 = builder2.SetName("acnp-priority2"). + SetPriority(1.002). + SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + // Medium priority. Allows traffic from y to x. 
+ builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + + builder3 := &ClusterNetworkPolicySpecBuilder{} + builder3 = builder3.SetName("acnp-priority3"). + SetPriority(1.003). + SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + // Lowest priority. Drops traffic from y to x. + builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + + reachabilityTwoACNPs := NewReachability(allPods, Connected) + + reachabilityAllACNPs := NewReachability(allPods, Connected) + reachabilityAllACNPs.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) + + testStepTwoACNP := []*TestStep{ + { + "Two Policies with different priorities", + reachabilityTwoACNPs, + []metav1.Object{builder3.Get(), builder2.Get()}, + []int32{80}, + ProtocolTCP, + 0, + nil, + }, + } + // Create the Policies in specific order to make sure that priority re-assignments work as expected. + testStepAll := []*TestStep{ + { + "All three Policies", + reachabilityAllACNPs, + []metav1.Object{builder3.Get(), builder1.Get(), builder2.Get()}, + []int32{80}, + ProtocolTCP, + 0, + nil, + }, + } + testCase := []*TestCase{ + {"ACNP PriorityOverride Intermediate", testStepTwoACNP}, + {"ACNP PriorityOverride All", testStepAll}, + } + executeTests(t, testCase) +} + +// testNodeACNPTierOverride tests tier priority overriding in three ACNPs applied to Node. Each ACNP controls a smaller +// set of traffic patterns as tier priority increases. +func testNodeACNPTierOverride(t *testing.T) { + builder1 := &ClusterNetworkPolicySpecBuilder{} + builder1 = builder1.SetName("acnp-tier-emergency"). + SetTier("emergency"). + SetPriority(100). 
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + // Highest priority tier. Drops traffic from y to x. + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + + builder2 := &ClusterNetworkPolicySpecBuilder{} + builder2 = builder2.SetName("acnp-tier-securityops"). + SetTier("securityops"). + SetPriority(10). + SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": namespaces["x"]}}}) + // Medium priority tier. Allows traffic from y to x. + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + + builder3 := &ClusterNetworkPolicySpecBuilder{} + builder3 = builder3.SetName("acnp-tier-application"). + SetTier("application"). + SetPriority(1). + SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": namespaces["x"]}}}) + // Lowest priority tier. Drops traffic from y to x. 
+ builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + + reachabilityTwoACNPs := NewReachability(allPods, Connected) + + reachabilityAllACNPs := NewReachability(allPods, Connected) + reachabilityAllACNPs.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) + + testStepTwoACNP := []*TestStep{ + { + "Two Policies in different tiers", + reachabilityTwoACNPs, + []metav1.Object{builder3.Get(), builder2.Get()}, + []int32{80}, + ProtocolTCP, + 0, + nil, + }, + } + testStepAll := []*TestStep{ + { + "All three Policies in different tiers", + reachabilityAllACNPs, + []metav1.Object{builder3.Get(), builder1.Get(), builder2.Get()}, + []int32{80}, + ProtocolTCP, + 0, + nil, + }, + } + testCase := []*TestCase{ + {"ACNP TierOverride Intermediate", testStepTwoACNP}, + {"ACNP TierOverride All", testStepAll}, + } + executeTests(t, testCase) +} + +// testNodeACNPCustomTiers tests tier priority overriding in two ACNPs applied to Node with custom created tiers. Each ACNP +// controls a smaller set of traffic patterns as tier priority increases. +func testNodeACNPCustomTiers(t *testing.T) { + k8sUtils.DeleteTier("high-priority") + k8sUtils.DeleteTier("low-priority") + // Create two custom tiers with tier priority immediately next to each other. + _, err := k8sUtils.CreateNewTier("high-priority", 245) + failOnError(err, t) + _, err = k8sUtils.CreateNewTier("low-priority", 246) + failOnError(err, t) + + builder1 := &ClusterNetworkPolicySpecBuilder{} + builder1 = builder1.SetName("acnp-tier-high"). + SetTier("high-priority"). + SetPriority(100). + SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + // Medium priority tier. Allows traffic from y to x. 
+ builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + + builder2 := &ClusterNetworkPolicySpecBuilder{} + builder2 = builder2.SetName("acnp-tier-low"). + SetTier("low-priority"). + SetPriority(1). + SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + // Lowest priority tier. Drops traffic from y to x. + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + + reachabilityOneACNP := NewReachability(allPods, Connected) + reachabilityOneACNP.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) + testStepOneACNP := []*TestStep{ + { + "One Policy", + reachabilityOneACNP, + []metav1.Object{builder2.Get()}, + []int32{80}, + ProtocolTCP, + 0, + nil, + }, + } + + reachabilityTwoACNPs := NewReachability(allPods, Connected) + testStepTwoACNP := []*TestStep{ + { + "Two Policies in different tiers", + reachabilityTwoACNPs, + []metav1.Object{builder2.Get(), builder1.Get()}, + []int32{80}, + ProtocolTCP, + 0, + nil, + }, + } + testCase := []*TestCase{ + {"ACNP Custom Tier priority with one policy", testStepOneACNP}, + {"ACNP Custom Tier priority with two policies", testStepTwoACNP}, + } + executeTests(t, testCase) + // Cleanup customized tiers. ACNPs created in those tiers need to be deleted first. + failOnError(k8sUtils.CleanACNPs(), t) + failOnError(k8sUtils.DeleteTier("high-priority"), t) + failOnError(k8sUtils.DeleteTier("low-priority"), t) + time.Sleep(networkPolicyDelay) +} + +// testNodeACNPPriorityConflictingRule tests that if there are two ACNPs applied to Node in the cluster with rules that +// conflicts with each other, the ACNP with higher priority will prevail. 
+func testNodeACNPPriorityConflictingRule(t *testing.T) { + builder1 := &ClusterNetworkPolicySpecBuilder{} + builder1 = builder1.SetName("acnp-drop"). + SetPriority(1). + SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + + builder2 := &ClusterNetworkPolicySpecBuilder{} + builder2 = builder2.SetName("acnp-allow"). + SetPriority(2). + SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + // The following ingress rule will take no effect as it is exactly the same as ingress rule of acnp-drop, + // but acnp-allow has lower priority. + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{labelNodeHostname: nodes["y"]}, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionAllow, "", "", nil) + + reachabilityBothACNP := NewReachability(allPods, Connected) + reachabilityBothACNP.Expect(Pod(namespaces["y"]+"/a"), Pod(namespaces["x"]+"/a"), Dropped) + testStep := []*TestStep{ + { + "Both ACNP", + reachabilityBothACNP, + []metav1.Object{builder1.Get(), builder2.Get()}, + []int32{80}, + ProtocolTCP, + 0, + nil, + }, + } + testCase := []*TestCase{ + {"ACNP Priority Conflicting Rule", testStep}, + } + executeTests(t, testCase) +} + +func testNodeACNPNamespaceIsolation(t *testing.T) { + builder1 := &ClusterNetworkPolicySpecBuilder{} + builder1 = builder1.SetName("test-acnp-ns-isolation"). + SetTier("baseline"). + SetPriority(1.0). 
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + builder1.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, map[string]string{"ns": namespaces["y"]}, nil, nil, nil, + false, nil, crdv1beta1.RuleActionDrop, "", "", nil) + + reachability1 := NewReachability(allPods, Connected) + reachability1.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["y"], Dropped) + testStep1 := &TestStep{ + "Port 80", + reachability1, + []metav1.Object{builder1.Get()}, + []int32{80}, + ProtocolTCP, + 0, + nil, + } + + testCase := []*TestCase{ + {"ACNP Namespace isolation for namespace y", []*TestStep{testStep1}}, + } + executeTests(t, testCase) +} + +func testNodeACNPClusterGroupUpdate(t *testing.T) { + cgName := "cg-ns-z-then-y" + cgBuilder := &ClusterGroupSpecBuilder{} + cgBuilder = cgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": namespaces["z"]}, nil) + // Update CG NS selector to group Pods from Namespace Y + updatedCgBuilder := &ClusterGroupSpecBuilder{} + updatedCgBuilder = updatedCgBuilder.SetName(cgName).SetNamespaceSelector(map[string]string{"ns": namespaces["y"]}, nil) + builder := &ClusterNetworkPolicySpecBuilder{} + builder = builder.SetName("acnp-deny-a-to-cg-with-z-egress"). + SetPriority(1.0). 
+ SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) + + reachability := NewReachability(allPods, Connected) + reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) + + updatedReachability := NewReachability(allPods, Connected) + updatedReachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["y"], Dropped) + testStep := []*TestStep{ + { + "Port 80", + reachability, + []metav1.Object{cgBuilder.Get(), builder.Get()}, + []int32{80}, + ProtocolTCP, + 0, + nil, + }, + { + "Port 80 - update", + updatedReachability, + []metav1.Object{updatedCgBuilder.Get()}, + []int32{80}, + ProtocolTCP, + 0, + nil, + }, + } + testCase := []*TestCase{ + {"ACNP Drop Egress From Node:x to ClusterGroup with NS:z updated to ClusterGroup with NS:y", testStep}, + } + executeTests(t, testCase) +} + +func testNodeACNPClusterGroupRefRuleIPBlocks(t *testing.T) { + podYAIP, _ := podIPs[namespaces["y"]+"/a"] + podZAIP, _ := podIPs[namespaces["z"]+"/a"] + // There are three situations of a Pod's IP(s): + // 1. Only one IPv4 address. + // 2. Only one IPv6 address. + // 3. One IPv4 and one IPv6 address, and we don't know the order in list. + // We need to add all IP(s) of Pods as CIDR to IPBlock. + genCIDR := func(ip string) string { + if strings.Contains(ip, ".") { + return ip + "/32" + } + return ip + "/128" + } + var ipBlock1, ipBlock2 []crdv1beta1.IPBlock + for i := 0; i < len(podYAIP); i++ { + ipBlock1 = append(ipBlock1, crdv1beta1.IPBlock{CIDR: genCIDR(podYAIP[i])}) + ipBlock2 = append(ipBlock2, crdv1beta1.IPBlock{CIDR: genCIDR(podZAIP[i])}) + } + + cgName := "cg-ipblocks-pod-in-ns-y" + cgBuilder := &ClusterGroupSpecBuilder{} + cgBuilder = cgBuilder.SetName(cgName). 
+ SetIPBlocks(ipBlock1) + cgName2 := "cg-ipblock-pod-in-ns-z" + cgBuilder2 := &ClusterGroupSpecBuilder{} + cgBuilder2 = cgBuilder2.SetName(cgName2). + SetIPBlocks(ipBlock2) + + builder := &ClusterNetworkPolicySpecBuilder{} + builder = builder.SetName("acnp-deny-x-to-yz-egress"). + SetPriority(1.0). + SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName, "", nil) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgName2, "", nil) + + reachability := NewReachability(allPods, Connected) + reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) + reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["z"]+"/a"), Dropped) + testStep := []*TestStep{ + { + "Port 80", + reachability, + []metav1.Object{builder.Get(), cgBuilder.Get(), cgBuilder2.Get()}, + []int32{80}, + ProtocolTCP, + 0, + nil, + }, + } + testCase := []*TestCase{ + {"ACNP Drop Egress From Node x to Pod y/a and z/a to ClusterGroup with ipBlocks", testStep}, + } + executeTests(t, testCase) +} + +func testNodeACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) { + cg1Name := "cg-1" + cgBuilder1 := &ClusterGroupSpecBuilder{} + cgBuilder1 = cgBuilder1.SetName(cg1Name).SetNamespaceSelector(map[string]string{"ns": namespaces["y"]}, nil) + cgNestedName := "cg-nested" + cgBuilderNested := &ClusterGroupSpecBuilder{} + cgBuilderNested = cgBuilderNested.SetName(cgNestedName).SetChildGroups([]string{cg1Name}) + + builder := &ClusterNetworkPolicySpecBuilder{} + builder = builder.SetName("cnp-nested-cg").SetPriority(1.0). + SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}). 
+ AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + false, nil, crdv1beta1.RuleActionDrop, cgNestedName, "", nil) + + reachability := NewReachability(allPods, Connected) + reachability.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["y"], Dropped) + testStep1 := &TestStep{ + "Port 80", + reachability, + // Note in this testcase the ClusterGroup is created after the ACNP + []metav1.Object{builder.Get(), cgBuilder1.Get(), cgBuilderNested.Get()}, + []int32{80}, + ProtocolTCP, + 0, + nil, + } + + cg2Name := "cg-2" + cgBuilder2 := &ClusterGroupSpecBuilder{} + cgBuilder2 = cgBuilder2.SetName(cg2Name).SetNamespaceSelector(map[string]string{"ns": namespaces["z"]}, nil) + cgBuilderNested = cgBuilderNested.SetChildGroups([]string{cg2Name}) + reachability2 := NewReachability(allPods, Connected) + reachability2.ExpectEgressToNamespace(Pod(namespaces["x"]+"/a"), namespaces["z"], Dropped) + testStep2 := &TestStep{ + "Port 80 updated", + reachability2, + []metav1.Object{cgBuilder2.Get(), cgBuilderNested.Get()}, + []int32{80}, + ProtocolTCP, + 0, + nil, + } + + testSteps := []*TestStep{testStep1, testStep2} + testCase := []*TestCase{ + {"ACNP nested ClusterGroup create and update", testSteps}, + } + executeTestsWithData(t, testCase, data) +} + +func testNodeACNPNestedIPBlockClusterGroupCreateAndUpdate(t *testing.T) { + podYAIP, _ := podIPs[namespaces["y"]+"/a"] + podZAIP, _ := podIPs[namespaces["z"]+"/a"] + genCIDR := func(ip string) string { + switch IPFamily(ip) { + case "v4": + return ip + "/32" + case "v6": + return ip + "/128" + default: + return "" + } + } + cg1Name, cg2Name := "cg-y", "cg-z" + cgParentName := "cg-parent" + var ipBlockYA, ipBlockZA []crdv1beta1.IPBlock + for i := 0; i < len(podYAIP); i++ { + ipBlockYA = append(ipBlockYA, crdv1beta1.IPBlock{CIDR: genCIDR(podYAIP[i])}) + ipBlockZA = append(ipBlockZA, crdv1beta1.IPBlock{CIDR: genCIDR(podZAIP[i])}) + } + cgBuilder1 := &ClusterGroupSpecBuilder{} + 
cgBuilder1 = cgBuilder1.SetName(cg1Name).SetIPBlocks(ipBlockYA) + cgBuilder2 := &ClusterGroupSpecBuilder{} + cgBuilder2 = cgBuilder2.SetName(cg2Name).SetIPBlocks(ipBlockZA) + cgParent := &ClusterGroupSpecBuilder{} + cgParent = cgParent.SetName(cgParentName).SetChildGroups([]string{cg1Name, cg2Name}) + + builder := &ClusterNetworkPolicySpecBuilder{} + builder = builder.SetName("acnp-deny-x-to-yz-egress"). + SetPriority(1.0). + SetAppliedToGroup([]ACNPAppliedToSpec{{NodeSelector: map[string]string{labelNodeHostname: nodes["x"]}}}) + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, + nil, nil, nil, false, nil, crdv1beta1.RuleActionDrop, cgParentName, "", nil) + + reachability := NewReachability(allPods, Connected) + reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) + reachability.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["z"]+"/a"), Dropped) + testStep := &TestStep{ + "Port 80", + reachability, + []metav1.Object{builder.Get(), cgBuilder1.Get(), cgBuilder2.Get(), cgParent.Get()}, + []int32{80}, + ProtocolTCP, + 0, + nil, + } + + cgParent = cgParent.SetChildGroups([]string{cg1Name}) + + reachability2 := NewReachability(allPods, Connected) + reachability2.Expect(Pod(namespaces["x"]+"/a"), Pod(namespaces["y"]+"/a"), Dropped) + testStep2 := &TestStep{ + "Port 80, updated", + reachability2, + []metav1.Object{cgParent.Get()}, + []int32{80}, + ProtocolTCP, + 0, + nil, + } + + testCase := []*TestCase{ + {"ACNP Drop Ingress From Node x to Pod y/a and z/a with nested ClusterGroup with ipBlocks", []*TestStep{testStep, testStep2}}, + } + executeTests(t, testCase) +} diff --git a/test/e2e/utils/cnp_spec_builder.go b/test/e2e/utils/cnp_spec_builder.go index 83d0ee424bb..0c6acdc7585 100644 --- a/test/e2e/utils/cnp_spec_builder.go +++ b/test/e2e/utils/cnp_spec_builder.go @@ -27,12 +27,14 @@ type ClusterNetworkPolicySpecBuilder struct { } type ACNPAppliedToSpec struct { - PodSelector map[string]string - 
NSSelector map[string]string - PodSelectorMatchExp []metav1.LabelSelectorRequirement - NSSelectorMatchExp []metav1.LabelSelectorRequirement - Group string - Service *crdv1beta1.NamespacedName + PodSelector map[string]string + NodeSelector map[string]string + NSSelector map[string]string + PodSelectorMatchExp []metav1.LabelSelectorRequirement + NodeSelectorMatchExp []metav1.LabelSelectorRequirement + NSSelectorMatchExp []metav1.LabelSelectorRequirement + Group string + Service *crdv1beta1.NamespacedName } func (b *ClusterNetworkPolicySpecBuilder) Get() *crdv1beta1.ClusterNetworkPolicy { @@ -67,37 +69,54 @@ func (b *ClusterNetworkPolicySpecBuilder) SetTier(tier string) *ClusterNetworkPo func (b *ClusterNetworkPolicySpecBuilder) SetAppliedToGroup(specs []ACNPAppliedToSpec) *ClusterNetworkPolicySpecBuilder { for _, spec := range specs { - appliedToPeer := b.GetAppliedToPeer(spec.PodSelector, spec.NSSelector, spec.PodSelectorMatchExp, spec.NSSelectorMatchExp, spec.Group, spec.Service) + appliedToPeer := b.GetAppliedToPeer(spec.PodSelector, + spec.NodeSelector, + spec.NSSelector, + spec.PodSelectorMatchExp, + spec.NodeSelectorMatchExp, + spec.NSSelectorMatchExp, + spec.Group, + spec.Service) b.Spec.AppliedTo = append(b.Spec.AppliedTo, appliedToPeer) } return b } func (b *ClusterNetworkPolicySpecBuilder) GetAppliedToPeer(podSelector map[string]string, + nodeSelector map[string]string, nsSelector map[string]string, podSelectorMatchExp []metav1.LabelSelectorRequirement, + nodeSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, appliedToCG string, service *crdv1beta1.NamespacedName) crdv1beta1.AppliedTo { - var ps *metav1.LabelSelector - var ns *metav1.LabelSelector + var podSel *metav1.LabelSelector + var nodeSel *metav1.LabelSelector + var nsSel *metav1.LabelSelector if podSelector != nil || podSelectorMatchExp != nil { - ps = &metav1.LabelSelector{ + podSel = &metav1.LabelSelector{ MatchLabels: podSelector, 
MatchExpressions: podSelectorMatchExp, } } + if nodeSelector != nil || nodeSelectorMatchExp != nil { + nodeSel = &metav1.LabelSelector{ + MatchLabels: nodeSelector, + MatchExpressions: nodeSelectorMatchExp, + } + } if nsSelector != nil || nsSelectorMatchExp != nil { - ns = &metav1.LabelSelector{ + nsSel = &metav1.LabelSelector{ MatchLabels: nsSelector, MatchExpressions: nsSelectorMatchExp, } } peer := crdv1beta1.AppliedTo{ - PodSelector: ps, - NamespaceSelector: ns, + PodSelector: podSel, + NodeSelector: nodeSel, + NamespaceSelector: nsSel, } if appliedToCG != "" { peer.Group = appliedToCG @@ -110,12 +129,13 @@ func (b *ClusterNetworkPolicySpecBuilder) GetAppliedToPeer(podSelector map[strin func (b *ClusterNetworkPolicySpecBuilder) AddIngress(protoc AntreaPolicyProtocol, port *int32, portName *string, endPort, icmpType, icmpCode, igmpType *int32, - groupAddress, cidr *string, podSelector map[string]string, nsSelector map[string]string, - podSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, selfNS bool, + groupAddress, cidr *string, podSelector map[string]string, nodeSelector map[string]string, nsSelector map[string]string, + podSelectorMatchExp []metav1.LabelSelectorRequirement, nodeSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, selfNS bool, ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1beta1.RuleAction, ruleClusterGroup, name string, serviceAccount *crdv1beta1.NamespacedName) *ClusterNetworkPolicySpecBuilder { - var pSel *metav1.LabelSelector - var nSel *metav1.LabelSelector + var podSel *metav1.LabelSelector + var nodeSel *metav1.LabelSelector + var nsSel *metav1.LabelSelector var ns *crdv1beta1.PeerNamespaces var appliedTos []crdv1beta1.AppliedTo matchSelf := crdv1beta1.NamespaceMatchSelf @@ -125,13 +145,19 @@ func (b *ClusterNetworkPolicySpecBuilder) AddIngress(protoc AntreaPolicyProtocol } if podSelector != nil || podSelectorMatchExp != nil 
{ - pSel = &metav1.LabelSelector{ + podSel = &metav1.LabelSelector{ MatchLabels: podSelector, MatchExpressions: podSelectorMatchExp, } } + if nodeSelector != nil || nodeSelectorMatchExp != nil { + nodeSel = &metav1.LabelSelector{ + MatchLabels: nodeSelector, + MatchExpressions: nodeSelectorMatchExp, + } + } if nsSelector != nil || nsSelectorMatchExp != nil { - nSel = &metav1.LabelSelector{ + nsSel = &metav1.LabelSelector{ MatchLabels: nsSelector, MatchExpressions: nsSelectorMatchExp, } @@ -148,14 +174,22 @@ func (b *ClusterNetworkPolicySpecBuilder) AddIngress(protoc AntreaPolicyProtocol } } for _, at := range ruleAppliedToSpecs { - appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector, at.NSSelector, at.PodSelectorMatchExp, at.NSSelectorMatchExp, at.Group, at.Service)) + appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector, + at.NodeSelector, + at.NSSelector, + at.PodSelectorMatchExp, + at.NodeSelectorMatchExp, + at.NSSelectorMatchExp, + at.Group, + at.Service)) } // An empty From/To in ACNP rules evaluates to match all addresses. policyPeer := make([]crdv1beta1.NetworkPolicyPeer, 0) - if pSel != nil || nSel != nil || ns != nil || ipBlock != nil || ruleClusterGroup != "" || serviceAccount != nil { + if podSel != nil || nodeSel != nil || nsSel != nil || ns != nil || ipBlock != nil || ruleClusterGroup != "" || serviceAccount != nil { policyPeer = []crdv1beta1.NetworkPolicyPeer{{ - PodSelector: pSel, - NamespaceSelector: nSel, + PodSelector: podSel, + NodeSelector: nodeSel, + NamespaceSelector: nsSel, Namespaces: ns, IPBlock: ipBlock, Group: ruleClusterGroup, @@ -180,12 +214,13 @@ func (b *ClusterNetworkPolicySpecBuilder) AddIngress(protoc AntreaPolicyProtocol // all conflicting PRs are merged. 
func (b *ClusterNetworkPolicySpecBuilder) AddIngressForSrcPort(protoc AntreaPolicyProtocol, port, endPort, srcPort, endSrcPort, icmpType, icmpCode, igmpType *int32, - groupAddress, cidr *string, podSelector map[string]string, nsSelector map[string]string, - podSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, selfNS bool, + groupAddress, cidr *string, podSelector map[string]string, nodeSelector map[string]string, nsSelector map[string]string, + podSelectorMatchExp []metav1.LabelSelectorRequirement, nodeSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, selfNS bool, ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1beta1.RuleAction, ruleClusterGroup, name string, serviceAccount *crdv1beta1.NamespacedName) *ClusterNetworkPolicySpecBuilder { - var pSel *metav1.LabelSelector - var nSel *metav1.LabelSelector + var podSel *metav1.LabelSelector + var nodeSel *metav1.LabelSelector + var nsSel *metav1.LabelSelector var ns *crdv1beta1.PeerNamespaces var appliedTos []crdv1beta1.AppliedTo matchSelf := crdv1beta1.NamespaceMatchSelf @@ -195,13 +230,19 @@ func (b *ClusterNetworkPolicySpecBuilder) AddIngressForSrcPort(protoc AntreaPoli } if podSelector != nil || podSelectorMatchExp != nil { - pSel = &metav1.LabelSelector{ + podSel = &metav1.LabelSelector{ MatchLabels: podSelector, MatchExpressions: podSelectorMatchExp, } } + if nodeSelector != nil || nodeSelectorMatchExp != nil { + nodeSel = &metav1.LabelSelector{ + MatchLabels: nodeSelector, + MatchExpressions: nodeSelectorMatchExp, + } + } if nsSelector != nil || nsSelectorMatchExp != nil { - nSel = &metav1.LabelSelector{ + nsSel = &metav1.LabelSelector{ MatchLabels: nsSelector, MatchExpressions: nsSelectorMatchExp, } @@ -218,14 +259,22 @@ func (b *ClusterNetworkPolicySpecBuilder) AddIngressForSrcPort(protoc AntreaPoli } } for _, at := range ruleAppliedToSpecs { - appliedTos = append(appliedTos, 
b.GetAppliedToPeer(at.PodSelector, at.NSSelector, at.PodSelectorMatchExp, at.NSSelectorMatchExp, at.Group, at.Service)) + appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector, + at.NodeSelector, + at.NSSelector, + at.PodSelectorMatchExp, + at.NodeSelectorMatchExp, + at.NSSelectorMatchExp, + at.Group, + at.Service)) } // An empty From/To in ACNP rules evaluates to match all addresses. policyPeer := make([]crdv1beta1.NetworkPolicyPeer, 0) - if pSel != nil || nSel != nil || ns != nil || ipBlock != nil || ruleClusterGroup != "" || serviceAccount != nil { + if podSel != nil || nodeSel != nil || nsSel != nil || ns != nil || ipBlock != nil || ruleClusterGroup != "" || serviceAccount != nil { policyPeer = []crdv1beta1.NetworkPolicyPeer{{ - PodSelector: pSel, - NamespaceSelector: nSel, + PodSelector: podSel, + NodeSelector: nodeSel, + NamespaceSelector: nsSel, Namespaces: ns, IPBlock: ipBlock, Group: ruleClusterGroup, @@ -247,15 +296,15 @@ func (b *ClusterNetworkPolicySpecBuilder) AddIngressForSrcPort(protoc AntreaPoli func (b *ClusterNetworkPolicySpecBuilder) AddEgress(protoc AntreaPolicyProtocol, port *int32, portName *string, endPort, icmpType, icmpCode, igmpType *int32, - groupAddress, cidr *string, podSelector map[string]string, nsSelector map[string]string, - podSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, selfNS bool, + groupAddress, cidr *string, podSelector map[string]string, nodeSelector map[string]string, nsSelector map[string]string, + podSelectorMatchExp []metav1.LabelSelectorRequirement, nodeSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, selfNS bool, ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1beta1.RuleAction, ruleClusterGroup, name string, serviceAccount *crdv1beta1.NamespacedName) *ClusterNetworkPolicySpecBuilder { // For simplicity, we just reuse the Ingress code here. 
The underlying data model for ingress/egress is identical // With the exception of calling the rule `To` vs. `From`. c := &ClusterNetworkPolicySpecBuilder{} - c.AddIngress(protoc, port, portName, endPort, icmpType, icmpCode, igmpType, groupAddress, cidr, podSelector, nsSelector, - podSelectorMatchExp, nsSelectorMatchExp, selfNS, ruleAppliedToSpecs, action, ruleClusterGroup, name, serviceAccount) + c.AddIngress(protoc, port, portName, endPort, icmpType, icmpCode, igmpType, groupAddress, cidr, podSelector, nodeSelector, nsSelector, + podSelectorMatchExp, nodeSelectorMatchExp, nsSelectorMatchExp, selfNS, ruleAppliedToSpecs, action, ruleClusterGroup, name, serviceAccount) theRule := c.Get().Spec.Ingress[0] b.Spec.Egress = append(b.Spec.Egress, crdv1beta1.Rule{ @@ -272,7 +321,14 @@ func (b *ClusterNetworkPolicySpecBuilder) AddNodeSelectorRule(nodeSelector *meta ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1beta1.RuleAction, isEgress bool) *ClusterNetworkPolicySpecBuilder { var appliedTos []crdv1beta1.AppliedTo for _, at := range ruleAppliedToSpecs { - appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector, at.NSSelector, at.PodSelectorMatchExp, at.NSSelectorMatchExp, at.Group, at.Service)) + appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector, + at.NodeSelector, + at.NSSelector, + at.PodSelectorMatchExp, + at.NodeSelectorMatchExp, + at.NSSelectorMatchExp, + at.Group, + at.Service)) } policyPeer := []crdv1beta1.NetworkPolicyPeer{{NodeSelector: nodeSelector}} k8sProtocol, _ := AntreaPolicyProtocolToK8sProtocol(protoc) @@ -299,7 +355,14 @@ func (b *ClusterNetworkPolicySpecBuilder) AddFQDNRule(fqdn string, ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1beta1.RuleAction) *ClusterNetworkPolicySpecBuilder { var appliedTos []crdv1beta1.AppliedTo for _, at := range ruleAppliedToSpecs { - appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector, at.NSSelector, at.PodSelectorMatchExp, at.NSSelectorMatchExp, at.Group, at.Service)) + 
appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector, + at.NodeSelector, + at.NSSelector, + at.PodSelectorMatchExp, + at.NodeSelectorMatchExp, + at.NSSelectorMatchExp, + at.Group, + at.Service)) } policyPeer := []crdv1beta1.NetworkPolicyPeer{{FQDN: fqdn}} ports, _ := GenPortsOrProtocols(protoc, port, portName, endPort, nil, nil, nil, nil, nil, nil) @@ -318,7 +381,14 @@ func (b *ClusterNetworkPolicySpecBuilder) AddToServicesRule(svcRefs []crdv1beta1 name string, ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1beta1.RuleAction) *ClusterNetworkPolicySpecBuilder { var appliedTos []crdv1beta1.AppliedTo for _, at := range ruleAppliedToSpecs { - appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector, at.NSSelector, at.PodSelectorMatchExp, at.NSSelectorMatchExp, at.Group, at.Service)) + appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector, + at.NodeSelector, + at.NSSelector, + at.PodSelectorMatchExp, + at.NodeSelectorMatchExp, + at.NSSelectorMatchExp, + at.Group, + at.Service)) } newRule := crdv1beta1.Rule{ To: make([]crdv1beta1.NetworkPolicyPeer, 0), @@ -336,7 +406,14 @@ func (b *ClusterNetworkPolicySpecBuilder) AddStretchedIngressRule(pSel, nsSel ma var appliedTos []crdv1beta1.AppliedTo for _, at := range ruleAppliedToSpecs { - appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector, at.NSSelector, at.PodSelectorMatchExp, at.NSSelectorMatchExp, at.Group, at.Service)) + appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector, + at.NodeSelector, + at.NSSelector, + at.PodSelectorMatchExp, + at.NodeSelectorMatchExp, + at.NSSelectorMatchExp, + at.Group, + at.Service)) } newRule := crdv1beta1.Rule{ From: []crdv1beta1.NetworkPolicyPeer{{Scope: "ClusterSet"}}, diff --git a/test/integration/agent/route_test.go b/test/integration/agent/route_test.go index 65ae265a71c..974fb3f7919 100644 --- a/test/integration/agent/route_test.go +++ b/test/integration/agent/route_test.go @@ -145,7 +145,7 @@ func TestInitialize(t 
*testing.T) { for _, tc := range tcs { t.Logf("Running Initialize test with mode %s node config %s", tc.networkConfig.TrafficEncapMode, nodeConfig) - routeClient, err := route.NewClient(tc.networkConfig, tc.noSNAT, false, false, false, nil) + routeClient, err := route.NewClient(tc.networkConfig, tc.noSNAT, false, false, false, false, nil) assert.NoError(t, err) var xtablesReleasedTime, initializedTime time.Time @@ -252,7 +252,7 @@ func TestIpTablesSync(t *testing.T) { gwLink := createDummyGW(t) defer netlink.LinkDel(gwLink) - routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: config.TrafficEncapModeEncap, IPv4Enabled: true}, false, false, false, false, nil) + routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: config.TrafficEncapModeEncap, IPv4Enabled: true}, false, false, false, false, false, nil) assert.Nil(t, err) inited := make(chan struct{}) @@ -303,7 +303,7 @@ func TestAddAndDeleteSNATRule(t *testing.T) { gwLink := createDummyGW(t) defer netlink.LinkDel(gwLink) - routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: config.TrafficEncapModeEncap, IPv4Enabled: true}, false, false, false, false, nil) + routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: config.TrafficEncapModeEncap, IPv4Enabled: true}, false, false, false, false, false, nil) assert.Nil(t, err) inited := make(chan struct{}) @@ -357,7 +357,7 @@ func TestAddAndDeleteRoutes(t *testing.T) { for _, tc := range tcs { t.Logf("Running test with mode %s peer cidr %s peer ip %s node config %s", tc.mode, tc.peerCIDR, tc.peerIP, nodeConfig) - routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: tc.mode, IPv4Enabled: true}, false, false, false, false, nil) + routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: tc.mode, IPv4Enabled: true}, false, false, false, false, false, nil) assert.NoError(t, err) err = routeClient.Initialize(nodeConfig, func() {}) assert.NoError(t, 
err) @@ -422,7 +422,7 @@ func TestSyncRoutes(t *testing.T) { for _, tc := range tcs { t.Logf("Running test with mode %s peer cidr %s peer ip %s node config %s", tc.mode, tc.peerCIDR, tc.peerIP, nodeConfig) - routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: tc.mode, IPv4Enabled: true}, false, false, false, false, nil) + routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: tc.mode, IPv4Enabled: true}, false, false, false, false, false, nil) assert.NoError(t, err) err = routeClient.Initialize(nodeConfig, func() {}) assert.NoError(t, err) @@ -465,7 +465,7 @@ func TestSyncGatewayKernelRoute(t *testing.T) { } require.NoError(t, netlink.AddrAdd(gwLink, &netlink.Addr{IPNet: gwNet}), "configuring gw IP failed") - routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: config.TrafficEncapModeEncap}, false, false, false, false, nil) + routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: config.TrafficEncapModeEncap}, false, false, false, false, false, nil) assert.NoError(t, err) err = routeClient.Initialize(nodeConfig, func() {}) assert.NoError(t, err) @@ -559,7 +559,7 @@ func TestReconcile(t *testing.T) { for _, tc := range tcs { t.Logf("Running test with mode %s added routes %v desired routes %v", tc.mode, tc.addedRoutes, tc.desiredPeerCIDRs) - routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: tc.mode, IPv4Enabled: true}, false, false, false, false, nil) + routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: tc.mode, IPv4Enabled: true}, false, false, false, false, false, nil) assert.NoError(t, err) err = routeClient.Initialize(nodeConfig, func() {}) assert.NoError(t, err) @@ -598,7 +598,7 @@ func TestRouteTablePolicyOnly(t *testing.T) { gwLink := createDummyGW(t) defer netlink.LinkDel(gwLink) - routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: config.TrafficEncapModeNetworkPolicyOnly, IPv4Enabled: true}, 
false, false, false, false, nil) + routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: config.TrafficEncapModeNetworkPolicyOnly, IPv4Enabled: true}, false, false, false, false, false, nil) assert.NoError(t, err) err = routeClient.Initialize(nodeConfig, func() {}) assert.NoError(t, err) @@ -654,7 +654,7 @@ func TestIPv6RoutesAndNeighbors(t *testing.T) { gwLink := createDummyGW(t) defer netlink.LinkDel(gwLink) - routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: config.TrafficEncapModeEncap, IPv4Enabled: true, IPv6Enabled: true}, false, false, false, false, nil) + routeClient, err := route.NewClient(&config.NetworkConfig{TrafficEncapMode: config.TrafficEncapModeEncap, IPv4Enabled: true, IPv6Enabled: true}, false, false, false, false, false, nil) assert.Nil(t, err) _, ipv6Subnet, _ := net.ParseCIDR("fd74:ca9b:172:19::/64") gwIPv6 := net.ParseIP("fd74:ca9b:172:19::1")