diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 00000000..540f380e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,36 @@ +--- +name: Bug report +about: Create a report to help us improve STUNner +title: '' +labels: '' +assignees: '' + +--- + +### Description + +[Description of the problem] + +### Steps to Reproduce + +[Brief description of the steps you took to encounter the problem, if applicable] + +**Expected behavior:** [What you expected to happen] + +**Actual behavior:** [What actually happened] + +### Versions + +[Which version of STUNner you are using] + +### Info + +[Please copy-paste the output of the below commands and make sure to remove all sensitive information, like usernames, passwords, IP addresses, etc.] + +#### Gateway API status + +[Output of `kubectl get gateways,gatewayconfigs,gatewayclasses,udproutes.stunner.l7mp.io --all-namespaces -o yaml`] + +#### Operator logs + +[Output of `kubectl -n stunner-system logs $(kubectl get pods -l control-plane=stunner-gateway-operator-controller-manager --all-namespaces -o jsonpath='{.items[0].metadata.name}')`] diff --git a/.github/workflows/e2e-test.yml b/.github/workflows/e2e-test-dev.yaml similarity index 64% rename from .github/workflows/e2e-test.yml rename to .github/workflows/e2e-test-dev.yaml index 546b0d10..cc14c0ad 100644 --- a/.github/workflows/e2e-test.yml +++ b/.github/workflows/e2e-test-dev.yaml @@ -1,20 +1,20 @@ -name: Run End-to-End Test +name: Run End-to-End Test (dev) on: workflow_dispatch: schedule: - - cron: '0 11 * * 1' + - cron: '0 12 * * 1' jobs: e2e_test: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: - go-version: 1.19 + go-version: '1.23' - name: Download modules run: go mod download @@ -29,22 +29,22 @@ jobs: driver: docker container-runtime: containerd wait: all + cpus: max cache: false - name: Start minikube tunnel run: minikube tunnel &>mktunnel.log & - name: Set up Helm - uses: azure/setup-helm@v3 + uses: azure/setup-helm@v4 with: - version: v3.11.3 + version: v3.16.2 - name: Install STUNner run: | helm repo add stunner https://l7mp.io/stunner helm repo update - helm install stunner-gateway-operator stunner/stunner-gateway-operator-dev --create-namespace --namespace=stunner - helm install stunner stunner/stunner-dev --create-namespace --namespace=stunner + helm install stunner-gateway-operator stunner/stunner-gateway-operator-dev --create-namespace --namespace=stunner --set stunnerGatewayOperator.deployment.container.manager.resources.requests.cpu=200m --set stunnerGatewayOperator.dataplane.spec.resources.requests.cpu=100m - name: Deploy iperf server run: kubectl apply -f docs/examples/simple-tunnel/iperf-server.yaml @@ -61,12 +61,16 @@ jobs: - name: Wait for LoadBalancer IP run: | - while [[ -z $(kubectl get svc udp-gateway -n stunner -o jsonpath="{.status.loadBalancer.ingress[0].ip}") ]]; do echo "Waiting for LoadBalancer IP"; sleep 2; done + for n in {1..60}; do [[ ! 
-z $(kubectl get svc udp-gateway -n stunner -o jsonpath="{.status.loadBalancer.ingress[0].ip}") ]] && break; echo "Waiting for LoadBalancer IP"; sleep 2; done + echo "* wait for the deployment" + kubectl get all -A + kubectl wait -n stunner --for=condition=Available deployment udp-gateway --timeout 5m + echo "* EVERYTHING UP" kubectl get all -A - name: Start turncat run: | - ./turncat --log=all:INFO udp://127.0.0.1:5000 k8s://stunner/stunnerd-config:udp-listener udp://$(kubectl get svc iperf-server -o jsonpath="{.spec.clusterIP}"):5001 &>turncat.log & + ./turncat --log=all:INFO udp://127.0.0.1:5000 k8s://stunner/udp-gateway:udp-listener udp://$(kubectl get svc iperf-server -o jsonpath="{.spec.clusterIP}"):5001 &>turncat.log & sleep 1 - name: Run iperf client @@ -86,6 +90,8 @@ jobs: cat turncat.log echo "* STUNNER" kubectl logs -n stunner $(kubectl get pods -n stunner -l app=stunner -o jsonpath='{.items[0].metadata.name}') + echo "* STUNNER-GATEWAY-OPERATOR" + kubectl logs -n stunner $(kubectl get pods -n stunner -l control-plane=stunner-gateway-operator-controller-manager -o jsonpath='{.items[0].metadata.name}') - name: Check iperf connectivity run: grep "Server Report" iperf.log diff --git a/.github/workflows/e2e-test-stable.yml b/.github/workflows/e2e-test-stable.yml new file mode 100644 index 00000000..e28b6ad5 --- /dev/null +++ b/.github/workflows/e2e-test-stable.yml @@ -0,0 +1,87 @@ +name: Run End-to-End Test (stable) + +on: + workflow_dispatch: + schedule: + - cron: '0 11 1-7,15-21 * 2' + +jobs: + e2e_test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Install turncat + run: | + URL=`wget -q -O - https://api.github.com/repos/l7mp/stunner/releases/latest | jq -r '.assets[] | select(.name | contains ("turncat")) | select(.name | contains ("linux")) | select(.name | contains ("amd64")) | .browser_download_url'` + wget $URL -O turncat + chmod a+x turncat + + - name: Start minikube + uses: medyagh/setup-minikube@master + with: + driver: docker + container-runtime: containerd + wait: all + cache: false + + - name: Start minikube tunnel + run: minikube tunnel &>mktunnel.log & + + - name: Set up Helm + uses: azure/setup-helm@v4 + with: + version: v3.16.2 + + - name: Install STUNner + run: | + helm repo add stunner https://l7mp.io/stunner + helm repo update + helm install stunner-gateway-operator stunner/stunner-gateway-operator --create-namespace --namespace=stunner --set stunnerGatewayOperator.dataplane.mode=managed --set stunnerGatewayOperator.deployment.container.manager.resources.requests.cpu=200m --set stunnerGatewayOperator.dataplane.spec.resources.requests.cpu=100m + + - name: Deploy iperf server + run: kubectl apply -f docs/examples/simple-tunnel/iperf-server.yaml + + - name: Configure STUNner + run: | + kubectl apply -f docs/examples/simple-tunnel/iperf-stunner.yaml + sleep 75 + + - name: Install iperf client + run: | + sudo apt-get update + sudo apt-get -y install iperf + + - name: Wait for LoadBalancer IP + run: | + for n in {1..60}; do [[ ! 
-z $(kubectl get svc udp-gateway -n stunner -o jsonpath="{.status.loadBalancer.ingress[0].ip}") ]] && break; echo "Waiting for LoadBalancer IP"; sleep 2; done + kubectl wait -n stunner --for=condition=Available deployment udp-gateway --timeout 5m + kubectl get all -A + + - name: Start turncat + run: | + ./turncat --log=all:INFO udp://127.0.0.1:5000 k8s://stunner/udp-gateway:udp-listener udp://$(kubectl get svc iperf-server -o jsonpath="{.spec.clusterIP}"):5001 &>turncat.log & + sleep 1 + + - name: Run iperf client + run: | + iperf -c 127.0.0.1 -p 5000 -u -l 100 -b 5M -t 5 | tee iperf.log + + - name: Show logs + run: | + echo "* IPERF" + echo "** Client" + cat iperf.log + echo "** Server" + kubectl logs $(kubectl get pods -l app=iperf-server -o jsonpath='{.items[0].metadata.name}') + echo "* MINIKUBE TUNNEL" + cat mktunnel.log + echo "* TURNCAT" + cat turncat.log + echo "* STUNNER" + kubectl logs -n stunner $(kubectl get pods -n stunner -l app=stunner -o jsonpath='{.items[0].metadata.name}') + echo "* STUNNER-GATEWAY-OPERATOR" + kubectl logs -n stunner $(kubectl get pods -n stunner -l control-plane=stunner-gateway-operator-controller-manager -o jsonpath='{.items[0].metadata.name}') + + - name: Check iperf connectivity + run: grep "Server Report" iperf.log diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 45ac2fd1..2341c8ef 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -18,12 +18,12 @@ jobs: runs-on: ubuntu-latest steps: - name: Install Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: - go-version: 1.19 + go-version: '1.23' - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Run linters - uses: golangci/golangci-lint-action@v3 + uses: golangci/golangci-lint-action@v4 with: - args: --timeout 3m --issues-exit-code=0 + args: --timeout 5m diff --git a/.github/workflows/publish--add-binaries.yml b/.github/workflows/publish--add-binaries.yml new file mode 100644 index 00000000..733cdb97 --- /dev/null +++ b/.github/workflows/publish--add-binaries.yml @@ -0,0 +1,68 @@ +name: "publish: Add binaries to release assets" + +on: + workflow_call: + +jobs: + add_binaries: + name: Add binaries to release assets + runs-on: ubuntu-latest + strategy: + matrix: + include: + - os: linux + arch: amd64 + file_end: "" + - os: linux + arch: arm64 + file_end: "" + + - os: darwin + arch: amd64 + file_end: "" + - os: darwin + arch: arm64 + file_end: "" + + - os: windows + arch: amd64 + file_end: ".exe" + - os: windows + arch: arm64 + file_end: ".exe" + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Get version + id: vars + run: echo tag=$(echo ${GITHUB_REF:11}) >> $GITHUB_OUTPUT + + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version: '1.23' + + - name: Build binaries + run: | + export CGO_ENABLED=0 GOOS=${{ matrix.os }} GOARCH=${{ matrix.arch }} + make build-bin + mv bin/turncat turncat-v${{ steps.vars.outputs.tag }}-${{ matrix.os }}-${{ matrix.arch }}${{ matrix.file_end }} + mv bin/stunnerctl stunnerctl-v${{ steps.vars.outputs.tag }}-${{ matrix.os }}-${{ matrix.arch }}${{ matrix.file_end }} + + - name: Release turncat binary + uses: svenstaro/upload-release-action@v2 + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + file: turncat-v${{ steps.vars.outputs.tag }}-${{ matrix.os }}-${{ matrix.arch }}${{ matrix.file_end }} + tag: ${{ github.ref_name }} + asset_name: turncat-v${{ steps.vars.outputs.tag }}-${{ matrix.os }}-${{ matrix.arch }}${{ matrix.file_end }} + + - name: Release 
stunnerctl binary + uses: svenstaro/upload-release-action@v2 + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + file: stunnerctl-v${{ steps.vars.outputs.tag }}-${{ matrix.os }}-${{ matrix.arch }}${{ matrix.file_end }} + tag: ${{ github.ref_name }} + asset_name: stunnerctl-v${{ steps.vars.outputs.tag }}-${{ matrix.os }}-${{ matrix.arch }}${{ matrix.file_end }} diff --git a/.github/workflows/publish--push-charts.yml b/.github/workflows/publish--push-charts.yml new file mode 100644 index 00000000..f39fdf7e --- /dev/null +++ b/.github/workflows/publish--push-charts.yml @@ -0,0 +1,39 @@ +name: "publish: Push Helm charts to web" + +on: + workflow_call: + inputs: + dev: + description: Whether to release a dev version + required: true + type: boolean + +jobs: + push_charts: + name: Push Helm charts to web + runs-on: ubuntu-latest + steps: + - name: Get version for non-dev release + if: ${{ inputs.dev == false || inputs.dev == 'false' }} + id: vars + run: echo tag=$(echo ${GITHUB_REF:11}) >> $GITHUB_OUTPUT + + - name: Trigger release workflow in the stunner-helm repo + if: ${{ inputs.dev == false || inputs.dev == 'false' }} + uses: convictional/trigger-workflow-and-wait@v1.6.5 + with: + github_token: ${{ secrets.WEB_PAT_TOKEN }} + owner: l7mp + repo: stunner-helm + client_payload: '{"tag": "${{ steps.vars.outputs.tag }}", "type": "stunner"}' + workflow_file_name: publish.yaml + + - name: Trigger release workflow in the stunner-helm repo + if: ${{ inputs.dev == true || inputs.dev == 'true' }} + uses: convictional/trigger-workflow-and-wait@v1.6.5 + with: + github_token: ${{ secrets.WEB_PAT_TOKEN }} + owner: l7mp + repo: stunner-helm + client_payload: '{"tag": "dev", "type": "stunner"}' + workflow_file_name: publish.yaml diff --git a/.github/workflows/publish-dev.yaml b/.github/workflows/publish-dev.yaml index 800e1609..783d3cd0 100644 --- a/.github/workflows/publish-dev.yaml +++ b/.github/workflows/publish-dev.yaml @@ -1,4 +1,4 @@ -name: "release-dev" +name: Release (dev) on: workflow_dispatch: @@ -12,35 +12,41 @@ on: - 'main' jobs: - push_to_registry: - name: Push Docker image to DockerHub + run_tests: + name: Run tests + uses: l7mp/stunner/.github/workflows/test.yml@main + + push_stunner_to_registry: + name: Push STUNner image to DockerHub + needs: run_tests + if: github.repository == 'l7mp/stunner' runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Docker meta id: meta - uses: docker/metadata-action@v4 + uses: docker/metadata-action@v5 with: images: l7mp/stunnerd tags: | type=raw,value=dev - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Login to Docker Hub - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKER_USER }} password: ${{ secrets.DOCKER_TOKEN }} - name: Build and Push - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v5 with: context: . 
platforms: linux/amd64,linux/arm64 @@ -49,14 +55,49 @@ jobs: labels: ${{ steps.meta.outputs.labels }} push_chart: - name: Push helm charts to the + name: Push helm charts to the repo + if: github.repository == 'l7mp/stunner' + needs: push_stunner_to_registry + uses: l7mp/stunner/.github/workflows/publish--push-charts.yml@main + with: + dev: true + secrets: inherit + + push_icetester_to_registry: + name: Push icetester image to DockerHub + needs: run_tests + if: github.repository == 'l7mp/stunner' runs-on: ubuntu-latest steps: - - name: Triggering release workflow in the stunner-helm repo - uses: convictional/trigger-workflow-and-wait@v1.6.5 + - name: Checkout + uses: actions/checkout@v4 + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 with: - github_token: ${{ secrets.WEB_PAT_TOKEN }} - owner: l7mp - repo: stunner-helm - client_payload: '{"tag": "dev", "type": "stunner"}' - workflow_file_name: publish.yaml + images: l7mp/icetester + tags: | + type=raw,value=dev + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USER }} + password: ${{ secrets.DOCKER_TOKEN }} + + - name: Build and Push + uses: docker/build-push-action@v5 + with: + file: Dockerfile.icetester + context: . + platforms: linux/amd64,linux/arm64 + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index b2005e75..d63d42f0 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -1,4 +1,4 @@ -name: "release" +name: Release on: push: @@ -6,16 +6,21 @@ on: - 'v[0-9]+.[0-9]+.0' jobs: - push_to_registry: - name: Push Docker image to DockerHub + run_tests: + name: Run tests + uses: l7mp/stunner/.github/workflows/test.yml@main + + push_stunner_to_registry: + name: Push STUNner image to DockerHub + needs: run_tests runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Docker meta id: meta - uses: docker/metadata-action@v4 + uses: docker/metadata-action@v5 with: images: l7mp/stunnerd tags: | @@ -23,19 +28,19 @@ jobs: type=raw,value=latest - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Login to Docker Hub - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKER_USER }} password: ${{ secrets.DOCKER_TOKEN }} - name: Build and Push - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v5 with: context: . 
platforms: linux/amd64,linux/arm64 @@ -44,19 +49,54 @@ jobs: labels: ${{ steps.meta.outputs.labels }} push_chart: - name: Push charts to the web + name: Push helm charts to the repo + needs: push_stunner_to_registry + uses: l7mp/stunner/.github/workflows/publish--push-charts.yml@main + with: + dev: false + secrets: inherit + + push_icetester_to_registry: + name: Push icetester image to DockerHub + needs: run_tests + if: github.repository == 'l7mp/stunner' runs-on: ubuntu-latest steps: + - name: Checkout + uses: actions/checkout@v4 - - name: Get version - id: vars - run: echo tag=$(echo ${GITHUB_REF:11}) >> $GITHUB_OUTPUT + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: l7mp/icetester + tags: | + type=semver,pattern={{version}} + type=raw,value=latest + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 - - name: Triggering release workflow in the stunner-helm repo - uses: convictional/trigger-workflow-and-wait@v1.6.5 + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USER }} + password: ${{ secrets.DOCKER_TOKEN }} + + - name: Build and Push + uses: docker/build-push-action@v5 with: - github_token: ${{ secrets.WEB_PAT_TOKEN }} - owner: l7mp - repo: stunner-helm - client_payload: '{"tag": "${{ steps.vars.outputs.tag }}", "type": "stunner"}' - workflow_file_name: publish.yaml + file: Dockerfile.icetester + context: . + platforms: linux/amd64,linux/arm64 + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + + add_binaries: + name: Add binaries to release assets + uses: l7mp/stunner/.github/workflows/publish--add-binaries.yml@main + needs: run_tests diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 7ab2719c..156a8d7f 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -1,6 +1,8 @@ name: Tests on: + workflow_call: + workflow_dispatch: push: paths: - '**.go' @@ -17,11 +19,11 @@ jobs: test: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: - go-version: 1.19 + go-version: '1.23' - name: Download modules run: go mod download - name: Go install @@ -30,21 +32,22 @@ jobs: run: go test -v -covermode=count coverage: runs-on: ubuntu-latest + if: github.repository == 'l7mp/stunner' steps: - name: Install Go if: success() - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: - go-version: 1.19 + go-version: '1.23' - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Calc coverage run: | go test -v -covermode=count -coverprofile=coverage.out - name: Convert coverage.out to coverage.lcov uses: jandelgado/gcov2lcov-action@v1 - name: Coveralls - uses: coverallsapp/github-action@v1 + uses: coverallsapp/github-action@v2 with: github-token: ${{ secrets.github_token }} path-to-lcov: coverage.lcov diff --git a/.gitignore b/.gitignore index c7339885..5d20408a 100644 --- a/.gitignore +++ b/.gitignore @@ -25,4 +25,5 @@ go.work # Our binaries turncat +stunnerctl stunnerd \ No newline at end of file diff --git a/.readthedocs.yaml b/.readthedocs.yaml index d9222bb5..37ceb77e 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -6,10 +6,10 @@ version: 2 # Set the version of Python and other tools you might need -# build: -# os: ubuntu-22.04 -# tools: -# python: "3.10" +build: + os: ubuntu-22.04 + tools: + 
python: "3.12" mkdocs: configuration: mkdocs.yml diff --git a/Dockerfile b/Dockerfile index f1b6fd44..63d52e97 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,26 +1,30 @@ ########### # BUILD -FROM golang:1.19-alpine as builder +FROM golang:1.23-alpine as builder WORKDIR /app COPY go.mod ./ COPY go.sum ./ -RUN go mod download COPY *.go ./ COPY internal/ internal/ COPY pkg/ pkg/ -COPY cmd/stunnerd/main.go cmd/stunnerd/ -COPY cmd/stunnerd/stunnerd.conf cmd/stunnerd/ +COPY cmd/ cmd/ + +COPY .git ./ +COPY Makefile ./ +RUN apk add --no-cache git make RUN apkArch="$(apk --print-arch)"; \ case "$apkArch" in \ aarch64) export GOARCH='arm64' ;; \ *) export GOARCH='amd64' ;; \ esac; \ - CGO_ENABLED=0 GOOS=linux go build -ldflags="-w -s" -o stunnerd cmd/stunnerd/main.go + export CGO_ENABLED=0; \ + export GOOS=linux; \ + make build-bin ########### # STUNNERD @@ -28,7 +32,7 @@ FROM scratch WORKDIR /app -COPY --from=builder /app/stunnerd /usr/bin/ +COPY --from=builder /app/bin/stunnerd /usr/bin/ COPY --from=builder /app/cmd/stunnerd/stunnerd.conf / EXPOSE 3478/udp diff --git a/Dockerfile.icetester b/Dockerfile.icetester new file mode 100644 index 00000000..ce243532 --- /dev/null +++ b/Dockerfile.icetester @@ -0,0 +1,39 @@ +########### +# BUILD +FROM docker.io/golang:1.23-alpine as builder + +WORKDIR /app + +COPY go.mod ./ +COPY go.sum ./ + +COPY *.go ./ +COPY internal/ internal/ +COPY pkg/ pkg/ + +COPY cmd/ cmd/ + +COPY .git ./ +COPY Makefile ./ +RUN apk add --no-cache git make + +RUN apkArch="$(apk --print-arch)"; \ + case "$apkArch" in \ + aarch64) export GOARCH='arm64' ;; \ + *) export GOARCH='amd64' ;; \ + esac; \ + export CGO_ENABLED=0; \ + export GOOS=linux; \ + make build-bin + +########### +# STUNNERD +FROM scratch + +WORKDIR /app + +COPY --from=builder /app/bin/icetester /usr/bin/ + +EXPOSE 8089/tcp + +CMD [ "icetester", "-l", "all:INFO" ] diff --git a/LICENSE b/LICENSE index 621376f6..116904bc 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2023 l7mp +Copyright (c) 2024 l7mp Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..15838cab --- /dev/null +++ b/Makefile @@ -0,0 +1,53 @@ +# Build variables +PACKAGE = github.com/l7mp/stunner +BUILD_DIR ?= bin/ +VERSION ?= $(shell (git describe --tags --abbrev=8 --always --long) | tr "/" "-") +COMMIT_HASH ?= $(shell git rev-parse --short HEAD 2>/dev/null) +BUILD_DATE ?= $(shell date +%FT%T%z) +LDFLAGS += -s -w +LDFLAGS += -X main.version=${VERSION} -X main.commitHash=${COMMIT_HASH} -X main.buildDate=${BUILD_DATE} +GOARGS = -trimpath + +ifeq (${VERBOSE}, 1) +ifeq ($(filter -v,${GOARGS}),) + GOARGS += -v +endif +endif + +.PHONY: all +all: build + +.PHONY: generate +generate: ## OpenAPI codegen + go generate ./pkg/config/... + +.PHONY: fmt +fmt: ## Run go fmt against code. + go fmt ./... + +.PHONY: vet +vet: ## Run go vet against code. + go vet ./... + +.PHONY: test +test: generate fmt vet + go test ./... 
-v + +##@ Build + +.PHONY: build +build: generate fmt vet build-bin + +.PHONY: build-bin +bin: build-bin +build-bin: + go build ${GOARGS} -ldflags "${LDFLAGS}" -o ${BUILD_DIR}/stunnerd cmd/stunnerd/main.go + go build ${GOARGS} -ldflags "${LDFLAGS}" -o ${BUILD_DIR}/turncat cmd/turncat/main.go + go build ${GOARGS} -ldflags "${LDFLAGS}" -o ${BUILD_DIR}/stunnerctl cmd/stunnerctl/*.go + go build ${GOARGS} -ldflags "${LDFLAGS}" -o ${BUILD_DIR}/icetester cmd/icetester/main.go + +.PHONY: clean +clean: + # echo 'Use "make generate" to autogenerate server code' > pkg/server/server.go + # echo 'Use "make generate" to autogenerate client code' > pkg/client/client.go + # echo 'Use "make generate" to autogenerate client code' > pkg/types/types.go diff --git a/README.md b/README.md index 9585a46f..6346c382 100644 --- a/README.md +++ b/README.md @@ -32,12 +32,14 @@

+*Note: This page documents the latest development version of STUNner. See the documentation for the stable version [here](https://docs.l7mp.io/en/stable).* + # STUNner: A Kubernetes media gateway for WebRTC Ever wondered how to [deploy your WebRTC infrastructure into the cloud](https://webrtchacks.com/webrtc-media-servers-in-the-cloud)? Frightened away by the complexities of Kubernetes container networking, and the surprising ways in which it may interact -with your UDP/RTP media? Tried to read through the endless stream of [Stack +with your UDP/RTP media? Read through the endless stream of [Stack Overflow](https://stackoverflow.com/search?q=kubernetes+webrtc) [questions](https://stackoverflow.com/questions/61140228/kubernetes-loadbalancer-open-a-wide-range-thousands-of-port) [asking](https://stackoverflow.com/questions/64232853/how-to-use-webrtc-with-rtcpeerconnection-on-kubernetes) @@ -52,19 +54,16 @@ Worry no more! STUNner allows you to deploy *any* WebRTC service into Kubernetes integrating it into the [cloud-native ecosystem](https://landscape.cncf.io). STUNner exposes a standards-compliant STUN/TURN gateway for clients to access your virtualized WebRTC infrastructure running in Kubernetes, maintaining full browser compatibility and requiring minimal or no -modification to your existing WebRTC codebase. STUNner implements the standard [Kubernetes Gateway +modification to your existing WebRTC codebase. STUNner supports the [Kubernetes Gateway API](https://gateway-api.sigs.k8s.io) so you can configure it in the familiar YAML-engineering style via Kubernetes manifests. -See the full documentation [here](https://docs.l7mp.io/en/latest). - ## Table of Contents 1. [Description](#description) 1. [Features](#features) 1. [Getting started](#getting-started) -1. [Tutorials](#tutorials) +1. [Usage](#usage) 1. [Documentation](#documentation) -1. [Caveats](#caveats) 1. [Milestones](#milestones) ## Description @@ -83,21 +82,18 @@ features we have come to expect from modern network services. Worse yet, the ent on a handful of [public](https://bloggeek.me/google-free-turn-server/) [STUN servers](https://www.npmjs.com/package/freeice) and [hosted TURN services](https://bloggeek.me/managed-webrtc-turn-speed) to connect clients behind a NAT/firewall, -which may create a useless dependency on externally operated services, introduce a bottleneck, -raise security concerns, and come with a non-trivial price tag. +which may create a useless dependency on externally operated services, introduce a performance +bottleneck, raise security concerns, and come with a non-trivial price tag. The main goal of STUNner is to allow *anyone* to deploy their own WebRTC infrastructure into Kubernetes, without relying on any external service other than the cloud-provider's standard hosted -Kubernetes offering. This is achieved by STUNner acting as a gateway for ingesting WebRTC media -traffic into the Kubernetes cluster, exposing a public-facing STUN/TURN server that WebRTC clients -can connect to. - -STUNner can act as a STUN/TURN server that WebRTC clients and media servers can use as a scalable -NAT traversal facility (headless model), or it can serve as a fully-fledged ingress gateway for -clients to reach a media server deployed behind STUNner (media-plane model). 
This makes it possible -to deploy WebRTC application servers and media servers into ordinary Kubernetes pods, taking -advantage of Kubernetes's excellent tooling to manage, scale, monitor and troubleshoot the WebRTC -infrastructure like any other cloud-bound workload. +Kubernetes offering. STUNner can act as a standalone STUN/TURN server that WebRTC clients and media +servers can use as a scalable NAT traversal facility (headless model), or it can act as a gateway +for ingesting WebRTC media traffic into the Kubernetes cluster by exposing a public-facing +STUN/TURN server that WebRTC clients can connect to (media-plane model). This makes it possible to +deploy WebRTC application servers and media servers into ordinary Kubernetes pods, taking advantage +of the full cloud native feature set to manage, scale, monitor and troubleshoot the WebRTC +infrastructure like any other Kubernetes workload. ![STUNner media-plane deployment architecture](./docs/img/stunner_arch.svg) @@ -130,7 +126,7 @@ way. [hacks](https://kubernetes.io/docs/concepts/configuration/overview), like privileged pods and `hostNetwork`/`hostPort` services, typically recommended as a prerequisite to containerizing your WebRTC media plane. Using STUNner a WebRTC deployment needs only two public-facing ports, one - HTTPS port for the application server and a *single* UDP port for *all* your media. + HTTPS port for signaling and a *single* UDP port for *all* your media. * **No reliance on external services for NAT traversal.** Can't afford a [hosted TURN service](https://bloggeek.me/webrtc-turn) for client-side NAT traversal? Can't get decent @@ -139,453 +135,115 @@ way. can connect to it directly without the use of *any* external STUN/TURN service whatsoever, apart from STUNner itself. -* **Easily scale your WebRTC infrastructure.** Tired of manually provisioning your WebRTC media - servers? STUNner lets you deploy the entire WebRTC infrastructure into ordinary Kubernetes pods, - thus [scaling the media plane](docs/SCALING.md) is as easy as issuing a `kubectl scale` - command. Even better, use the built in Kubernetes horizontal autoscaler to *automatically* resize - your workload based on demand. +* **Scale your WebRTC infrastructure.** Tired of manually provisioning your WebRTC media servers? + STUNner lets you deploy the entire WebRTC infrastructure into ordinary Kubernetes pods, thus + [scaling the media plane](docs/SCALING.md) is as easy as issuing a `kubectl scale` command. Or + you can use the built in Kubernetes horizontal autoscaler to *automatically* resize your workload + based on demand. + +* **Minimal client-side configuration.** STUNner comes with a built-in [authentication + service](https://github.com/l7mp/stunner-auth-service) that can be used to generate time-windowed + per-user TURN credentials through a [standards + compliant](https://datatracker.ietf.org/doc/html/draft-uberti-behave-turn-rest-00) HTTP [REST + API](/docs/AUTH.md). Just set the generated [ICE + configuration](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection/RTCPeerConnection#configuration) + in the [`PeerConnection` JavaScript + API](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection/RTCPeerConnection) and + your clients will readily start using your Kubernetes-based WebRTC service via STUNner. 
* **Secure perimeter defense.** No need to open thousands of UDP/TCP ports on your media server for potentially malicious access; with STUNner *all* media is received through a single ingress port that you can tightly monitor and control. - - - * **Simple code and extremely small size.** Written in pure Go using the battle-tested [pion/webrtc](https://github.com/pion/webrtc) framework, STUNner is just a couple of hundred lines of fully open-source code. The server is extremely lightweight: the typical STUNner - container image size is only about 5 Mbytes. + container image size is only 15 Mbytes. -## Getting Started +The main uses of STUNner are [hosting a scalable STUN server +pool](https://medium.com/l7mp-technologies/deploying-a-scalable-stun-service-in-kubernetes-c7b9726fa41d) +in Kubernetes, as a public Kubernetes-based [TURN +service](https://github.com/l7mp/stunner/blob/main/docs/DEPLOYMENT.md#headless-deployment-model), +or as a fully-fledged [gateway +service](https://github.com/l7mp/stunner/blob/main/docs/DEPLOYMENT.md#media-plane-deployment-model) +for ingesting and load-balancing clients' media connections across a pool of WebRTC media servers +hosted in ordinary Kubernetes pods. -STUNner comes with a [Helm](https://helm.sh) chart to fire up a fully functional STUNner-based -WebRTC media gateway in minutes. Note that the default installation does not contain an application -server and a media server: STUNner is not a WebRTC service, it is merely an *enabler* for you to -deploy your *own* WebRTC infrastructure into Kubernetes. Once installed, STUNner makes sure that -your media servers are readily reachable to WebRTC clients, despite running with a private IP -address inside a Kubernetes pod. See the [tutorials](#tutorials) for some ideas on how to deploy an -actual WebRTC application behind STUNner. +## Getting Started With a minimal understanding of WebRTC and Kubernetes, deploying STUNner should take less than 5 -minutes. +minutes, in five simple steps. -* [Customize STUNner and deploy it](#installation) into your Kubernetes cluster. -* Optionally [deploy a WebRTC media server](docs/examples/kurento-one2one-call). -* [Set STUNner as the ICE server](#configuring-webrtc-clients) in your WebRTC clients. +* [Customize STUNner and deploy it](/docs/INSTALL.md) into your Kubernetes cluster. +* Optionally [deploy a WebRTC media server](/docs/README.md#media-plane-deployment-model). +* [Set STUNner as the ICE server](/docs/AUTH.md) in your WebRTC clients. * ... * Profit!! -### Installation +Note that the default installation does not contain an application server and a media server: +STUNner is not a WebRTC service in itself, it is merely an *enabler* for you to deploy your *own* +WebRTC infrastructure into Kubernetes. The simplest way to deploy STUNner is through [Helm](https://helm.sh). STUNner configuration parameters are available for customization as [Helm -Values](https://helm.sh/docs/chart_template_guide/values_files). We recommend deploying STUNner -into a separate namespace and we usually name this namespace as `stunner`, so as to isolate it from -the rest of the workload. +Values](https://helm.sh/docs/chart_template_guide/values_files). 
```console helm repo add stunner https://l7mp.io/stunner helm repo update -helm install stunner-gateway-operator stunner/stunner-gateway-operator --create-namespace --namespace=stunner-system -helm install stunner stunner/stunner --create-namespace --namespace=stunner +helm install stunner-gateway-operator stunner/stunner-gateway-operator --create-namespace \ + --namespace=stunner-system ``` Find out more about the charts in the [STUNner-helm repository](https://github.com/l7mp/stunner-helm). -### Configuration - -The standard way to interact with STUNner is via the standard Kubernetes [Gateway - API](https://gateway-api.sigs.k8s.io) version - [v1alpha2](https://gateway-api.sigs.k8s.io/v1alpha2/references/spec). This is much akin to the - way you configure *all* Kubernetes workloads: specify your intents in YAML files and issue a - `kubectl apply`, and the [STUNner gateway - operator](https://github.com/l7mp/stunner-gateway-operator) will automatically reconcile the - STUNner dataplane for the new configuration. - -1. Given a fresh STUNner install, the first step is to register STUNner with the Kubernetes Gateway - API. This amounts to creating a - [GatewayClass](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1alpha2.GatewayClass), - which serves as the [root level configuration](/docs/GATEWAY.md#gatewayclass) for your STUNner - deployment. - - Each GatewayClass must specify a controller that will manage the Gateway objects created under - the class hierarchy. This must be set to `stunner.l7mp.io/gateway-operator` in order for STUNner - to pick up the GatewayClass. In addition, a GatewayClass can refer to further - implementation-specific configuration via a reference called `parametersRef`; in our case, this - will be a GatewayConfig object to be specified next. - - ``` console - kubectl apply -f - < **Warning** -STUNner deviates somewhat from the standard rules Kubernetes uses to handle ports in Services. In -Kubernetes each Service is associated with one or more protocol-port pairs and connections via the -Service can be made to only these specific protocol-port pairs. WebRTC media servers, however, -usually open lots of different ports, typically one per each client connection, and it would be -cumbersome to create a separate backend Service and UDPRoute for each port. In order to simplify -this, STUNner **ignores the protocol and port specified in the backend service** and allows -connections to the backend pods via *any* protocol-port pair. STUNner can therefore use only a -*single* backend Service to reach any port exposed on a WebRTC media server. - -> Considering the above example: even if the `default/media-plane` Service was created for the TCP:80 port, STUNner will allow connections via any protocol-port pair, say, via UDP:10000 or any other UDP port for that matter. This hack remains our only viable way to support WebRTC workloads in Kubernetes until [support for port ranges is implemented in Kubernetes services](https://github.com/kubernetes/kubernetes/issues/23864). Note that this affects only the *internal* backend services: STUNner is still exposed *externally* via a *single* protocol-port, but it can demultiplex incoming client media connections to any *internal* backend ports via a single UDPRoute. - -And that's all. You don't need to worry about client-side NAT traversal and WebRTC media routing -because STUNner has you covered! 
Even better, every time you change a Gateway API resource in -Kubernetes, say, you update the GatewayConfig to reset your STUN/TURN credentials or change the -protocol or port in one of your Gateways, the [STUNner gateway -operator](https://github.com/l7mp/stunner-gateway-operator) will automatically pick up your -modifications and update the underlying dataplane. Kubernetes is beautiful, isn't it? - -### Check your config - -The current STUNner dataplane configuration is always made available in a convenient ConfigMap -called `stunnerd-config` (you can choose the name in the GatewayConfig). The STUNner dataplane pods -themselves will use the very same ConfigMap to reconcile their internal state, so you can consider -the content to be the ground truth. - -STUNner comes with a small utility to dump the running configuration in human readable format (you -must have [`jq`](https://stedolan.github.io/jq) installed in your PATH to be able to use it). Chdir -into the main STUNner directory and issue. - -```console -cmd/stunnerctl/stunnerctl running-config stunner/stunnerd-config -STUN/TURN authentication type: plaintext -STUN/TURN username: user-1 -STUN/TURN password: pass-1 -Listener: udp-listener -Protocol: UDP -Public address: 34.118.36.108 -Public port: 3478 -``` - -As it turns out, STUNner has successfully assigned a public IP and port to our Gateway and set the -STUN/TURN credentials based on the GatewayConfig. You can use the below to dump the entire running -configuration; `jq` is there just to pretty-print JSON. - -```console -kubectl get cm -n stunner stunnerd-config -o jsonpath="{.data.stunnerd\.conf}" | jq . -``` - -### Testing - -We have successfully configured STUNner to route client connections to the `media-plane` service -but at the moment there is no backend there that would respond. Below we use a simplistic UDP -greeter service for testing: every time you send some input, the greeter service will respond with -a heartwarming welcome message. - -1. Fire up the UDP greeter service. - - The below manifest spawns the service in the `default` namespace and wraps it in a Kubernetes - service called `media-plane`. Recall, this is the target service STUNner will route connections - to. Note that the type of the `media-plane` service is `ClusterIP`, which means that Kubernetes - will *not* expose it to the Internet: the only way for clients to obtain a response is via - STUNner. - - ```console - kubectl apply -f deploy/manifests/udp-greeter.yaml - ``` - -1. We also need the ClusterIP assigned by Kubernetes to the `media-plane` service. - - ```console - export PEER_IP=$(kubectl get svc media-plane -o jsonpath='{.spec.clusterIP}') - ``` - -1. We also need a STUN/TURN client to actually initiate a connection. STUNner comes with a handy - STUN/TURN client called [`turncat`](cmd/turncat/README.md) for this purpose. Once - [built](cmd/turncat/README.md#installation), you can fire up `turncat` to listen on the standard - input and send everything it receives to STUNner. Type any input and press Enter, and you should - see a nice greeting from your cluster! - - ```console - ./turncat - k8s://stunner/stunnerd-config:udp-listener udp://${PEER_IP}:9001 - Hello STUNner - Greetings from STUNner! - ``` - -Observe that we haven't specified the public IP address and port: `turncat` is clever enough to -parse the running [STUNner configuration](#check-your-config) from Kubernetes directly. 
Just -specify the special STUNner URI `k8s://stunner/stunnerd-config:udp-listener`, identifying the -namespace (`stunner` here) and the name for the STUNner ConfigMap (`stunnerd-config`), plus the -listener to connect to (`udp-listener`), and `turncat` will do the heavy lifting. - -Note that your actual WebRTC clients do *not* need to use `turncat` to reach the cluster: all -modern Web browsers and WebRTC clients come with a STUN/TURN client built in. Here, `turncat` is -used only to *simulate* what a real WebRTC client would do when trying to reach STUNner. - -### Reconcile - -Any time you see fit, you can update the STUNner configuration through the Gateway API: STUNner -will automatically reconcile the dataplane for the new configuration. - -For instance, you may decide to open up your WebRTC infrastructure on TLS/TCP as well; say, because -an enterprise NAT on the client network path has gone berserk and actively filters anything except -TLS/443. The below steps will do just that: open another gateway on STUNner, this time on the -TLS/TCP port 443, and reattach the UDPRoute to both Gateways so that no matter which protocol a -client may choose the connection will be routed to the `media-plane` service (i.e., the UDP -greeter) by STUNner. - -1. Store your TLS certificate in a Kubernetes Secret. Below we create a self-signed certificate for - testing, make sure to substitute this with a valid certificate. - - ```console - openssl genrsa -out ca.key 2048 - openssl req -x509 -new -nodes -days 365 -key ca.key -out ca.crt -subj "/CN=yourdomain.com" - kubectl -n stunner create secret tls tls-secret --key ca.key --cert ca.crt - ``` - -1. Add the new TLS Gateway. Notice how the `tls-listener` now contains a `tls` object that refers - the above Secret, this way assigning the TLS certificate to use with our TLS listener. - - ```console - kubectl apply -f - <`) with the correct configuration -from the running STUNner config; don't forget that `stunnerctl` is always there for you to help. - -```js -var ICE_config = { - iceServers: [ - { - url: 'turn::?transport=udp', - username: , - credential: , - }, - ], -}; -var pc = new RTCPeerConnection(ICE_config); -``` - -Note that STUNner comes with a built-in [authentication -service](https://github.com/l7mp/stunner-auth-service) that can be used to generate a complete ICE -configuration for reaching STUNner through a [HTTP REST API](docs/AUTH.md). - -## Tutorials - -The below series of tutorials demonstrates how to leverage STUNner to deploy different WebRTC -applications into Kubernetes. - -### Basics - -* [Opening a UDP tunnel via STUNner](/docs/examples/simple-tunnel/README.md): This introductory tutorial - shows how to tunnel an external connection via STUNner to a UDP service deployed into - Kubernetes. The demo can be used to quickly check and benchmark a STUNner installation. - -### Headless deployment mode - +## Usage + +STUNner comes with a wide selection of tutorials and demos that teach you how to deploy all kinds +of WebRTC services into Kubernetes. The first couple of tutorials present the basic concepts, +especially the use of the [Kubernetes Gateway API](https://gateway-api.sigs.k8s.io) to configure +STUNner and the [`turncat`](/docs/cmd/turncat.md) utility to test it. 
Each subsequent demo +showcases a specific WebRTC application, from desktop streaming and video-conferencing to +cloud-gaming, and goes from a clean Kubernetes cluster to a working and usable publicly available +WebRTC service in 5-10 minutes using a purely declarative configuration. + +* [Deploying a UDP echo service behind STUNner](/docs/examples/udp-echo/README.md): This + introductory tutorial shows how to deploy a simple UDP echo service into Kubernetes and expose it + via STUNner. If you read just one STUNner tutorial, this should be it. +* [Opening a UDP tunnel via STUNner](/docs/examples/simple-tunnel/README.md): This tutorial shows + how to tunnel an external UDP client via STUNner to a standard iperf server deployed into + Kubernetes. The demo can be used to benchmark your STUNner installation. * [Direct one to one video call via STUNner](/docs/examples/direct-one2one-call/README.md): This tutorial showcases STUNner acting as a TURN server for two WebRTC clients to establish connections between themselves, without the mediation of a media server. - -### Media-plane deployment model - -* [One to one video call with Kurento](/docs/examples/kurento-one2one-call/README.md): This tutorial - shows how to use STUNner to connect WebRTC clients to a media server deployed into Kubernetes - behind STUNner in the [media-plane deployment model](/docs/DEPLOYMENT.md). All this happens - *without* modifying the media server code in any way, just by adding 5-10 lines of - straightforward JavaScript to configure clients to use STUNner as the TURN server. -* [Magic mirror with Kurento](/docs/examples/kurento-magic-mirror/README.md): This tutorial has been - adopted from the [Kurento](https://www.kurento.org) [magic - mirror](https://doc-kurento.readthedocs.io/en/stable/tutorials/node/tutorial-magicmirror.html) - demo, deploying a basic WebRTC loopback server behind STUNner with some media processing - added. In particular, the application uses computer vision and augmented reality techniques to - add a funny hat on top of faces. * [Video-conferencing with LiveKit](/docs/examples/livekit/README.md): This tutorial helps you deploy the [LiveKit](https://livekit.io) WebRTC media server behind STUNner. The docs also show how to obtain a valid TLS certificate to secure your signaling connections, courtesy of the [cert-manager](https://cert-manager.io) project, [nip.io](https://nip.io) and [Let's Encrypt](https://letsencrypt.org). +* [Video-conferencing with Janus](/docs/examples/janus/README.md): This tutorial helps you deploy a + fully fledged [Janus](https://janus.conf.meetecho.com/) video-conferencing service into Kubernetes + behind STUNner. The docs also show how to obtain a valid TLS certificate to secure your signaling + connections, using [cert-manager](https://cert-manager.io), [nip.io](https://nip.io) and [Let's + Encrypt](https://letsencrypt.org). +* [Video-conferencing with Elixir WebRTC](/docs/examples/elixir-webrtc/README.md): This tutorial helps + you deploy a fully fledged [Elixir WebRTC](https://elixir-webrtc.org/) video-conferencing room called + [Nexus](https://github.com/elixir-webrtc/apps/tree/master/nexus) into Kubernetes + behind STUNner. The docs also show how to obtain a valid TLS certificate to secure your signaling + connections, using [cert-manager](https://cert-manager.io), [nip.io](https://nip.io) and [Let's + Encrypt](https://letsencrypt.org). 
* [Video-conferencing with Jitsi](/docs/examples/jitsi/README.md): This tutorial helps you deploy a fully fledged [Jitsi](https://jitsi.org) video-conferencing service into Kubernetes behind STUNner. The docs also show how to obtain a valid TLS certificate to secure your signaling connections, using [cert-manager](https://cert-manager.io), [nip.io](https://nip.io) and [Let's Encrypt](https://letsencrypt.org). -* [Cloud-gaming with Cloudretro](/docs/examples/cloudretro/README.md): This tutorial lets you play Super - Mario or Street Fighter in your browser, courtesy of the amazing +* [Video-conferencing with mediasoup](/docs/examples/mediasoup/README.md): This tutorial helps you + deploy the [mediasoup](https://mediasoup.org/) WebRTC media server behind STUNner. The docs also + show how to obtain a valid TLS certificate to secure your signaling connections, courtesy of the + [cert-manager](https://cert-manager.io) project, [nip.io](https://nip.io) and [Let's + Encrypt](https://letsencrypt.org). +* [Cloud-gaming with Cloudretro](/docs/examples/cloudretro/README.md): This tutorial lets you play + Super Mario or Street Fighter in your browser, courtesy of the amazing [CloudRetro](https://cloudretro.io) project and, of course, STUNner. The demo also presents a simple multi-cluster setup, where clients can reach the game-servers in their geographical locality to minimize latency. @@ -593,41 +251,38 @@ applications into Kubernetes. providing an ingress gateway service to a remote desktop application. We use [neko.io](https://neko.m1k1o.net) to run a browser in a secure container inside the Kubernetes cluster, and stream the desktop to clients via STUNner. +* [One to one video call with Kurento](/docs/examples/kurento-one2one-call/README.md): This tutorial + shows how to use STUNner to connect WebRTC clients to a media server deployed into Kubernetes + behind STUNner in the [media-plane deployment model](/docs/DEPLOYMENT.md). All this happens + *without* modifying the media server code in any way, just by adding 5-10 lines of + straightforward JavaScript to configure clients to use STUNner as the TURN server. +* [Magic mirror with Kurento](/docs/examples/kurento-magic-mirror/README.md): This tutorial has been + adopted from the [Kurento](https://www.kurento.org) [magic + mirror](https://doc-kurento.readthedocs.io/en/stable/tutorials/node/tutorial-magicmirror.html) + demo, deploying a basic WebRTC loopback server behind STUNner with some media processing + added. In particular, the application uses computer vision and augmented reality techniques to + add a funny hat on top of faces. ## Documentation -See the full documentation [here](/docs/README.md). - -## Caveats - -STUNner is a work-in-progress. Some features are missing, others may not work as expected. The -notable limitations at this point are as follows. - -* STUNner targets only a *partial implementation of the Kubernetes Gateway API.* In particular, - only GatewayClass, Gateway and UDPRoute resources are supported. This is intended: STUNner - deliberately ignores some complexity in the [Gateway API](https://gateway-api.sigs.k8s.io) and - deviates from the prescribed behavior in some cases, all in the name of simplifying the - configuration process. The [STUNner Kubernetes gateway - operator](https://github.com/l7mp/stunner-gateway-operator) docs contain a [detailed - list](https://github.com/l7mp/stunner-gateway-operator#caveats) on the differences. 
-* STUNner supports *multiple parallel GatewayClass hierarchies*, each deployed into a separate - namespace with a separate GatewayClass an a separate dataplane. This mode can be useful for - testing new STUNner versions or canary-upgrades and A/B testing of a new media server version. At - the moment, however, this mode is not supported: it should work but we don' test it. +The documentation of the stable release can be found [here](https://docs.l7mp.io/en/stable). The +documentation for the latest development release can be found [here](/docs/README.md). ## Milestones * v0.9: Demo release: STUNner basic UDP/TURN connectivity + helm chart + tutorials. -* v0.10: Dataplane: Long-term STUN/TURN credentials and [STUN/TURN over - TCP/TLS/DTLS](https://www.rfc-editor.org/rfc/rfc6062.txt) in standalone mode. +* v0.10: Dataplane: Long-term STUN/TURN credentials and [STUN/TURN over TCP/TLS/DTLS](https://www.rfc-editor.org/rfc/rfc6062.txt) in standalone mode. * v0.11: Control plane: Kubernetes gateway operator and dataplane reconciliation. * v0.12: Security: Expose TLS/DTLS settings via the Gateway API. * v0.13: Observability: Prometheus + Grafana dashboard. -* v0.15: Performance: per-allocation CPU load-balancing for UDP -* v0.16: Management: managed STUNner dataplane. -* v0.17: Performance: eBPF TURN acceleration. -* v1.0: GA -* v2.0: Service mesh: adaptive scaling & resiliency +* v0.15: Performance: Per-allocation CPU load-balancing for UDP +* v0.16: Management: Managed STUNner dataplane. +* v0.17: First release candidate: All Gateway and STUNner APIs move to v1. +* v0.18: Stabilization: Second release candidate. +* v0.19: The missing pieces: Third release candidate. +* v0.20: Final stabilization: Fourth stable release candidate +* v0.21: Towards v1: Fifth stable release candidate +* v1.0: STUNner goes GA! ## Help diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 00000000..6f97cd62 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,9 @@ +# Reporting Security Issues + +If you encounter any security issues, please get in touch with us in any of the following ways: +- open a ticket at [GitHub Security Advisories](https://github.com/l7mp/stunner/security/advisories), +- e-mail core developers at [info@l7mp.io](mailto:info@l7mp.io). + +## Learning More About Security + +To learn more about securing a STUNner deployment, please see the [security documentation](docs/SECURITY.md). diff --git a/bin/.gitkeep b/bin/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/cmd/getstunner/getstunner.sh b/cmd/getstunner/getstunner.sh new file mode 100755 index 00000000..bffbc12b --- /dev/null +++ b/cmd/getstunner/getstunner.sh @@ -0,0 +1,80 @@ +#!/bin/sh + +# STUNner tools downloader script +# +# inspired by https://raw.githubusercontent.com/istio/istio/master/release/downloadIstioCtl.sh + + +REPO=l7mp/stunner + +# Determine OS +OS="${TARGET_OS:-$(uname)}" +if [ "${OS}" = "Darwin" ] ; then + OSEXT="darwin" +else + OSEXT="linux" +fi + +# Determine the latest STUNner version +if [ "${STUNNER_VERSION}" = "" ] ; then + STUNNER_VERSION="$(curl -Lsf https://api.github.com/repos/${REPO}/releases/latest \ + | grep -o '"tag_name": "v[0-9]*.[0-9]*.[0-9]*' \ + | awk -F'"' '{print $4}')" + STUNNER_VERSION="${STUNNER_VERSION##*/}" +fi + +# Determine build params +if [ "${STUNNER_VERSION}" = "" ] ; then + printf "Unable to get latest Stunner version. Set STUNNER_VERSION env var and re-run. 
For example: export STUNNER_VERSION=0.18.0" + exit 1; +fi + +LOCAL_ARCH=$(uname -m) +if [ "${TARGET_ARCH}" ]; then + LOCAL_ARCH=${TARGET_ARCH} +fi + +case "${LOCAL_ARCH}" in + x86_64|amd64) + STUNNER_ARCH=amd64 + ;; + armv8*|aarch64*|arm64) + STUNNER_ARCH=arm64 + ;; + *) + echo "This system's architecture, ${LOCAL_ARCH}, isn't supported" + exit 1 + ;; +esac + +# Download binaries +progs="stunnerctl turncat" +tmp=$(mktemp -d /tmp/stunner.XXXXXX) + +for prog in $progs; do + NAME="${prog}-${STUNNER_VERSION}" + URL="https://github.com/${REPO}/releases/download/${STUNNER_VERSION}/${prog}-${STUNNER_VERSION}-${OSEXT}-${STUNNER_ARCH}" + filename="${prog}-${STUNNER_VERSION}-${OSEXT}-${STUNNER_ARCH}" + + printf "\nDownloading %s from %s ...\n" "${NAME}" "$URL" + if ! curl -o /dev/null -sIf "$URL"; then + printf "\n%s is not found, please specify a valid STUNNER_VERSION and TARGET_ARCH\n" "$URL" + exit 1 + fi + curl -fsL -o "${tmp}/${filename}" "$URL" + printf "%s download complete!\n" "${filename}" + + mkdir -p "$HOME/.l7mp/bin" + mv "${tmp}/${filename}" "$HOME/.l7mp/bin/${prog}" + chmod +x "$HOME/.l7mp/bin/${prog}" +done + +rm -r "${tmp}" + +# Print final message +printf "\n" +printf "Add stunner tools to your path with:" +printf "\n" +printf " export PATH=\$HOME/.l7mp/bin:\$PATH \n" +printf "\n" +printf "Need more information? Visit https://docs.l7mp.io/en/${STUNNER_VERSION}/ \n" diff --git a/cmd/icetester/README.md b/cmd/icetester/README.md new file mode 100644 index 00000000..a43eb208 --- /dev/null +++ b/cmd/icetester/README.md @@ -0,0 +1,42 @@ +# icetester: Universal UDP echo service using WebRTC/ICE + +`icetester` is a test server that can be used to test WebRTC/ICE connectivity. The tester runs a simple +WebSocket/JSON API server that clients can use to create a WebRTC data channel. Whatever is +received by `icetester` on the data channel will be echoed back to the client over the data channel. + +While `icetester` can be used as a standalone tool, the intended use is via `stunnerctl icetest`. + +## Installation + +Install `icetester` using the standard Go toolchain and add it to `$PATH`. + +```console +go install github.com/l7mp/stunner/cmd/icetester@latest +``` + +Building from source is as easy as it usually gets with Go: + +```console +cd stunner +go build -o icetester cmd/icetester/main.go +``` + +The containerized version is available as `docker.io/l7mp/icetester`. + +## Usage + +Deploy a STUNner gateway and test it via UDP and TCP through `stunnerctl`: + +```console +stunnerctl icetest +``` + +## License + +Copyright 2021-2024 by its authors. Some rights reserved. See [AUTHORS](../../AUTHORS). + +MIT License - see [LICENSE](../../LICENSE) for full text. + +## Acknowledgments + +Initial code adopted from [pion/stun](https://github.com/pion/stun) and [pion/turn](https://github.com/pion/turn). 
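A short, hedged example to complement the icetester README above: running the containerized tester locally. This is a sketch only; the `docker.io/l7mp/icetester` image name comes from the publish workflows in this changeset, port 8089 and the default command from `Dockerfile.icetester`, and the `BEARER_TOKEN`/`WHIP_ENDPOINT` environment variables from `cmd/icetester/main.go`. The token value and the `latest` tag are placeholders, not something this diff prescribes.

```console
# Sketch: run the published icetester image locally (tag assumed to be "latest")
docker run --rm -p 8089:8089 \
  -e BEARER_TOKEN=whiptoken \
  -e WHIP_ENDPOINT=/whip \
  docker.io/l7mp/icetester:latest
```

A client, such as `stunnerctl icetest` or the `whipconn` dialer used in the test file below, can then connect to the WHIP endpoint on port 8089 with the same bearer token and expect its data-channel traffic to be echoed back.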
diff --git a/cmd/icetester/icetester_test.go b/cmd/icetester/icetester_test.go new file mode 100644 index 00000000..2cc53bc3 --- /dev/null +++ b/cmd/icetester/icetester_test.go @@ -0,0 +1,84 @@ +package main + +import ( + "context" + "net" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/l7mp/stunner/pkg/logger" + "github.com/l7mp/stunner/pkg/whipconn" +) + +var ( + testerLogLevel = "all:WARN" + // testerLogLevel = "all:TRACE" + // testerLogLevel = "all:INFO" + defaultConfig = whipconn.Config{BearerToken: "whiptoken"} +) + +func echoTest(t *testing.T, conn net.Conn, content string) { + t.Helper() + + n, err := conn.Write([]byte(content)) + assert.NoError(t, err) + assert.Equal(t, len(content), n) + + buf := make([]byte, 2048) + n, err = conn.Read(buf) + assert.NoError(t, err) + assert.Equal(t, content, string(buf[:n])) +} + +var testerTestCases = []struct { + name string + config *whipconn.Config + tester func(t *testing.T, ctx context.Context) +}{ + { + name: "Basic connectivity", + tester: func(t *testing.T, ctx context.Context) { + log.Debug("Creating dialer") + d := whipconn.NewDialer(defaultConfig, loggerFactory) + assert.NotNil(t, d) + + log.Debug("Dialing") + clientConn, err := d.DialContext(ctx, defaultICETesterAddr) + assert.NoError(t, err) + + log.Debug("Echo test round 1") + echoTest(t, clientConn, "test1") + log.Debug("Echo test round 2") + echoTest(t, clientConn, "test2") + + assert.NoError(t, clientConn.Close(), "client conn close") + }, + }, +} + +func TestICETesterConn(t *testing.T) { + loggerFactory = logger.NewLoggerFactory(testerLogLevel) + log = loggerFactory.NewLogger("icester") + + for _, c := range testerTestCases { + t.Run(c.name, func(t *testing.T) { + log.Infof("--------------------- %s ----------------------", c.name) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + config := defaultConfig + if c.config != nil { + config = *c.config + } + + log.Debug("Running listener loop") + go func() { + err := runICETesterListener(ctx, defaultICETesterAddr, config) + assert.NoError(t, err) + }() + + c.tester(t, ctx) + }) + } +} diff --git a/cmd/icetester/main.go b/cmd/icetester/main.go new file mode 100644 index 00000000..a40b9c51 --- /dev/null +++ b/cmd/icetester/main.go @@ -0,0 +1,166 @@ +package main + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "net/http" + "os" + "os/signal" + + "github.com/pion/logging" + "github.com/pion/webrtc/v4" + flag "github.com/spf13/pflag" + + v1 "github.com/l7mp/stunner/pkg/apis/v1" + "github.com/l7mp/stunner/pkg/buildinfo" + "github.com/l7mp/stunner/pkg/logger" + "github.com/l7mp/stunner/pkg/whipconn" +) + +const ( + // Name of the environment variable specifying the list of ICE servers, default is no ICE servers. + EnvVarNameICEServers = "ICE_SERVERS" + + // Name of the environment variable specifying the ICE transport policy (either "relay" or "all"), default is "all". + EnvVarNameICETransportPolicy = "ICE_TRANSPORT_POLICY" + + // HIP bearer token for authenticating WHIP requests, default is no bearer token. + EnvVarNameBearerToken = "BEARER_TOKEN" + + // WHIP API endpoint, default is "/whip". Must include the leading slash ("/"). 
+ EnvVarNameWHIPEndpoint = "WHIP_ENDPOINT" +) + +var ( + version = "dev" + commitHash = "n/a" + buildDate = "" + + defaultICEServers = []webrtc.ICEServer{} + defaultICETransportPolicy = webrtc.NewICETransportPolicy("all") + defaultBearerToken = "" + defaultWHIPEndpoint = "/whip" + defaultICETesterAddr = fmt.Sprintf(":%d", v1.DefaultICETesterPort) + + loggerFactory logging.LoggerFactory + log logging.LeveledLogger +) + +func main() { + os.Args[0] = "icetester" + var whipServerAddr = flag.StringP("addr", "a", defaultICETesterAddr, "WHIP server listener address") + var level = flag.StringP("log", "l", "all:WARN", "Log level") + var verbose = flag.BoolP("verbose", "v", false, "Enable verbose logging, identical to -l all:DEBUG") + + flag.Parse() + + if *verbose { + *level = "all:DEBUG" + } + + loggerFactory = logger.NewLoggerFactory(*level) + log = loggerFactory.NewLogger("icester") + + buildInfo := buildinfo.BuildInfo{Version: version, CommitHash: commitHash, BuildDate: buildDate} + log.Debugf("Starting icetester %s", buildInfo.String()) + + iceServers := defaultICEServers + if os.Getenv(EnvVarNameICEServers) != "" { + s := []webrtc.ICEServer{} + if err := json.Unmarshal([]byte(os.Getenv(EnvVarNameICEServers)), &s); err != nil { + log.Errorf("Environment ICE_SERVERS is invalid: %s", err.Error()) + os.Exit(1) + } + iceServers = s + } + + iceTransportPolicy := defaultICETransportPolicy + if os.Getenv(EnvVarNameICETransportPolicy) != "" { + iceTransportPolicy = webrtc.NewICETransportPolicy(os.Getenv(EnvVarNameICETransportPolicy)) + } + + token := defaultBearerToken + if os.Getenv(EnvVarNameBearerToken) != "" { + token = os.Getenv(EnvVarNameBearerToken) + } + + whipEndpoint := defaultWHIPEndpoint + if os.Getenv(EnvVarNameWHIPEndpoint) != "" { + endpoint := os.Getenv(EnvVarNameWHIPEndpoint) + if endpoint[0] != '/' { + log.Errorf("Environment WHIP_ENDPOINT is invalid: %s, expecting a leading slash '/'", endpoint) + os.Exit(1) + } + whipEndpoint = endpoint + } + + whipServerConfig := whipconn.Config{ + ICEServers: iceServers, + ICETransportPolicy: iceTransportPolicy, + BearerToken: token, + WHIPEndpoint: whipEndpoint, + } + + ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt) + defer stop() + + if err := runICETesterListener(ctx, *whipServerAddr, whipServerConfig); err != nil { + log.Errorf("Could not create WHIP server listener: %s", err.Error()) + os.Exit(1) + } + + os.Exit(0) +} + +func runICETesterListener(ctx context.Context, addr string, config whipconn.Config) error { + log.Infof("Creating WHIP server listener with config %#v", config) + l, err := whipconn.NewListener(addr, config, loggerFactory) + if err != nil { + return fmt.Errorf("Could not create WHIP server listener: %s", err.Error()) + } + + log.Debug("Creating echo service") + go func() { + for { + conn, err := l.Accept() + if err != nil { + return + } + + log.Debugf("Accepting WHIP server connection with resource ID: %s", + conn.(*whipconn.ListenerConn).ResourceUrl) + + // readloop + go func() { + buf := make([]byte, 100) + for { + n, err := conn.Read(buf) + if err != nil { + return + } + + _, err = conn.Write(buf[:n]) + if err != nil { + return + } + } + }() + } + }() + + <-ctx.Done() + + for _, conn := range l.GetConns() { + if err := conn.Close(); err != nil && !errors.Is(err, net.ErrClosed) && + !errors.Is(err, http.ErrServerClosed) { + return fmt.Errorf("WHIP connection close error: %s", err.Error()) + } + } + + l.Close() + + return nil +} diff --git a/cmd/stunnerctl/README.md b/cmd/stunnerctl/README.md index 
87ba4c19..02079af1 100644 --- a/cmd/stunnerctl/README.md +++ b/cmd/stunnerctl/README.md @@ -1,22 +1,214 @@ # stunnerctl: Command line toolbox for STUNner A CLI tool to simplify the interaction with STUNner. +The prominent use of `stunnerctl` is to load or watch STUNner dataplane configurations from a Kubernetes cluster for debugging and troubleshooting, or just for checking whether everything is configured the way it should be. + +## Installation + +Install the `stunnerctl` binary using the standard Go toolchain and add it to `$PATH`. + +```console +go install github.com/l7mp/stunner/cmd/stunnerctl@latest +``` + +You can also enforce a specific OS, CPU architecture, and STUNner version: + +```console +GOOS=windows GOARCH=amd64 go install github.com/l7mp/stunner/cmd/stunnerctl@v0.17.5 +``` + +Building from source is as easy as it usually gets with Go: + +```console +cd stunner +go build -o stunnerctl cmd/stunnerctl/main.go +``` ## Usage -Dump the running config from a live STUNner deployment in human-readable format. +Type `stunnerctl` to get a glimpse of the sub-commands and features provided. + +### Config + +The `config` sub-command is used to load or watch running dataplane configs from the STUNner config discovery service (CDS) running in a remote Kubernetes cluster. Usually the CDS server role is fulfilled by the [STUNner gateway operator](https://github.com/l7mp/stunner-gateway-operator) but you can choose any CDS service you want (see the `--cds-server-*` CLI flags in the help). The main use of this command is to check the active dataplane configuration for troubleshooting connectivity problems. + +- Dump a summary of the running config of the STUNner gateway called `udp-gateway` deployed into the `stunner` namespace: + + ```console + stunnerctl -n stunner config udp-gateway + Gateway: stunner/udp-gateway (loglevel: "all:INFO") + Authentication type: static, username/password: user-1/pass-1 + Listeners: + - Name: stunner/udp-gateway/udp-listener + Protocol: TURN-UDP + Public address:port: 34.118.88.91:9001 + Routes: [stunner/iperf-server] + Endpoints: [10.76.1.3, 10.80.7.104] + ``` + +- The same, but using the alternative Kubernetes config file `~/my-config.conf` to access the cluster. The rest of the usual `kubectl` flags (`--context`, `--token`, etc.) are also available to select the cluster to connect to. + + ``` console + stunnerctl --kubeconfig ~/my-config.conf -n stunner config udp-gateway + ``` + +- Dump the running config of all gateways in the `stunner` namespace in JSON format (YAML is also available using `-o yaml`): + + ```console + stunnerctl -n stunner config -o json + {"version":"v1","admin":{"name":"stunner/tcp-gateway",...}} + {"version":"v1","admin":{"name":"stunner/udp-gateway",...}}} + ``` + +- Watch STUNner configs as they are being refreshed by the operator and dump only the name of the gateway whose config changes: + + ```console + stunnerctl config --all-namespaces -o jsonpath='{.admin.name}' -w + stunner/tcp-gateway + stunner/udp-gateway + ... + ``` + +For those who don't have the Go toolchain available to run `go install`, STUNner provides a minimalistic `stunnerctl` replacement called `stunnerctl.sh`. +This script requires nothing else than `bash`, `kubectl`, `curl` and `jq` to work. 
+ +The below will dump the running config of `tcp-gateway` deployed into the `stunner` namespace: ```console -cmd/stunnerctl/stunnerctl running-config stunner/stunnerd-config -STUN/TURN authentication type: plaintext -STUN/TURN username: user-1 -STUN/TURN password: pass-1 -Listener: udp-listener -Protocol: UDP -Public address: 34.118.36.108 -Public port: 3478 +cd stunner +cmd/stunnerctl/stunnerctl.sh running-config stunner/tcp-gateway +STUN/TURN authentication type: static +STUN/TURN username: user-1 +STUN/TURN password: pass-1 +Listener 1 + Name: stunner/tcp-gateway/tcp-listener + Listener: stunner/tcp-gateway/tcp-listener + Protocol: TURN-TCP + Public address: 35.187.97.94 + Public port: 3478 +``` + +You can also use `kubectl port-forward` to load or watch STUNner configs manually. Open a port-forwarded connection to the STUNner gateway operator: + +``` console +export CDS_SERVER_NAME=$(kubectl get pods -l stunner.l7mp.io/config-discovery-service=enabled --all-namespaces -o jsonpath='{.items[0].metadata.name}') +export CDS_SERVER_NAMESPACE=$(kubectl get pods -l stunner.l7mp.io/config-discovery-service=enabled --all-namespaces -o jsonpath='{.items[0].metadata.namespace}') +kubectl -n $CDS_SERVER_NAMESPACE port-forward pod/${CDS_SERVER_NAME} 63478:13478 & +``` + +If all goes well, you can now connect to the STUNner CDS API served by the gateway operator through the port-forwarded tunnel opened by `kubectl` just using `curl`. The below will load the config of the `udp-gateway` in the `stunner` namespace: + +``` console +curl -s http://127.0.0.1:63478/api/v1/configs/stunner/udp-gateway +``` + +If you happen to have a WebSocket client like the wonderful [`websocat`](https://github.com/vi/websocat) tool installed, you can also watch the configs as they are being rendered by the operator en live. + +``` console +websocat ws://127.0.0.1:63478/api/v1/configs/stunner/udp-gateway?watch=true - ``` +### Status + +The `status` sub-command reports the status of the dataplane pods for a gateway, especially the runtime state of the `stunnerd` daemon. + +- Find all dataplane pods for the `udp-gateway` in the `stunner` namespace and dump a status summary: + + ``` console + stunnerctl -n stunner status udp-gateway + stunner/udp-gateway-856c9f4dc9-524hc: + stunner/udp-gateway:{logLevel="all:INFO",health-check="http://:8086"} + static-auth:{realm="stunner.l7mp.io",username="",password=""} + listeners:1/clusters:1 + allocs:3/status=READY + stunner/udp-gateway-856c9f4dc9-c7wcq: + stunner/udp-gateway:{logLevel="all:INFO",health-check="http://:8086"} + static-auth:{realm="stunner.l7mp.io",username="",password=""} + listeners:1/clusters:1 + allocs:2/status=READY + ``` + +- Same but report only the runtime status of the `stunnerd` pods in the `stunner` namespace: + + ``` console + stunnerctl -n stunner status -o jsonpath='{.status}' + READY + TERMINATING + ``` + +### Authentication + +The `auth` sub-command can be used to obtain a TURN credential or a full ICE server config for connecting to a specific gateway. The authentication service API is usually served by a separate [STUNner authentication server](https://github.com/l7mp/stunner-auth-service) deployed alongside the gateway operator. The main use of this command is to feed an ICE agent manually with the ICE server config to connect to a specific STUNner gateway. 
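The returned JSON maps directly onto a standard ICE agent configuration. As a rough illustration only (a sketch with placeholder credentials, mirroring the responses shown in the examples below), this is how it could be wired into a [pion/webrtc](https://github.com/pion/webrtc) client:

```go
package main

import "github.com/pion/webrtc/v4"

func main() {
	// Values copied from a `stunnerctl auth` response (placeholders here).
	config := webrtc.Configuration{
		ICEServers: []webrtc.ICEServer{{
			URLs:       []string{"turn:10.104.19.179:3478?transport=udp"},
			Username:   "user-1",
			Credential: "pass-1",
		}},
		// "all" in the response maps to ICETransportPolicyAll; use
		// ICETransportPolicyRelay to force TURN-relayed candidates.
		ICETransportPolicy: webrtc.ICETransportPolicyAll,
	}

	pc, err := webrtc.NewPeerConnection(config)
	if err != nil {
		panic(err)
	}
	defer pc.Close()

	// ... add tracks or data channels and run the usual offer/answer exchange.
}
```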
+ +- Obtain a full ICE server config for `udp-gateway` deployed into the `stunner` namespace: + + ``` console + stunnerctl -n stunner auth udp-gateway + {"iceServers":[{"credential":"pass-1","urls":["turn:10.104.19.179:3478?transport=udp"],"username":"user-1"}],"iceTransportPolicy":"all"} + ``` + +- Request a plain [TURN credential](https://datatracker.ietf.org/doc/html/draft-uberti-behave-turn-rest-00) using the authentication service deployed into the `stunner-system-prod` namespace: + + ``` console + stunnerctl -n stunner auth udp-gateway --auth-turn-credential --auth-service-namespace=stunner-system-prod + {"password":"pass-1","ttl":86400,"uris":["turn:10.104.19.179:3478?transport=udp"],"username":"user-1"} + ``` + +### ICE test + +The `icetest` sub-command can be used to run a full-blown ICE test. This command is intended for +users to check a STUNner installation and pinpoint installation errors. + +The tester will fire up a WHIP server in the cluster, configure a UDP and a TCP gateway to expose +it, make a PeerConnection to the WHIP server via the gateways, and perform a quick test by +sending a set of packets via a data channel created over the PeerConnection, measuring loss and +latency using the packets echoed back by the WHIP server. If successful, the tester outputs the +measured statistics; otherwise it reports the error that stopped the ICE test and provides some +diagnostics to help troubleshooting. + +- Run a dataplane test over UDP and TCP: + + ``` console + stunnerctl icetest + Initializing... completed + Checking installation... completed + Checking Gateway... completed + Obtaining ICE server configuration... completed + Running asymmetric ICE test over TURN-UDP... completed + Statistics: rate=48.65pps, loss=0/973pkts=0.00%, RTT:mean=20.67ms/median=20.54ms/P95=22.23ms/P99=23.34ms + LocalICECandidates: + * udp4 relay 10.244.0.24:43988 related 0.0.0.0:43716 (resolved: 10.244.0.24:43988) + RemoteICECandidates: + * udp4 host 10.244.0.163:35242 (resolved: 10.244.0.163:35242) + Running asymmetric ICE test over TURN-TCP... completed + Statistics: rate=48.55pps, loss=0/971pkts=0.00%, RTT:mean=21.00ms/median=20.89ms/P95=22.45ms/P99=23.47ms + LocalICECandidates: + * udp4 relay 10.244.0.162:45090 related 0.0.0.0:45654 (resolved: 10.244.0.162:45090) + RemoteICECandidates: + * udp4 host 10.244.0.163:51653 (resolved: 10.244.0.163:51653) + Running symmetric ICE test over TURN-UDP... completed + Statistics: rate=48.65pps, loss=0/973pkts=0.00%, RTT:mean=20.63ms/median=20.47ms/P95=21.85ms/P99=23.01ms + LocalICECandidates: + * udp4 relay 10.244.0.24:47282 related 0.0.0.0:55122 (resolved: 10.244.0.24:47282) + RemoteICECandidates: + * udp4 relay 10.244.0.24:47367 related 0.0.0.0:51777 (resolved: 10.244.0.24:47367) + Running symmetric ICE test over TURN-TCP... completed + Statistics: rate=48.60pps, loss=0/972pkts=0.00%, RTT:mean=24.61ms/median=20.56ms/P95=39.96ms/P99=133.40ms + LocalICECandidates: + * udp4 relay 10.244.0.162:56555 related 0.0.0.0:42600 (resolved: 10.244.0.162:56555) + RemoteICECandidates: + * udp4 relay 10.244.0.162:33397 related 10.244.0.163:53124 (resolved: 10.244.0.162:33397) + ``` + +- Clean up the Kubernetes resources the tester might have left behind on a previous run and perform + the test only on TURN-UDP at a rate of 100 packets per second using a 2-minute timeout: + + ``` console + stunnerctl icetest --force-cleanup --packet-rate 100 --timeout 2m udp + ``` + +Run `stunnerctl icetest --help` for further useful command line arguments. 
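Finally, if you need the same information programmatically, the config discovery client package that `stunnerctl` itself is built on can be embedded in Go programs. The below is a minimal sketch using the `pkg/config/client` API; the CDS address is a placeholder (for instance, the port-forward opened above), and error handling is reduced to panics for brevity.

```go
package main

import (
	"context"
	"fmt"

	cdsclient "github.com/l7mp/stunner/pkg/config/client"
	"github.com/l7mp/stunner/pkg/logger"
)

func main() {
	log := logger.NewLoggerFactory("all:WARN").NewLogger("cds-client")

	// Per-gateway config API: namespace "stunner", gateway "udp-gateway".
	cds, err := cdsclient.NewConfigNamespaceNameAPI("127.0.0.1:63478", "stunner", "udp-gateway", log)
	if err != nil {
		panic(err)
	}

	// One-shot load; cds.Watch can be used instead to stream config updates.
	confs, err := cds.Get(context.Background())
	if err != nil {
		panic(err)
	}
	for _, c := range confs {
		fmt.Println(c.String())
	}
}
```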
+ ## License Copyright 2021-2023 by its authors. Some rights reserved. See [AUTHORS](../../AUTHORS). diff --git a/cmd/stunnerctl/auth.go b/cmd/stunnerctl/auth.go new file mode 100644 index 00000000..61efc6fa --- /dev/null +++ b/cmd/stunnerctl/auth.go @@ -0,0 +1,67 @@ +package main + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + + "github.com/spf13/cobra" + + cdsclient "github.com/l7mp/stunner/pkg/config/client" +) + +func runAuth(_ *cobra.Command, args []string) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + log.Debug("Searching for authentication server") + pod, err := cdsclient.DiscoverK8sAuthServer(ctx, k8sConfigFlags, authConfigFlags, + loggerFactory.NewLogger("auth-fwd")) + if err != nil { + return fmt.Errorf("error searching for auth service: %w", err) + } + + u := url.URL{ + Scheme: "http", + Host: pod.Addr, + Path: "/ice", + } + q := u.Query() + q.Set("service", "turn") + u.RawQuery = q.Encode() + + if authConfigFlags.TurnAuth { + // enforce TURN credential format + u.Path = "" + } + + if k8sConfigFlags.Namespace != nil && *k8sConfigFlags.Namespace != "" { + q := u.Query() + q.Set("namespace", *k8sConfigFlags.Namespace) + if len(args) > 0 { + q.Set("gateway", args[0]) + } + u.RawQuery = q.Encode() + } + + log.Debugf("Querying to authentication server %s using URL %q", pod.String(), u.String()) + res, err := http.Get(u.String()) + if err != nil { + return fmt.Errorf("error querying auth service %s: %w", pod.String(), err) + } + if res.StatusCode != http.StatusOK { + return fmt.Errorf("HTTP error querying auth service %s: expected status %d, got %d", + pod.String(), http.StatusOK, res.StatusCode) + } + + b, err := io.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("cannot read HTTP response: %w", err) + } + + fmt.Println(string(b)) + + return nil +} diff --git a/cmd/stunnerctl/config.go b/cmd/stunnerctl/config.go new file mode 100644 index 00000000..a77da115 --- /dev/null +++ b/cmd/stunnerctl/config.go @@ -0,0 +1,123 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "os" + "os/signal" + "syscall" + + "github.com/spf13/cobra" + "sigs.k8s.io/yaml" + + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" + cdsclient "github.com/l7mp/stunner/pkg/config/client" +) + +func runConfig(_ *cobra.Command, args []string) error { + gwNs := "default" + if k8sConfigFlags.Namespace != nil && *k8sConfigFlags.Namespace != "" { + gwNs = *k8sConfigFlags.Namespace + } + + jsonQuery, output, err := ParseJSONPathFlag(output) + if err != nil { + return err + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + log.Debug("Searching for CDS server") + pod, err := cdsclient.DiscoverK8sCDSServer(ctx, k8sConfigFlags, cdsConfigFlags, + loggerFactory.NewLogger("cds-fwd")) + if err != nil { + return fmt.Errorf("error searching for CDS server: %w", err) + } + + log.Debugf("Connecting to CDS server: %s", pod.String()) + var cds cdsclient.CdsApi + cdslog := loggerFactory.NewLogger("cds-client") + if all { + cds, err = cdsclient.NewAllConfigsAPI(pod.Addr, cdslog) + } else if len(args) == 0 { + cds, err = cdsclient.NewConfigsNamespaceAPI(pod.Addr, gwNs, cdslog) + } else { + gwName := args[0] + cds, err = cdsclient.NewConfigNamespaceNameAPI(pod.Addr, gwNs, gwName, cdslog) + } + + if err != nil { + return fmt.Errorf("error creating CDS client: %w", err) + } + + confChan := make(chan *stnrv1.StunnerConfig, 8) + if watch { + err := cds.Watch(ctx, confChan, false) + if err != nil { + close(confChan) + 
return err + } + + go func() { + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + <-sigs + close(confChan) + }() + } else { + resp, err := cds.Get(ctx) + if err != nil { + close(confChan) + return err + } + for _, c := range resp { + confChan <- c + } + + close(confChan) + } + + for c := range confChan { + if cdsclient.IsConfigDeleted(c) { + fmt.Printf("Gateway: %s \n", c.Admin.Name) + continue + } + switch output { + case "yaml": + if out, err := yaml.Marshal(c); err != nil { + return err + } else { + fmt.Println(string(out)) + } + case "json": + if out, err := json.Marshal(c); err != nil { + return err + } else { + fmt.Println(string(out)) + } + case "jsonpath": + values, err := jsonQuery.FindResults(c) + if err != nil { + return err + } + + if len(values) == 0 || len(values[0]) == 0 { + fmt.Println("") + } + + for arrIx := range values { + for valIx := range values[arrIx] { + fmt.Printf("%v\n", values[arrIx][valIx].Interface()) + } + } + case "summary": + fmt.Print(c.Summary()) + default: + fmt.Println(c.String()) + } + } + + return nil +} diff --git a/cmd/stunnerctl/icetest.go b/cmd/stunnerctl/icetest.go new file mode 100644 index 00000000..a2d81efe --- /dev/null +++ b/cmd/stunnerctl/icetest.go @@ -0,0 +1,187 @@ +package main + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "os/signal" + "sync" + "time" + + "github.com/spf13/cobra" + + "github.com/l7mp/stunner/internal/icetester" + v1 "github.com/l7mp/stunner/pkg/apis/v1" + "github.com/l7mp/stunner/pkg/logger" +) + +const DefaultTestNamespace = "icetest" + +func runICETest(_ *cobra.Command, args []string) error { + ns := DefaultTestNamespace + if k8sConfigFlags.Namespace != nil && *k8sConfigFlags.Namespace != "" { + ns = *k8sConfigFlags.Namespace + } + + turnTransports := []v1.ListenerProtocol{} + protos := args + if len(protos) == 0 { + // run all tests for all transports if no specific transport is provided + protos = []string{"udp", "tcp"} + } + for _, arg := range protos { + proto, err := v1.NewListenerProtocol(arg) + if err != nil { + return err + } + switch proto { + case v1.ListenerProtocolUDP: + turnTransports = append(turnTransports, v1.ListenerProtocolTURNUDP) + case v1.ListenerProtocolTCP: + turnTransports = append(turnTransports, v1.ListenerProtocolTURNTCP) + case v1.ListenerProtocolTURNUDP, v1.ListenerProtocolTURNTCP: + turnTransports = append(turnTransports, proto) + default: + return fmt.Errorf("ICE test is currently not available on TURN transport protocol %s", proto) + } + } + + // Create a buffered logger for the tests + logBuffer := &bytes.Buffer{} + bufferedLoggerFactory := logger.NewLoggerFactory("all:TRACE") // hardcode highest loglevel + bufferedLoggerFactory.SetWriter(logBuffer) + + eventCh := make(chan icetester.Event, 12) + defer close(eventCh) + tester, err := icetester.NewICETester(icetester.Config{ + EventChannel: eventCh, + + K8sConfigFlags: k8sConfigFlags, + CDSConfigFlags: cdsConfigFlags, + AuthConfigFlags: authConfigFlags, + + Namespace: ns, + TURNTransports: turnTransports, + ICETesterImage: iceTesterImage, + ForceCleanup: forceCleanup, + PacketRate: iceTesterPacketRate, + + Logger: bufferedLoggerFactory, + }) + if err != nil { + return fmt.Errorf("Failed to create ICE tester: %w", err) + } + + // run for at most 5 minutes + ctx, cancel := context.WithTimeout(context.Background(), iceTesterTimeout) + + // stop on interrupt as well + ctx, stop := signal.NotifyContext(ctx, os.Interrupt) + defer stop() + + // event handler + var wg 
sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case e := <-eventCh: + printEvent(e, logBuffer) + + case <-ctx.Done(): + return + } + } + }() + + err = tester.Start(ctx) + if err == nil { + err = ctx.Err() + } + + if err != nil { + switch ctx.Err() { + case context.DeadlineExceeded: + fmt.Printf("\nICE tester timed out after %s\n", iceTesterTimeout) + printLogs(logBuffer) + case context.Canceled: + fmt.Printf("\nICE tester stopped due to user interrupt\n") + default: + fmt.Printf("\nICE tester error: %s\n", err.Error()) + } + } + + cancel() + + // wait until the printer finishes + wg.Wait() + + return nil +} + +func printEvent(e icetester.Event, logbuf io.ReadWriter) { + // started + if e.InProgress { + proto := "" + if arg, ok := e.Args["ICETransport"]; ok { + if p, ok := arg.(string); ok { + proto = fmt.Sprintf(" over %s", p) + } + } + + fmt.Printf("%s: %s%s... ", e.Timestamp.Format(time.RFC822), e.Type.String(), proto) + return + } + + // completed successfully + if e.Error == nil { + fmt.Println("completed") + if arg, ok := e.Args["Stats"]; ok { + if s, ok := arg.(*icetester.Stats); ok { + fmt.Printf("\tStatistics: rate=%0.2fpps, loss=%d/%dpkts=%0.2f%%, "+ + "RTT:mean=%0.2fms/median=%0.2fms/P95=%0.2fms/P99=%0.2fms\n", + s.SendRate, s.PacketsSent-s.PacketsReceived, s.PacketsSent, + s.LossRate, s.MeanLatency, s.MedianLatency, s.P95Latency, + s.P99Latency) + } + } + + for _, ctype := range []string{"LocalICECandidates", "RemoteICECandidates"} { + if arg, ok := e.Args[ctype]; ok { + if cs, ok := arg.([]icetester.CandidateDesc); ok { + fmt.Printf("\t%s:\n", ctype) + for _, c := range cs { + sel := " " + if c.Selected { + sel = "* " + } + fmt.Printf("\t %s%s\n", sel, c.Candidate) + } + } + } + } + + return + } + + // completed with error + fmt.Printf("error\nError: %q\nTimeStamp: %s\n", e.Error.Error(), + e.Timestamp.Format(time.DateTime)) + if e.Diagnostics != "" { + fmt.Printf("Diagnostics: %s\n", e.Diagnostics) + } + printLogs(logbuf) +} + +func printLogs(logbuf io.ReadWriter) { + logs, err := io.ReadAll(logbuf) + if err != nil { + fmt.Printf("Logs not available due to error when reading log buffer: %s", err.Error()) + } else if len(logs) != 0 { + fmt.Println("Detailed logs") + fmt.Print(string(logs)) + } +} diff --git a/cmd/stunnerctl/main.go b/cmd/stunnerctl/main.go new file mode 100644 index 00000000..15632b0e --- /dev/null +++ b/cmd/stunnerctl/main.go @@ -0,0 +1,206 @@ +package main + +import ( + "fmt" + "os" + "regexp" + "strings" + "time" + + "github.com/pion/logging" + "github.com/spf13/cobra" + cliopt "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/client-go/util/jsonpath" + + "github.com/l7mp/stunner/internal/icetester" + cdsclient "github.com/l7mp/stunner/pkg/config/client" + "github.com/l7mp/stunner/pkg/logger" +) + +// list all configs: stunnerctl get config --all-namespaces +// watch all configs in namesapce stunner: stunnerctl -n stunner get config --watch +// get short-form config for stunner/udp-gateway: stunnerctl -n stunner get config udp-gateway +// get config for stunner/udp-gateway in yaml format: stunnerctl -n stunner get config udp-gateway --output yaml + +var ( + output, iceTesterImage, loglevel string + watch, all, verbose, forceCleanup bool + k8sConfigFlags *cliopt.ConfigFlags + cdsConfigFlags *cdsclient.CDSConfigFlags + authConfigFlags *cdsclient.AuthConfigFlags + podConfigFlags *cdsclient.PodConfigFlags + iceTesterTimeout time.Duration + iceTesterPacketRate int + + loggerFactory logger.LoggerFactory + log 
logging.LeveledLogger + + rootCmd = &cobra.Command{ + Use: "stunnerctl", + Short: "A command line utility to inspect the STUNner dataplane.", + Long: "The stunnerctl tool is a CLI for inspecting, watching and troubleshooting STUNner gateways", + DisableAutoGenTag: true, + PersistentPreRun: func(_ *cobra.Command, _ []string) { + if verbose { + loglevel = "all:TRACE" + } + + loggerFactory = logger.NewLoggerFactory(loglevel) + log = loggerFactory.NewLogger("stunnerctl") + }, + } +) + +var ( + configCmd = &cobra.Command{ + Use: "config", + Aliases: []string{"stunner-config"}, + Short: "Get or watch dataplane configs of a gateway", + Args: cobra.RangeArgs(0, 1), + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + if err := runConfig(cmd, args); err != nil { + fmt.Println(err) + os.Exit(1) + } + }, + } + statusCmd = &cobra.Command{ + Use: "status [gateway]", + Aliases: []string{"dataplane-status"}, + Short: "Read status from dataplane pods for a gateway", + Args: cobra.RangeArgs(0, 1), + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + if err := runStatus(cmd, args); err != nil { + fmt.Println(err) + os.Exit(1) + } + }, + } + authCmd = &cobra.Command{ + Use: "auth", + Aliases: []string{"get-credential"}, + Short: "Obtain authentication credentials for a gateway", + Args: cobra.RangeArgs(0, 1), + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + if err := runAuth(cmd, args); err != nil { + fmt.Println(err) + os.Exit(1) + } + }, + } + iceTestCmd = &cobra.Command{ + Use: "icetest [udp, tcp, ...]", + Short: "Test ICE connectivity with the specified transports", + DisableAutoGenTag: true, + Run: func(cmd *cobra.Command, args []string) { + if err := runICETest(cmd, args); err != nil { + fmt.Println(err) + os.Exit(1) + } + }, + } +) + +func init() { + rootCmd.PersistentFlags().BoolVarP(&all, "all-namespaces", "a", false, "Consider all namespaces") + rootCmd.PersistentFlags().StringVarP(&output, "output", "o", "summary", "Output format, either json, yaml, summary or jsonpath=template") + rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "Enable verbose logging, identical to -l all:DEBUG (overrides -l)") + rootCmd.PersistentFlags().StringVarP(&loglevel, "loglevel", "l", "all:WARN", "Set loglevel (format: :, overrides: PION_LOG_*, default: all:WARN)") + + // Kubernetes config flags: persistent, all commands + k8sConfigFlags = cliopt.NewConfigFlags(true) + k8sConfigFlags.AddFlags(rootCmd.PersistentFlags()) + + // CDS server discovery flags: only for "config" command + cdsConfigFlags = cdsclient.NewCDSConfigFlags() + cdsConfigFlags.AddFlags(configCmd.Flags()) + + // watch flag: only for config + configCmd.Flags().BoolVarP(&watch, "watch", "w", false, "Watch for config updates from server") + + // Pod discovery flags: only for "status" command + podConfigFlags = cdsclient.NewPodConfigFlags() + podConfigFlags.AddFlags(statusCmd.Flags()) + + // Auth discovery flags: only for "auth" command + authConfigFlags = cdsclient.NewAuthConfigFlags() + authConfigFlags.AddFlags(authCmd.Flags()) + + // ICE test: uses CDS and auth args + cdsConfigFlags.AddFlags(iceTestCmd.Flags()) + authConfigFlags.AddFlags(iceTestCmd.Flags()) + + // ICE test timeout + iceTestCmd.Flags().IntVarP(&iceTesterPacketRate, "packet-rate", "r", 50, + "Packet rate [pkts/sec], 0 means flood test (Default: 50)") + iceTestCmd.Flags().DurationVarP(&iceTesterTimeout, "timeout", "t", icetester.DefaultICETesterTimeout, + "Timeout") + 
iceTestCmd.Flags().StringVar(&iceTesterImage, "ice-tester-image", icetester.DefaultICETesterImage, + "Default icetester container image") + iceTestCmd.Flags().BoolVar(&forceCleanup, "force-cleanup", false, "Remove tester namespace if it exists") + + // Add commands + rootCmd.AddCommand(configCmd) + rootCmd.AddCommand(statusCmd) + rootCmd.AddCommand(authCmd) + rootCmd.AddCommand(iceTestCmd) +} + +func main() { + if err := rootCmd.Execute(); err != nil { + fmt.Fprintf(os.Stderr, "Whoops. There was an error while executing your CLI '%s'", err) + os.Exit(1) + } +} + +// //////////////////////// +var jsonRegexp = regexp.MustCompile(`^\{\.?([^{}]+)\}$|^\.?([^{}]+)$`) + +// k8s.io/kubectl/pkg/cmd/get +func RelaxedJSONPathExpression(pathExpression string) (string, error) { + if len(pathExpression) == 0 { + return pathExpression, nil + } + submatches := jsonRegexp.FindStringSubmatch(pathExpression) + if submatches == nil { + return "", fmt.Errorf("unexpected path string, expected a 'name1.name2' or '.name1.name2' or '{name1.name2}' or '{.name1.name2}'") + } + if len(submatches) != 3 { + return "", fmt.Errorf("unexpected submatch list: %v", submatches) + } + var fieldSpec string + if len(submatches[1]) != 0 { + fieldSpec = submatches[1] + } else { + fieldSpec = submatches[2] + } + return fmt.Sprintf("{.%s}", fieldSpec), nil +} + +func ParseJSONPathFlag(output string) (*jsonpath.JSONPath, string, error) { + if !strings.HasPrefix(output, "jsonpath") { + return nil, output, nil + } + + as := strings.Split(output, "=") + if len(as) != 2 || as[0] != "jsonpath" { + return nil, output, fmt.Errorf("invalid jsonpath output definition %q", output) + } + + jsonQuery := jsonpath.New("output") + + // Parse and print jsonpath + fields, err := RelaxedJSONPathExpression(as[1]) + if err != nil { + return nil, output, fmt.Errorf("invalid jsonpath query %w", err) + } + + if err := jsonQuery.Parse(fields); err != nil { + return nil, output, fmt.Errorf("cannor parse jsonpath query %w", err) + } + + return jsonQuery, "jsonpath", nil +} diff --git a/cmd/stunnerctl/status.go b/cmd/stunnerctl/status.go new file mode 100644 index 00000000..ce4adc78 --- /dev/null +++ b/cmd/stunnerctl/status.go @@ -0,0 +1,118 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/spf13/cobra" + "sigs.k8s.io/yaml" + + v1 "github.com/l7mp/stunner/pkg/apis/v1" + cdsclient "github.com/l7mp/stunner/pkg/config/client" +) + +func runStatus(_ *cobra.Command, args []string) error { + jsonQuery, output, err := ParseJSONPathFlag(output) + if err != nil { + return err + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + gwNs := "default" + extraLog := "in namespace default" + if k8sConfigFlags.Namespace != nil && *k8sConfigFlags.Namespace != "" { + gwNs = *k8sConfigFlags.Namespace + extraLog = fmt.Sprintf("in namespace %s", gwNs) + } + // --all-namespaces overrides -n + if all { + gwNs = "" + extraLog = "in all namespaces" + } + + gw := "" + if len(args) > 0 { + gw = args[0] + } + if gwNs != "" && gw != "" { + extraLog += fmt.Sprintf("for gateway %s", gw) + } + + log.Debug("Searching for dataplane pods " + extraLog) + pods, err := cdsclient.DiscoverK8sStunnerdPods(ctx, k8sConfigFlags, podConfigFlags, + gwNs, gw, loggerFactory.NewLogger("stunnerd-fwd")) + if err != nil { + return fmt.Errorf("error searching for stunnerd pods: %w", err) + } + + for _, pod := range pods { + client := http.Client{ + Timeout: 5 * time.Second, + } + url := fmt.Sprintf("http://%s/status", 
pod.Addr) + res, err := client.Get(url) + if err != nil { + log.Errorf("Error querying status for stunnerd pod at URL %q on %s: %s", + url, pod.String(), err.Error()) + continue + } + + if res.StatusCode != http.StatusOK { + log.Errorf("Status query failed on %s with HTTP error code %s", + pod.String(), res.Status) + continue + } + + s := v1.StunnerStatus{} + err = json.NewDecoder(res.Body).Decode(&s) + if err != nil { + log.Errorf("Could not decode status response: %s", err.Error()) + continue + } + + switch output { + case "yaml": + if out, err := yaml.Marshal(s); err != nil { + return err + } else { + fmt.Println(string(out)) + } + case "json": + if out, err := json.Marshal(s); err != nil { + return err + } else { + fmt.Println(string(out)) + } + case "jsonpath": + values, err := jsonQuery.FindResults(s) + if err != nil { + return err + } + + if len(values) == 0 || len(values[0]) == 0 { + fmt.Println("") + } + + for arrIx := range values { + for valIx := range values[arrIx] { + fmt.Printf("%v\n", values[arrIx][valIx].Interface()) + } + } + case "summary": + fallthrough + default: + if pod.Proxy { + fmt.Printf("%s/%s:\n\t%s\n", pod.Namespace, pod.Name, s.Summary()) + } else { + fmt.Printf("%s:\n\t%s\n", pod.Addr, s.Summary()) + } + } + } + + return nil +} diff --git a/cmd/stunnerctl/stunnerctl b/cmd/stunnerctl/stunnerctl.sh similarity index 73% rename from cmd/stunnerctl/stunnerctl rename to cmd/stunnerctl/stunnerctl.sh index a9b1b88c..fa7272fb 100755 --- a/cmd/stunnerctl/stunnerctl +++ b/cmd/stunnerctl/stunnerctl.sh @@ -6,6 +6,9 @@ USAGE="stunnerctl running-config " COMMAND="$1" ARG="$2" +# stop the port-forwarder +trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT + jq=$(which jq) if [ -z "$jq" ] ; then echo "Error: cannot find jq in PATH" && exit 0 @@ -19,12 +22,24 @@ running_config () { name=${args[1]} [ -z $namespace -o -z $name ] && echo "cannot parse argument" && exit 0 - [ $(kubectl get cm -n $namespace -o json| jq ".items | map(select(.metadata.name==\"${name}\"))|length") -eq 0 ] && \ - echo "STUNner configmap ${namespace}/${name} not found" && exit 1 + # find the CDS server + CDS_SERVER_NAME=$(kubectl get pods -l stunner.l7mp.io/config-discovery-service=enabled --all-namespaces -o jsonpath='{.items[0].metadata.name}') + CDS_SERVER_NAMESPACE=$(kubectl get pods -l stunner.l7mp.io/config-discovery-service=enabled --all-namespaces -o jsonpath='{.items[0].metadata.namespace}') + [ -z $CDS_SERVER_NAME -o -z $CDS_SERVER_NAMESPACE ] && echo "Could not find CDS server" && exit 1 + + # start the port-forwarder + kubectl -n $CDS_SERVER_NAMESPACE port-forward pod/${CDS_SERVER_NAME} 63478:13478 >/dev/null 2>&1 & + # query the cds server + sleep 1 tmpfile=$(mktemp "./stunnerd-config.XXXXXX") - kubectl get cm -n $namespace $name -o jsonpath="{.data.stunnerd\.conf}" > $tmpfile + curl -s http://127.0.0.1:63478/api/v1/configs/${namespace}/${name} > $tmpfile + if grep -q "onfig not found" $tmpfile >/dev/null 2>&1; then + cat $tmpfile + exit 1 + fi + local AUTH_TYPE=$($jq ".auth.type" $tmpfile) [ $AUTH_TYPE == "plaintext" ] && AUTH_TYPE="static" [ $AUTH_TYPE == "longterm" ] && AUTH_TYPE="ephemeral" diff --git a/cmd/stunnerd/README.md b/cmd/stunnerd/README.md index 41eb67db..ba47d719 100644 --- a/cmd/stunnerd/README.md +++ b/cmd/stunnerd/README.md @@ -4,14 +4,14 @@ The `stunnerd` daemon implements the STUNner gateway dataplane. The daemon supports two basic modes. For quick tests `stunnerd` can be configured as a TURN server by specifying a TURN network URI on the command line. 
For more complex scenarios, and especially -for use in a Kubernetes cluster, `stunnerd` can take configuration from a config file. In addition, -`stunnerd` implements a watch-mode, so that it can actively monitor the config file for updates -and, once the config file has changed, automatically reconcile the TURN server to the new -configuration. This mode is intended for use with the [STUNner Kubernetes gateway -operator](https://github.com/l7mp/stunner-gateway-operator): the operator watches the Kubernetes -[Gateway API](https://gateway-api.sigs.k8s.io) resources and renders the active control plane -configuration into a ConfigMap, which is then mapped into the `stunnerd` pod's filesystem so that -the daemon can pick up the latest configuration using the watch mode. +for use in a Kubernetes cluster, `stunnerd` can take configuration from a config origin, which can +either be a config file or from a remote server reached over WebSocket. In addition, `stunnerd` +implements a watch-mode, so that it can actively monitor the config origin for updates and +automatically reconcile the TURN server to any new configuration. This mode is intended for use +with the [STUNner Kubernetes gateway operator](https://github.com/l7mp/stunner-gateway-operator): +the operator watches the Kubernetes [Gateway API](https://gateway-api.sigs.k8s.io) resources, +renders the active control plane configuration per each `stunnerd` pod and dynamically updates the +dataplane using STUNner's config discovery service. ## Features @@ -23,14 +23,17 @@ the daemon can pick up the latest configuration using the watch mode. Extensions for TCP Allocations * TURN transport over UDP, TCP, TLS/TCP and DTLS/UDP. * TURN/UDP listener CPU scaling. -* Two authentication modes via the long-term STUN/TURN credential mechanism: `plaintext` using a - static username/password pair, and `longterm` with dynamically generated time-scoped credentials. +* Two authentication modes via the long-term STUN/TURN credential mechanism: `static` using a + static username/password pair, and `ephemeral` with dynamically generated time-scoped + credentials. +* Peer port range filtering. ## Getting Started ### Installation As easy as with any Go program. + ```console cd stunner go build -o stunnerd cmd/stunnerd/main.go @@ -38,95 +41,79 @@ go build -o stunnerd cmd/stunnerd/main.go ### Usage -The below command will open a `stunnerd` UDP listener at `127.0.0.1:5000`, set `plaintext` -authentication using the username/password pair `user1/passwrd1`, and raises the debug level to the -maximum. +The below command will open a `stunnerd` UDP listener at `127.0.0.1:5000`, set `static` authentication using the username/password pair `user1/passwrd1`, and raise the debug level to the maximum. ```console ./stunnerd --log=all:TRACE turn://user1:passwd1@127.0.0.1:5000 ``` -Alternatively, run `stunnerd` in verbose mode with the config file taken from -`cmd/stunnerd/stunnerd.conf`. Adding the flag `-w` will enable watch mode. +Alternatively, run `stunnerd` in verbose mode with the config file taken from `cmd/stunnerd/stunnerd.conf`. Adding the flag `-w` will enable watch mode. ```console -./stunnerd -v -w -c cmd/stunnerd/stunnerd.conf +./stunnerd -v -w -c file://cmd/stunnerd/stunnerd.conf ``` -Type `./stunnerd` to see a short description of the command line arguments supported by `stunnerd`. +Type `./stunnerd -h` to get a short description of the supported command line arguments. 
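The same dataplane can also be embedded into your own Go program: the public `stunner` package exposes the calls `stunnerd` itself uses. A rough sketch with a placeholder TURN URI follows (see `cmd/stunnerd/main.go` for the full logic, including config watching and graceful shutdown):

```go
package main

import (
	"github.com/l7mp/stunner"
	stnrv1 "github.com/l7mp/stunner/pkg/apis/v1"
)

func main() {
	st := stunner.NewStunner(stunner.Options{
		Name:     "standalone-stunnerd",
		LogLevel: "all:INFO",
	})

	// Build a default config from a TURN URI and reconcile the server to it.
	conf, err := stunner.NewDefaultConfig("turn://user1:passwd1@127.0.0.1:5000")
	if err != nil {
		panic(err)
	}
	if err := st.Reconcile(conf); err != nil {
		// Listener restarts are reported as a special, non-fatal error.
		if _, ok := err.(stnrv1.ErrRestarted); !ok {
			panic(err)
		}
	}

	// Serve until the process is stopped; see cmd/stunnerd/main.go for
	// graceful shutdown via st.Shutdown().
	select {}
}
```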
-In practice, you'll rarely need to run `stunnerd` directly: just fire up the [prebuilt container -image](https://hub.docker.com/repository/docker/l7mp/stunnerd) in Kubernetes and you should be good -to go. +In practice, you'll rarely need to run `stunnerd` directly: just fire up the [prebuilt container image](https://hub.docker.com/repository/docker/l7mp/stunnerd) in Kubernetes and you should be good to go. Or better yet, [install](/docs/INSTALL.md) the STUNner Kubernetes gateway operator that will readily manage the `stunnerd` pods for each Gateway you create. ## Configuration -Using the below configuration, `stunnerd` will open 4 STUNner listeners: two for accepting -unencrypted connections at UDP/3478 and TCP/3478, and two for encrypted connections at TLS/TCP/3479 -and DTLS/UDP/3479. For easier debugging, the port for the transport relay connections opened by -`stunnerd` will be taken from [10000:19999] for the UDP listener, [20000:29999] for the TCP -listener, etc. The daemon will use `longterm` authentication, with the shared secret read from the -environment variable `$STUNNER_SHARED_SECRET` during initialization. The relay address is taken -from the `$STUNNER_ADDR` environment variable. +Using the below configuration, `stunnerd` will open 4 STUNner listeners: two for accepting unencrypted connections at UDP/3478 and TCP/3478, and two for encrypted connections at TLS/TCP/3479 and DTLS/UDP/3479. The daemon will use `ephemeral` authentication, with the shared secret taken from the environment variable `$STUNNER_SHARED_SECRET` during initialization. The relay address will be taken from the `$STUNNER_ADDR` environment variable. ``` yaml -version: v1alpha1 +version: v1 admin: name: my-stunnerd logLevel: all:DEBUG realm: "my-realm.example.com" -static: - auth: - type: longterm - credentials: - secret: $STUNNER_SHARED_SECRET - listeners: - - name: stunnerd-udp - address: "$STUNNER_ADDR" - protocol: udp - port: 3478 - minPort: 10000 - maxPort: 19999 - - name: stunnerd-tcp - address: "$STUNNER_ADDR" - protocol: tcp - port: 3478 - minPort: 20000 - maxPort: 29999 - - name: stunnerd-tls - protocol: tls - port: 3479 - minPort: 30000 - maxPort: 39999 - cert: "my-cert.cert" - key: "my-key.key" - - name: stunnerd-dtls - protocol: dtls - port: 3479 - cert: "my-cert.cert" - key: "my-key.key" - minPort: 40000 - maxPort: 49999 +auth: + type: ephemeral + credentials: + secret: $STUNNER_SHARED_SECRET +listeners: + - name: stunnerd-udp + address: "$STUNNER_ADDR" + protocol: turn-udp + port: 3478 + routes: + - default/media-plane + - name: stunnerd-tcp + address: "$STUNNER_ADDR" + protocol: turn-tcp + port: 3478 + routes: + - default/media-plane + - name: stunnerd-tls + address: "$STUNNER_ADDR" + protocol: turn-tls + port: 3479 + cert: "my-cert.cert" + key: "my-key.key" + routes: + - default/media-plane + - name: stunnerd-dtls + address: "$STUNNER_ADDR" + protocol: turn-dtls + port: 3479 + cert: "my-cert.cert" + key: "my-key.key" + routes: + - default/media-plane +clusters: + - name: stunner/iperf-server + protocol: UDP + type: STATIC + endpoints: + - 127.0.0.1 ``` -## Advanced features - -### TURN/UDP listener CPU scaling +STUNner can run multiple parallel readloops for TURN/UDP listeners, which allows it to scale to practically any number of CPUs and brings massive performance improvements for UDP workloads. This can be achieved by creating a configurable number of UDP readloop threads over the same TURN listener. 
The kernel will load-balance allocations across the readloops per the IP 5-tuple and so the same allocation will always stay at the same CPU, which is important for correct TURN operations. -STUNner can run multiple parallel readloops for TURN/UDP listeners, which allows it to scale to any -practical number of CPUs and brings massive performance improvements on UDP workloads. This is -achieved by creating a configurable number of UDP server sockets using the `SO_REUSEPORT` socket -option and spawn a separate goroutine to run a parallel readloop per each listener. The kernel will -load-balance allocations across the sockets/readloops per the IP 5-tuple, therefore the same -allocation will always stay at the same CPU which is important for correct operations. - -The feature is exposed via the command line flag `--udp-thread-num=`. The below -starts `stunnerd` watching the config file in `/etc/stunnerd/stunnerd.conf` using 32 parallel UDP -readloops (the default is 16). +The feature is exposed via the command line flag `--udp-thread-num=`. The below starts `stunnerd` watching the config file in `/etc/stunnerd/stunnerd.conf` using 32 parallel UDP readloops (the default is 16). ``` sh ./stunnerd -w -c /etc/stunnerd/stunnerd.conf --udp-thread-num=32 - ``` ## License @@ -137,5 +124,4 @@ MIT License - see [LICENSE](../../LICENSE) for full text. ## Acknowledgments -Initial code adopted from [pion/stun](https://github.com/pion/stun) and -[pion/turn](https://github.com/pion/turn). +Initial code adopted from [pion/stun](https://github.com/pion/stun) and [pion/turn](https://github.com/pion/turn). diff --git a/cmd/stunnerd/main.go b/cmd/stunnerd/main.go index e5454c3a..462e79d5 100644 --- a/cmd/stunnerd/main.go +++ b/cmd/stunnerd/main.go @@ -1,48 +1,76 @@ package main import ( - // "fmt" "context" + "fmt" "os" "os/signal" "syscall" "time" flag "github.com/spf13/pflag" + cliopt "k8s.io/cli-runtime/pkg/genericclioptions" "github.com/l7mp/stunner" - "github.com/l7mp/stunner/pkg/apis/v1alpha1" + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" + "github.com/l7mp/stunner/pkg/buildinfo" + cdsclient "github.com/l7mp/stunner/pkg/config/client" ) -// usage: stunnerd -v turn://user1:passwd1@127.0.0.1:3478?transport=udp - -const ( - defaultLoglevel = "all:INFO" - confUpdatePeriod = 1 * time.Second +var ( + version = "dev" + commitHash = "n/a" + buildDate = "" ) func main() { os.Args[0] = "stunnerd" - var config = flag.StringP("config", "c", "", "Config file.") - var level = flag.StringP("log", "l", "", "Log level (default: all:INFO).") - var watch = flag.BoolP("watch", "w", false, "Watch config file for updates (default: false).") + var config = flag.StringP("config", "c", "", "Config origin, either a valid address in the format IP:port, or HTTP URL to the CDS server, or literal \"k8s\" to discover the CDS server from Kubernetes, or a proper file name URI in the format file:// (overrides: STUNNER_CONFIG_ORIGIN)") + var level = flag.StringP("log", "l", "", "Log level (format: :, overrides: PION_LOG_*, default: all:INFO)") + var id = flag.StringP("id", "i", "", "Id for identifying with the CDS server (format: /, overrides: STUNNER_NAMESPACE/STUNNER_NAME, default: )") + var watch = flag.BoolP("watch", "w", false, "Watch config file for updates (default: false)") var udpThreadNum = flag.IntP("udp-thread-num", "u", 0, - "Number of readloop threads (CPU cores) per UDP listener. 
Zero disables UDP multithreading (default: 0).") - var dryRun = flag.BoolP("dry-run", "d", false, "Suppress side-effects, intended for testing (default: false).") - var verbose = flag.BoolP("verbose", "v", false, "Verbose logging, identical to <-l all:DEBUG>.") + "Number of readloop threads (CPU cores) per UDP listener. Zero disables UDP multithreading (default: 0)") + var dryRun = flag.BoolP("dry-run", "d", false, "Suppress side-effects, intended for testing (default: false)") + var verbose = flag.BoolP("verbose", "v", false, "Verbose logging, identical to <-l all:DEBUG>") + + // Kubernetes config flags + k8sConfigFlags := cliopt.NewConfigFlags(true) + k8sConfigFlags.AddFlags(flag.CommandLine) + + // CDS server discovery flags + cdsConfigFlags := cdsclient.NewCDSConfigFlags() + cdsConfigFlags.AddFlags(flag.CommandLine) + flag.Parse() - logLevel := defaultLoglevel + logLevel := stnrv1.DefaultLogLevel if *verbose { - // verbose mode on, override any loglevel logLevel = "all:DEBUG" } + if *level != "" { - // loglevel set on the comman line, use that one instead logLevel = *level } + configOrigin := stnrv1.DefaultConfigDiscoveryAddress + if origin, ok := os.LookupEnv(stnrv1.DefaultEnvVarConfigOrigin); ok { + configOrigin = origin + } + if *config != "" { + configOrigin = *config + } + + if *id == "" { + name, ok1 := os.LookupEnv(stnrv1.DefaultEnvVarName) + namespace, ok2 := os.LookupEnv(stnrv1.DefaultEnvVarNamespace) + if ok1 && ok2 { + *id = fmt.Sprintf("%s/%s", namespace, name) + } + } + st := stunner.NewStunner(stunner.Options{ + Name: *id, LogLevel: logLevel, DryRun: *dryRun, UDPListenerThreadNum: *udpThreadNum, @@ -51,55 +79,72 @@ func main() { log := st.GetLogger().NewLogger("stunnerd") - conf := make(chan v1alpha1.StunnerConfig, 1) + buildInfo := buildinfo.BuildInfo{Version: version, CommitHash: commitHash, BuildDate: buildDate} + log.Infof("Starting stunnerd id %q, STUNner %s ", st.GetId(), buildInfo.String()) + + conf := make(chan *stnrv1.StunnerConfig, 1) defer close(conf) - var cancelWatcher context.CancelFunc - if *config == "" && flag.NArg() == 1 { - log.Infof("starting %s with default configuration at TURN URI: %s", + var cancelConfigLoader context.CancelFunc + if flag.NArg() == 1 { + log.Infof("Starting %s with default configuration at TURN URI: %s", os.Args[0], flag.Arg(0)) c, err := stunner.NewDefaultConfig(flag.Arg(0)) if err != nil { - log.Errorf("could not load default STUNner config: %s", err.Error()) + log.Errorf("Could not load default STUNner config: %s", err.Error()) os.Exit(1) } - conf <- *c + conf <- c - } else if *config != "" && !*watch { - log.Infof("loading configuration from config file %q", *config) + } else if !*watch { + ctx, cancel := context.WithCancel(context.Background()) - c, err := stunner.LoadConfig(*config) + if configOrigin == "k8s" { + log.Info("Discovering configuration from Kubernetes") + cdsAddr, err := cdsclient.DiscoverK8sCDSServer(ctx, k8sConfigFlags, cdsConfigFlags, + st.GetLogger().NewLogger("cds-fwd")) + if err != nil { + log.Errorf("Error searching for CDS server: %s", err.Error()) + os.Exit(1) + } + configOrigin = cdsAddr.Addr + } + + log.Infof("Loading configuration from origin %q", configOrigin) + c, err := st.LoadConfig(configOrigin) if err != nil { log.Error(err.Error()) os.Exit(1) } + cancel() - conf <- *c - - } else if *config != "" && *watch { - log.Infof("watching configuration file at %q", *config) + conf <- c - // init stunnerd with an empty config: this bootstraps it with the default - // resources (above all, starts the 
health-checker) - initConf := stunner.NewZeroConfig() - log.Debug("bootstrapping with zero reconciliation") - if err := st.Reconcile(*initConf); err != nil { - log.Errorf("could not reconcile initial configuratoin: %s", err.Error()) - os.Exit(1) - } + } else if *watch { + log.Info("Bootstrapping stunnerd with minimal config") + z := cdsclient.ZeroConfig(st.GetId()) + conf <- z ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cancelWatcher = cancel - - if err := stunner.WatchConfig(ctx, stunner.Watcher{ - ConfigFile: *config, - ConfigChannel: conf, - Logger: st.GetLogger(), - }); err != nil { - log.Errorf("could not create config file watcher: %s", err.Error()) + cancelConfigLoader = cancel + + if configOrigin == "k8s" { + log.Info("Discovering configuration from Kubernetes") + cdsAddr, err := cdsclient.DiscoverK8sCDSServer(ctx, k8sConfigFlags, cdsConfigFlags, + st.GetLogger().NewLogger("cds-fwd")) + if err != nil { + log.Errorf("Error searching for CDS server: %s", err.Error()) + os.Exit(1) + } + configOrigin = cdsAddr.Addr + } + + log.Infof("Watching configuration at origin %q (ignoring delete-config updates)", configOrigin) + if err := st.WatchConfig(ctx, configOrigin, conf, true); err != nil { + log.Errorf("Could not run config watcher: %s", err.Error()) os.Exit(1) } } else { @@ -107,13 +152,9 @@ func main() { os.Exit(1) } - sigint := make(chan os.Signal, 1) - defer close(sigint) - signal.Notify(sigint, syscall.SIGINT) - sigterm := make(chan os.Signal, 1) defer close(sigterm) - signal.Notify(sigterm, syscall.SIGTERM) + signal.Notify(sigterm, syscall.SIGTERM, syscall.SIGINT) exit := make(chan bool, 1) defer close(exit) @@ -121,21 +162,18 @@ func main() { for { select { case <-exit: - log.Info("normal exit on graceful shutdown") - os.Exit(0) - - case <-sigint: - log.Info("normal exit") + log.Info("Normal exit on graceful shutdown") os.Exit(0) case <-sigterm: - log.Info("caught SIGTERM: performing a graceful shutdown") + log.Infof("Commencing graceful shutdown with %d active connection(s)", + st.AllocationCount()) st.Shutdown() - // cancel the config watcher - if cancelWatcher != nil { - log.Info("canceling config watcher") - cancelWatcher() + if cancelConfigLoader != nil { + log.Info("Canceling config loader") + cancelConfigLoader() + cancelConfigLoader = nil } go func() { @@ -150,25 +188,25 @@ func main() { }() case c := <-conf: - log.Trace("new configuration file available") + log.Infof("New configuration available: %q", c.String()) // command line loglevel overrides config if *verbose || *level != "" { c.Admin.LogLevel = logLevel } - // we have working stunnerd: reconcile - log.Debug("initiating reconciliation") - err := st.Reconcile(c) - log.Trace("reconciliation ready") - if err != nil { - if e, ok := err.(v1alpha1.ErrRestarted); ok { - log.Debugf("reconciliation ready: %s", e.Error()) + log.Debug("Initiating reconciliation") + + if err := st.Reconcile(c); err != nil { + if e, ok := err.(stnrv1.ErrRestarted); ok { + log.Debugf("Reconciliation ready: %s", e.Error()) } else { - log.Errorf("could not reconcile new configuration: %s, "+ - "rolling back to last running config", err.Error()) + log.Errorf("Could not reconcile new configuration "+ + "(running configuration unchanged): %s", err.Error()) } } + + log.Trace("Reconciliation ready") } } } diff --git a/cmd/stunnerd/stunnerd.conf b/cmd/stunnerd/stunnerd.conf index 03403d72..bee46539 100644 --- a/cmd/stunnerd/stunnerd.conf +++ b/cmd/stunnerd/stunnerd.conf @@ -23,25 +23,25 @@ listeners: - name: stunnerd-udp 
public_address: "$STUNNER_ADDR" address: "$STUNNER_ADDR" - protocol: udp + protocol: TURN-UDP port: $STUNNER_PORT - min_port: $STUNNER_MIN_PORT - max_port: $STUNNER_MAX_PORT + min_relay_port: $STUNNER_MIN_PORT + max_relay_port: $STUNNER_MAX_PORT routes: - open-cluster # - media-server-cluster - name: stunnerd-tcp public_address: "$STUNNER_ADDR" address: "$STUNNER_ADDR" - protocol: tcp + protocol: TURN-TCP port: $STUNNER_PORT - min_port: $STUNNER_MIN_PORT - max_port: $STUNNER_MAX_PORT + min_relay_port: $STUNNER_MIN_PORT + max_relay_port: $STUNNER_MAX_PORT routes: - open-cluster # - media-server-cluster # - name: stunnerd-tls - # protocol: tls + # protocol: TURN-TLS # port: 3479 # cert: # key: diff --git a/cmd/turncat/README.md b/cmd/turncat/README.md index 9d4d5f2d..9589affe 100644 --- a/cmd/turncat/README.md +++ b/cmd/turncat/README.md @@ -1,50 +1,63 @@ # turncat: Swiss-army-knife testing tool for STUNner -`turncat` is a STUN/TURN client to open a connection through a TURN server to an arbitrary remote -address/port. The main use is to open a local tunnel endpoint to any service running inside a -Kubernetes cluster via STUNner. This is very similar in functionality to `kubectl proxy`, but it -uses STUN/TURN to enter the cluster. +`turncat` is a STUN/TURN client to open a connection through a TURN server to an arbitrary remote address/port. +The main use is to open a local tunnel endpoint to any service running inside a Kubernetes cluster via STUNner. +This is very similar in functionality to `kubectl port-forward`, but it uses STUN/TURN to enter the cluster. +This is much faster than the TCP connection used by `kubectl`. -## Getting Started +## Installation -### Installation +Install the `turncat` binary using the standard Go toolchain and add it to `$PATH`. -As simple as it gets: +```console +go install github.com/l7mp/stunner/cmd/turncat@latest +``` + +You can also enforce a specific OS, CPU architecture, and STUNner version like below: + +```console +GOOS=windows GOARCH=amd64 go install github.com/l7mp/stunner/cmd/turncat@v0.17.5 +``` + +Building from source is as easy as it usually gets with Go: ```console cd stunner go build -o turncat cmd/turncat/main.go ``` -### Usage +## Usage + +Listen to client connections on the UDP listener `127.0.0.1:5000` and tunnel the received packets through the TURN server located at `192.0.2.1:3478` to the UDP listener located at `192.0.2.2:53`. +Use the [`static` STUN/TURN credential mechanism](/docs/AUTH.md) to authenticate with the TURN server and set the user/passwd to `test/test`: + +```console +./turncat --log=all:INFO,turncat:DEBUG udp://127.0.0.1:5000 turn://test:test@192.0.2.1:3478 \ + udp://192.0.2.2:53 +``` -Listen to client connections on the UDP listener `127.0.0.1:5000` and tunnel the received packets -through the TURN server located at `192.0.2.1:3478` to the UDP server located at -`192.0.2.2:53`. Use the longterm STUN/TURN credential mechanism to authenticate with the TURN -server and set the user/passwd to `test/test`: +TLS/DTLS should also work. +Below `--insecure` allows `turncat` to accept self-signed TLS certificates and `--verbose` is equivalent to setting all loggers to DEBUG mode (`-l all:DEBUG`). 
```console -./turncat --log=all:INFO,turncat:DEBUG udp://127.0.0.1:5000 turn://test:test@192.0.2.1:3478 udp://192.0.2.2:53 +./turncat --verbose --insecure udp://127.0.0.1:5000 \ + turn://test:test@192.0.2.1:3478?transport=tls udp://192.0.2.2:53 ``` -TLS/DTLS should also work fine; note that `--insecure` allows `turncat` to accept self-signed TLS -certificates and `--verbose` is equivalent to setting all `turncat` loggers to DEBUG mode (`-l -all:DEBUG`). +Alternatively, you can specify the special TURN server meta-URI `k8s://stunner/udp-gateway:udp-listener` to let `turncat` parse the running STUNner configuration from the active Kubernetes cluster. +The URI directs `turncat` to read the config of the STUNner Gateway called `udp-gateway` in the `stunner` namespace and connect to the TURN listener named `udp-listener`. +The CLI flag `-` instructs `turncat` to listen on the standard input: anything you type in the terminal will be sent via STUNner to the peer `udp://10.0.0.1:9001` (after you press Enter). +The CLI flag `-v` will enable verbose logging. ```console -./turncat --verbose --insecure udp://127.0.0.1:5000 turn://test:test@192.0.2.1:3478?transport=tls udp://192.0.2.2:53 +./turncat -v - k8s://stunner/udp-gateway:udp-listener udp://10.0.0.1:9001 ``` -Alternatively, specify the special TURN server URI `k8s://stunner/stunnerd-config:udp-listener` to -let `turncat` parse the running STUNner configuration from the active Kubernetes cluster. The URI -directs `turncat` to read the STUNner config from the ConfigMap named `stunnerd-config` in the -`stunner` namespace, and connect to the STUNner listener named `udp-listener`. The CLI flag `-` -instructs `turncat` to listen on the standard input: anything you type in the terminal will be sent -via STUNner to the peer `udp://10.0.0.1:9001` (after you press Enter). The CLI flag `-v` will -enable verbose logging. +Note that the standard `kubectl` command line flags are available. +For instance, the below will use the context `prod-europe` from the kubeconfig file `kube-prod.conf`: ```console -./turncat -v - k8s://stunner/stunnerd-config:udp-listener udp://10.0.0.1:9001 +./turncat --kubeconfig=kube-prod.conf --context prod-europe -v - k8s://... udp://... ``` ## License @@ -55,5 +68,4 @@ MIT License - see [LICENSE](../../LICENSE) for full text. ## Acknowledgments -Initial code adopted from [pion/stun](https://github.com/pion/stun) and -[pion/turn](https://github.com/pion/turn). +Initial code adopted from [pion/stun](https://github.com/pion/stun) and [pion/turn](https://github.com/pion/turn). 
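For reference, `turncat` builds on the pion/turn client library (see the Acknowledgments above). The standalone sketch below, which is not STUNner code, shows roughly the same flow at the lowest level: allocate a relay address on the TURN server and send a packet to a peer through it. Addresses and credentials are placeholders matching the examples above.

```go
package main

import (
	"net"

	"github.com/pion/logging"
	"github.com/pion/turn/v4"
)

func main() {
	// Local socket used to talk to the TURN server.
	conn, err := net.ListenPacket("udp4", "0.0.0.0:0")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client, err := turn.NewClient(&turn.ClientConfig{
		STUNServerAddr: "192.0.2.1:3478",
		TURNServerAddr: "192.0.2.1:3478",
		Conn:           conn,
		Username:       "test",
		Password:       "test",
		Realm:          "stunner.l7mp.io",
		LoggerFactory:  logging.NewDefaultLoggerFactory(),
	})
	if err != nil {
		panic(err)
	}
	defer client.Close()

	if err := client.Listen(); err != nil {
		panic(err)
	}

	// Allocate a relay transport address on the TURN server...
	relayConn, err := client.Allocate()
	if err != nil {
		panic(err)
	}
	defer relayConn.Close()

	// ...and send a packet through it to the peer.
	peer := &net.UDPAddr{IP: net.ParseIP("192.0.2.2"), Port: 53}
	if _, err := relayConn.WriteTo([]byte("ping"), peer); err != nil {
		panic(err)
	}
}
```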
diff --git a/cmd/turncat/main.go b/cmd/turncat/main.go index 844c72d6..9f305daa 100644 --- a/cmd/turncat/main.go +++ b/cmd/turncat/main.go @@ -2,7 +2,6 @@ package main import ( "context" - "encoding/json" "fmt" "os" "os/signal" @@ -11,24 +10,36 @@ import ( "time" "github.com/pion/logging" - "github.com/pion/turn/v2" + "github.com/pion/turn/v4" flag "github.com/spf13/pflag" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/config" + cliopt "k8s.io/cli-runtime/pkg/genericclioptions" "github.com/l7mp/stunner" - stunnerv1alpha1 "github.com/l7mp/stunner/pkg/apis/v1alpha1" + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" + "github.com/l7mp/stunner/pkg/buildinfo" + cdsclient "github.com/l7mp/stunner/pkg/config/client" "github.com/l7mp/stunner/pkg/logger" ) -const usage = "turncat [-l|--log ] [-i|--insecure] client server peer\n\tclient: ://:\n\tserver: @: | /:listener\n\tpeer: udp://:\n\tauth: \n" -const defaultStunnerdConfigfileName = "stunnerd.conf" - -var log logging.LeveledLogger -var defaultDuration time.Duration +const usage = `turncat [options] + client-addr: ://: + turn-server-addr: @: | /: + peer-addr: udp://: + auth: +` + +var ( + k8sConfigFlags *cliopt.ConfigFlags + cdsConfigFlags *cdsclient.CDSConfigFlags + log logging.LeveledLogger + defaultDuration time.Duration + loggerFactory logger.LoggerFactory + + version = "dev" + commitHash = "n/a" + buildDate = "" +) func main() { var Usage = func() { @@ -38,13 +49,29 @@ func main() { os.Args[0] = "turncat" defaultDuration, _ = time.ParseDuration("1h") - var level = flag.StringP("log", "l", "all:WARN", "Log level (default: all:WARN).") - // var user = flag.StringP("user", "u", "", "Set username. 
Auth fields in the TURN URI override this.") - // var passwd = flag.StringP("log", "l", "all:WARN", "Log level (default: all:WARN).") - var insecure = flag.BoolP("insecure", "i", false, "Insecure TLS mode, accept self-signed certificates (default: false).") - var verbose = flag.BoolP("verbose", "v", false, "Verbose logging, identical to -l all:DEBUG.") + + // Kubernetes config flags + k8sConfigFlags = cliopt.NewConfigFlags(true) + k8sConfigFlags.AddFlags(flag.CommandLine) + + // CDS server discovery flags + cdsConfigFlags = cdsclient.NewCDSConfigFlags() + cdsConfigFlags.AddFlags(flag.CommandLine) + + var serverName string + flag.StringVar(&serverName, "sni", "", "Server name (SNI) for TURN/TLS client connections") + var insecure = flag.BoolP("insecure", "i", false, "Insecure TLS mode, accept self-signed TURN server certificates (default: false)") + var level = flag.StringP("log", "l", "all:WARN", "Log level") + var verbose = flag.BoolP("verbose", "v", false, "Enable verbose logging, identical to -l all:DEBUG") + var help = flag.BoolP("help", "h", false, "Display this help text and exit") + flag.Parse() + if *help { + Usage() + os.Exit(0) + } + if flag.NArg() != 3 { Usage() os.Exit(1) @@ -54,8 +81,11 @@ func main() { *level = "all:DEBUG" } - logger := logger.NewLoggerFactory(*level) - log = logger.NewLogger("turncat-cli") + loggerFactory = logger.NewLoggerFactory(*level) + log = loggerFactory.NewLogger("turncat-cli") + + buildInfo := buildinfo.BuildInfo{Version: version, CommitHash: commitHash, BuildDate: buildDate} + log.Debugf("Starting turncat %s", buildInfo.String()) uri := flag.Arg(1) log.Debugf("Reading STUNner config from URI %q", uri) @@ -86,8 +116,9 @@ func main() { PeerAddr: flag.Arg(2), Realm: config.Auth.Realm, AuthGen: authGen, + ServerName: serverName, InsecureMode: *insecure, - LoggerFactory: logger, + LoggerFactory: loggerFactory, } t, err := stunner.NewTurncat(cfg) if err != nil { @@ -103,7 +134,7 @@ func main() { t.Close() } -func getStunnerConf(uri string) (*stunnerv1alpha1.StunnerConfig, error) { +func getStunnerConf(uri string) (*stnrv1.StunnerConfig, error) { s := strings.Split(uri, "://") if len(s) < 2 { return nil, fmt.Errorf("cannot parse server URI") @@ -115,14 +146,14 @@ func getStunnerConf(uri string) (*stunnerv1alpha1.StunnerConfig, error) { case "k8s": conf, err := getStunnerConfFromK8s(def) if err != nil { - return nil, fmt.Errorf("Could not read running STUNner configuration from "+ + return nil, fmt.Errorf("could not read running STUNner configuration from "+ "Kubernetes: %w", err) } return conf, nil case "turn": conf, err := getStunnerConfFromCLI(def) if err != nil { - return nil, fmt.Errorf("Could not generate STUNner configuration from "+ + return nil, fmt.Errorf("could not generate STUNner configuration from "+ "URI %q: %w", uri, err) } return conf, nil @@ -131,46 +162,40 @@ func getStunnerConf(uri string) (*stunnerv1alpha1.StunnerConfig, error) { } } -func getStunnerConfFromK8s(def string) (*stunnerv1alpha1.StunnerConfig, error) { +func getStunnerConfFromK8s(def string) (*stnrv1.StunnerConfig, error) { namespace, name, listener, err := parseK8sDef(def) if err != nil { return nil, err } - ctx := context.Background() - cfg := config.GetConfigOrDie() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cli, err := client.New(cfg, client.Options{}) + log.Debug("Searching for CDS server") + cdsAddr, err := cdsclient.DiscoverK8sCDSServer(ctx, k8sConfigFlags, cdsConfigFlags, + loggerFactory.NewLogger("cds-fwd")) if err != nil { - return 
nil, err - } - - // get the configmap - lookupKey := types.NamespacedName{ - Namespace: namespace, - Name: name, + return nil, fmt.Errorf("error searching for CDS server: %w", err) } - cm := &corev1.ConfigMap{} - err = cli.Get(ctx, lookupKey, cm) + cds, err := cdsclient.NewConfigNamespaceNameAPI(cdsAddr.Addr, namespace, name, + loggerFactory.NewLogger("cds-client")) if err != nil { - return nil, err + return nil, fmt.Errorf("error creating CDS client: %w", err) } - //parse out the stunnerconf - jsonConf, found := cm.Data[defaultStunnerdConfigfileName] - if !found { - return nil, fmt.Errorf("error unpacking STUNner configmap: %s not found", - defaultStunnerdConfigfileName) + confs, err := cds.Get(ctx) + if err != nil { + return nil, fmt.Errorf("error obtaining config from CDS client: %w", err) } - - conf := stunnerv1alpha1.StunnerConfig{} - if err := json.Unmarshal([]byte(jsonConf), &conf); err != nil { - return nil, err + if len(confs) != 1 { + return nil, fmt.Errorf("invalid number of configs returned from CDS client: %d", + len(confs)) } + conf := confs[0] // remove all but the named listener - ls := []stunnerv1alpha1.ListenerConfig{} + ls := []stnrv1.ListenerConfig{} for _, l := range conf.Listeners { // parse out the listener name (as per the Gateway API) from the TURN listener-name // (this is in the form: // @@ -196,13 +221,13 @@ func getStunnerConfFromK8s(def string) (*stunnerv1alpha1.StunnerConfig, error) { "specified TURN server URI", listener) } - conf.Listeners = []stunnerv1alpha1.ListenerConfig{{}} + conf.Listeners = make([]stnrv1.ListenerConfig, 1) copy(conf.Listeners, ls) - return &conf, nil + return conf, nil } -func getStunnerConfFromCLI(def string) (*stunnerv1alpha1.StunnerConfig, error) { +func getStunnerConfFromCLI(def string) (*stnrv1.StunnerConfig, error) { uri := fmt.Sprintf("turn://%s", def) conf, err := stunner.NewDefaultConfig(uri) @@ -212,11 +237,11 @@ func getStunnerConfFromCLI(def string) (*stunnerv1alpha1.StunnerConfig, error) { u, err := stunner.ParseUri(uri) if err != nil { - return nil, fmt.Errorf("Invalid STUNner URI %q: %s", uri, err) + return nil, fmt.Errorf("invalid STUNner URI %q: %s", uri, err) } if u.Username == "" || u.Password == "" { - return nil, fmt.Errorf("Username/password must be set: '%s'", uri) + return nil, fmt.Errorf("username/password must be set: '%s'", uri) } conf.Listeners[0].PublicAddr = u.Address @@ -225,15 +250,15 @@ func getStunnerConfFromCLI(def string) (*stunnerv1alpha1.StunnerConfig, error) { return conf, nil } -func getAuth(config *stunnerv1alpha1.StunnerConfig) (stunner.AuthGen, error) { +func getAuth(config *stnrv1.StunnerConfig) (stunner.AuthGen, error) { auth := config.Auth - atype, err := stunnerv1alpha1.NewAuthType(auth.Type) + atype, err := stnrv1.NewAuthType(auth.Type) if err != nil { return nil, err } switch atype { - case stunnerv1alpha1.AuthTypeLongTerm: + case stnrv1.AuthTypeEphemeral: s, found := auth.Credentials["secret"] if !found { return nil, fmt.Errorf("cannot find shared secret for %s authentication", @@ -243,7 +268,7 @@ func getAuth(config *stunnerv1alpha1.StunnerConfig) (stunner.AuthGen, error) { return turn.GenerateLongTermCredentials(s, defaultDuration) }, nil - case stunnerv1alpha1.AuthTypePlainText: + case stnrv1.AuthTypeStatic: u, found := auth.Credentials["username"] if !found { return nil, fmt.Errorf("cannot find username for %s authentication", @@ -264,7 +289,7 @@ func getAuth(config *stunnerv1alpha1.StunnerConfig) (stunner.AuthGen, error) { } } -func getStunnerURI(config 
*stunnerv1alpha1.StunnerConfig) (string, error) { +func getStunnerURI(config *stnrv1.StunnerConfig) (string, error) { // we should have only a single listener at this point if len(config.Listeners) != 1 { return "", fmt.Errorf("cannot find listener in STUNner configuration: %s", @@ -282,16 +307,21 @@ func getStunnerURI(config *stunnerv1alpha1.StunnerConfig) (string, error) { return "", fmt.Errorf("no protocol for listener %q", l.Name) } - return fmt.Sprintf("%s://%s:%d", strings.ToLower(l.Protocol), l.PublicAddr, - l.PublicPort), nil + return stunner.GetStandardURLFromListener(&l) } func parseK8sDef(def string) (string, string, string, error) { - re := regexp.MustCompile(`([0-9A-Za-z_-]+)/([0-9A-Za-z_-]+):([0-9A-Za-z_-]+)`) + re := regexp.MustCompile(`^/([0-9A-Za-z_-]+):([0-9A-Za-z_-]+)$`) xs := re.FindStringSubmatch(def) - if len(xs) != 4 { - return "", "", "", fmt.Errorf("cannot parse STUNner configmap def: %q", def) + if len(xs) == 3 && k8sConfigFlags.Namespace != nil { + return *k8sConfigFlags.Namespace, xs[1], xs[2], nil + } + + re = regexp.MustCompile(`^([0-9A-Za-z_-]+)/([0-9A-Za-z_-]+):([0-9A-Za-z_-]+)$`) + xs = re.FindStringSubmatch(def) + if len(xs) == 4 { + return xs[1], xs[2], xs[3], nil } - return xs[1], xs[2], xs[3], nil + return "", "", "", fmt.Errorf("cannot parse STUNner K8s URI: %q", def) } diff --git a/config.go b/config.go index a13d12d2..e39675be 100644 --- a/config.go +++ b/config.go @@ -3,28 +3,20 @@ package stunner import ( "context" "encoding/base64" - "encoding/json" - "errors" "fmt" - "os" - "regexp" - "strconv" "strings" - "time" - "github.com/fsnotify/fsnotify" - "github.com/pion/logging" - "github.com/pion/transport/v2" - "sigs.k8s.io/yaml" + "github.com/pion/transport/v3" "github.com/l7mp/stunner/internal/resolver" - "github.com/l7mp/stunner/pkg/apis/v1alpha1" + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" + "github.com/l7mp/stunner/pkg/config/client" ) -const confUpdatePeriod = 1 * time.Second - // Options defines various options for the STUNner server. type Options struct { + // Name is the identifier of this stunnerd daemon instance. Defaults to hostname. + Name string // DryRun suppresses sideeffects: STUNner will not initialize listener sockets and bring up // the TURN server, and it will not fire up the health-check and the metrics // servers. Intended for testing, default is false. @@ -52,30 +44,11 @@ type Options struct { Net transport.Net } -// NewZeroConfig builds a zero configuration useful for bootstrapping STUNner. It starts with -// plaintext authentication and opens no listeners and clusters. -func NewZeroConfig() *v1alpha1.StunnerConfig { - return &v1alpha1.StunnerConfig{ - ApiVersion: v1alpha1.ApiVersion, - Admin: v1alpha1.AdminConfig{}, - Auth: v1alpha1.AuthConfig{ - Type: "plaintext", - Realm: v1alpha1.DefaultRealm, - Credentials: map[string]string{ - "username": "dummy-username", - "password": "dummy-password", - }, - }, - Listeners: []v1alpha1.ListenerConfig{}, - Clusters: []v1alpha1.ClusterConfig{}, - } -} - // NewDefaultConfig builds a default configuration from a TURN server URI. Example: the URI // `turn://user:pass@127.0.0.1:3478?transport=udp` will be parsed into a STUNner configuration with // a server running on the localhost at UDP port 3478, with plain-text authentication using the // username/password pair `user:pass`. Health-checks and metric scarping are disabled. 
-func NewDefaultConfig(uri string) (*v1alpha1.StunnerConfig, error) { +func NewDefaultConfig(uri string) (*stnrv1.StunnerConfig, error) { u, err := ParseUri(uri) if err != nil { return nil, fmt.Errorf("Invalid URI '%s': %s", uri, err) @@ -86,36 +59,37 @@ func NewDefaultConfig(uri string) (*v1alpha1.StunnerConfig, error) { } h := "" - c := &v1alpha1.StunnerConfig{ - ApiVersion: v1alpha1.ApiVersion, - Admin: v1alpha1.AdminConfig{ - LogLevel: v1alpha1.DefaultLogLevel, + c := &stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ + LogLevel: stnrv1.DefaultLogLevel, // MetricsEndpoint: "http://:8088", HealthCheckEndpoint: &h, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Type: "plaintext", - Realm: v1alpha1.DefaultRealm, + Realm: stnrv1.DefaultRealm, Credentials: map[string]string{ "username": u.Username, "password": u.Password, }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", Protocol: u.Protocol, Addr: u.Address, Port: u.Port, Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Type: "STATIC", Endpoints: []string{"0.0.0.0/0"}, }}, } - if strings.ToUpper(u.Protocol) == "TLS" || strings.ToUpper(u.Protocol) == "DTLS" { + p := strings.ToUpper(u.Protocol) + if p == "TLS" || p == "DTLS" || p == "TURN-TLS" || p == "TURN-DTLS" { certPem, keyPem, err := GenerateSelfSignedKey() if err != nil { return nil, err @@ -131,273 +105,60 @@ func NewDefaultConfig(uri string) (*v1alpha1.StunnerConfig, error) { return c, nil } -// LoadConfig loads a configuration from a file, substituting environment variables for -// placeholders in the configuration file. Returns the new configuration or error if load fails. -func LoadConfig(config string) (*v1alpha1.StunnerConfig, error) { - c, err := os.ReadFile(config) - if err != nil { - return nil, fmt.Errorf("could not read config: %s\n", err.Error()) - } - - // substitute environtment variables - // default port: STUNNER_PUBLIC_PORT -> STUNNER_PORT - re := regexp.MustCompile(`^[0-9]+$`) - port, ok := os.LookupEnv("STUNNER_PORT") - if !ok || (ok && port == "") || (ok && !re.Match([]byte(port))) { - publicPort := v1alpha1.DefaultPort - publicPortStr, ok := os.LookupEnv("STUNNER_PUBLIC_PORT") - if ok { - if p, err := strconv.Atoi(publicPortStr); err == nil { - publicPort = p - } - } - os.Setenv("STUNNER_PORT", fmt.Sprintf("%d", publicPort)) - } - - e := os.ExpandEnv(string(c)) - - s := v1alpha1.StunnerConfig{} - // try YAML first - if err = yaml.Unmarshal([]byte(e), &s); err != nil { - // if it fails, try to json - if errJ := json.Unmarshal([]byte(e), &s); err != nil { - return nil, fmt.Errorf("could not parse config file at '%s': "+ - "YAML parse error: %s, JSON parse error: %s\n", - config, err.Error(), errJ.Error()) - } - } - - return &s, nil -} - // GetConfig returns the configuration of the running STUNner daemon. 
-func (s *Stunner) GetConfig() *v1alpha1.StunnerConfig { +func (s *Stunner) GetConfig() *stnrv1.StunnerConfig { s.log.Tracef("GetConfig") // singletons, but we want to avoid panics when GetConfig is called on an uninitialized // STUNner object - adminConf := v1alpha1.AdminConfig{} + adminConf := stnrv1.AdminConfig{} if len(s.adminManager.Keys()) > 0 { - adminConf = *s.GetAdmin().GetConfig().(*v1alpha1.AdminConfig) + adminConf = *s.GetAdmin().GetConfig().(*stnrv1.AdminConfig) } - authConf := v1alpha1.AuthConfig{} + authConf := stnrv1.AuthConfig{} if len(s.authManager.Keys()) > 0 { - authConf = *s.GetAuth().GetConfig().(*v1alpha1.AuthConfig) + authConf = *s.GetAuth().GetConfig().(*stnrv1.AuthConfig) } listeners := s.listenerManager.Keys() clusters := s.clusterManager.Keys() - c := v1alpha1.StunnerConfig{ + c := stnrv1.StunnerConfig{ ApiVersion: s.version, Admin: adminConf, Auth: authConf, - Listeners: make([]v1alpha1.ListenerConfig, len(listeners)), - Clusters: make([]v1alpha1.ClusterConfig, len(clusters)), + Listeners: make([]stnrv1.ListenerConfig, len(listeners)), + Clusters: make([]stnrv1.ClusterConfig, len(clusters)), } for i, name := range listeners { - c.Listeners[i] = *s.GetListener(name).GetConfig().(*v1alpha1.ListenerConfig) + c.Listeners[i] = *s.GetListener(name).GetConfig().(*stnrv1.ListenerConfig) } for i, name := range clusters { - c.Clusters[i] = *s.GetCluster(name).GetConfig().(*v1alpha1.ClusterConfig) + c.Clusters[i] = *s.GetCluster(name).GetConfig().(*stnrv1.ClusterConfig) } return &c } -type Watcher struct { - // ConfigFile specifies the config file name to watch. - ConfigFile string - // ConfigChannel is used to return the configs read. - ConfigChannel chan<- v1alpha1.StunnerConfig - // Logger is a logger factory as returned by, e.g., stunner.GetLogger(). - Logger logging.LoggerFactory - // Log is a leveled logger used to report progress. Either Logger or Log must be specified. - Log logging.LeveledLogger -} - -// WatchConfig will watch a configuration file specified in the `Watcher.ConfigFile` parameter for -// changes and emit a new `StunnerConfig` on `Watcher.ConfigChannel` each time the file changes. If -// no file exists at the given path, then WatchConfig will periodically retry until the file -// appears. The configuration sent through the channel is not validated, make sure to check for -// syntax errors on the receiver side. Use the `context` to cancel the watcher. -func WatchConfig(ctx context.Context, w Watcher) error { - if w.ConfigChannel == nil { - return errors.New("uninitialized config channel") - } - - if w.ConfigFile == "" { - return errors.New("uninitialized config file path") - } - - if w.Log == nil { - w.Log = w.Logger.NewLogger("watch-config") - } - w.Log.Tracef("WatchConfig") - - go func() { - for { - // try to watch - if ok := configWatcher(ctx, w); !ok { - return - } - - if ok := tryWatchConfig(ctx, w); !ok { - return - } - } - - }() - - return nil -} - -// tryWatchConfig runs a timer to look for the config file at the given path and returns it -// immediately once found. Returns true if further action is needed (configWatcher has to be -// started) or false on normal exit. 
-func tryWatchConfig(ctx context.Context, w Watcher) bool { - w.Log.Tracef("tryWatchConfig") - config := w.ConfigFile - - ticker := time.NewTicker(confUpdatePeriod) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return false - - case <-ticker.C: - w.Log.Debugf("trying to read config file %q from periodic timer", - config) - - // check if config file exists and it is readable - if _, err := os.Stat(config); errors.Is(err, os.ErrNotExist) { - w.Log.Debugf("config file %q does not exist", config) - - // report status in every 10th second - if time.Now().Second()%10 == 0 { - w.Log.Warnf("waiting for config file %q", config) - } - - continue - } - - return true - } - } -} - -// configWatcher actually watches the config and emits the configs found on the specified -// channel. Returns true if further action is needed (tryWatachConfig is to be started) or false on -// normal exit. -func configWatcher(ctx context.Context, w Watcher) bool { - w.Log.Tracef("configWatcher") - prev := v1alpha1.StunnerConfig{} - - // create a new watcher - watcher, err := fsnotify.NewWatcher() +// LoadConfig loads a configuration from an origin. This is a shim wrapper around configclient.Load. +func (s *Stunner) LoadConfig(origin string) (*stnrv1.StunnerConfig, error) { + client, err := client.New(origin, s.name, s.logger) if err != nil { - return true + return nil, err } - defer watcher.Close() - config := w.ConfigFile - ch := w.ConfigChannel - - if err := watcher.Add(config); err != nil { - w.Log.Debugf("could not add config file %q watcher: %s", config, err.Error()) - return true - } + return client.Load() +} - // emit an initial config - c, err := LoadConfig(config) +// WatchConfig watches a configuration from an origin. This is a shim wrapper around configclient.Watch. 
+func (s *Stunner) WatchConfig(ctx context.Context, origin string, ch chan<- *stnrv1.StunnerConfig, suppressDelete bool) error { + client, err := client.New(origin, s.name, s.logger) if err != nil { - w.Log.Warnf("could not load config file %q: %s", config, err.Error()) - return true + return err } - w.Log.Debugf("config file successfully loaded from %q", config) - - // send a deepcopy over the channel - copy := v1alpha1.StunnerConfig{} - c.DeepCopyInto(©) - ch <- copy - - // save deepcopy so that we can filter repeated events - c.DeepCopyInto(&prev) - - for { - select { - case <-ctx.Done(): - return false - - case e, ok := <-watcher.Events: - if !ok { - w.Log.Debug("config watcher event handler received invalid event") - return true - } - - w.Log.Debugf("received watcher event: %s", e.String()) - - if e.Has(fsnotify.Remove) { - w.Log.Warnf("config file deleted %q, disabling watcher", e.Op.String()) - - if err := watcher.Remove(config); err != nil { - w.Log.Debugf("could not remove config file %q watcher: %s", - config, err.Error()) - } - - return true - } - - if !e.Has(fsnotify.Write) { - w.Log.Debugf("unhandled notify op on config file %q (ignoring): %s", - e.Name, e.Op.String()) - continue - } - - w.Log.Debugf("loading configuration file: %s", config) - c, err = LoadConfig(config) - if err != nil { - // assume it is a YAML/JSON syntax error (LoadConfig does not - // validate): report and ignore - w.Log.Warnf("could not load config file %q: %s", config, err.Error()) - continue - } - - // suppress repeated events - if c.DeepEqual(&prev) { - w.Log.Debugf("ignoring recurrent notify event for the same config file") - continue - } - - w.Log.Debugf("config file successfully loaded from %q", config) - - copy := v1alpha1.StunnerConfig{} - c.DeepCopyInto(©) - ch <- copy - - // save deepcopy so that we can filter repeated events - c.DeepCopyInto(&prev) - - case err, ok := <-watcher.Errors: - if !ok { - w.Log.Debugf("config watcher error handler received invalid error") - return true - } - - w.Log.Debugf("watcher error, deactivating watcher: %s", err.Error()) - - if err := watcher.Remove(config); err != nil { - w.Log.Debugf("could not remove config file %q watcher: %s", - config, err.Error()) - } - - return true - } - } + return client.Watch(ctx, ch, suppressDelete) } diff --git a/config_test.go b/config_test.go index b83faf34..0a9b247d 100644 --- a/config_test.go +++ b/config_test.go @@ -1,19 +1,22 @@ package stunner import ( + "context" "fmt" "net" - // "reflect" - "context" + "net/http" "os" + "strings" "testing" "time" - "github.com/pion/transport/test" + "github.com/gorilla/websocket" + "github.com/pion/transport/v3/test" "github.com/stretchr/testify/assert" "sigs.k8s.io/yaml" - "github.com/l7mp/stunner/pkg/apis/v1alpha1" + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" + cdsclient "github.com/l7mp/stunner/pkg/config/client" "github.com/l7mp/stunner/pkg/logger" ) @@ -35,8 +38,8 @@ func TestStunnerDefaultServerVNet(t *testing.T) { for _, conf := range []string{ "turn://user1:passwd1@1.2.3.4:3478?transport=udp", - "udp://user1:passwd1@1.2.3.4:3478?transport=udp", - "udp://user1:passwd1@1.2.3.4:3478", + "turn://user1:passwd1@1.2.3.4?transport=udp", + "turn://user1:passwd1@1.2.3.4:3478", } { testName := fmt.Sprintf("TestStunner_NewDefaultConfig_URI:%s", conf) t.Run(testName, func(t *testing.T) { @@ -49,7 +52,7 @@ func TestStunnerDefaultServerVNet(t *testing.T) { // patch in the loglevel c.Admin.LogLevel = stunnerTestLoglevel - checkDefaultConfig(t, c, "UDP") + 
checkDefaultConfig(t, c, "TURN-UDP") // patch in the vnet log.Debug("building virtual network") @@ -64,7 +67,7 @@ func TestStunnerDefaultServerVNet(t *testing.T) { }) log.Debug("starting stunnerd") - assert.NoError(t, stunner.Reconcile(*c), "starting server") + assert.NoError(t, stunner.Reconcile(c), "starting server") log.Debug("creating a client") lconn, err := v.wan.ListenPacket("udp4", "0.0.0.0:0") @@ -104,12 +107,12 @@ func TestStunnerConfigFileRoundTrip(t *testing.T) { // patch in the loglevel c.Admin.LogLevel = stunnerTestLoglevel - checkDefaultConfig(t, c, "UDP") + checkDefaultConfig(t, c, "TURN-UDP") file, err2 := yaml.Marshal(c) assert.NoError(t, err2, "marschal config fike") - newConf := &v1alpha1.StunnerConfig{} + newConf := &stnrv1.StunnerConfig{} err = yaml.Unmarshal(file, newConf) assert.NoError(t, err, "unmarschal config from file") @@ -135,7 +138,7 @@ func TestStunnerConfigFileWatcher(t *testing.T) { log.Debug("creating a temp file for config") f, err := os.CreateTemp("", "stunner_conf_*.yaml") assert.NoError(t, err, "creating temp config file") - // we just need the filename for now so we remove the fle first + // we just need the filename for now so we remove the file first file := f.Name() assert.NoError(t, os.Remove(file), "removing temp config file") @@ -143,17 +146,15 @@ func TestStunnerConfigFileWatcher(t *testing.T) { stunner := NewStunner(Options{LogLevel: stunnerTestLoglevel}) log.Debug("starting watcher") - conf := make(chan v1alpha1.StunnerConfig, 1) + conf := make(chan *stnrv1.StunnerConfig, 1) defer close(conf) log.Debug("init watcher with nonexistent config file") ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err = WatchConfig(ctx, Watcher{ - ConfigFile: file, - ConfigChannel: conf, - Logger: loggerFactory, - }) + + url := "file://" + file + err = stunner.WatchConfig(ctx, url, conf, false) assert.NoError(t, err, "creating config watcher") // nothing should happen here: wait a bit so that the watcher has comfortable time to start @@ -183,14 +184,15 @@ func TestStunnerConfigFileWatcher(t *testing.T) { _, err = f.Write(y) assert.NoError(t, err, "write config to temp file") - // wait a bit so that the watcher has time to react - time.Sleep(50 * time.Millisecond) + // // wait a bit so that the watcher has time to react + // time.Sleep(50 * time.Millisecond) - // read back result - c2 := <-conf - checkDefaultConfig(t, &c2, "UDP") + c2, ok := <-conf + assert.True(t, ok, "config emitted") + checkDefaultConfig(t, c2, "TURN-UDP") + + log.Debug("write a wrong config file: WatchConfig validates") - log.Debug("write a wrong config file (WatchConfig does not validate)") c2.Listeners[0].Protocol = "dummy" y, err = yaml.Marshal(c2) assert.NoError(t, err, "marshal config file") @@ -204,15 +206,20 @@ func TestStunnerConfigFileWatcher(t *testing.T) { // this makes sure that we do not share anything with ConfigWatch c2.Listeners[0].PublicAddr = "AAAAAAAAAAAAAa" - // wait a bit so that the watcher has time to react + // we should not read anything so that channel should not br redable time.Sleep(50 * time.Millisecond) - - c3 := <-conf - checkDefaultConfig(t, &c3, "dummy") + readable := false + select { + case _, ok := <-conf: + readable = ok + default: + readable = false + } + assert.False(t, readable, "wrong config file does not trigger a watch event") log.Debug("update the config file and check") - c3.Listeners[0].Protocol = "TCP" - y, err = yaml.Marshal(c3) + c2.Listeners[0].Protocol = "TURN-TCP" + y, err = yaml.Marshal(c2) assert.NoError(t, err, 
"marshal config file") err = f.Truncate(0) assert.NoError(t, err, "truncate temp file") @@ -221,18 +228,327 @@ func TestStunnerConfigFileWatcher(t *testing.T) { _, err = f.Write(y) assert.NoError(t, err, "write config to temp file") - // wait a bit so that the watcher has time to react + c3 := <-conf + checkDefaultConfig(t, c3, "TURN-TCP") + + stunner.Close() +} + +const ( + testConfigV1 = `{"version":"v1","admin":{"name":"ns1/tester", "loglevel":"all:ERROR"},"auth":{"type":"static","credentials":{"password":"passwd1","username":"user1"}},"listeners":[{"name":"udp","protocol":"turn-udp","address":"1.2.3.4","port":3478,"routes":["echo-server-cluster"]}],"clusters":[{"name":"echo-server-cluster","type":"STATIC","endpoints":["1.2.3.5"]}]}` + testConfigV1A1 = `{"version":"v1alpha1","admin":{"name":"ns1/tester", "loglevel":"all:ERROR"},"auth":{"type":"ephemeral","credentials":{"secret":"test-secret"}},"listeners":[{"name":"udp","protocol":"turn-udp","address":"1.2.3.4","port":3478,"routes":["echo-server-cluster"]}],"clusters":[{"name":"echo-server-cluster","type":"STATIC","endpoints":["1.2.3.5"]}]}` +) + +// test with v1alpha1 and v1 +func TestStunnerConfigFileWatcherMultiVersion(t *testing.T) { + lim := test.TimeOut(time.Second * 10) + defer lim.Stop() + + loggerFactory := logger.NewLoggerFactory(stunnerTestLoglevel) + log := loggerFactory.NewLogger("test-watcher") + + testName := "TestStunnerConfigFileWatcher" + log.Debugf("-------------- Running test: %s -------------", testName) + + log.Debug("creating a temp file for config") + f, err := os.CreateTemp("", "stunner_conf_*.yaml") + assert.NoError(t, err, "creating temp config file") + // we just need the filename for now so we remove the file first + file := f.Name() + assert.NoError(t, os.Remove(file), "removing temp config file") + + log.Debug("creating a stunnerd") + stunner := NewStunner(Options{LogLevel: stunnerTestLoglevel}) + + log.Debug("starting watcher") + conf := make(chan *stnrv1.StunnerConfig, 1) + defer close(conf) + + log.Debug("init watcher with nonexistent config file") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + url := "file://" + file + err = stunner.WatchConfig(ctx, url, conf, false) + assert.NoError(t, err, "creating config watcher") + + // nothing should happen here: wait a bit so that the watcher has comfortable time to start + time.Sleep(50 * time.Millisecond) + + log.Debug("write v1 config and check") + + // recreate the temp file and write config + f, err = os.OpenFile(file, os.O_RDWR|os.O_CREATE, 0644) + assert.NoError(t, err, "recreate temp config file") + defer os.Remove(file) + + err = f.Truncate(0) + assert.NoError(t, err, "truncate temp file") + _, err = f.Seek(0, 0) + assert.NoError(t, err, "seek temp file") + _, err = f.WriteString(testConfigV1) + assert.NoError(t, err, "write config to temp file") + + c2, ok := <-conf + assert.True(t, ok, "config emitted") + + assert.Equal(t, stnrv1.ApiVersion, c2.ApiVersion, "version") + assert.Equal(t, "all:ERROR", c2.Admin.LogLevel, "loglevel") + assert.True(t, c2.Auth.Type == "static" || c2.Auth.Type == "ephemeral", "loglevel") + assert.Len(t, c2.Listeners, 1, "listeners len") + assert.Equal(t, "udp", c2.Listeners[0].Name, "listener name") + assert.Equal(t, "TURN-UDP", c2.Listeners[0].Protocol, "listener proto") + assert.Equal(t, 3478, c2.Listeners[0].Port, "listener port") + assert.Len(t, c2.Listeners[0].Routes, 1, "routes len") + assert.Equal(t, "echo-server-cluster", c2.Listeners[0].Routes[0], "route name") + assert.Len(t, 
c2.Clusters, 1, "clusters len") + assert.Equal(t, "echo-server-cluster", c2.Clusters[0].Name, "cluster name") + assert.Equal(t, "STATIC", c2.Clusters[0].Type, "cluster proto") + assert.Len(t, c2.Clusters[0].Endpoints, 1, "endpoints len") + assert.Equal(t, "1.2.3.5", c2.Clusters[0].Endpoints[0], "cluster port") + + err = f.Truncate(0) + assert.NoError(t, err, "truncate temp file") + _, err = f.Seek(0, 0) + assert.NoError(t, err, "seek temp file") + _, err = f.WriteString(testConfigV1A1) + assert.NoError(t, err, "write config to temp file") + + c2, ok = <-conf + assert.True(t, ok, "config emitted") + + assert.Equal(t, stnrv1.ApiVersion, c2.ApiVersion, "version") + assert.Equal(t, "all:ERROR", c2.Admin.LogLevel, "loglevel") + assert.True(t, c2.Auth.Type == "static" || c2.Auth.Type == "ephemeral", "loglevel") + assert.Len(t, c2.Listeners, 1, "listeners len") + assert.Equal(t, "udp", c2.Listeners[0].Name, "listener name") + assert.Equal(t, "TURN-UDP", c2.Listeners[0].Protocol, "listener proto") + assert.Equal(t, 3478, c2.Listeners[0].Port, "listener port") + assert.Len(t, c2.Listeners[0].Routes, 1, "routes len") + assert.Equal(t, "echo-server-cluster", c2.Listeners[0].Routes[0], "route name") + assert.Len(t, c2.Clusters, 1, "clusters len") + assert.Equal(t, "echo-server-cluster", c2.Clusters[0].Name, "cluster name") + assert.Equal(t, "STATIC", c2.Clusters[0].Type, "cluster proto") + assert.Len(t, c2.Clusters[0].Endpoints, 1, "endpoints len") + assert.Equal(t, "1.2.3.5", c2.Clusters[0].Endpoints[0], "cluster port") + + stunner.Close() +} + +func TestStunnerConfigPollerMultiVersion(t *testing.T) { + lim := test.TimeOut(time.Second * 10) + defer lim.Stop() + + loggerFactory := logger.NewLoggerFactory(stunnerTestLoglevel) + log := loggerFactory.NewLogger("test-poller") + + testName := "TestStunnerConfigPoller" + log.Debugf("-------------- Running test: %s -------------", testName) + + log.Debug("creating a mock CDS server") + addr := "localhost:63479" + origin := "ws://" + addr + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := &http.Server{Addr: addr} + defer s.Close() + + http.HandleFunc("/api/v1/configs/ns1/tester", + func(w http.ResponseWriter, req *http.Request) { + upgrader := websocket.Upgrader{ + ReadBufferSize: 1024, + WriteBufferSize: 1024, + } + + conn, err := upgrader.Upgrade(w, req, nil) + assert.NoError(t, err, "upgrade HTTP connection") + defer func() { _ = conn.Close() }() + + // for the pong handler: conn.Close() will kill this + go func() { + for { + _, _, err := conn.ReadMessage() + if err != nil { + return + } + } + }() + + conn.SetPingHandler(func(string) error { + return conn.WriteMessage(websocket.PongMessage, []byte("keepalive")) + }) + + // send v1config + assert.NoError(t, conn.WriteMessage(websocket.TextMessage, []byte(testConfigV1)), "write config v1") + + // send v1config + assert.NoError(t, conn.WriteMessage(websocket.TextMessage, []byte(testConfigV1A1)), "write config v1alpha1") + + select { + case <-ctx.Done(): + case <-req.Context().Done(): + } + + conn.Close() + }) + + // serve + go func() { + _ = s.ListenAndServe() + }() + + // wait a bit so that the server has time to setup time.Sleep(50 * time.Millisecond) - // read back result - c4 := <-conf - checkDefaultConfig(t, &c4, "TCP") + log.Debug("creating a stunnerd") + stunner := NewStunner(Options{LogLevel: stunnerTestLoglevel, Name: "ns1/tester"}) + + log.Debug("starting watcher") + conf := make(chan *stnrv1.StunnerConfig, 1) + defer close(conf) + + log.Debug("init config poller") 
+ assert.NoError(t, stunner.WatchConfig(ctx, origin, conf, true), "creating config poller") + + c2, ok := <-conf + assert.True(t, ok, "config emitted") + + assert.Equal(t, stnrv1.ApiVersion, c2.ApiVersion, "version") + assert.Equal(t, "all:ERROR", c2.Admin.LogLevel, "loglevel") + assert.True(t, c2.Auth.Type == "static" || c2.Auth.Type == "ephemeral", "loglevel") + assert.Len(t, c2.Listeners, 1, "listeners len") + assert.Equal(t, "udp", c2.Listeners[0].Name, "listener name") + assert.Equal(t, "TURN-UDP", c2.Listeners[0].Protocol, "listener proto") + assert.Equal(t, 3478, c2.Listeners[0].Port, "listener port") + assert.Len(t, c2.Listeners[0].Routes, 1, "routes len") + assert.Equal(t, "echo-server-cluster", c2.Listeners[0].Routes[0], "route name") + assert.Len(t, c2.Clusters, 1, "clusters len") + assert.Equal(t, "echo-server-cluster", c2.Clusters[0].Name, "cluster name") + assert.Equal(t, "STATIC", c2.Clusters[0].Type, "cluster proto") + assert.Len(t, c2.Clusters[0].Endpoints, 1, "endpoints len") + assert.Equal(t, "1.2.3.5", c2.Clusters[0].Endpoints[0], "cluster port") + + // next read yields a v1alpha1 config + c2, ok = <-conf + assert.True(t, ok, "config emitted") + + assert.Equal(t, stnrv1.ApiVersion, c2.ApiVersion, "version") + assert.Equal(t, "all:ERROR", c2.Admin.LogLevel, "loglevel") + assert.True(t, c2.Auth.Type == "static" || c2.Auth.Type == "ephemeral", "loglevel") + assert.Len(t, c2.Listeners, 1, "listeners len") + assert.Equal(t, "udp", c2.Listeners[0].Name, "listener name") + assert.Equal(t, "TURN-UDP", c2.Listeners[0].Protocol, "listener proto") + assert.Equal(t, 3478, c2.Listeners[0].Port, "listener port") + assert.Len(t, c2.Listeners[0].Routes, 1, "routes len") + assert.Equal(t, "echo-server-cluster", c2.Listeners[0].Routes[0], "route name") + assert.Len(t, c2.Clusters, 1, "clusters len") + assert.Equal(t, "echo-server-cluster", c2.Clusters[0].Name, "cluster name") + assert.Equal(t, "STATIC", c2.Clusters[0].Type, "cluster proto") + assert.Len(t, c2.Clusters[0].Endpoints, 1, "endpoints len") + assert.Equal(t, "1.2.3.5", c2.Clusters[0].Endpoints[0], "cluster port") stunner.Close() } -func checkDefaultConfig(t *testing.T, c *v1alpha1.StunnerConfig, proto string) { - assert.Equal(t, "plaintext", c.Auth.Type, "auth-type") +func TestStunnerURIParser(t *testing.T) { + lim := test.TimeOut(time.Second * 30) + defer lim.Stop() + + report := test.CheckRoutines(t) + defer report() + + // loggerFactory := logger.NewLoggerFactory("all:TRACE") + loggerFactory := logger.NewLoggerFactory(stunnerTestLoglevel) + log := loggerFactory.NewLogger("test") + + for _, conf := range []struct { + uri string + su StunnerUri + }{ + // udp + {"turn://user1:passwd1@1.2.3.4:3478?transport=udp", StunnerUri{"turn-udp", "1.2.3.4", "user1", "passwd1", 3478, nil}}, + {"turn://user1:passwd1@1.2.3.4?transport=udp", StunnerUri{"turn-udp", "1.2.3.4", "user1", "passwd1", 3478, nil}}, + {"turn://user1:passwd1@1.2.3.4:3478", StunnerUri{"turn-udp", "1.2.3.4", "user1", "passwd1", 3478, nil}}, + // tcp + {"turn://user1:passwd1@1.2.3.4:3478?transport=tcp", StunnerUri{"turn-tcp", "1.2.3.4", "user1", "passwd1", 3478, nil}}, + {"turn://user1:passwd1@1.2.3.4?transport=tcp", StunnerUri{"turn-tcp", "1.2.3.4", "user1", "passwd1", 3478, nil}}, + // tls - old style + {"turn://user1:passwd1@1.2.3.4:3478?transport=tls", StunnerUri{"turn-tls", "1.2.3.4", "user1", "passwd1", 3478, nil}}, + {"turn://user1:passwd1@1.2.3.4?transport=tls", StunnerUri{"turn-tls", "1.2.3.4", "user1", "passwd1", 443, nil}}, + // tls - RFC style + 
{"turns://user1:passwd1@1.2.3.4:3478?transport=tcp", StunnerUri{"turn-tls", "1.2.3.4", "user1", "passwd1", 3478, nil}}, + {"turns://user1:passwd1@1.2.3.4?transport=tcp", StunnerUri{"turn-tls", "1.2.3.4", "user1", "passwd1", 443, nil}}, + // dtls - old style + {"turn://user1:passwd1@1.2.3.4:3478?transport=dtls", StunnerUri{"turn-dtls", "1.2.3.4", "user1", "passwd1", 3478, nil}}, + {"turn://user1:passwd1@1.2.3.4?transport=dtls", StunnerUri{"turn-dtls", "1.2.3.4", "user1", "passwd1", 443, nil}}, + // dtls - RFC style + {"turns://user1:passwd1@1.2.3.4:3478?transport=udp", StunnerUri{"turn-dtls", "1.2.3.4", "user1", "passwd1", 3478, nil}}, + {"turns://user1:passwd1@1.2.3.4?transport=udp", StunnerUri{"turn-dtls", "1.2.3.4", "user1", "passwd1", 443, nil}}, + // no cred + {"turn://1.2.3.4:3478?transport=udp", StunnerUri{"turn-udp", "1.2.3.4", "", "", 3478, nil}}, + {"turn://1.2.3.4?transport=udp", StunnerUri{"turn-udp", "1.2.3.4", "", "", 3478, nil}}, + {"turn://1.2.3.4", StunnerUri{"turn-udp", "1.2.3.4", "", "", 3478, nil}}, + } { + testName := fmt.Sprintf("TestStunnerURIParser:%s", conf.uri) + t.Run(testName, func(t *testing.T) { + log.Debugf("-------------- Running test: %s -------------", testName) + u, err := ParseUri(conf.uri) + assert.NoError(t, err, "URI parser") + assert.Equal(t, strings.ToLower(conf.su.Protocol), strings.ToLower(u.Protocol), "uri protocol") + assert.Equal(t, conf.su.Address, u.Address, "uri address") + assert.Equal(t, conf.su.Username, u.Username, "uri username") + assert.Equal(t, conf.su.Password, u.Password, "uri password") + assert.Equal(t, conf.su.Port, u.Port, "uri port") + }) + } +} + +// make sure credentials are excempt from env-substitution in ParseConfig +func TestCredentialParser(t *testing.T) { + lim := test.TimeOut(time.Second * 30) + defer lim.Stop() + + report := test.CheckRoutines(t) + defer report() + + loggerFactory := logger.NewLoggerFactory(stunnerTestLoglevel) + log := loggerFactory.NewLogger("test") + + for _, testConf := range []struct { + name string + config []byte + user, pass, secret string + }{ + {"plain", []byte(`{"version":"v1","admin":{"name":"ns1/tester"},"auth":{"type":"static","credentials":{"password":"pass","username":"user"}}}`), "user", "pass", ""}, + // user name with $ + {"username_with_leading_$", []byte(`{"version":"v1","admin":{"name":"ns1/tester"},"auth":{"type":"static","credentials":{"password":"pass","username":"$user"}}}`), "$user", "pass", ""}, + {"username_with_trailing_$", []byte(`{"version":"v1","admin":{"name":"ns1/tester"},"auth":{"type":"static","credentials":{"password":"pass","username":"user$"}}}`), "user$", "pass", ""}, + {"username_with_$", []byte(`{"version":"v1","admin":{"name":"ns1/tester"},"auth":{"type":"static","credentials":{"password":"pass","username":"us$er"}}}`), "us$er", "pass", ""}, + // passwd with $ + {"passwd_with_leading_$", []byte(`{"version":"v1","admin":{"name":"ns1/tester"},"auth":{"type":"static","credentials":{"password":"$pass","username":"user"}}}`), "user", "$pass", ""}, + {"passwd_with_trailing_$", []byte(`{"version":"v1","admin":{"name":"ns1/tester"},"auth":{"type":"static","credentials":{"password":"pass$","username":"user"}}}`), "user", "pass$", ""}, + {"passwd_with_$", []byte(`{"version":"v1","admin":{"name":"ns1/tester"},"auth":{"type":"static","credentials":{"password":"pa$ss","username":"user"}}}`), "user", "pa$ss", ""}, + // secret with $ + {"secret_with_leading_$", 
[]byte(`{"version":"v1","admin":{"name":"ns1/tester"},"auth":{"type":"static","credentials":{"secret":"$secret","username":"user"}}}`), "user", "", "$secret"}, + {"secret_with_trailing_$", []byte(`{"version":"v1","admin":{"name":"ns1/tester"},"auth":{"type":"static","credentials":{"secret":"secret$","username":"user"}}}`), "user", "", "secret$"}, + {"secret_with_$", []byte(`{"version":"v1","admin":{"name":"ns1/tester"},"auth":{"type":"static","credentials":{"secret":"sec$ret","username":"user"}}}`), "user", "", "sec$ret"}, + } { + testName := fmt.Sprintf("TestCredentialParser:%s", testConf.name) + t.Run(testName, func(t *testing.T) { + log.Debugf("-------------- Running test: %s -------------", testName) + c, err := cdsclient.ParseConfig(testConf.config) + assert.NoError(t, err, "parser") + assert.Equal(t, testConf.user, c.Auth.Credentials["username"], "username") + assert.Equal(t, testConf.pass, c.Auth.Credentials["password"], "password") + assert.Equal(t, testConf.secret, c.Auth.Credentials["secret"], "secret") + }) + } +} + +func checkDefaultConfig(t *testing.T, c *stnrv1.StunnerConfig, proto string) { + assert.Equal(t, "static", c.Auth.Type, "auth-type") assert.Equal(t, "user1", c.Auth.Credentials["username"], "username") assert.Equal(t, "passwd1", c.Auth.Credentials["password"], "passwd") assert.Len(t, c.Listeners, 1, "listeners len") diff --git a/deploy/manifests/default-dataplane.yaml b/deploy/manifests/default-dataplane.yaml new file mode 100644 index 00000000..a3fcfb52 --- /dev/null +++ b/deploy/manifests/default-dataplane.yaml @@ -0,0 +1,22 @@ +apiVersion: stunner.l7mp.io/v1 +kind: Dataplane +metadata: + name: default +spec: + replicas: 1 + image: l7mp/stunnerd:dev + imagePullPolicy: Always + command: + - "stunnerd" + args: + - "-w" + - "--udp-thread-num=1" + hostNetwork: false + resources: + limits: + cpu: 250m + memory: 120Mi + requests: + cpu: 250m + memory: 120Mi + terminationGracePeriodSeconds: 3600 diff --git a/deploy/manifests/static/dataplane.yaml b/deploy/manifests/static/dataplane.yaml new file mode 100644 index 00000000..4d59c0f9 --- /dev/null +++ b/deploy/manifests/static/dataplane.yaml @@ -0,0 +1,23 @@ +--- +apiVersion: stunner.l7mp.io/v1 +kind: Dataplane +metadata: + name: default +spec: + replicas: 1 + image: l7mp/stunnerd:latest + imagePullPolicy: IfNotPresent + command: + - stunnerd + args: + - -w + - --udp-thread-num=16 + resources: + limits: + cpu: 2 + memory: 512Mi + requests: + cpu: 500m + memory: 128Mi + terminationGracePeriodSeconds: 3600 + hostNetwork: false diff --git a/deploy/manifests/static/gateway-api-crd.yaml b/deploy/manifests/static/gateway-api-crd.yaml index 60e9d1c6..9a8c9e04 100644 --- a/deploy/manifests/static/gateway-api-crd.yaml +++ b/deploy/manifests/static/gateway-api-crd.yaml @@ -3,8 +3,8 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/1538 - gateway.networking.k8s.io/bundle-version: v0.6.2 + api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/2466 + gateway.networking.k8s.io/bundle-version: v1.0.0 gateway.networking.k8s.io/channel: experimental creationTimestamp: null name: gatewayclasses.gateway.networking.k8s.io @@ -35,10 +35,7 @@ spec: name: Description priority: 1 type: string - # deprecated: true - # deprecationWarning: The v1alpha2 version of GatewayClass has been deprecated and - # will be removed in a future release of the API. Please upgrade to v1beta1. 
- name: v1alpha2 + name: v1 schema: openAPIV3Schema: description: "GatewayClass describes a class of Gateways available to the @@ -50,7 +47,7 @@ spec: to GatewayClass or associated parameters. If implementations choose to propagate GatewayClass changes to existing Gateways, that MUST be clearly documented by the implementation. \n Whenever one or more Gateways are using a GatewayClass, - implementations MUST add the `gateway-exists-finalizer.gateway.networking.k8s.io` + implementations SHOULD add the `gateway-exists-finalizer.gateway.networking.k8s.io` finalizer on the associated GatewayClass. This ensures that a GatewayClass associated with a Gateway is not deleted while in use. \n GatewayClass is a Cluster level resource." @@ -79,6 +76,9 @@ spec: minLength: 1 pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9\/\-._~%!$&'()*+,;=:]+$ type: string + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf description: description: Description helps describe a GatewayClass with more details. maxLength: 64 @@ -133,7 +133,9 @@ spec: reason: Waiting status: Unknown type: Accepted - description: Status defines the current state of GatewayClass. + description: "Status defines the current state of GatewayClass. \n Implementations + MUST populate status on all GatewayClass resources which specify their + controller name." properties: conditions: default: @@ -149,14 +151,12 @@ spec: description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, - \n \ttype FooStatus struct{ \t // Represents the observations - of a foo's current state. \t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\" \t // - +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map - \t // +listMapKey=type \t Conditions []metav1.Condition + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields - \t}" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" properties: lastTransitionTime: description: lastTransitionTime is the last time the condition @@ -218,6 +218,35 @@ spec: x-kubernetes-list-map-keys: - type x-kubernetes-list-type: map + supportedFeatures: + description: 'SupportedFeatures is the set of features the GatewayClass + support. It MUST be sorted in ascending alphabetical order. ' + items: + description: SupportedFeature is used to describe distinct features + that are covered by conformance tests. 
+ enum: + - Gateway + - GatewayPort8080 + - GatewayStaticAddresses + - HTTPRoute + - HTTPRouteDestinationPortMatching + - HTTPRouteHostRewrite + - HTTPRouteMethodMatching + - HTTPRoutePathRedirect + - HTTPRoutePathRewrite + - HTTPRoutePortRedirect + - HTTPRouteQueryParamMatching + - HTTPRouteRequestMirror + - HTTPRouteRequestMultipleMirrors + - HTTPRouteResponseHeaderModification + - HTTPRouteSchemeRedirect + - Mesh + - ReferenceGrant + - TLSRoute + type: string + maxItems: 64 + type: array + x-kubernetes-list-type: set type: object required: - spec @@ -252,7 +281,7 @@ spec: to GatewayClass or associated parameters. If implementations choose to propagate GatewayClass changes to existing Gateways, that MUST be clearly documented by the implementation. \n Whenever one or more Gateways are using a GatewayClass, - implementations MUST add the `gateway-exists-finalizer.gateway.networking.k8s.io` + implementations SHOULD add the `gateway-exists-finalizer.gateway.networking.k8s.io` finalizer on the associated GatewayClass. This ensures that a GatewayClass associated with a Gateway is not deleted while in use. \n GatewayClass is a Cluster level resource." @@ -281,6 +310,9 @@ spec: minLength: 1 pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9\/\-._~%!$&'()*+,;=:]+$ type: string + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf description: description: Description helps describe a GatewayClass with more details. maxLength: 64 @@ -335,7 +367,9 @@ spec: reason: Waiting status: Unknown type: Accepted - description: Status defines the current state of GatewayClass. + description: "Status defines the current state of GatewayClass. \n Implementations + MUST populate status on all GatewayClass resources which specify their + controller name." properties: conditions: default: @@ -351,14 +385,12 @@ spec: description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, - \n \ttype FooStatus struct{ \t // Represents the observations - of a foo's current state. \t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\" \t // - +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map - \t // +listMapKey=type \t Conditions []metav1.Condition + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields - \t}" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" properties: lastTransitionTime: description: lastTransitionTime is the last time the condition @@ -420,6 +452,35 @@ spec: x-kubernetes-list-map-keys: - type x-kubernetes-list-type: map + supportedFeatures: + description: 'SupportedFeatures is the set of features the GatewayClass + support. It MUST be sorted in ascending alphabetical order. ' + items: + description: SupportedFeature is used to describe distinct features + that are covered by conformance tests. 
+ enum: + - Gateway + - GatewayPort8080 + - GatewayStaticAddresses + - HTTPRoute + - HTTPRouteDestinationPortMatching + - HTTPRouteHostRewrite + - HTTPRouteMethodMatching + - HTTPRoutePathRedirect + - HTTPRoutePathRewrite + - HTTPRoutePortRedirect + - HTTPRouteQueryParamMatching + - HTTPRouteRequestMirror + - HTTPRouteRequestMultipleMirrors + - HTTPRouteResponseHeaderModification + - HTTPRouteSchemeRedirect + - Mesh + - ReferenceGrant + - TLSRoute + type: string + maxItems: 64 + type: array + x-kubernetes-list-type: set type: object required: - spec @@ -432,15 +493,15 @@ status: acceptedNames: kind: "" plural: "" - conditions: [] - storedVersions: [] + conditions: null + storedVersions: null --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/1538 - gateway.networking.k8s.io/bundle-version: v0.6.2 + api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/2466 + gateway.networking.k8s.io/bundle-version: v1.0.0 gateway.networking.k8s.io/channel: experimental creationTimestamp: null name: gateways.gateway.networking.k8s.io @@ -470,10 +531,7 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date - # deprecated: true - # deprecationWarning: The v1alpha2 version of Gateway has been deprecated and will - # be removed in a future release of the API. Please upgrade to v1beta1. - name: v1alpha2 + name: v1 schema: openAPIV3Schema: description: Gateway represents an instance of a service-traffic handling @@ -503,17 +561,29 @@ spec: for the address(es) on the \"outside of the Gateway\", that traffic bound for this Gateway will use. This could be the IP address or hostname of an external load balancer or other networking infrastructure, - or some other address that traffic will be sent to. \n The .listener.hostname - field is used to route traffic that has already arrived at the Gateway - to the correct in-cluster destination. \n If no Addresses are specified, - the implementation MAY schedule the Gateway in an implementation-specific - manner, assigning an appropriate set of Addresses. \n The implementation - MUST bind all Listeners to every GatewayAddress that it assigns - to the Gateway and add a corresponding entry in GatewayStatus.Addresses. - \n Support: Extended" + or some other address that traffic will be sent to. \n If no Addresses + are specified, the implementation MAY schedule the Gateway in an + implementation-specific manner, assigning an appropriate set of + Addresses. \n The implementation MUST bind all Listeners to every + GatewayAddress that it assigns to the Gateway and add a corresponding + entry in GatewayStatus.Addresses. \n Support: Extended \n " items: description: GatewayAddress describes an address that can be bound to a Gateway. + oneOf: + - properties: + type: + enum: + - IPAddress + value: + anyOf: + - format: ipv4 + - format: ipv6 + - properties: + type: + not: + enum: + - IPAddress properties: type: default: IPAddress @@ -532,40 +602,154 @@ spec: required: - value type: object + x-kubernetes-validations: + - message: Hostname value must only contain valid characters (matching + ^(\*\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$) + rule: 'self.type == ''Hostname'' ? 
self.value.matches(r"""^(\*\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$"""): + true' maxItems: 16 type: array + x-kubernetes-validations: + - message: IPAddress values must be unique + rule: 'self.all(a1, a1.type == ''IPAddress'' ? self.exists_one(a2, + a2.type == a1.type && a2.value == a1.value) : true )' + - message: Hostname values must be unique + rule: 'self.all(a1, a1.type == ''Hostname'' ? self.exists_one(a2, + a2.type == a1.type && a2.value == a1.value) : true )' gatewayClassName: description: GatewayClassName used for this Gateway. This is the name of a GatewayClass resource. maxLength: 253 minLength: 1 type: string + infrastructure: + description: "Infrastructure defines infrastructure level attributes + about this Gateway instance. \n Support: Core \n " + properties: + annotations: + additionalProperties: + description: AnnotationValue is the value of an annotation in + Gateway API. This is used for validation of maps such as TLS + options. This roughly matches Kubernetes annotation validation, + although the length validation in that case is based on the + entire size of the annotations struct. + maxLength: 4096 + minLength: 0 + type: string + description: "Annotations that SHOULD be applied to any resources + created in response to this Gateway. \n For implementations + creating other Kubernetes objects, this should be the `metadata.annotations` + field on resources. For other implementations, this refers to + any relevant (implementation specific) \"annotations\" concepts. + \n An implementation may chose to add additional implementation-specific + annotations as they see fit. \n Support: Extended" + maxProperties: 8 + type: object + labels: + additionalProperties: + description: AnnotationValue is the value of an annotation in + Gateway API. This is used for validation of maps such as TLS + options. This roughly matches Kubernetes annotation validation, + although the length validation in that case is based on the + entire size of the annotations struct. + maxLength: 4096 + minLength: 0 + type: string + description: "Labels that SHOULD be applied to any resources created + in response to this Gateway. \n For implementations creating + other Kubernetes objects, this should be the `metadata.labels` + field on resources. For other implementations, this refers to + any relevant (implementation specific) \"labels\" concepts. + \n An implementation may chose to add additional implementation-specific + labels as they see fit. \n Support: Extended" + maxProperties: 8 + type: object + type: object listeners: description: "Listeners associated with this Gateway. Listeners define logical endpoints that are bound on this Gateway's addresses. At - least one Listener MUST be specified. \n Each listener in a Gateway - must have a unique combination of Hostname, Port, and Protocol. - \n An implementation MAY group Listeners by Port and then collapse - each group of Listeners into a single Listener if the implementation - determines that the Listeners in the group are \"compatible\". An - implementation MAY also group together and collapse compatible Listeners - belonging to different Gateways. \n For example, an implementation - might consider Listeners to be compatible with each other if all - of the following conditions are met: \n 1. Either each Listener - within the group specifies the \"HTTP\" Protocol or each Listener - within the group specifies either the \"HTTPS\" or \"TLS\" Protocol. - \n 2. 
Each Listener within the group specifies a Hostname that is - unique within the group. \n 3. As a special case, one Listener - within a group may omit Hostname, in which case this Listener - matches when no other Listener matches. \n If the implementation - does collapse compatible Listeners, the hostname provided in the - incoming client request MUST be matched to a Listener to find the - correct set of Routes. The incoming hostname MUST be matched using - the Hostname field for each Listener in order of most to least specific. - That is, exact matches must be processed before wildcard matches. - \n If this field specifies multiple Listeners that have the same - Port value but are not compatible, the implementation must raise - a \"Conflicted\" condition in the Listener status. \n Support: Core" + least one Listener MUST be specified. \n Each Listener in a set + of Listeners (for example, in a single Gateway) MUST be _distinct_, + in that a traffic flow MUST be able to be assigned to exactly one + listener. (This section uses \"set of Listeners\" rather than \"Listeners + in a single Gateway\" because implementations MAY merge configuration + from multiple Gateways onto a single data plane, and these rules + _also_ apply in that case). \n Practically, this means that each + listener in a set MUST have a unique combination of Port, Protocol, + and, if supported by the protocol, Hostname. \n Some combinations + of port, protocol, and TLS settings are considered Core support + and MUST be supported by implementations based on their targeted + conformance profile: \n HTTP Profile \n 1. HTTPRoute, Port: 80, + Protocol: HTTP 2. HTTPRoute, Port: 443, Protocol: HTTPS, TLS Mode: + Terminate, TLS keypair provided \n TLS Profile \n 1. TLSRoute, Port: + 443, Protocol: TLS, TLS Mode: Passthrough \n \"Distinct\" Listeners + have the following property: \n The implementation can match inbound + requests to a single distinct Listener. When multiple Listeners + share values for fields (for example, two Listeners with the same + Port value), the implementation can match requests to only one of + the Listeners using other Listener fields. \n For example, the following + Listener scenarios are distinct: \n 1. Multiple Listeners with the + same Port that all use the \"HTTP\" Protocol that all have unique + Hostname values. 2. Multiple Listeners with the same Port that use + either the \"HTTPS\" or \"TLS\" Protocol that all have unique Hostname + values. 3. A mixture of \"TCP\" and \"UDP\" Protocol Listeners, + where no Listener with the same Protocol has the same Port value. + \n Some fields in the Listener struct have possible values that + affect whether the Listener is distinct. Hostname is particularly + relevant for HTTP or HTTPS protocols. \n When using the Hostname + value to select between same-Port, same-Protocol Listeners, the + Hostname value must be different on each Listener for the Listener + to be distinct. \n When the Listeners are distinct based on Hostname, + inbound request hostnames MUST match from the most specific to least + specific Hostname values to choose the correct Listener and its + associated set of Routes. \n Exact matches must be processed before + wildcard matches, and wildcard matches must be processed before + fallback (empty Hostname value) matches. For example, `\"foo.example.com\"` + takes precedence over `\"*.example.com\"`, and `\"*.example.com\"` + takes precedence over `\"\"`. 
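# Illustrative sketch (not part of the upstream CRD): a minimal Gateway, assuming a
# hypothetical GatewayClass "example-class", with two HTTPS listeners that share port
# 443 yet stay "distinct" because their Hostname values differ; the exact hostname
# shop.example.com takes precedence over the wildcard *.example.com when matching
# requests, as described above. All names are placeholders.
# apiVersion: gateway.networking.k8s.io/v1
# kind: Gateway
# metadata:
#   name: example-gateway
# spec:
#   gatewayClassName: example-class
#   listeners:
#   - name: shop
#     protocol: HTTPS
#     port: 443
#     hostname: shop.example.com
#     tls:
#       mode: Terminate
#       certificateRefs:
#       - name: shop-cert          # Secret holding the TLS keypair
#   - name: wildcard
#     protocol: HTTPS
#     port: 443
#     hostname: "*.example.com"
#     tls:
#       mode: Terminate
#       certificateRefs:
#       - name: wildcard-cert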
\n Additionally, if there are multiple + wildcard entries, more specific wildcard entries must be processed + before less specific wildcard entries. For example, `\"*.foo.example.com\"` + takes precedence over `\"*.example.com\"`. The precise definition + here is that the higher the number of dots in the hostname to the + right of the wildcard character, the higher the precedence. \n The + wildcard character will match any number of characters _and dots_ + to the left, however, so `\"*.example.com\"` will match both `\"foo.bar.example.com\"` + _and_ `\"bar.example.com\"`. \n If a set of Listeners contains Listeners + that are not distinct, then those Listeners are Conflicted, and + the implementation MUST set the \"Conflicted\" condition in the + Listener Status to \"True\". \n Implementations MAY choose to accept + a Gateway with some Conflicted Listeners only if they only accept + the partial Listener set that contains no Conflicted Listeners. + To put this another way, implementations may accept a partial Listener + set only if they throw out *all* the conflicting Listeners. No picking + one of the conflicting listeners as the winner. This also means + that the Gateway must have at least one non-conflicting Listener + in this case, otherwise it violates the requirement that at least + one Listener must be present. \n The implementation MUST set a \"ListenersNotValid\" + condition on the Gateway Status when the Gateway contains Conflicted + Listeners whether or not they accept the Gateway. That Condition + SHOULD clearly indicate in the Message which Listeners are conflicted, + and which are Accepted. Additionally, the Listener status for those + listeners SHOULD indicate which Listeners are conflicted and not + Accepted. \n A Gateway's Listeners are considered \"compatible\" + if: \n 1. They are distinct. 2. The implementation can serve them + in compliance with the Addresses requirement that all Listeners + are available on all assigned addresses. \n Compatible combinations + in Extended support are expected to vary across implementations. + A combination that is compatible for one implementation may not + be compatible for another. \n For example, an implementation that + cannot serve both TCP and UDP listeners on the same address, or + cannot mix HTTPS and generic TLS listens on the same port would + not consider those cases compatible, even though they are distinct. + \n Note that requests SHOULD match at most one Listener. For example, + if Listeners are defined for \"foo.example.com\" and \"*.example.com\", + a request to \"foo.example.com\" SHOULD only be routed using routes + attached to the \"foo.example.com\" Listener (and not the \"*.example.com\" + Listener). This concept is known as \"Listener Isolation\". Implementations + that do not support Listener Isolation MUST clearly document this. + \n Implementations MAY merge separate Gateways onto a single set + of Addresses if all Listeners across all Gateways are compatible. + \n Support: Core" items: description: Listener embodies the concept of a logical endpoint where a Gateway accepts network connections. @@ -582,19 +766,18 @@ spec: determined in order of the following criteria: \n * The most specific match as defined by the Route type. * The oldest Route based on creation timestamp. For example, a Route with - \ a creation timestamp of \"2020-09-08 01:02:03\" is given - precedence over a Route with a creation timestamp of \"2020-09-08 - 01:02:04\". 
* If everything else is equivalent, the Route - appearing first in alphabetical order (namespace/name) should - be given precedence. For example, foo/bar is given precedence - over foo/baz. \n All valid rules within a Route attached to - this Listener should be implemented. Invalid Route rules can - be ignored (sometimes that will mean the full Route). If a - Route rule transitions from valid to invalid, support for - that Route rule should be dropped to ensure consistency. For - example, even if a filter specified by a Route rule is invalid, - the rest of the rules within that Route should still be supported. - \n Support: Core" + a creation timestamp of \"2020-09-08 01:02:03\" is given precedence + over a Route with a creation timestamp of \"2020-09-08 01:02:04\". + * If everything else is equivalent, the Route appearing first + in alphabetical order (namespace/name) should be given precedence. + For example, foo/bar is given precedence over foo/baz. \n + All valid rules within a Route attached to this Listener should + be implemented. Invalid Route rules can be ignored (sometimes + that will mean the full Route). If a Route rule transitions + from valid to invalid, support for that Route rule should + be dropped to ensure consistency. For example, even if a filter + specified by a Route rule is invalid, the rest of the rules + within that Route should still be supported. \n Support: Core" properties: kinds: description: "Kinds specifies the groups and kinds of Routes @@ -639,12 +822,12 @@ spec: from: default: Same description: "From indicates where Routes will be selected - for this Gateway. Possible values are: * All: Routes + for this Gateway. Possible values are: \n * All: Routes in all namespaces may be used by this Gateway. * Selector: Routes in namespaces selected by the selector may - be used by this Gateway. * Same: Only Routes in - the same namespace may be used by this Gateway. \n - Support: Core" + be used by this Gateway. * Same: Only Routes in the + same namespace may be used by this Gateway. \n Support: + Core" enum: - All - Selector @@ -700,6 +883,7 @@ spec: requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic type: object type: object hostname: @@ -711,19 +895,18 @@ spec: following protocols: \n * TLS: The Listener Hostname MUST match the SNI. * HTTP: The Listener Hostname MUST match the Host header of the request. * HTTPS: The Listener Hostname - SHOULD match at both the TLS and HTTP protocol layers as - described above. If an implementation does not ensure that - both the SNI and Host header match the Listener hostname, - \ it MUST clearly document that. \n For HTTPRoute and TLSRoute - resources, there is an interaction with the `spec.hostnames` - array. When both listener and route specify hostnames, there - MUST be an intersection between the values for a Route to - be accepted. For more information, refer to the Route specific - Hostnames documentation. \n Hostnames that are prefixed with - a wildcard label (`*.`) are interpreted as a suffix match. - That means that a match for `*.example.com` would match both - `test.example.com`, and `foo.test.example.com`, but not `example.com`. - \n Support: Core" + SHOULD match at both the TLS and HTTP protocol layers as described + above. If an implementation does not ensure that both the + SNI and Host header match the Listener hostname, it MUST clearly + document that. \n For HTTPRoute and TLSRoute resources, there + is an interaction with the `spec.hostnames` array. 
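# Illustrative sketch (not part of the upstream CRD): the allowedRoutes.namespaces
# field described above, set to from: Selector so that only Routes in namespaces
# carrying a hypothetical "shared-gateway: true" label may attach; from: Same (the
# default) and from: All are the other two options.
#   listeners:
#   - name: web
#     protocol: HTTP
#     port: 80
#     allowedRoutes:
#       namespaces:
#         from: Selector
#         selector:
#           matchLabels:
#             shared-gateway: "true"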
When both + listener and route specify hostnames, there MUST be an intersection + between the values for a Route to be accepted. For more information, + refer to the Route specific Hostnames documentation. \n Hostnames + that are prefixed with a wildcard label (`*.`) are interpreted + as a suffix match. That means that a match for `*.example.com` + would match both `test.example.com`, and `foo.test.example.com`, + but not `example.com`. \n Support: Core" maxLength: 253 minLength: 1 pattern: ^(\*\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ @@ -804,7 +987,7 @@ spec: kind: default: Secret description: Kind is kind of the referent. For example - "HTTPRoute" or "Service". + "Secret". maxLength: 63 minLength: 1 pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ @@ -815,9 +998,10 @@ spec: minLength: 1 type: string namespace: - description: "Namespace is the namespace of the backend. - When unspecified, the local namespace is inferred. - \n Note that when a namespace is specified, a ReferenceGrant + description: "Namespace is the namespace of the referenced + object. When unspecified, the local namespace is + inferred. \n Note that when a namespace different + than the local namespace is specified, a ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. @@ -836,13 +1020,13 @@ spec: description: "Mode defines the TLS behavior for the TLS session initiated by the client. There are two possible modes: \n - Terminate: The TLS session between the downstream - client and the Gateway is terminated at the Gateway. - This mode requires certificateRefs to be set and contain - at least one element. - Passthrough: The TLS session is - NOT terminated by the Gateway. This implies that the - Gateway can't decipher the TLS stream except for the - ClientHello message of the TLS protocol. CertificateRefs - field is ignored in this mode. \n Support: Core" + client and the Gateway is terminated at the Gateway. This + mode requires certificateRefs to be set and contain at + least one element. - Passthrough: The TLS session is NOT + terminated by the Gateway. This implies that the Gateway + can't decipher the TLS stream except for the ClientHello + message of the TLS protocol. CertificateRefs field is + ignored in this mode. \n Support: Core" enum: - Terminate - Passthrough @@ -869,6 +1053,11 @@ spec: maxProperties: 16 type: object type: object + x-kubernetes-validations: + - message: certificateRefs must be specified when TLSModeType + is Terminate + rule: 'self.mode == ''Terminate'' ? size(self.certificateRefs) + > 0 : true' required: - name - port @@ -880,6 +1069,24 @@ spec: x-kubernetes-list-map-keys: - name x-kubernetes-list-type: map + x-kubernetes-validations: + - message: tls must be specified for protocols ['HTTPS', 'TLS'] + rule: 'self.all(l, l.protocol in [''HTTPS'', ''TLS''] ? has(l.tls) + : true)' + - message: tls must not be specified for protocols ['HTTP', 'TCP', + 'UDP'] + rule: 'self.all(l, l.protocol in [''HTTP'', ''TCP'', ''UDP''] ? + !has(l.tls) : true)' + - message: hostname must not be specified for protocols ['TCP', 'UDP'] + rule: 'self.all(l, l.protocol in [''TCP'', ''UDP''] ? 
(!has(l.hostname) + || l.hostname == '''') : true)' + - message: Listener name must be unique within the Gateway + rule: self.all(l1, self.exists_one(l2, l1.name == l2.name)) + - message: Combination of port, protocol and hostname must be unique + for each listener + rule: 'self.all(l1, self.exists_one(l2, l1.port == l2.port && l1.protocol + == l2.protocol && (has(l1.hostname) && has(l2.hostname) ? l1.hostname + == l2.hostname : !has(l1.hostname) && !has(l2.hostname))))' required: - gatewayClassName - listeners @@ -889,19 +1096,40 @@ spec: conditions: - lastTransitionTime: "1970-01-01T00:00:00Z" message: Waiting for controller - reason: NotReconciled + reason: Pending status: Unknown type: Accepted + - lastTransitionTime: "1970-01-01T00:00:00Z" + message: Waiting for controller + reason: Pending + status: Unknown + type: Programmed description: Status defines the current state of Gateway. properties: addresses: - description: Addresses lists the IP addresses that have actually been - bound to the Gateway. These addresses may differ from the addresses - in the Spec, e.g. if the Gateway automatically assigns an address - from a reserved pool. + description: "Addresses lists the network addresses that have been + bound to the Gateway. \n This list may differ from the addresses + provided in the spec under some conditions: \n * no addresses are + specified, all addresses are dynamically assigned * a combination + of specified and dynamic addresses are assigned * a specified address + was unusable (e.g. already in use) \n " items: - description: GatewayAddress describes an address that can be bound - to a Gateway. + description: GatewayStatusAddress describes a network address that + is bound to a Gateway. + oneOf: + - properties: + type: + enum: + - IPAddress + value: + anyOf: + - format: ipv4 + - format: ipv6 + - properties: + type: + not: + enum: + - IPAddress properties: type: default: IPAddress @@ -920,6 +1148,11 @@ spec: required: - value type: object + x-kubernetes-validations: + - message: Hostname value must only contain valid characters (matching + ^(\*\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$) + rule: 'self.type == ''Hostname'' ? self.value.matches(r"""^(\*\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$"""): + true' maxItems: 16 type: array conditions: @@ -939,19 +1172,17 @@ spec: the `GatewayConditionType` and `GatewayConditionReason` constants so that operators and tools can converge on a common vocabulary to describe Gateway state. \n Known condition types are: \n * \"Accepted\" - * \"Ready\"" + * \"Programmed\" * \"Ready\"" items: description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, - \n \ttype FooStatus struct{ \t // Represents the observations - of a foo's current state. \t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\" \t // - +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map - \t // +listMapKey=type \t Conditions []metav1.Condition + \n type FooStatus struct{ // Represents the observations of a + foo's current state. 
// Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields - \t}" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" properties: lastTransitionTime: description: lastTransitionTime is the last time the condition @@ -1020,8 +1251,23 @@ spec: description: ListenerStatus is the status associated with a Listener. properties: attachedRoutes: - description: AttachedRoutes represents the total number of Routes - that have been successfully attached to this Listener. + description: "AttachedRoutes represents the total number of + Routes that have been successfully attached to this Listener. + \n Successful attachment of a Route to a Listener is based + solely on the combination of the AllowedRoutes field on the + corresponding Listener and the Route's ParentRefs field. A + Route is successfully attached to a Listener when it is selected + by the Listener's AllowedRoutes field AND the Route has a + valid ParentRef selecting the whole Gateway resource or a + specific Listener as a parent resource (more detail on attachment + semantics can be found in the documentation on the various + Route kinds ParentRefs fields). Listener or Route status does + not impact successful attachment, i.e. the AttachedRoutes + field count MUST be set for Listeners with condition Accepted: + false and MUST count successfully attached Routes that may + themselves have Accepted: false conditions. \n Uses for this + field include troubleshooting Route attachment and measuring + blast radius/impact of changes to a Listener." format: int32 type: integer conditions: @@ -1031,15 +1277,14 @@ spec: description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path - .status.conditions. For example, \n \ttype FooStatus struct{ - \t // Represents the observations of a foo's current - state. \t // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" \t // +patchMergeKey=type - \t // +patchStrategy=merge \t // +listType=map \t - \ // +listMapKey=type \t Conditions []metav1.Condition + .status.conditions. For example, \n type FooStatus struct{ + // Represents the observations of a foo's current state. + // Known .status.conditions.type are: \"Available\", \"Progressing\", + and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other - fields \t}" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields + }" properties: lastTransitionTime: description: lastTransitionTime is the last time the condition @@ -1204,17 +1449,29 @@ spec: for the address(es) on the \"outside of the Gateway\", that traffic bound for this Gateway will use. This could be the IP address or hostname of an external load balancer or other networking infrastructure, - or some other address that traffic will be sent to. \n The .listener.hostname - field is used to route traffic that has already arrived at the Gateway - to the correct in-cluster destination. 
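# Illustrative sketch (not part of the upstream CRD): requesting a fixed address for
# a Gateway. The value below is a documentation IP; whether it can be honoured is
# implementation-specific, and unusable or dynamically assigned addresses surface in
# status.addresses and the Gateway conditions as described above.
# spec:
#   addresses:
#   - type: IPAddress
#     value: 203.0.113.10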
\n If no Addresses are specified, - the implementation MAY schedule the Gateway in an implementation-specific - manner, assigning an appropriate set of Addresses. \n The implementation - MUST bind all Listeners to every GatewayAddress that it assigns - to the Gateway and add a corresponding entry in GatewayStatus.Addresses. - \n Support: Extended" + or some other address that traffic will be sent to. \n If no Addresses + are specified, the implementation MAY schedule the Gateway in an + implementation-specific manner, assigning an appropriate set of + Addresses. \n The implementation MUST bind all Listeners to every + GatewayAddress that it assigns to the Gateway and add a corresponding + entry in GatewayStatus.Addresses. \n Support: Extended \n " items: description: GatewayAddress describes an address that can be bound to a Gateway. + oneOf: + - properties: + type: + enum: + - IPAddress + value: + anyOf: + - format: ipv4 + - format: ipv6 + - properties: + type: + not: + enum: + - IPAddress properties: type: default: IPAddress @@ -1233,40 +1490,154 @@ spec: required: - value type: object + x-kubernetes-validations: + - message: Hostname value must only contain valid characters (matching + ^(\*\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$) + rule: 'self.type == ''Hostname'' ? self.value.matches(r"""^(\*\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$"""): + true' maxItems: 16 type: array + x-kubernetes-validations: + - message: IPAddress values must be unique + rule: 'self.all(a1, a1.type == ''IPAddress'' ? self.exists_one(a2, + a2.type == a1.type && a2.value == a1.value) : true )' + - message: Hostname values must be unique + rule: 'self.all(a1, a1.type == ''Hostname'' ? self.exists_one(a2, + a2.type == a1.type && a2.value == a1.value) : true )' gatewayClassName: description: GatewayClassName used for this Gateway. This is the name of a GatewayClass resource. maxLength: 253 minLength: 1 type: string + infrastructure: + description: "Infrastructure defines infrastructure level attributes + about this Gateway instance. \n Support: Core \n " + properties: + annotations: + additionalProperties: + description: AnnotationValue is the value of an annotation in + Gateway API. This is used for validation of maps such as TLS + options. This roughly matches Kubernetes annotation validation, + although the length validation in that case is based on the + entire size of the annotations struct. + maxLength: 4096 + minLength: 0 + type: string + description: "Annotations that SHOULD be applied to any resources + created in response to this Gateway. \n For implementations + creating other Kubernetes objects, this should be the `metadata.annotations` + field on resources. For other implementations, this refers to + any relevant (implementation specific) \"annotations\" concepts. + \n An implementation may chose to add additional implementation-specific + annotations as they see fit. \n Support: Extended" + maxProperties: 8 + type: object + labels: + additionalProperties: + description: AnnotationValue is the value of an annotation in + Gateway API. This is used for validation of maps such as TLS + options. This roughly matches Kubernetes annotation validation, + although the length validation in that case is based on the + entire size of the annotations struct. + maxLength: 4096 + minLength: 0 + type: string + description: "Labels that SHOULD be applied to any resources created + in response to this Gateway. 
\n For implementations creating + other Kubernetes objects, this should be the `metadata.labels` + field on resources. For other implementations, this refers to + any relevant (implementation specific) \"labels\" concepts. + \n An implementation may chose to add additional implementation-specific + labels as they see fit. \n Support: Extended" + maxProperties: 8 + type: object + type: object listeners: description: "Listeners associated with this Gateway. Listeners define logical endpoints that are bound on this Gateway's addresses. At - least one Listener MUST be specified. \n Each listener in a Gateway - must have a unique combination of Hostname, Port, and Protocol. - \n An implementation MAY group Listeners by Port and then collapse - each group of Listeners into a single Listener if the implementation - determines that the Listeners in the group are \"compatible\". An - implementation MAY also group together and collapse compatible Listeners - belonging to different Gateways. \n For example, an implementation - might consider Listeners to be compatible with each other if all - of the following conditions are met: \n 1. Either each Listener - within the group specifies the \"HTTP\" Protocol or each Listener - within the group specifies either the \"HTTPS\" or \"TLS\" Protocol. - \n 2. Each Listener within the group specifies a Hostname that is - unique within the group. \n 3. As a special case, one Listener - within a group may omit Hostname, in which case this Listener - matches when no other Listener matches. \n If the implementation - does collapse compatible Listeners, the hostname provided in the - incoming client request MUST be matched to a Listener to find the - correct set of Routes. The incoming hostname MUST be matched using - the Hostname field for each Listener in order of most to least specific. - That is, exact matches must be processed before wildcard matches. - \n If this field specifies multiple Listeners that have the same - Port value but are not compatible, the implementation must raise - a \"Conflicted\" condition in the Listener status. \n Support: Core" + least one Listener MUST be specified. \n Each Listener in a set + of Listeners (for example, in a single Gateway) MUST be _distinct_, + in that a traffic flow MUST be able to be assigned to exactly one + listener. (This section uses \"set of Listeners\" rather than \"Listeners + in a single Gateway\" because implementations MAY merge configuration + from multiple Gateways onto a single data plane, and these rules + _also_ apply in that case). \n Practically, this means that each + listener in a set MUST have a unique combination of Port, Protocol, + and, if supported by the protocol, Hostname. \n Some combinations + of port, protocol, and TLS settings are considered Core support + and MUST be supported by implementations based on their targeted + conformance profile: \n HTTP Profile \n 1. HTTPRoute, Port: 80, + Protocol: HTTP 2. HTTPRoute, Port: 443, Protocol: HTTPS, TLS Mode: + Terminate, TLS keypair provided \n TLS Profile \n 1. TLSRoute, Port: + 443, Protocol: TLS, TLS Mode: Passthrough \n \"Distinct\" Listeners + have the following property: \n The implementation can match inbound + requests to a single distinct Listener. When multiple Listeners + share values for fields (for example, two Listeners with the same + Port value), the implementation can match requests to only one of + the Listeners using other Listener fields. \n For example, the following + Listener scenarios are distinct: \n 1. 
Multiple Listeners with the + same Port that all use the \"HTTP\" Protocol that all have unique + Hostname values. 2. Multiple Listeners with the same Port that use + either the \"HTTPS\" or \"TLS\" Protocol that all have unique Hostname + values. 3. A mixture of \"TCP\" and \"UDP\" Protocol Listeners, + where no Listener with the same Protocol has the same Port value. + \n Some fields in the Listener struct have possible values that + affect whether the Listener is distinct. Hostname is particularly + relevant for HTTP or HTTPS protocols. \n When using the Hostname + value to select between same-Port, same-Protocol Listeners, the + Hostname value must be different on each Listener for the Listener + to be distinct. \n When the Listeners are distinct based on Hostname, + inbound request hostnames MUST match from the most specific to least + specific Hostname values to choose the correct Listener and its + associated set of Routes. \n Exact matches must be processed before + wildcard matches, and wildcard matches must be processed before + fallback (empty Hostname value) matches. For example, `\"foo.example.com\"` + takes precedence over `\"*.example.com\"`, and `\"*.example.com\"` + takes precedence over `\"\"`. \n Additionally, if there are multiple + wildcard entries, more specific wildcard entries must be processed + before less specific wildcard entries. For example, `\"*.foo.example.com\"` + takes precedence over `\"*.example.com\"`. The precise definition + here is that the higher the number of dots in the hostname to the + right of the wildcard character, the higher the precedence. \n The + wildcard character will match any number of characters _and dots_ + to the left, however, so `\"*.example.com\"` will match both `\"foo.bar.example.com\"` + _and_ `\"bar.example.com\"`. \n If a set of Listeners contains Listeners + that are not distinct, then those Listeners are Conflicted, and + the implementation MUST set the \"Conflicted\" condition in the + Listener Status to \"True\". \n Implementations MAY choose to accept + a Gateway with some Conflicted Listeners only if they only accept + the partial Listener set that contains no Conflicted Listeners. + To put this another way, implementations may accept a partial Listener + set only if they throw out *all* the conflicting Listeners. No picking + one of the conflicting listeners as the winner. This also means + that the Gateway must have at least one non-conflicting Listener + in this case, otherwise it violates the requirement that at least + one Listener must be present. \n The implementation MUST set a \"ListenersNotValid\" + condition on the Gateway Status when the Gateway contains Conflicted + Listeners whether or not they accept the Gateway. That Condition + SHOULD clearly indicate in the Message which Listeners are conflicted, + and which are Accepted. Additionally, the Listener status for those + listeners SHOULD indicate which Listeners are conflicted and not + Accepted. \n A Gateway's Listeners are considered \"compatible\" + if: \n 1. They are distinct. 2. The implementation can serve them + in compliance with the Addresses requirement that all Listeners + are available on all assigned addresses. \n Compatible combinations + in Extended support are expected to vary across implementations. + A combination that is compatible for one implementation may not + be compatible for another. 
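# Illustrative sketch (not part of the upstream CRD): scenario 3 above in practice.
# A TCP and a UDP listener may reuse the same port value and still be distinct,
# because no two listeners with the same Protocol share a Port; per the CEL rules in
# this CRD, TCP/UDP listeners also must not set hostname or tls. Names and the port
# are placeholders.
#   listeners:
#   - name: relay-udp
#     protocol: UDP
#     port: 3478
#   - name: relay-tcp
#     protocol: TCP
#     port: 3478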
\n For example, an implementation that + cannot serve both TCP and UDP listeners on the same address, or + cannot mix HTTPS and generic TLS listens on the same port would + not consider those cases compatible, even though they are distinct. + \n Note that requests SHOULD match at most one Listener. For example, + if Listeners are defined for \"foo.example.com\" and \"*.example.com\", + a request to \"foo.example.com\" SHOULD only be routed using routes + attached to the \"foo.example.com\" Listener (and not the \"*.example.com\" + Listener). This concept is known as \"Listener Isolation\". Implementations + that do not support Listener Isolation MUST clearly document this. + \n Implementations MAY merge separate Gateways onto a single set + of Addresses if all Listeners across all Gateways are compatible. + \n Support: Core" items: description: Listener embodies the concept of a logical endpoint where a Gateway accepts network connections. @@ -1283,19 +1654,18 @@ spec: determined in order of the following criteria: \n * The most specific match as defined by the Route type. * The oldest Route based on creation timestamp. For example, a Route with - \ a creation timestamp of \"2020-09-08 01:02:03\" is given - precedence over a Route with a creation timestamp of \"2020-09-08 - 01:02:04\". * If everything else is equivalent, the Route - appearing first in alphabetical order (namespace/name) should - be given precedence. For example, foo/bar is given precedence - over foo/baz. \n All valid rules within a Route attached to - this Listener should be implemented. Invalid Route rules can - be ignored (sometimes that will mean the full Route). If a - Route rule transitions from valid to invalid, support for - that Route rule should be dropped to ensure consistency. For - example, even if a filter specified by a Route rule is invalid, - the rest of the rules within that Route should still be supported. - \n Support: Core" + a creation timestamp of \"2020-09-08 01:02:03\" is given precedence + over a Route with a creation timestamp of \"2020-09-08 01:02:04\". + * If everything else is equivalent, the Route appearing first + in alphabetical order (namespace/name) should be given precedence. + For example, foo/bar is given precedence over foo/baz. \n + All valid rules within a Route attached to this Listener should + be implemented. Invalid Route rules can be ignored (sometimes + that will mean the full Route). If a Route rule transitions + from valid to invalid, support for that Route rule should + be dropped to ensure consistency. For example, even if a filter + specified by a Route rule is invalid, the rest of the rules + within that Route should still be supported. \n Support: Core" properties: kinds: description: "Kinds specifies the groups and kinds of Routes @@ -1340,12 +1710,12 @@ spec: from: default: Same description: "From indicates where Routes will be selected - for this Gateway. Possible values are: * All: Routes + for this Gateway. Possible values are: \n * All: Routes in all namespaces may be used by this Gateway. * Selector: Routes in namespaces selected by the selector may - be used by this Gateway. * Same: Only Routes in - the same namespace may be used by this Gateway. \n - Support: Core" + be used by this Gateway. * Same: Only Routes in the + same namespace may be used by this Gateway. \n Support: + Core" enum: - All - Selector @@ -1401,6 +1771,7 @@ spec: requirements are ANDed. 
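# Illustrative sketch (not part of the upstream CRD): matchExpressions entries are
# ANDed, so the selector below only admits namespaces that carry the hypothetical
# label team=media AND are not labelled env=dev.
#       namespaces:
#         from: Selector
#         selector:
#           matchExpressions:
#           - key: team
#             operator: In
#             values: ["media"]
#           - key: env
#             operator: NotIn
#             values: ["dev"]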
type: object type: object + x-kubernetes-map-type: atomic type: object type: object hostname: @@ -1412,19 +1783,18 @@ spec: following protocols: \n * TLS: The Listener Hostname MUST match the SNI. * HTTP: The Listener Hostname MUST match the Host header of the request. * HTTPS: The Listener Hostname - SHOULD match at both the TLS and HTTP protocol layers as - described above. If an implementation does not ensure that - both the SNI and Host header match the Listener hostname, - \ it MUST clearly document that. \n For HTTPRoute and TLSRoute - resources, there is an interaction with the `spec.hostnames` - array. When both listener and route specify hostnames, there - MUST be an intersection between the values for a Route to - be accepted. For more information, refer to the Route specific - Hostnames documentation. \n Hostnames that are prefixed with - a wildcard label (`*.`) are interpreted as a suffix match. - That means that a match for `*.example.com` would match both - `test.example.com`, and `foo.test.example.com`, but not `example.com`. - \n Support: Core" + SHOULD match at both the TLS and HTTP protocol layers as described + above. If an implementation does not ensure that both the + SNI and Host header match the Listener hostname, it MUST clearly + document that. \n For HTTPRoute and TLSRoute resources, there + is an interaction with the `spec.hostnames` array. When both + listener and route specify hostnames, there MUST be an intersection + between the values for a Route to be accepted. For more information, + refer to the Route specific Hostnames documentation. \n Hostnames + that are prefixed with a wildcard label (`*.`) are interpreted + as a suffix match. That means that a match for `*.example.com` + would match both `test.example.com`, and `foo.test.example.com`, + but not `example.com`. \n Support: Core" maxLength: 253 minLength: 1 pattern: ^(\*\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ @@ -1505,7 +1875,7 @@ spec: kind: default: Secret description: Kind is kind of the referent. For example - "HTTPRoute" or "Service". + "Secret". maxLength: 63 minLength: 1 pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ @@ -1516,9 +1886,10 @@ spec: minLength: 1 type: string namespace: - description: "Namespace is the namespace of the backend. - When unspecified, the local namespace is inferred. - \n Note that when a namespace is specified, a ReferenceGrant + description: "Namespace is the namespace of the referenced + object. When unspecified, the local namespace is + inferred. \n Note that when a namespace different + than the local namespace is specified, a ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. @@ -1537,13 +1908,13 @@ spec: description: "Mode defines the TLS behavior for the TLS session initiated by the client. There are two possible modes: \n - Terminate: The TLS session between the downstream - client and the Gateway is terminated at the Gateway. - This mode requires certificateRefs to be set and contain - at least one element. - Passthrough: The TLS session is - NOT terminated by the Gateway. This implies that the - Gateway can't decipher the TLS stream except for the - ClientHello message of the TLS protocol. CertificateRefs - field is ignored in this mode. \n Support: Core" + client and the Gateway is terminated at the Gateway. This + mode requires certificateRefs to be set and contain at + least one element. 
- Passthrough: The TLS session is NOT + terminated by the Gateway. This implies that the Gateway + can't decipher the TLS stream except for the ClientHello + message of the TLS protocol. CertificateRefs field is + ignored in this mode. \n Support: Core" enum: - Terminate - Passthrough @@ -1570,6 +1941,11 @@ spec: maxProperties: 16 type: object type: object + x-kubernetes-validations: + - message: certificateRefs must be specified when TLSModeType + is Terminate + rule: 'self.mode == ''Terminate'' ? size(self.certificateRefs) + > 0 : true' required: - name - port @@ -1581,6 +1957,24 @@ spec: x-kubernetes-list-map-keys: - name x-kubernetes-list-type: map + x-kubernetes-validations: + - message: tls must be specified for protocols ['HTTPS', 'TLS'] + rule: 'self.all(l, l.protocol in [''HTTPS'', ''TLS''] ? has(l.tls) + : true)' + - message: tls must not be specified for protocols ['HTTP', 'TCP', + 'UDP'] + rule: 'self.all(l, l.protocol in [''HTTP'', ''TCP'', ''UDP''] ? + !has(l.tls) : true)' + - message: hostname must not be specified for protocols ['TCP', 'UDP'] + rule: 'self.all(l, l.protocol in [''TCP'', ''UDP''] ? (!has(l.hostname) + || l.hostname == '''') : true)' + - message: Listener name must be unique within the Gateway + rule: self.all(l1, self.exists_one(l2, l1.name == l2.name)) + - message: Combination of port, protocol and hostname must be unique + for each listener + rule: 'self.all(l1, self.exists_one(l2, l1.port == l2.port && l1.protocol + == l2.protocol && (has(l1.hostname) && has(l2.hostname) ? l1.hostname + == l2.hostname : !has(l1.hostname) && !has(l2.hostname))))' required: - gatewayClassName - listeners @@ -1590,19 +1984,40 @@ spec: conditions: - lastTransitionTime: "1970-01-01T00:00:00Z" message: Waiting for controller - reason: NotReconciled + reason: Pending status: Unknown type: Accepted + - lastTransitionTime: "1970-01-01T00:00:00Z" + message: Waiting for controller + reason: Pending + status: Unknown + type: Programmed description: Status defines the current state of Gateway. properties: addresses: - description: Addresses lists the IP addresses that have actually been - bound to the Gateway. These addresses may differ from the addresses - in the Spec, e.g. if the Gateway automatically assigns an address - from a reserved pool. + description: "Addresses lists the network addresses that have been + bound to the Gateway. \n This list may differ from the addresses + provided in the spec under some conditions: \n * no addresses are + specified, all addresses are dynamically assigned * a combination + of specified and dynamic addresses are assigned * a specified address + was unusable (e.g. already in use) \n " items: - description: GatewayAddress describes an address that can be bound - to a Gateway. + description: GatewayStatusAddress describes a network address that + is bound to a Gateway. + oneOf: + - properties: + type: + enum: + - IPAddress + value: + anyOf: + - format: ipv4 + - format: ipv6 + - properties: + type: + not: + enum: + - IPAddress properties: type: default: IPAddress @@ -1621,6 +2036,11 @@ spec: required: - value type: object + x-kubernetes-validations: + - message: Hostname value must only contain valid characters (matching + ^(\*\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$) + rule: 'self.type == ''Hostname'' ? 
self.value.matches(r"""^(\*\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$"""): + true' maxItems: 16 type: array conditions: @@ -1640,19 +2060,17 @@ spec: the `GatewayConditionType` and `GatewayConditionReason` constants so that operators and tools can converge on a common vocabulary to describe Gateway state. \n Known condition types are: \n * \"Accepted\" - * \"Ready\"" + * \"Programmed\" * \"Ready\"" items: description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, - \n \ttype FooStatus struct{ \t // Represents the observations - of a foo's current state. \t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\" \t // - +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map - \t // +listMapKey=type \t Conditions []metav1.Condition + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields - \t}" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" properties: lastTransitionTime: description: lastTransitionTime is the last time the condition @@ -1721,8 +2139,23 @@ spec: description: ListenerStatus is the status associated with a Listener. properties: attachedRoutes: - description: AttachedRoutes represents the total number of Routes - that have been successfully attached to this Listener. + description: "AttachedRoutes represents the total number of + Routes that have been successfully attached to this Listener. + \n Successful attachment of a Route to a Listener is based + solely on the combination of the AllowedRoutes field on the + corresponding Listener and the Route's ParentRefs field. A + Route is successfully attached to a Listener when it is selected + by the Listener's AllowedRoutes field AND the Route has a + valid ParentRef selecting the whole Gateway resource or a + specific Listener as a parent resource (more detail on attachment + semantics can be found in the documentation on the various + Route kinds ParentRefs fields). Listener or Route status does + not impact successful attachment, i.e. the AttachedRoutes + field count MUST be set for Listeners with condition Accepted: + false and MUST count successfully attached Routes that may + themselves have Accepted: false conditions. \n Uses for this + field include troubleshooting Route attachment and measuring + blast radius/impact of changes to a Listener." format: int32 type: integer conditions: @@ -1732,15 +2165,14 @@ spec: description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path - .status.conditions. For example, \n \ttype FooStatus struct{ - \t // Represents the observations of a foo's current - state. \t // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" \t // +patchMergeKey=type - \t // +patchStrategy=merge \t // +listType=map \t - \ // +listMapKey=type \t Conditions []metav1.Condition + .status.conditions. 
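# Illustrative sketch (not part of the upstream CRD): an abridged status stanza an
# implementation might report for an accepted, programmed Gateway with one listener
# and one attached route. The address, reasons and counts are made up; real
# conditions also carry lastTransitionTime, message and observedGeneration.
# status:
#   addresses:
#   - type: IPAddress
#     value: 203.0.113.10
#   conditions:
#   - type: Accepted
#     status: "True"
#     reason: Accepted
#   - type: Programmed
#     status: "True"
#     reason: Programmed
#   listeners:
#   - name: web
#     attachedRoutes: 1
#     conditions:
#     - type: Accepted
#       status: "True"
#       reason: Accepted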
For example, \n type FooStatus struct{ + // Represents the observations of a foo's current state. + // Known .status.conditions.type are: \"Available\", \"Progressing\", + and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other - fields \t}" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields + }" properties: lastTransitionTime: description: lastTransitionTime is the last time the condition @@ -1866,39 +2298,60 @@ status: acceptedNames: kind: "" plural: "" - conditions: [] - storedVersions: [] + conditions: null + storedVersions: null --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/1538 - gateway.networking.k8s.io/bundle-version: v0.6.2 + api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/2466 + gateway.networking.k8s.io/bundle-version: v1.0.0 gateway.networking.k8s.io/channel: experimental creationTimestamp: null - name: udproutes.gateway.networking.k8s.io + name: grpcroutes.gateway.networking.k8s.io spec: group: gateway.networking.k8s.io names: categories: - gateway-api - kind: UDPRoute - listKind: UDPRouteList - plural: udproutes - singular: udproute + kind: GRPCRoute + listKind: GRPCRouteList + plural: grpcroutes + singular: grpcroute scope: Namespaced versions: - additionalPrinterColumns: + - jsonPath: .spec.hostnames + name: Hostnames + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date name: v1alpha2 schema: openAPIV3Schema: - description: UDPRoute provides a way to route UDP traffic. When combined with - a Gateway listener, it can be used to forward traffic on the port specified - by the listener to a set of backends specified by the UDPRoute. + description: "GRPCRoute provides a way to route gRPC requests. This includes + the capability to match requests by hostname, gRPC service, gRPC method, + or HTTP/2 header. Filters can be used to specify additional processing steps. + Backends specify where matching requests will be routed. \n GRPCRoute falls + under extended support within the Gateway API. Within the following specification, + the word \"MUST\" indicates that an implementation supporting GRPCRoute + must conform to the indicated requirement, but an implementation not supporting + this route type need not follow the requirement unless explicitly indicated. + \n Implementations supporting `GRPCRoute` with the `HTTPS` `ProtocolType` + MUST accept HTTP/2 connections without an initial upgrade from HTTP/1.1, + i.e. via ALPN. If the implementation does not support this, then it MUST + set the \"Accepted\" condition to \"False\" for the affected listener with + a reason of \"UnsupportedProtocol\". Implementations MAY also accept HTTP/2 + connections with an upgrade from HTTP/1. \n Implementations supporting `GRPCRoute` + with the `HTTP` `ProtocolType` MUST support HTTP/2 over cleartext TCP (h2c, + https://www.rfc-editor.org/rfc/rfc7540#section-3.1) without an initial upgrade + from HTTP/1.1, i.e. with prior knowledge (https://www.rfc-editor.org/rfc/rfc7540#section-3.4). + If the implementation does not support this, then it MUST set the \"Accepted\" + condition to \"False\" for the affected listener with a reason of \"UnsupportedProtocol\". 
+ Implementations MAY also accept HTTP/2 connections with an upgrade from + HTTP/1, i.e. without prior knowledge." properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -1913,37 +2366,122 @@ spec: metadata: type: object spec: - description: Spec defines the desired state of UDPRoute. + description: Spec defines the desired state of GRPCRoute. properties: + hostnames: + description: "Hostnames defines a set of hostnames to match against + the GRPC Host header to select a GRPCRoute to process the request. + This matches the RFC 1123 definition of a hostname with 2 notable + exceptions: \n 1. IPs are not allowed. 2. A hostname may be prefixed + with a wildcard label (`*.`). The wildcard label MUST appear by + itself as the first label. \n If a hostname is specified by both + the Listener and GRPCRoute, there MUST be at least one intersecting + hostname for the GRPCRoute to be attached to the Listener. For example: + \n * A Listener with `test.example.com` as the hostname matches + GRPCRoutes that have either not specified any hostnames, or have + specified at least one of `test.example.com` or `*.example.com`. + * A Listener with `*.example.com` as the hostname matches GRPCRoutes + that have either not specified any hostnames or have specified at + least one hostname that matches the Listener hostname. For example, + `test.example.com` and `*.example.com` would both match. On the + other hand, `example.com` and `test.example.net` would not match. + \n Hostnames that are prefixed with a wildcard label (`*.`) are + interpreted as a suffix match. That means that a match for `*.example.com` + would match both `test.example.com`, and `foo.test.example.com`, + but not `example.com`. \n If both the Listener and GRPCRoute have + specified hostnames, any GRPCRoute hostnames that do not match the + Listener hostname MUST be ignored. For example, if a Listener specified + `*.example.com`, and the GRPCRoute specified `test.example.com` + and `test.example.net`, `test.example.net` MUST NOT be considered + for a match. \n If both the Listener and GRPCRoute have specified + hostnames, and none match with the criteria above, then the GRPCRoute + MUST NOT be accepted by the implementation. The implementation MUST + raise an 'Accepted' Condition with a status of `False` in the corresponding + RouteParentStatus. \n If a Route (A) of type HTTPRoute or GRPCRoute + is attached to a Listener and that listener already has another + Route (B) of the other type attached and the intersection of the + hostnames of A and B is non-empty, then the implementation MUST + accept exactly one of these two routes, determined by the following + criteria, in order: \n * The oldest Route based on creation timestamp. + * The Route appearing first in alphabetical order by \"{namespace}/{name}\". + \n The rejected Route MUST raise an 'Accepted' condition with a + status of 'False' in the corresponding RouteParentStatus. \n Support: + Core" + items: + description: "Hostname is the fully qualified domain name of a network + host. This matches the RFC 1123 definition of a hostname with + 2 notable exceptions: \n 1. IPs are not allowed. 2. A hostname + may be prefixed with a wildcard label (`*.`). The wildcard label + must appear by itself as the first label. \n Hostname can be \"precise\" + which is a domain name without the terminating dot of a network + host (e.g. \"foo.example.com\") or \"wildcard\", which is a domain + name prefixed with a single wildcard label (e.g. `*.example.com`). 
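# Illustrative sketch (not part of the upstream CRD): hostname intersection in
# practice. Assuming a hypothetical parent listener with hostname "*.example.com",
# the first hostname below intersects and is used for matching, while
# "test.example.net" does not intersect and must be ignored for that listener.
# apiVersion: gateway.networking.k8s.io/v1alpha2
# kind: GRPCRoute
# metadata:
#   name: example-grpc-route
# spec:
#   parentRefs:
#   - name: example-gateway
#   hostnames:
#   - test.example.com        # intersects *.example.com
#   - test.example.net        # ignored for that listener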
+ \n Note that as per RFC1035 and RFC1123, a *label* must consist + of lower case alphanumeric characters or '-', and must start and + end with an alphanumeric character. No other punctuation is allowed." + maxLength: 253 + minLength: 1 + pattern: ^(\*\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + maxItems: 16 + type: array parentRefs: description: "ParentRefs references the resources (usually Gateways) that a Route wants to be attached to. Note that the referenced parent resource needs to allow this for the attachment to be complete. For Gateways, that means the Gateway needs to allow attachment from - Routes of this kind and namespace. \n The only kind of parent resource - with \"Core\" support is Gateway. This API may be extended in the - future to support additional kinds of parent resources such as one - of the route kinds. \n It is invalid to reference an identical parent - more than once. It is valid to reference multiple distinct sections - within the same parent resource, such as 2 Listeners within a Gateway. - \n It is possible to separately reference multiple distinct objects - that may be collapsed by an implementation. For example, some implementations - may choose to merge compatible Gateway Listeners together. If that - is the case, the list of routes attached to those resources should - also be merged. \n Note that for ParentRefs that cross namespace - boundaries, there are specific rules. Cross-namespace references - are only valid if they are explicitly allowed by something in the - namespace they are referring to. For example, Gateway has the AllowedRoutes - field, and ReferenceGrant provides a generic way to enable any other - kind of cross-namespace reference." + Routes of this kind and namespace. For Services, that means the + Service must either be in the same namespace for a \"producer\" + route, or the mesh implementation must support and allow \"consumer\" + routes for the referenced Service. ReferenceGrant is not applicable + for governing ParentRefs to Services - it is not possible to create + a \"producer\" route for a Service in a different namespace from + the Route. \n There are two kinds of parent resources with \"Core\" + support: \n * Gateway (Gateway conformance profile) * Service (Mesh + conformance profile, experimental, ClusterIP Services only) This + API may be extended in the future to support additional kinds of + parent resources. \n ParentRefs must be _distinct_. This means either + that: \n * They select different objects. If this is the case, + then parentRef entries are distinct. In terms of fields, this means + that the multi-part key defined by `group`, `kind`, `namespace`, + and `name` must be unique across all parentRef entries in the Route. + * They do not select different objects, but for each optional field + used, each ParentRef that selects the same object must set the same + set of optional fields to different values. If one ParentRef sets + a combination of optional fields, all must set the same combination. + \n Some examples: \n * If one ParentRef sets `sectionName`, all + ParentRefs referencing the same object must also set `sectionName`. + * If one ParentRef sets `port`, all ParentRefs referencing the same + object must also set `port`. * If one ParentRef sets `sectionName` + and `port`, all ParentRefs referencing the same object must also + set `sectionName` and `port`. \n It is possible to separately reference + multiple distinct objects that may be collapsed by an implementation. 
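# Illustrative sketch (not part of the upstream CRD): distinct parentRefs to the
# same Gateway. Because both entries select the same object, each must set
# sectionName, and to different values, as required above. Gateway and listener
# names are placeholders.
#   parentRefs:
#   - name: example-gateway
#     sectionName: grpc-plain     # e.g. an HTTP listener carrying h2c
#   - name: example-gateway
#     sectionName: grpc-tls       # e.g. an HTTPS listener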
+ For example, some implementations may choose to merge compatible + Gateway Listeners together. If that is the case, the list of routes + attached to those resources should also be merged. \n Note that + for ParentRefs that cross namespace boundaries, there are specific + rules. Cross-namespace references are only valid if they are explicitly + allowed by something in the namespace they are referring to. For + example, Gateway has the AllowedRoutes field, and ReferenceGrant + provides a generic way to enable other kinds of cross-namespace + reference. \n ParentRefs from a Route to a Service in the same + namespace are \"producer\" routes, which apply default routing rules + to inbound connections from any namespace to the Service. \n ParentRefs + from a Route to a Service in a different namespace are \"consumer\" + routes, and these routing rules are only applied to outbound connections + originating from the same namespace as the Route, for which the + intended destination of the connections are a Service targeted as + a ParentRef of the Route. \n " items: description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually - a route). The only kind of parent resource with \"Core\" support - is Gateway. This API may be extended in the future to support - additional kinds of parent resources, such as HTTPRoute. \n The - API object must be valid in the cluster; the Group and Kind must - be registered in the cluster for this reference to be valid." + a route). There are two kinds of parent resources with \"Core\" + support: \n * Gateway (Gateway conformance profile) * Service + (Mesh conformance profile, experimental, ClusterIP Services only) + \n This API may be extended in the future to support additional + kinds of parent resources. \n The API object must be valid in + the cluster; the Group and Kind must be registered in the cluster + for this reference to be valid." properties: group: default: gateway.networking.k8s.io @@ -1957,8 +2495,11 @@ spec: type: string kind: default: Gateway - description: "Kind is kind of the referent. \n Support: Core - (Gateway) \n Support: Implementation-specific (Other Resources)" + description: "Kind is kind of the referent. \n There are two + kinds of parent resources with \"Core\" support: \n * Gateway + (Gateway conformance profile) * Service (Mesh conformance + profile, experimental, ClusterIP Services only) \n Support + for other resources is Implementation-Specific." maxLength: 63 minLength: 1 pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ @@ -1978,7 +2519,15 @@ spec: the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. - \n Support: Core" + \n ParentRefs from a Route to a Service in the same namespace + are \"producer\" routes, which apply default routing rules + to inbound connections from any namespace to the Service. + \n ParentRefs from a Route to a Service in a different namespace + are \"consumer\" routes, and these routing rules are only + applied to outbound connections originating from the same + namespace as the Route, for which the intended destination + of the connections are a Service targeted as a ParentRef of + the Route. \n Support: Core" maxLength: 63 minLength: 1 pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ @@ -1993,18 +2542,22 @@ spec: a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. 
When both Port and SectionName are specified, the name and port of the selected listener - must match both specified values. \n Implementations MAY choose - to support other parent resources. Implementations supporting - other types of parent resources MUST clearly document how/if - Port is interpreted. \n For the purpose of status, an attachment - is considered successful as long as the parent resource accepts - it partially. For example, Gateway listeners can restrict - which Routes can attach to them by Route kind, namespace, - or hostname. If 1 of 2 Gateway listeners accept attachment - from the referencing Route, the Route MUST be considered successfully - attached. If no Gateway listeners accept attachment from this - Route, the Route MUST be considered detached from the Gateway. - \n Support: Extended \n " + must match both specified values. \n When the parent resource + is a Service, this targets a specific port in the Service + spec. When both Port (experimental) and SectionName are specified, + the name and port of the selected port must match both specified + values. \n Implementations MAY choose to support other parent + resources. Implementations supporting other types of parent + resources MUST clearly document how/if Port is interpreted. + \n For the purpose of status, an attachment is considered + successful as long as the parent resource accepts it partially. + For example, Gateway listeners can restrict which Routes can + attach to them by Route kind, namespace, or hostname. If 1 + of 2 Gateway listeners accept attachment from the referencing + Route, the Route MUST be considered successfully attached. + If no Gateway listeners accept attachment from this Route, + the Route MUST be considered detached from the Gateway. \n + Support: Extended \n " format: int32 maximum: 65535 minimum: 1 @@ -2015,19 +2568,23 @@ spec: interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both - specified values. \n Implementations MAY choose to support - attaching Routes to other resources. If that is the case, - they MUST clearly document how SectionName is interpreted. - \n When unspecified (empty string), this will reference the - entire resource. For the purpose of status, an attachment - is considered successful if at least one section in the parent - resource accepts it. For example, Gateway listeners can restrict - which Routes can attach to them by Route kind, namespace, - or hostname. If 1 of 2 Gateway listeners accept attachment - from the referencing Route, the Route MUST be considered successfully - attached. If no Gateway listeners accept attachment from this - Route, the Route MUST be considered detached from the Gateway. - \n Support: Core" + specified values. * Service: Port Name. When both Port (experimental) + and SectionName are specified, the name and port of the selected + listener must match both specified values. Note that attaching + Routes to Services as Parents is part of experimental Mesh + support and is not supported for any other purpose. \n Implementations + MAY choose to support attaching Routes to other resources. + If that is the case, they MUST clearly document how SectionName + is interpreted. \n When unspecified (empty string), this will + reference the entire resource. For the purpose of status, + an attachment is considered successful if at least one section + in the parent resource accepts it. 
For example, Gateway listeners + can restrict which Routes can attach to them by Route kind, + namespace, or hostname. If 1 of 2 Gateway listeners accept + attachment from the referencing Route, the Route MUST be considered + successfully attached. If no Gateway listeners accept attachment + from this Route, the Route MUST be considered detached from + the Gateway. \n Support: Core" maxLength: 253 minLength: 1 pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ @@ -2037,29 +2594,525 @@ spec: type: object maxItems: 32 type: array + x-kubernetes-validations: + - message: sectionName or port must be specified when parentRefs includes + 2 or more references to the same parent + rule: 'self.all(p1, self.all(p2, p1.group == p2.group && p1.kind + == p2.kind && p1.name == p2.name && (((!has(p1.__namespace__) + || p1.__namespace__ == '''') && (!has(p2.__namespace__) || p2.__namespace__ + == '''')) || (has(p1.__namespace__) && has(p2.__namespace__) && + p1.__namespace__ == p2.__namespace__)) ? ((!has(p1.sectionName) + || p1.sectionName == '''') == (!has(p2.sectionName) || p2.sectionName + == '''') && (!has(p1.port) || p1.port == 0) == (!has(p2.port) + || p2.port == 0)): true))' + - message: sectionName or port must be unique when parentRefs includes + 2 or more references to the same parent + rule: self.all(p1, self.exists_one(p2, p1.group == p2.group && p1.kind + == p2.kind && p1.name == p2.name && (((!has(p1.__namespace__) + || p1.__namespace__ == '') && (!has(p2.__namespace__) || p2.__namespace__ + == '')) || (has(p1.__namespace__) && has(p2.__namespace__) && + p1.__namespace__ == p2.__namespace__ )) && (((!has(p1.sectionName) + || p1.sectionName == '') && (!has(p2.sectionName) || p2.sectionName + == '')) || ( has(p1.sectionName) && has(p2.sectionName) && p1.sectionName + == p2.sectionName)) && (((!has(p1.port) || p1.port == 0) && (!has(p2.port) + || p2.port == 0)) || (has(p1.port) && has(p2.port) && p1.port + == p2.port)))) rules: - description: Rules are a list of UDP matchers and actions. + description: Rules are a list of GRPC matchers, filters and actions. items: - description: UDPRouteRule is the configuration for a given rule. + description: GRPCRouteRule defines the semantics for matching a + gRPC request based on conditions (matches), processing it (filters), + and forwarding the request to an API object (backendRefs). properties: backendRefs: description: "BackendRefs defines the backend(s) where matching - requests should be sent. If unspecified or invalid (refers - to a non-existent resource or a Service with no endpoints), - the underlying implementation MUST actively reject connection - attempts to this backend. Packet drops must respect weight; - if an invalid backend is requested to have 80% of the packets, - then 80% of packets must be dropped instead. \n Support: Core - for Kubernetes Service Support: Implementation-specific for - any other resource \n Support for weight: Extended" + requests should be sent. \n Failure behavior here depends + on how many BackendRefs are specified and how many are invalid. + \n If *all* entries in BackendRefs are invalid, and there + are also no filters specified in this route rule, *all* traffic + which matches this rule MUST receive an `UNAVAILABLE` status. + \n See the GRPCBackendRef definition for the rules about what + makes a single GRPCBackendRef invalid. \n When a GRPCBackendRef + is invalid, `UNAVAILABLE` statuses MUST be returned for requests + that would have otherwise been routed to an invalid backend. 
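# --- Editorial aside: illustrative example, not part of the vendored CRD or of this diff. ---
# A sketch of weighted GRPCRoute backends, with hypothetical names; per the description above
# (and the 50-percent example that follows), if one backend becomes invalid, its weighted share
# of requests must receive an UNAVAILABLE status rather than spilling over to the other backend.
# The GRPCRoute apiVersion may be v1alpha2 or v1 depending on the bundled Gateway API release.
apiVersion: gateway.networking.k8s.io/v1alpha2
kind: GRPCRoute
metadata:
  name: example-weighted-route
spec:
  parentRefs:
  - name: example-gateway
  rules:
  - backendRefs:
    - name: example-backend-v1
      port: 9000
      weight: 1                    # equal weights: each backend gets half the matching traffic
    - name: example-backend-v2
      port: 9000
      weight: 1
# --- End of editorial aside. ---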
+ If multiple backends are specified, and some are invalid, + the proportion of requests that would otherwise have been + routed to an invalid backend MUST receive an `UNAVAILABLE` + status. \n For example, if two backends are specified with + equal weights, and one is invalid, 50 percent of traffic MUST + receive an `UNAVAILABLE` status. Implementations may choose + how that 50 percent is determined. \n Support: Core for Kubernetes + Service \n Support: Implementation-specific for any other + resource \n Support for weight: Core" items: - description: "BackendRef defines how a Route should forward - a request to a Kubernetes resource. \n Note that when a - namespace is specified, a ReferenceGrant object is required - in the referent namespace to allow that namespace's owner - to accept the reference. See the ReferenceGrant documentation - for details." + description: "GRPCBackendRef defines how a GRPCRoute forwards + a gRPC request. \n Note that when a namespace different + than the local namespace is specified, a ReferenceGrant + object is required in the referent namespace to allow that + namespace's owner to accept the reference. See the ReferenceGrant + documentation for details. \n + \n When the BackendRef points to a Kubernetes Service, implementations + SHOULD honor the appProtocol field if it is set for the + target Service Port. \n Implementations supporting appProtocol + SHOULD recognize the Kubernetes Standard Application Protocols + defined in KEP-3726. \n If a Service appProtocol isn't specified, + an implementation MAY infer the backend protocol through + its own means. Implementations MAY infer the protocol from + the Route type referring to the backend Service. \n If a + Route is not able to send traffic to the backend using the + specified protocol then the backend is considered invalid. + Implementations MUST set the \"ResolvedRefs\" condition + to \"False\" with the \"UnsupportedProtocol\" reason. \n + " properties: + filters: + description: "Filters defined at this level MUST be executed + if and only if the request is being forwarded to the + backend defined here. \n Support: Implementation-specific + (For broader support of filters, use the Filters field + in GRPCRouteRule.)" + items: + description: GRPCRouteFilter defines processing steps + that must be completed during the request or response + lifecycle. GRPCRouteFilters are meant as an extension + point to express processing that may be done in Gateway + implementations. Some examples include request or + response modification, implementing authentication + strategies, rate-limiting, and traffic shaping. API + guarantee/conformance is defined based on the type + of the filter. + properties: + extensionRef: + description: "ExtensionRef is an optional, implementation-specific + extension to the \"filter\" behavior. For example, + resource \"myroutefilter\" in group \"networking.example.net\"). + ExtensionRef MUST NOT be used for core and extended + filters. \n Support: Implementation-specific \n + This filter can be used multiple times within + the same rule." + properties: + group: + description: Group is the group of the referent. + For example, "gateway.networking.k8s.io". + When unspecified or empty string, core API + group is inferred. + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + description: Kind is kind of the referent. For + example "HTTPRoute" or "Service". 
+ maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: Name is the name of the referent. + maxLength: 253 + minLength: 1 + type: string + required: + - group + - kind + - name + type: object + requestHeaderModifier: + description: "RequestHeaderModifier defines a schema + for a filter that modifies request headers. \n + Support: Core" + properties: + add: + description: "Add adds the given header(s) (name, + value) to the request before the action. It + appends to any existing values associated + with the header name. \n Input: GET /foo HTTP/1.1 + my-header: foo \n Config: add: - name: \"my-header\" + value: \"bar,baz\" \n Output: GET /foo HTTP/1.1 + my-header: foo,bar,baz" + items: + description: HTTPHeader represents an HTTP + Header name and value as defined by RFC + 7230. + properties: + name: + description: "Name is the name of the + HTTP Header to be matched. Name matching + MUST be case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an + equivalent name MUST be considered for + a match. Subsequent entries with an + equivalent header name MUST be ignored. + Due to the case-insensitivity of header + names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP + Header to be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + remove: + description: "Remove the given header(s) from + the HTTP request before the action. The value + of Remove is a list of HTTP header names. + Note that the header names are case-insensitive + (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + \n Input: GET /foo HTTP/1.1 my-header1: foo + my-header2: bar my-header3: baz \n Config: + remove: [\"my-header1\", \"my-header3\"] \n + Output: GET /foo HTTP/1.1 my-header2: bar" + items: + type: string + maxItems: 16 + type: array + x-kubernetes-list-type: set + set: + description: "Set overwrites the request with + the given header (name, value) before the + action. \n Input: GET /foo HTTP/1.1 my-header: + foo \n Config: set: - name: \"my-header\" + value: \"bar\" \n Output: GET /foo HTTP/1.1 + my-header: bar" + items: + description: HTTPHeader represents an HTTP + Header name and value as defined by RFC + 7230. + properties: + name: + description: "Name is the name of the + HTTP Header to be matched. Name matching + MUST be case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an + equivalent name MUST be considered for + a match. Subsequent entries with an + equivalent header name MUST be ignored. + Due to the case-insensitivity of header + names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP + Header to be matched. 
+ maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + requestMirror: + description: "RequestMirror defines a schema for + a filter that mirrors requests. Requests are sent + to the specified destination, but responses from + that destination are ignored. \n This filter can + be used multiple times within the same rule. Note + that not all implementations will be able to support + mirroring to multiple backends. \n Support: Extended" + properties: + backendRef: + description: "BackendRef references a resource + where mirrored requests are sent. \n Mirrored + requests must be sent only to a single destination + endpoint within this BackendRef, irrespective + of how many endpoints are present within this + BackendRef. \n If the referent cannot be found, + this BackendRef is invalid and must be dropped + from the Gateway. The controller must ensure + the \"ResolvedRefs\" condition on the Route + status is set to `status: False` and not configure + this backend in the underlying implementation. + \n If there is a cross-namespace reference + to an *existing* object that is not allowed + by a ReferenceGrant, the controller must ensure + the \"ResolvedRefs\" condition on the Route + is set to `status: False`, with the \"RefNotPermitted\" + reason and not configure this backend in the + underlying implementation. \n In either error + case, the Message of the `ResolvedRefs` Condition + should be used to provide more detail about + the problem. \n Support: Extended for Kubernetes + Service \n Support: Implementation-specific + for any other resource" + properties: + group: + default: "" + description: Group is the group of the referent. + For example, "gateway.networking.k8s.io". + When unspecified or empty string, core + API group is inferred. + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Service + description: "Kind is the Kubernetes resource + kind of the referent. For example \"Service\". + \n Defaults to \"Service\" when not specified. + \n ExternalName services can refer to + CNAME DNS records that may live outside + of the cluster and as such are difficult + to reason about in terms of conformance. + They also may not be safe to forward to + (see CVE-2021-25740 for more information). + Implementations SHOULD NOT support ExternalName + Services. \n Support: Core (Services with + a type other than ExternalName) \n Support: + Implementation-specific (Services with + type ExternalName)" + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: Name is the name of the referent. + maxLength: 253 + minLength: 1 + type: string + namespace: + description: "Namespace is the namespace + of the backend. When unspecified, the + local namespace is inferred. \n Note that + when a namespace different than the local + namespace is specified, a ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept + the reference. See the ReferenceGrant + documentation for details. \n Support: + Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: Port specifies the destination + port number to use for this resource. + Port is required when the referent is + a Kubernetes Service. 
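# --- Editorial aside: illustrative example, not part of the vendored CRD or of this diff. ---
# A sketch of the RequestMirror filter described above, with hypothetical names. The mirror
# backendRef points at a Service, so port must be set; responses from the mirror are ignored.
# The GRPCRoute apiVersion may be v1alpha2 or v1 depending on the bundled Gateway API release.
apiVersion: gateway.networking.k8s.io/v1alpha2
kind: GRPCRoute
metadata:
  name: example-mirrored-route
spec:
  parentRefs:
  - name: example-gateway
  rules:
  - filters:
    - type: RequestMirror
      requestMirror:
        backendRef:
          name: example-backend-copy   # mirror target; its responses are discarded
          port: 9000                   # required because the referent is a Service
    backendRefs:
    - name: example-backend            # primary backend that answers the request
      port: 9000
# --- End of editorial aside. ---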
In this case, the + port number is the service port number, + not the target port. For other resources, + destination port might be derived from + the referent resource or this field. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - name + type: object + x-kubernetes-validations: + - message: Must have port for Service reference + rule: '(size(self.group) == 0 && self.kind + == ''Service'') ? has(self.port) : true' + required: + - backendRef + type: object + responseHeaderModifier: + description: "ResponseHeaderModifier defines a schema + for a filter that modifies response headers. \n + Support: Extended" + properties: + add: + description: "Add adds the given header(s) (name, + value) to the request before the action. It + appends to any existing values associated + with the header name. \n Input: GET /foo HTTP/1.1 + my-header: foo \n Config: add: - name: \"my-header\" + value: \"bar,baz\" \n Output: GET /foo HTTP/1.1 + my-header: foo,bar,baz" + items: + description: HTTPHeader represents an HTTP + Header name and value as defined by RFC + 7230. + properties: + name: + description: "Name is the name of the + HTTP Header to be matched. Name matching + MUST be case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an + equivalent name MUST be considered for + a match. Subsequent entries with an + equivalent header name MUST be ignored. + Due to the case-insensitivity of header + names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP + Header to be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + remove: + description: "Remove the given header(s) from + the HTTP request before the action. The value + of Remove is a list of HTTP header names. + Note that the header names are case-insensitive + (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + \n Input: GET /foo HTTP/1.1 my-header1: foo + my-header2: bar my-header3: baz \n Config: + remove: [\"my-header1\", \"my-header3\"] \n + Output: GET /foo HTTP/1.1 my-header2: bar" + items: + type: string + maxItems: 16 + type: array + x-kubernetes-list-type: set + set: + description: "Set overwrites the request with + the given header (name, value) before the + action. \n Input: GET /foo HTTP/1.1 my-header: + foo \n Config: set: - name: \"my-header\" + value: \"bar\" \n Output: GET /foo HTTP/1.1 + my-header: bar" + items: + description: HTTPHeader represents an HTTP + Header name and value as defined by RFC + 7230. + properties: + name: + description: "Name is the name of the + HTTP Header to be matched. Name matching + MUST be case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an + equivalent name MUST be considered for + a match. Subsequent entries with an + equivalent header name MUST be ignored. + Due to the case-insensitivity of header + names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP + Header to be matched. 
+ maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + type: + description: "Type identifies the type of filter + to apply. As with other API fields, types are + classified into three conformance levels: \n - + Core: Filter types and their corresponding configuration + defined by \"Support: Core\" in this package, + e.g. \"RequestHeaderModifier\". All implementations + supporting GRPCRoute MUST support core filters. + \n - Extended: Filter types and their corresponding + configuration defined by \"Support: Extended\" + in this package, e.g. \"RequestMirror\". Implementers + are encouraged to support extended filters. \n + - Implementation-specific: Filters that are defined + and supported by specific vendors. In the future, + filters showing convergence in behavior across + multiple implementations will be considered for + inclusion in extended or core conformance levels. + Filter-specific configuration for such filters + is specified using the ExtensionRef field. `Type` + MUST be set to \"ExtensionRef\" for custom filters. + \n Implementers are encouraged to define custom + implementation types to extend the core API with + implementation-specific behavior. \n If a reference + to a custom filter type cannot be resolved, the + filter MUST NOT be skipped. Instead, requests + that would have been processed by that filter + MUST receive a HTTP error response. \n " + enum: + - ResponseHeaderModifier + - RequestHeaderModifier + - RequestMirror + - ExtensionRef + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: filter.requestHeaderModifier must be nil + if the filter.type is not RequestHeaderModifier + rule: '!(has(self.requestHeaderModifier) && self.type + != ''RequestHeaderModifier'')' + - message: filter.requestHeaderModifier must be specified + for RequestHeaderModifier filter.type + rule: '!(!has(self.requestHeaderModifier) && self.type + == ''RequestHeaderModifier'')' + - message: filter.responseHeaderModifier must be nil + if the filter.type is not ResponseHeaderModifier + rule: '!(has(self.responseHeaderModifier) && self.type + != ''ResponseHeaderModifier'')' + - message: filter.responseHeaderModifier must be specified + for ResponseHeaderModifier filter.type + rule: '!(!has(self.responseHeaderModifier) && self.type + == ''ResponseHeaderModifier'')' + - message: filter.requestMirror must be nil if the filter.type + is not RequestMirror + rule: '!(has(self.requestMirror) && self.type != ''RequestMirror'')' + - message: filter.requestMirror must be specified for + RequestMirror filter.type + rule: '!(!has(self.requestMirror) && self.type == + ''RequestMirror'')' + - message: filter.extensionRef must be nil if the filter.type + is not ExtensionRef + rule: '!(has(self.extensionRef) && self.type != ''ExtensionRef'')' + - message: filter.extensionRef must be specified for + ExtensionRef filter.type + rule: '!(!has(self.extensionRef) && self.type == ''ExtensionRef'')' + maxItems: 16 + type: array + x-kubernetes-validations: + - message: RequestHeaderModifier filter cannot be repeated + rule: self.filter(f, f.type == 'RequestHeaderModifier').size() + <= 1 + - message: ResponseHeaderModifier filter cannot be repeated + rule: self.filter(f, f.type == 'ResponseHeaderModifier').size() + <= 1 group: default: "" description: Group is the group of the referent. 
For example, @@ -2070,9 +3123,17 @@ spec: type: string kind: default: Service - description: Kind is kind of the referent. For example - "HTTPRoute" or "Service". Defaults to "Service" when - not specified. + description: "Kind is the Kubernetes resource kind of + the referent. For example \"Service\". \n Defaults to + \"Service\" when not specified. \n ExternalName services + can refer to CNAME DNS records that may live outside + of the cluster and as such are difficult to reason about + in terms of conformance. They also may not be safe to + forward to (see CVE-2021-25740 for more information). + Implementations SHOULD NOT support ExternalName Services. + \n Support: Core (Services with a type other than ExternalName) + \n Support: Implementation-specific (Services with type + ExternalName)" maxLength: 63 minLength: 1 pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ @@ -2085,11 +3146,11 @@ spec: namespace: description: "Namespace is the namespace of the backend. When unspecified, the local namespace is inferred. \n - Note that when a namespace is specified, a ReferenceGrant - object is required in the referent namespace to allow - that namespace's owner to accept the reference. See - the ReferenceGrant documentation for details. \n Support: - Core" + Note that when a namespace different than the local + namespace is specified, a ReferenceGrant object is required + in the referent namespace to allow that namespace's + owner to accept the reference. See the ReferenceGrant + documentation for details. \n Support: Core" maxLength: 63 minLength: 1 pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ @@ -2127,18 +3188,580 @@ spec: required: - name type: object + x-kubernetes-validations: + - message: Must have port for Service reference + rule: '(size(self.group) == 0 && self.kind == ''Service'') + ? has(self.port) : true' maxItems: 16 - minItems: 1 + type: array + filters: + description: "Filters define the filters that are applied to + requests that match this rule. \n The effects of ordering + of multiple behaviors are currently unspecified. This can + change in the future based on feedback during the alpha stage. + \n Conformance-levels at this level are defined based on the + type of filter: \n - ALL core filters MUST be supported by + all implementations that support GRPCRoute. - Implementers + are encouraged to support extended filters. - Implementation-specific + custom filters have no API guarantees across implementations. + \n Specifying the same filter multiple times is not supported + unless explicitly indicated in the filter. \n If an implementation + can not support a combination of filters, it must clearly + document that limitation. In cases where incompatible or unsupported + filters are specified and cause the `Accepted` condition to + be set to status `False`, implementations may use the `IncompatibleFilters` + reason to specify this configuration error. \n Support: Core" + items: + description: GRPCRouteFilter defines processing steps that + must be completed during the request or response lifecycle. + GRPCRouteFilters are meant as an extension point to express + processing that may be done in Gateway implementations. + Some examples include request or response modification, + implementing authentication strategies, rate-limiting, and + traffic shaping. API guarantee/conformance is defined based + on the type of the filter. + properties: + extensionRef: + description: "ExtensionRef is an optional, implementation-specific + extension to the \"filter\" behavior. 
For example, + resource \"myroutefilter\" in group \"networking.example.net\"). + ExtensionRef MUST NOT be used for core and extended + filters. \n Support: Implementation-specific \n This + filter can be used multiple times within the same rule." + properties: + group: + description: Group is the group of the referent. For + example, "gateway.networking.k8s.io". When unspecified + or empty string, core API group is inferred. + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + description: Kind is kind of the referent. For example + "HTTPRoute" or "Service". + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: Name is the name of the referent. + maxLength: 253 + minLength: 1 + type: string + required: + - group + - kind + - name + type: object + requestHeaderModifier: + description: "RequestHeaderModifier defines a schema for + a filter that modifies request headers. \n Support: + Core" + properties: + add: + description: "Add adds the given header(s) (name, + value) to the request before the action. It appends + to any existing values associated with the header + name. \n Input: GET /foo HTTP/1.1 my-header: foo + \n Config: add: - name: \"my-header\" value: \"bar,baz\" + \n Output: GET /foo HTTP/1.1 my-header: foo,bar,baz" + items: + description: HTTPHeader represents an HTTP Header + name and value as defined by RFC 7230. + properties: + name: + description: "Name is the name of the HTTP Header + to be matched. Name matching MUST be case + insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an equivalent + name MUST be considered for a match. Subsequent + entries with an equivalent header name MUST + be ignored. Due to the case-insensitivity + of header names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP Header + to be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + remove: + description: "Remove the given header(s) from the + HTTP request before the action. The value of Remove + is a list of HTTP header names. Note that the header + names are case-insensitive (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + \n Input: GET /foo HTTP/1.1 my-header1: foo my-header2: + bar my-header3: baz \n Config: remove: [\"my-header1\", + \"my-header3\"] \n Output: GET /foo HTTP/1.1 my-header2: + bar" + items: + type: string + maxItems: 16 + type: array + x-kubernetes-list-type: set + set: + description: "Set overwrites the request with the + given header (name, value) before the action. \n + Input: GET /foo HTTP/1.1 my-header: foo \n Config: + set: - name: \"my-header\" value: \"bar\" \n Output: + GET /foo HTTP/1.1 my-header: bar" + items: + description: HTTPHeader represents an HTTP Header + name and value as defined by RFC 7230. + properties: + name: + description: "Name is the name of the HTTP Header + to be matched. Name matching MUST be case + insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an equivalent + name MUST be considered for a match. 
Subsequent + entries with an equivalent header name MUST + be ignored. Due to the case-insensitivity + of header names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP Header + to be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + requestMirror: + description: "RequestMirror defines a schema for a filter + that mirrors requests. Requests are sent to the specified + destination, but responses from that destination are + ignored. \n This filter can be used multiple times within + the same rule. Note that not all implementations will + be able to support mirroring to multiple backends. \n + Support: Extended" + properties: + backendRef: + description: "BackendRef references a resource where + mirrored requests are sent. \n Mirrored requests + must be sent only to a single destination endpoint + within this BackendRef, irrespective of how many + endpoints are present within this BackendRef. \n + If the referent cannot be found, this BackendRef + is invalid and must be dropped from the Gateway. + The controller must ensure the \"ResolvedRefs\" + condition on the Route status is set to `status: + False` and not configure this backend in the underlying + implementation. \n If there is a cross-namespace + reference to an *existing* object that is not allowed + by a ReferenceGrant, the controller must ensure + the \"ResolvedRefs\" condition on the Route is + set to `status: False`, with the \"RefNotPermitted\" + reason and not configure this backend in the underlying + implementation. \n In either error case, the Message + of the `ResolvedRefs` Condition should be used to + provide more detail about the problem. \n Support: + Extended for Kubernetes Service \n Support: Implementation-specific + for any other resource" + properties: + group: + default: "" + description: Group is the group of the referent. + For example, "gateway.networking.k8s.io". When + unspecified or empty string, core API group + is inferred. + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Service + description: "Kind is the Kubernetes resource + kind of the referent. For example \"Service\". + \n Defaults to \"Service\" when not specified. + \n ExternalName services can refer to CNAME + DNS records that may live outside of the cluster + and as such are difficult to reason about in + terms of conformance. They also may not be safe + to forward to (see CVE-2021-25740 for more information). + Implementations SHOULD NOT support ExternalName + Services. \n Support: Core (Services with a + type other than ExternalName) \n Support: Implementation-specific + (Services with type ExternalName)" + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: Name is the name of the referent. + maxLength: 253 + minLength: 1 + type: string + namespace: + description: "Namespace is the namespace of the + backend. When unspecified, the local namespace + is inferred. \n Note that when a namespace different + than the local namespace is specified, a ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept the + reference. 
See the ReferenceGrant documentation + for details. \n Support: Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: Port specifies the destination port + number to use for this resource. Port is required + when the referent is a Kubernetes Service. In + this case, the port number is the service port + number, not the target port. For other resources, + destination port might be derived from the referent + resource or this field. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - name + type: object + x-kubernetes-validations: + - message: Must have port for Service reference + rule: '(size(self.group) == 0 && self.kind == ''Service'') + ? has(self.port) : true' + required: + - backendRef + type: object + responseHeaderModifier: + description: "ResponseHeaderModifier defines a schema + for a filter that modifies response headers. \n Support: + Extended" + properties: + add: + description: "Add adds the given header(s) (name, + value) to the request before the action. It appends + to any existing values associated with the header + name. \n Input: GET /foo HTTP/1.1 my-header: foo + \n Config: add: - name: \"my-header\" value: \"bar,baz\" + \n Output: GET /foo HTTP/1.1 my-header: foo,bar,baz" + items: + description: HTTPHeader represents an HTTP Header + name and value as defined by RFC 7230. + properties: + name: + description: "Name is the name of the HTTP Header + to be matched. Name matching MUST be case + insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an equivalent + name MUST be considered for a match. Subsequent + entries with an equivalent header name MUST + be ignored. Due to the case-insensitivity + of header names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP Header + to be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + remove: + description: "Remove the given header(s) from the + HTTP request before the action. The value of Remove + is a list of HTTP header names. Note that the header + names are case-insensitive (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + \n Input: GET /foo HTTP/1.1 my-header1: foo my-header2: + bar my-header3: baz \n Config: remove: [\"my-header1\", + \"my-header3\"] \n Output: GET /foo HTTP/1.1 my-header2: + bar" + items: + type: string + maxItems: 16 + type: array + x-kubernetes-list-type: set + set: + description: "Set overwrites the request with the + given header (name, value) before the action. \n + Input: GET /foo HTTP/1.1 my-header: foo \n Config: + set: - name: \"my-header\" value: \"bar\" \n Output: + GET /foo HTTP/1.1 my-header: bar" + items: + description: HTTPHeader represents an HTTP Header + name and value as defined by RFC 7230. + properties: + name: + description: "Name is the name of the HTTP Header + to be matched. Name matching MUST be case + insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an equivalent + name MUST be considered for a match. Subsequent + entries with an equivalent header name MUST + be ignored. 
Due to the case-insensitivity + of header names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP Header + to be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + type: + description: "Type identifies the type of filter to apply. + As with other API fields, types are classified into + three conformance levels: \n - Core: Filter types and + their corresponding configuration defined by \"Support: + Core\" in this package, e.g. \"RequestHeaderModifier\". + All implementations supporting GRPCRoute MUST support + core filters. \n - Extended: Filter types and their + corresponding configuration defined by \"Support: Extended\" + in this package, e.g. \"RequestMirror\". Implementers + are encouraged to support extended filters. \n - Implementation-specific: + Filters that are defined and supported by specific vendors. + In the future, filters showing convergence in behavior + across multiple implementations will be considered for + inclusion in extended or core conformance levels. Filter-specific + configuration for such filters is specified using the + ExtensionRef field. `Type` MUST be set to \"ExtensionRef\" + for custom filters. \n Implementers are encouraged to + define custom implementation types to extend the core + API with implementation-specific behavior. \n If a reference + to a custom filter type cannot be resolved, the filter + MUST NOT be skipped. Instead, requests that would have + been processed by that filter MUST receive a HTTP error + response. 
\n " + enum: + - ResponseHeaderModifier + - RequestHeaderModifier + - RequestMirror + - ExtensionRef + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: filter.requestHeaderModifier must be nil if the + filter.type is not RequestHeaderModifier + rule: '!(has(self.requestHeaderModifier) && self.type != + ''RequestHeaderModifier'')' + - message: filter.requestHeaderModifier must be specified + for RequestHeaderModifier filter.type + rule: '!(!has(self.requestHeaderModifier) && self.type == + ''RequestHeaderModifier'')' + - message: filter.responseHeaderModifier must be nil if the + filter.type is not ResponseHeaderModifier + rule: '!(has(self.responseHeaderModifier) && self.type != + ''ResponseHeaderModifier'')' + - message: filter.responseHeaderModifier must be specified + for ResponseHeaderModifier filter.type + rule: '!(!has(self.responseHeaderModifier) && self.type + == ''ResponseHeaderModifier'')' + - message: filter.requestMirror must be nil if the filter.type + is not RequestMirror + rule: '!(has(self.requestMirror) && self.type != ''RequestMirror'')' + - message: filter.requestMirror must be specified for RequestMirror + filter.type + rule: '!(!has(self.requestMirror) && self.type == ''RequestMirror'')' + - message: filter.extensionRef must be nil if the filter.type + is not ExtensionRef + rule: '!(has(self.extensionRef) && self.type != ''ExtensionRef'')' + - message: filter.extensionRef must be specified for ExtensionRef + filter.type + rule: '!(!has(self.extensionRef) && self.type == ''ExtensionRef'')' + maxItems: 16 + type: array + x-kubernetes-validations: + - message: RequestHeaderModifier filter cannot be repeated + rule: self.filter(f, f.type == 'RequestHeaderModifier').size() + <= 1 + - message: ResponseHeaderModifier filter cannot be repeated + rule: self.filter(f, f.type == 'ResponseHeaderModifier').size() + <= 1 + matches: + description: "Matches define conditions used for matching the + rule against incoming gRPC requests. Each match is independent, + i.e. this rule will be matched if **any** one of the matches + is satisfied. \n For example, take the following matches configuration: + \n ``` matches: - method: service: foo.bar headers: values: + version: 2 - method: service: foo.bar.v2 ``` \n For a request + to match against this rule, it MUST satisfy EITHER of the + two conditions: \n - service of foo.bar AND contains the header + `version: 2` - service of foo.bar.v2 \n See the documentation + for GRPCRouteMatch on how to specify multiple match conditions + to be ANDed together. \n If no matches are specified, the + implementation MUST match every gRPC request. \n Proxy or + Load Balancer routing configuration generated from GRPCRoutes + MUST prioritize rules based on the following criteria, continuing + on ties. Merging MUST not be done between GRPCRoutes and HTTPRoutes. + Precedence MUST be given to the rule with the largest number + of: \n * Characters in a matching non-wildcard hostname. * + Characters in a matching hostname. * Characters in a matching + service. * Characters in a matching method. * Header matches. + \n If ties still exist across multiple Routes, matching precedence + MUST be determined in order of the following criteria, continuing + on ties: \n * The oldest Route based on creation timestamp. + * The Route appearing first in alphabetical order by \"{namespace}/{name}\". 
+ \n If ties still exist within the Route that has been given + precedence, matching precedence MUST be granted to the first + matching rule meeting the above criteria." + items: + description: "GRPCRouteMatch defines the predicate used to + match requests to a given action. Multiple match types are + ANDed together, i.e. the match will evaluate to true only + if all conditions are satisfied. \n For example, the match + below will match a gRPC request only if its service is `foo` + AND it contains the `version: v1` header: \n ``` matches: + - method: type: Exact service: \"foo\" headers: - name: + \"version\" value \"v1\" \n ```" + properties: + headers: + description: Headers specifies gRPC request header matchers. + Multiple match values are ANDed together, meaning, a + request MUST match all the specified headers to select + the route. + items: + description: GRPCHeaderMatch describes how to select + a gRPC route by matching gRPC request headers. + properties: + name: + description: "Name is the name of the gRPC Header + to be matched. \n If multiple entries specify + equivalent header names, only the first entry + with an equivalent name MUST be considered for + a match. Subsequent entries with an equivalent + header name MUST be ignored. Due to the case-insensitivity + of header names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + type: + default: Exact + description: Type specifies how to match against + the value of the header. + enum: + - Exact + - RegularExpression + type: string + value: + description: Value is the value of the gRPC Header + to be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + method: + description: Method specifies a gRPC request service/method + matcher. If this field is not specified, all services + and methods will match. + properties: + method: + description: "Value of the method to match against. + If left empty or omitted, will match all services. + \n At least one of Service and Method MUST be a + non-empty string." + maxLength: 1024 + type: string + service: + description: "Value of the service to match against. + If left empty or omitted, will match any service. + \n At least one of Service and Method MUST be a + non-empty string." + maxLength: 1024 + type: string + type: + default: Exact + description: "Type specifies how to match against + the service and/or method. Support: Core (Exact + with service and method specified) \n Support: Implementation-specific + (Exact with method specified but no service specified) + \n Support: Implementation-specific (RegularExpression)" + enum: + - Exact + - RegularExpression + type: string + type: object + x-kubernetes-validations: + - message: One or both of 'service' or 'method' must be + specified + rule: 'has(self.type) ? has(self.service) || has(self.method) + : true' + - message: service must only contain valid characters + (matching ^(?i)\.?[a-z_][a-z_0-9]*(\.[a-z_][a-z_0-9]*)*$) + rule: '(!has(self.type) || self.type == ''Exact'') && + has(self.service) ? self.service.matches(r"""^(?i)\.?[a-z_][a-z_0-9]*(\.[a-z_][a-z_0-9]*)*$"""): + true' + - message: method must only contain valid characters (matching + ^[A-Za-z_][A-Za-z_0-9]*$) + rule: '(!has(self.type) || self.type == ''Exact'') && + has(self.method) ? 
self.method.matches(r"""^[A-Za-z_][A-Za-z_0-9]*$"""): + true' + type: object + maxItems: 8 type: array type: object maxItems: 16 - minItems: 1 type: array - required: - - rules type: object status: - description: Status defines the current state of UDPRoute. + description: Status defines the current state of GRPCRoute. properties: parents: description: "Parents is a list of parent resources (usually Gateways) @@ -2178,15 +3801,14 @@ spec: description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path - .status.conditions. For example, \n \ttype FooStatus struct{ - \t // Represents the observations of a foo's current - state. \t // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" \t // +patchMergeKey=type - \t // +patchStrategy=merge \t // +listType=map \t - \ // +listMapKey=type \t Conditions []metav1.Condition + .status.conditions. For example, \n type FooStatus struct{ + // Represents the observations of a foo's current state. + // Known .status.conditions.type are: \"Available\", \"Progressing\", + and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other - fields \t}" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields + }" properties: lastTransitionTime: description: lastTransitionTime is the last time the condition @@ -2283,9 +3905,11 @@ spec: type: string kind: default: Gateway - description: "Kind is kind of the referent. \n Support: - Core (Gateway) \n Support: Implementation-specific (Other - Resources)" + description: "Kind is kind of the referent. \n There are + two kinds of parent resources with \"Core\" support: \n + * Gateway (Gateway conformance profile) * Service (Mesh + conformance profile, experimental, ClusterIP Services + only) \n Support for other resources is Implementation-Specific." maxLength: 63 minLength: 1 pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ @@ -2305,7 +3929,16 @@ spec: in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace - reference. \n Support: Core" + reference. \n ParentRefs from a Route to a Service in + the same namespace are \"producer\" routes, which apply + default routing rules to inbound connections from any + namespace to the Service. \n ParentRefs from a Route to + a Service in a different namespace are \"consumer\" routes, + and these routing rules are only applied to outbound connections + originating from the same namespace as the Route, for + which the intended destination of the connections are + a Service targeted as a ParentRef of the Route. \n Support: + Core" maxLength: 63 minLength: 1 pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ @@ -2321,8 +3954,12 @@ spec: a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match - both specified values. \n Implementations MAY choose to - support other parent resources. Implementations supporting + both specified values. \n When the parent resource is + a Service, this targets a specific port in the Service + spec. 
When both Port (experimental) and SectionName are + specified, the name and port of the selected port must + match both specified values. \n Implementations MAY choose + to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the @@ -2333,7 +3970,7 @@ spec: the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. - \n Support: Extended \n " + \n Support: Extended \n " format: int32 maximum: 65535 minimum: 1 @@ -2344,8 +3981,7219 @@ spec: is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener - must match both specified values. \n Implementations MAY - choose to support attaching Routes to other resources. + must match both specified values. * Service: Port Name. + When both Port (experimental) and SectionName are specified, + the name and port of the selected listener must match + both specified values. Note that attaching Routes to Services + as Parents is part of experimental Mesh support and is + not supported for any other purpose. \n Implementations + MAY choose to support attaching Routes to other resources. + If that is the case, they MUST clearly document how SectionName + is interpreted. \n When unspecified (empty string), this + will reference the entire resource. For the purpose of + status, an attachment is considered successful if at least + one section in the parent resource accepts it. For example, + Gateway listeners can restrict which Routes can attach + to them by Route kind, namespace, or hostname. If 1 of + 2 Gateway listeners accept attachment from the referencing + Route, the Route MUST be considered successfully attached. + If no Gateway listeners accept attachment from this Route, + the Route MUST be considered detached from the Gateway. + \n Support: Core" + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - name + type: object + required: + - controllerName + - parentRef + type: object + maxItems: 32 + type: array + required: + - parents + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/2466 + gateway.networking.k8s.io/bundle-version: v1.0.0 + gateway.networking.k8s.io/channel: experimental + creationTimestamp: null + name: httproutes.gateway.networking.k8s.io +spec: + group: gateway.networking.k8s.io + names: + categories: + - gateway-api + kind: HTTPRoute + listKind: HTTPRouteList + plural: httproutes + singular: httproute + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.hostnames + name: Hostnames + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: HTTPRoute provides a way to route HTTP requests. This includes + the capability to match requests by hostname, path, header, or query param. + Filters can be used to specify additional processing steps. 
Backends specify + where matching requests should be routed. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the desired state of HTTPRoute. + properties: + hostnames: + description: "Hostnames defines a set of hostnames that should match + against the HTTP Host header to select a HTTPRoute used to process + the request. Implementations MUST ignore any port value specified + in the HTTP Host header while performing a match and (absent of + any applicable header modification configuration) MUST forward this + header unmodified to the backend. \n Valid values for Hostnames + are determined by RFC 1123 definition of a hostname with 2 notable + exceptions: \n 1. IPs are not allowed. 2. A hostname may be prefixed + with a wildcard label (`*.`). The wildcard label must appear by + itself as the first label. \n If a hostname is specified by both + the Listener and HTTPRoute, there must be at least one intersecting + hostname for the HTTPRoute to be attached to the Listener. For example: + \n * A Listener with `test.example.com` as the hostname matches + HTTPRoutes that have either not specified any hostnames, or have + specified at least one of `test.example.com` or `*.example.com`. + * A Listener with `*.example.com` as the hostname matches HTTPRoutes + that have either not specified any hostnames or have specified at + least one hostname that matches the Listener hostname. For example, + `*.example.com`, `test.example.com`, and `foo.test.example.com` + would all match. On the other hand, `example.com` and `test.example.net` + would not match. \n Hostnames that are prefixed with a wildcard + label (`*.`) are interpreted as a suffix match. That means that + a match for `*.example.com` would match both `test.example.com`, + and `foo.test.example.com`, but not `example.com`. \n If both the + Listener and HTTPRoute have specified hostnames, any HTTPRoute hostnames + that do not match the Listener hostname MUST be ignored. For example, + if a Listener specified `*.example.com`, and the HTTPRoute specified + `test.example.com` and `test.example.net`, `test.example.net` must + not be considered for a match. \n If both the Listener and HTTPRoute + have specified hostnames, and none match with the criteria above, + then the HTTPRoute is not accepted. The implementation must raise + an 'Accepted' Condition with a status of `False` in the corresponding + RouteParentStatus. \n In the event that multiple HTTPRoutes specify + intersecting hostnames (e.g. overlapping wildcard matching and exact + matching hostnames), precedence must be given to rules from the + HTTPRoute with the largest number of: \n * Characters in a matching + non-wildcard hostname. * Characters in a matching hostname. \n If + ties exist across multiple Routes, the matching precedence rules + for HTTPRouteMatches takes over. 
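# --- Editorial aside: illustrative example, not part of the vendored CRD or of this diff. ---
# A sketch of the hostname intersection rules described above, with hypothetical names.
# Attached to a Gateway listener whose hostname is *.example.com, both entries below
# intersect with that listener, so the Route can be accepted.
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: example-http-route
spec:
  parentRefs:
  - name: example-gateway
  hostnames:
  - "test.example.com"       # precise hostname
  - "*.example.com"          # wildcard: matches test.example.com and foo.test.example.com, not example.com
  rules:
  - backendRefs:
    - name: example-backend  # Kind defaults to Service; port is then required
      port: 8080
# --- End of editorial aside. ---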
\n Support: Core" + items: + description: "Hostname is the fully qualified domain name of a network + host. This matches the RFC 1123 definition of a hostname with + 2 notable exceptions: \n 1. IPs are not allowed. 2. A hostname + may be prefixed with a wildcard label (`*.`). The wildcard label + must appear by itself as the first label. \n Hostname can be \"precise\" + which is a domain name without the terminating dot of a network + host (e.g. \"foo.example.com\") or \"wildcard\", which is a domain + name prefixed with a single wildcard label (e.g. `*.example.com`). + \n Note that as per RFC1035 and RFC1123, a *label* must consist + of lower case alphanumeric characters or '-', and must start and + end with an alphanumeric character. No other punctuation is allowed." + maxLength: 253 + minLength: 1 + pattern: ^(\*\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + maxItems: 16 + type: array + parentRefs: + description: "ParentRefs references the resources (usually Gateways) + that a Route wants to be attached to. Note that the referenced parent + resource needs to allow this for the attachment to be complete. + For Gateways, that means the Gateway needs to allow attachment from + Routes of this kind and namespace. For Services, that means the + Service must either be in the same namespace for a \"producer\" + route, or the mesh implementation must support and allow \"consumer\" + routes for the referenced Service. ReferenceGrant is not applicable + for governing ParentRefs to Services - it is not possible to create + a \"producer\" route for a Service in a different namespace from + the Route. \n There are two kinds of parent resources with \"Core\" + support: \n * Gateway (Gateway conformance profile) * Service (Mesh + conformance profile, experimental, ClusterIP Services only) This + API may be extended in the future to support additional kinds of + parent resources. \n ParentRefs must be _distinct_. This means either + that: \n * They select different objects. If this is the case, + then parentRef entries are distinct. In terms of fields, this means + that the multi-part key defined by `group`, `kind`, `namespace`, + and `name` must be unique across all parentRef entries in the Route. + * They do not select different objects, but for each optional field + used, each ParentRef that selects the same object must set the same + set of optional fields to different values. If one ParentRef sets + a combination of optional fields, all must set the same combination. + \n Some examples: \n * If one ParentRef sets `sectionName`, all + ParentRefs referencing the same object must also set `sectionName`. + * If one ParentRef sets `port`, all ParentRefs referencing the same + object must also set `port`. * If one ParentRef sets `sectionName` + and `port`, all ParentRefs referencing the same object must also + set `sectionName` and `port`. \n It is possible to separately reference + multiple distinct objects that may be collapsed by an implementation. + For example, some implementations may choose to merge compatible + Gateway Listeners together. If that is the case, the list of routes + attached to those resources should also be merged. \n Note that + for ParentRefs that cross namespace boundaries, there are specific + rules. Cross-namespace references are only valid if they are explicitly + allowed by something in the namespace they are referring to. 
For + example, Gateway has the AllowedRoutes field, and ReferenceGrant + provides a generic way to enable other kinds of cross-namespace + reference. \n ParentRefs from a Route to a Service in the same + namespace are \"producer\" routes, which apply default routing rules + to inbound connections from any namespace to the Service. \n ParentRefs + from a Route to a Service in a different namespace are \"consumer\" + routes, and these routing rules are only applied to outbound connections + originating from the same namespace as the Route, for which the + intended destination of the connections are a Service targeted as + a ParentRef of the Route. \n " + items: + description: "ParentReference identifies an API object (usually + a Gateway) that can be considered a parent of this resource (usually + a route). There are two kinds of parent resources with \"Core\" + support: \n * Gateway (Gateway conformance profile) * Service + (Mesh conformance profile, experimental, ClusterIP Services only) + \n This API may be extended in the future to support additional + kinds of parent resources. \n The API object must be valid in + the cluster; the Group and Kind must be registered in the cluster + for this reference to be valid." + properties: + group: + default: gateway.networking.k8s.io + description: "Group is the group of the referent. When unspecified, + \"gateway.networking.k8s.io\" is inferred. To set the core + API group (such as for a \"Service\" kind referent), Group + must be explicitly set to \"\" (empty string). \n Support: + Core" + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Gateway + description: "Kind is kind of the referent. \n There are two + kinds of parent resources with \"Core\" support: \n * Gateway + (Gateway conformance profile) * Service (Mesh conformance + profile, experimental, ClusterIP Services only) \n Support + for other resources is Implementation-Specific." + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: "Name is the name of the referent. \n Support: + Core" + maxLength: 253 + minLength: 1 + type: string + namespace: + description: "Namespace is the namespace of the referent. When + unspecified, this refers to the local namespace of the Route. + \n Note that there are specific rules for ParentRefs which + cross namespace boundaries. Cross-namespace references are + only valid if they are explicitly allowed by something in + the namespace they are referring to. For example: Gateway + has the AllowedRoutes field, and ReferenceGrant provides a + generic way to enable any other kind of cross-namespace reference. + \n ParentRefs from a Route to a Service in the same namespace + are \"producer\" routes, which apply default routing rules + to inbound connections from any namespace to the Service. + \n ParentRefs from a Route to a Service in a different namespace + are \"consumer\" routes, and these routing rules are only + applied to outbound connections originating from the same + namespace as the Route, for which the intended destination + of the connections are a Service targeted as a ParentRef of + the Route. \n Support: Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: "Port is the network port this Route targets. It + can be interpreted differently based on the type of parent + resource. 
\n When the parent resource is a Gateway, this targets + all listeners listening on the specified port that also support + this kind of Route(and select this Route). It's not recommended + to set `Port` unless the networking behaviors specified in + a Route must apply to a specific port as opposed to a listener(s) + whose port(s) may be changed. When both Port and SectionName + are specified, the name and port of the selected listener + must match both specified values. \n When the parent resource + is a Service, this targets a specific port in the Service + spec. When both Port (experimental) and SectionName are specified, + the name and port of the selected port must match both specified + values. \n Implementations MAY choose to support other parent + resources. Implementations supporting other types of parent + resources MUST clearly document how/if Port is interpreted. + \n For the purpose of status, an attachment is considered + successful as long as the parent resource accepts it partially. + For example, Gateway listeners can restrict which Routes can + attach to them by Route kind, namespace, or hostname. If 1 + of 2 Gateway listeners accept attachment from the referencing + Route, the Route MUST be considered successfully attached. + If no Gateway listeners accept attachment from this Route, + the Route MUST be considered detached from the Gateway. \n + Support: Extended \n " + format: int32 + maximum: 65535 + minimum: 1 + type: integer + sectionName: + description: "SectionName is the name of a section within the + target resource. In the following resources, SectionName is + interpreted as the following: \n * Gateway: Listener Name. + When both Port (experimental) and SectionName are specified, + the name and port of the selected listener must match both + specified values. * Service: Port Name. When both Port (experimental) + and SectionName are specified, the name and port of the selected + listener must match both specified values. Note that attaching + Routes to Services as Parents is part of experimental Mesh + support and is not supported for any other purpose. \n Implementations + MAY choose to support attaching Routes to other resources. + If that is the case, they MUST clearly document how SectionName + is interpreted. \n When unspecified (empty string), this will + reference the entire resource. For the purpose of status, + an attachment is considered successful if at least one section + in the parent resource accepts it. For example, Gateway listeners + can restrict which Routes can attach to them by Route kind, + namespace, or hostname. If 1 of 2 Gateway listeners accept + attachment from the referencing Route, the Route MUST be considered + successfully attached. If no Gateway listeners accept attachment + from this Route, the Route MUST be considered detached from + the Gateway. \n Support: Core" + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - name + type: object + maxItems: 32 + type: array + x-kubernetes-validations: + - message: sectionName or port must be specified when parentRefs includes + 2 or more references to the same parent + rule: 'self.all(p1, self.all(p2, p1.group == p2.group && p1.kind + == p2.kind && p1.name == p2.name && (((!has(p1.__namespace__) + || p1.__namespace__ == '''') && (!has(p2.__namespace__) || p2.__namespace__ + == '''')) || (has(p1.__namespace__) && has(p2.__namespace__) && + p1.__namespace__ == p2.__namespace__)) ? 
((!has(p1.sectionName) + || p1.sectionName == '''') == (!has(p2.sectionName) || p2.sectionName + == '''') && (!has(p1.port) || p1.port == 0) == (!has(p2.port) + || p2.port == 0)): true))' + - message: sectionName or port must be unique when parentRefs includes + 2 or more references to the same parent + rule: self.all(p1, self.exists_one(p2, p1.group == p2.group && p1.kind + == p2.kind && p1.name == p2.name && (((!has(p1.__namespace__) + || p1.__namespace__ == '') && (!has(p2.__namespace__) || p2.__namespace__ + == '')) || (has(p1.__namespace__) && has(p2.__namespace__) && + p1.__namespace__ == p2.__namespace__ )) && (((!has(p1.sectionName) + || p1.sectionName == '') && (!has(p2.sectionName) || p2.sectionName + == '')) || ( has(p1.sectionName) && has(p2.sectionName) && p1.sectionName + == p2.sectionName)) && (((!has(p1.port) || p1.port == 0) && (!has(p2.port) + || p2.port == 0)) || (has(p1.port) && has(p2.port) && p1.port + == p2.port)))) + rules: + default: + - matches: + - path: + type: PathPrefix + value: / + description: Rules are a list of HTTP matchers, filters and actions. + items: + description: HTTPRouteRule defines semantics for matching an HTTP + request based on conditions (matches), processing it (filters), + and forwarding the request to an API object (backendRefs). + properties: + backendRefs: + description: "BackendRefs defines the backend(s) where matching + requests should be sent. \n Failure behavior here depends + on how many BackendRefs are specified and how many are invalid. + \n If *all* entries in BackendRefs are invalid, and there + are also no filters specified in this route rule, *all* traffic + which matches this rule MUST receive a 500 status code. \n + See the HTTPBackendRef definition for the rules about what + makes a single HTTPBackendRef invalid. \n When a HTTPBackendRef + is invalid, 500 status codes MUST be returned for requests + that would have otherwise been routed to an invalid backend. + If multiple backends are specified, and some are invalid, + the proportion of requests that would otherwise have been + routed to an invalid backend MUST receive a 500 status code. + \n For example, if two backends are specified with equal weights, + and one is invalid, 50 percent of traffic must receive a 500. + Implementations may choose how that 50 percent is determined. + \n Support: Core for Kubernetes Service \n Support: Extended + for Kubernetes ServiceImport \n Support: Implementation-specific + for any other resource \n Support for weight: Core" + items: + description: "HTTPBackendRef defines how a HTTPRoute forwards + a HTTP request. \n Note that when a namespace different + than the local namespace is specified, a ReferenceGrant + object is required in the referent namespace to allow that + namespace's owner to accept the reference. See the ReferenceGrant + documentation for details. \n + \n When the BackendRef points to a Kubernetes Service, implementations + SHOULD honor the appProtocol field if it is set for the + target Service Port. \n Implementations supporting appProtocol + SHOULD recognize the Kubernetes Standard Application Protocols + defined in KEP-3726. \n If a Service appProtocol isn't specified, + an implementation MAY infer the backend protocol through + its own means. Implementations MAY infer the protocol from + the Route type referring to the backend Service. \n If a + Route is not able to send traffic to the backend using the + specified protocol then the backend is considered invalid. 
+ Implementations MUST set the \"ResolvedRefs\" condition + to \"False\" with the \"UnsupportedProtocol\" reason. \n + " + properties: + filters: + description: "Filters defined at this level should be + executed if and only if the request is being forwarded + to the backend defined here. \n Support: Implementation-specific + (For broader support of filters, use the Filters field + in HTTPRouteRule.)" + items: + description: HTTPRouteFilter defines processing steps + that must be completed during the request or response + lifecycle. HTTPRouteFilters are meant as an extension + point to express processing that may be done in Gateway + implementations. Some examples include request or + response modification, implementing authentication + strategies, rate-limiting, and traffic shaping. API + guarantee/conformance is defined based on the type + of the filter. + properties: + extensionRef: + description: "ExtensionRef is an optional, implementation-specific + extension to the \"filter\" behavior. For example, + resource \"myroutefilter\" in group \"networking.example.net\"). + ExtensionRef MUST NOT be used for core and extended + filters. \n This filter can be used multiple times + within the same rule. \n Support: Implementation-specific" + properties: + group: + description: Group is the group of the referent. + For example, "gateway.networking.k8s.io". + When unspecified or empty string, core API + group is inferred. + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + description: Kind is kind of the referent. For + example "HTTPRoute" or "Service". + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: Name is the name of the referent. + maxLength: 253 + minLength: 1 + type: string + required: + - group + - kind + - name + type: object + requestHeaderModifier: + description: "RequestHeaderModifier defines a schema + for a filter that modifies request headers. \n + Support: Core" + properties: + add: + description: "Add adds the given header(s) (name, + value) to the request before the action. It + appends to any existing values associated + with the header name. \n Input: GET /foo HTTP/1.1 + my-header: foo \n Config: add: - name: \"my-header\" + value: \"bar,baz\" \n Output: GET /foo HTTP/1.1 + my-header: foo,bar,baz" + items: + description: HTTPHeader represents an HTTP + Header name and value as defined by RFC + 7230. + properties: + name: + description: "Name is the name of the + HTTP Header to be matched. Name matching + MUST be case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an + equivalent name MUST be considered for + a match. Subsequent entries with an + equivalent header name MUST be ignored. + Due to the case-insensitivity of header + names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP + Header to be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + remove: + description: "Remove the given header(s) from + the HTTP request before the action. The value + of Remove is a list of HTTP header names. 
+ Note that the header names are case-insensitive + (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + \n Input: GET /foo HTTP/1.1 my-header1: foo + my-header2: bar my-header3: baz \n Config: + remove: [\"my-header1\", \"my-header3\"] \n + Output: GET /foo HTTP/1.1 my-header2: bar" + items: + type: string + maxItems: 16 + type: array + x-kubernetes-list-type: set + set: + description: "Set overwrites the request with + the given header (name, value) before the + action. \n Input: GET /foo HTTP/1.1 my-header: + foo \n Config: set: - name: \"my-header\" + value: \"bar\" \n Output: GET /foo HTTP/1.1 + my-header: bar" + items: + description: HTTPHeader represents an HTTP + Header name and value as defined by RFC + 7230. + properties: + name: + description: "Name is the name of the + HTTP Header to be matched. Name matching + MUST be case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an + equivalent name MUST be considered for + a match. Subsequent entries with an + equivalent header name MUST be ignored. + Due to the case-insensitivity of header + names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP + Header to be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + requestMirror: + description: "RequestMirror defines a schema for + a filter that mirrors requests. Requests are sent + to the specified destination, but responses from + that destination are ignored. \n This filter can + be used multiple times within the same rule. Note + that not all implementations will be able to support + mirroring to multiple backends. \n Support: Extended" + properties: + backendRef: + description: "BackendRef references a resource + where mirrored requests are sent. \n Mirrored + requests must be sent only to a single destination + endpoint within this BackendRef, irrespective + of how many endpoints are present within this + BackendRef. \n If the referent cannot be found, + this BackendRef is invalid and must be dropped + from the Gateway. The controller must ensure + the \"ResolvedRefs\" condition on the Route + status is set to `status: False` and not configure + this backend in the underlying implementation. + \n If there is a cross-namespace reference + to an *existing* object that is not allowed + by a ReferenceGrant, the controller must ensure + the \"ResolvedRefs\" condition on the Route + is set to `status: False`, with the \"RefNotPermitted\" + reason and not configure this backend in the + underlying implementation. \n In either error + case, the Message of the `ResolvedRefs` Condition + should be used to provide more detail about + the problem. \n Support: Extended for Kubernetes + Service \n Support: Implementation-specific + for any other resource" + properties: + group: + default: "" + description: Group is the group of the referent. + For example, "gateway.networking.k8s.io". + When unspecified or empty string, core + API group is inferred. + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Service + description: "Kind is the Kubernetes resource + kind of the referent. 
For example \"Service\". + \n Defaults to \"Service\" when not specified. + \n ExternalName services can refer to + CNAME DNS records that may live outside + of the cluster and as such are difficult + to reason about in terms of conformance. + They also may not be safe to forward to + (see CVE-2021-25740 for more information). + Implementations SHOULD NOT support ExternalName + Services. \n Support: Core (Services with + a type other than ExternalName) \n Support: + Implementation-specific (Services with + type ExternalName)" + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: Name is the name of the referent. + maxLength: 253 + minLength: 1 + type: string + namespace: + description: "Namespace is the namespace + of the backend. When unspecified, the + local namespace is inferred. \n Note that + when a namespace different than the local + namespace is specified, a ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept + the reference. See the ReferenceGrant + documentation for details. \n Support: + Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: Port specifies the destination + port number to use for this resource. + Port is required when the referent is + a Kubernetes Service. In this case, the + port number is the service port number, + not the target port. For other resources, + destination port might be derived from + the referent resource or this field. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - name + type: object + x-kubernetes-validations: + - message: Must have port for Service reference + rule: '(size(self.group) == 0 && self.kind + == ''Service'') ? has(self.port) : true' + required: + - backendRef + type: object + requestRedirect: + description: "RequestRedirect defines a schema for + a filter that responds to the request with an + HTTP redirection. \n Support: Core" + properties: + hostname: + description: "Hostname is the hostname to be + used in the value of the `Location` header + in the response. When empty, the hostname + in the `Host` header of the request is used. + \n Support: Core" + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + path: + description: "Path defines parameters used to + modify the path of the incoming request. The + modified path is then used to construct the + `Location` header. When empty, the request + path is used as-is. \n Support: Extended" + properties: + replaceFullPath: + description: ReplaceFullPath specifies the + value with which to replace the full path + of a request during a rewrite or redirect. + maxLength: 1024 + type: string + replacePrefixMatch: + description: "ReplacePrefixMatch specifies + the value with which to replace the prefix + match of a request during a rewrite or + redirect. For example, a request to \"/foo/bar\" + with a prefix match of \"/foo\" and a + ReplacePrefixMatch of \"/xyz\" would be + modified to \"/xyz/bar\". \n Note that + this matches the behavior of the PathPrefix + match type. This matches full path elements. + A path element refers to the list of labels + in the path split by the `/` separator. + When specified, a trailing `/` is ignored. + For example, the paths `/abc`, `/abc/`, + and `/abc/def` would all match the prefix + `/abc`, but the path `/abcd` would not. 
+ \n ReplacePrefixMatch is only compatible + with a `PathPrefix` HTTPRouteMatch. Using + any other HTTPRouteMatch type on the same + HTTPRouteRule will result in the implementation + setting the Accepted Condition for the + Route to `status: False`. \n Request Path + | Prefix Match | Replace Prefix | Modified + Path -------------|--------------|----------------|---------- + /foo/bar | /foo | /xyz | + /xyz/bar /foo/bar | /foo | + /xyz/ | /xyz/bar /foo/bar | + /foo/ | /xyz | /xyz/bar + /foo/bar | /foo/ | /xyz/ | + /xyz/bar /foo | /foo | + /xyz | /xyz /foo/ | /foo + \ | /xyz | /xyz/ /foo/bar + \ | /foo | | + /bar /foo/ | /foo | | / /foo | /foo | + | / /foo/ | /foo + \ | / | / /foo | + /foo | / | /" + maxLength: 1024 + type: string + type: + description: "Type defines the type of path + modifier. Additional types may be added + in a future release of the API. \n Note + that values may be added to this enum, + implementations must ensure that unknown + values will not cause a crash. \n Unknown + values here must result in the implementation + setting the Accepted Condition for the + Route to `status: False`, with a Reason + of `UnsupportedValue`." + enum: + - ReplaceFullPath + - ReplacePrefixMatch + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: replaceFullPath must be specified + when type is set to 'ReplaceFullPath' + rule: 'self.type == ''ReplaceFullPath'' ? + has(self.replaceFullPath) : true' + - message: type must be 'ReplaceFullPath' when + replaceFullPath is set + rule: 'has(self.replaceFullPath) ? self.type + == ''ReplaceFullPath'' : true' + - message: replacePrefixMatch must be specified + when type is set to 'ReplacePrefixMatch' + rule: 'self.type == ''ReplacePrefixMatch'' + ? has(self.replacePrefixMatch) : true' + - message: type must be 'ReplacePrefixMatch' + when replacePrefixMatch is set + rule: 'has(self.replacePrefixMatch) ? self.type + == ''ReplacePrefixMatch'' : true' + port: + description: "Port is the port to be used in + the value of the `Location` header in the + response. \n If no port is specified, the + redirect port MUST be derived using the following + rules: \n * If redirect scheme is not-empty, + the redirect port MUST be the well-known port + associated with the redirect scheme. Specifically + \"http\" to port 80 and \"https\" to port + 443. If the redirect scheme does not have + a well-known port, the listener port of the + Gateway SHOULD be used. * If redirect scheme + is empty, the redirect port MUST be the Gateway + Listener port. \n Implementations SHOULD NOT + add the port number in the 'Location' header + in the following cases: \n * A Location header + that will use HTTP (whether that is determined + via the Listener protocol or the Scheme field) + _and_ use port 80. * A Location header that + will use HTTPS (whether that is determined + via the Listener protocol or the Scheme field) + _and_ use port 443. \n Support: Extended" + format: int32 + maximum: 65535 + minimum: 1 + type: integer + scheme: + description: "Scheme is the scheme to be used + in the value of the `Location` header in the + response. When empty, the scheme of the request + is used. \n Scheme redirects can affect the + port of the redirect, for more information, + refer to the documentation for the port field + of this filter. \n Note that values may be + added to this enum, implementations must ensure + that unknown values will not cause a crash. 
+ \n Unknown values here must result in the + implementation setting the Accepted Condition + for the Route to `status: False`, with a Reason + of `UnsupportedValue`. \n Support: Extended" + enum: + - http + - https + type: string + statusCode: + default: 302 + description: "StatusCode is the HTTP status + code to be used in response. \n Note that + values may be added to this enum, implementations + must ensure that unknown values will not cause + a crash. \n Unknown values here must result + in the implementation setting the Accepted + Condition for the Route to `status: False`, + with a Reason of `UnsupportedValue`. \n Support: + Core" + enum: + - 301 + - 302 + type: integer + type: object + responseHeaderModifier: + description: "ResponseHeaderModifier defines a schema + for a filter that modifies response headers. \n + Support: Extended" + properties: + add: + description: "Add adds the given header(s) (name, + value) to the request before the action. It + appends to any existing values associated + with the header name. \n Input: GET /foo HTTP/1.1 + my-header: foo \n Config: add: - name: \"my-header\" + value: \"bar,baz\" \n Output: GET /foo HTTP/1.1 + my-header: foo,bar,baz" + items: + description: HTTPHeader represents an HTTP + Header name and value as defined by RFC + 7230. + properties: + name: + description: "Name is the name of the + HTTP Header to be matched. Name matching + MUST be case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an + equivalent name MUST be considered for + a match. Subsequent entries with an + equivalent header name MUST be ignored. + Due to the case-insensitivity of header + names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP + Header to be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + remove: + description: "Remove the given header(s) from + the HTTP request before the action. The value + of Remove is a list of HTTP header names. + Note that the header names are case-insensitive + (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + \n Input: GET /foo HTTP/1.1 my-header1: foo + my-header2: bar my-header3: baz \n Config: + remove: [\"my-header1\", \"my-header3\"] \n + Output: GET /foo HTTP/1.1 my-header2: bar" + items: + type: string + maxItems: 16 + type: array + x-kubernetes-list-type: set + set: + description: "Set overwrites the request with + the given header (name, value) before the + action. \n Input: GET /foo HTTP/1.1 my-header: + foo \n Config: set: - name: \"my-header\" + value: \"bar\" \n Output: GET /foo HTTP/1.1 + my-header: bar" + items: + description: HTTPHeader represents an HTTP + Header name and value as defined by RFC + 7230. + properties: + name: + description: "Name is the name of the + HTTP Header to be matched. Name matching + MUST be case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an + equivalent name MUST be considered for + a match. Subsequent entries with an + equivalent header name MUST be ignored. + Due to the case-insensitivity of header + names, \"foo\" and \"Foo\" are considered + equivalent." 
+ maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP + Header to be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + type: + description: "Type identifies the type of filter + to apply. As with other API fields, types are + classified into three conformance levels: \n - + Core: Filter types and their corresponding configuration + defined by \"Support: Core\" in this package, + e.g. \"RequestHeaderModifier\". All implementations + must support core filters. \n - Extended: Filter + types and their corresponding configuration defined + by \"Support: Extended\" in this package, e.g. + \"RequestMirror\". Implementers are encouraged + to support extended filters. \n - Implementation-specific: + Filters that are defined and supported by specific + vendors. In the future, filters showing convergence + in behavior across multiple implementations will + be considered for inclusion in extended or core + conformance levels. Filter-specific configuration + for such filters is specified using the ExtensionRef + field. `Type` should be set to \"ExtensionRef\" + for custom filters. \n Implementers are encouraged + to define custom implementation types to extend + the core API with implementation-specific behavior. + \n If a reference to a custom filter type cannot + be resolved, the filter MUST NOT be skipped. Instead, + requests that would have been processed by that + filter MUST receive a HTTP error response. \n + Note that values may be added to this enum, implementations + must ensure that unknown values will not cause + a crash. \n Unknown values here must result in + the implementation setting the Accepted Condition + for the Route to `status: False`, with a Reason + of `UnsupportedValue`." + enum: + - RequestHeaderModifier + - ResponseHeaderModifier + - RequestMirror + - RequestRedirect + - URLRewrite + - ExtensionRef + type: string + urlRewrite: + description: "URLRewrite defines a schema for a + filter that modifies a request during forwarding. + \n Support: Extended" + properties: + hostname: + description: "Hostname is the value to be used + to replace the Host header value during forwarding. + \n Support: Extended" + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + path: + description: "Path defines a path rewrite. \n + Support: Extended" + properties: + replaceFullPath: + description: ReplaceFullPath specifies the + value with which to replace the full path + of a request during a rewrite or redirect. + maxLength: 1024 + type: string + replacePrefixMatch: + description: "ReplacePrefixMatch specifies + the value with which to replace the prefix + match of a request during a rewrite or + redirect. For example, a request to \"/foo/bar\" + with a prefix match of \"/foo\" and a + ReplacePrefixMatch of \"/xyz\" would be + modified to \"/xyz/bar\". \n Note that + this matches the behavior of the PathPrefix + match type. This matches full path elements. + A path element refers to the list of labels + in the path split by the `/` separator. + When specified, a trailing `/` is ignored. + For example, the paths `/abc`, `/abc/`, + and `/abc/def` would all match the prefix + `/abc`, but the path `/abcd` would not. 
+ \n ReplacePrefixMatch is only compatible + with a `PathPrefix` HTTPRouteMatch. Using + any other HTTPRouteMatch type on the same + HTTPRouteRule will result in the implementation + setting the Accepted Condition for the + Route to `status: False`. \n Request Path + | Prefix Match | Replace Prefix | Modified + Path -------------|--------------|----------------|---------- + /foo/bar | /foo | /xyz | + /xyz/bar /foo/bar | /foo | + /xyz/ | /xyz/bar /foo/bar | + /foo/ | /xyz | /xyz/bar + /foo/bar | /foo/ | /xyz/ | + /xyz/bar /foo | /foo | + /xyz | /xyz /foo/ | /foo + \ | /xyz | /xyz/ /foo/bar + \ | /foo | | + /bar /foo/ | /foo | | / /foo | /foo | + | / /foo/ | /foo + \ | / | / /foo | + /foo | / | /" + maxLength: 1024 + type: string + type: + description: "Type defines the type of path + modifier. Additional types may be added + in a future release of the API. \n Note + that values may be added to this enum, + implementations must ensure that unknown + values will not cause a crash. \n Unknown + values here must result in the implementation + setting the Accepted Condition for the + Route to `status: False`, with a Reason + of `UnsupportedValue`." + enum: + - ReplaceFullPath + - ReplacePrefixMatch + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: replaceFullPath must be specified + when type is set to 'ReplaceFullPath' + rule: 'self.type == ''ReplaceFullPath'' ? + has(self.replaceFullPath) : true' + - message: type must be 'ReplaceFullPath' when + replaceFullPath is set + rule: 'has(self.replaceFullPath) ? self.type + == ''ReplaceFullPath'' : true' + - message: replacePrefixMatch must be specified + when type is set to 'ReplacePrefixMatch' + rule: 'self.type == ''ReplacePrefixMatch'' + ? has(self.replacePrefixMatch) : true' + - message: type must be 'ReplacePrefixMatch' + when replacePrefixMatch is set + rule: 'has(self.replacePrefixMatch) ? 
self.type + == ''ReplacePrefixMatch'' : true' + type: object + required: + - type + type: object + x-kubernetes-validations: + - message: filter.requestHeaderModifier must be nil + if the filter.type is not RequestHeaderModifier + rule: '!(has(self.requestHeaderModifier) && self.type + != ''RequestHeaderModifier'')' + - message: filter.requestHeaderModifier must be specified + for RequestHeaderModifier filter.type + rule: '!(!has(self.requestHeaderModifier) && self.type + == ''RequestHeaderModifier'')' + - message: filter.responseHeaderModifier must be nil + if the filter.type is not ResponseHeaderModifier + rule: '!(has(self.responseHeaderModifier) && self.type + != ''ResponseHeaderModifier'')' + - message: filter.responseHeaderModifier must be specified + for ResponseHeaderModifier filter.type + rule: '!(!has(self.responseHeaderModifier) && self.type + == ''ResponseHeaderModifier'')' + - message: filter.requestMirror must be nil if the filter.type + is not RequestMirror + rule: '!(has(self.requestMirror) && self.type != ''RequestMirror'')' + - message: filter.requestMirror must be specified for + RequestMirror filter.type + rule: '!(!has(self.requestMirror) && self.type == + ''RequestMirror'')' + - message: filter.requestRedirect must be nil if the + filter.type is not RequestRedirect + rule: '!(has(self.requestRedirect) && self.type != + ''RequestRedirect'')' + - message: filter.requestRedirect must be specified + for RequestRedirect filter.type + rule: '!(!has(self.requestRedirect) && self.type == + ''RequestRedirect'')' + - message: filter.urlRewrite must be nil if the filter.type + is not URLRewrite + rule: '!(has(self.urlRewrite) && self.type != ''URLRewrite'')' + - message: filter.urlRewrite must be specified for URLRewrite + filter.type + rule: '!(!has(self.urlRewrite) && self.type == ''URLRewrite'')' + - message: filter.extensionRef must be nil if the filter.type + is not ExtensionRef + rule: '!(has(self.extensionRef) && self.type != ''ExtensionRef'')' + - message: filter.extensionRef must be specified for + ExtensionRef filter.type + rule: '!(!has(self.extensionRef) && self.type == ''ExtensionRef'')' + maxItems: 16 + type: array + x-kubernetes-validations: + - message: May specify either httpRouteFilterRequestRedirect + or httpRouteFilterRequestRewrite, but not both + rule: '!(self.exists(f, f.type == ''RequestRedirect'') + && self.exists(f, f.type == ''URLRewrite''))' + - message: May specify either httpRouteFilterRequestRedirect + or httpRouteFilterRequestRewrite, but not both + rule: '!(self.exists(f, f.type == ''RequestRedirect'') + && self.exists(f, f.type == ''URLRewrite''))' + - message: RequestHeaderModifier filter cannot be repeated + rule: self.filter(f, f.type == 'RequestHeaderModifier').size() + <= 1 + - message: ResponseHeaderModifier filter cannot be repeated + rule: self.filter(f, f.type == 'ResponseHeaderModifier').size() + <= 1 + - message: RequestRedirect filter cannot be repeated + rule: self.filter(f, f.type == 'RequestRedirect').size() + <= 1 + - message: URLRewrite filter cannot be repeated + rule: self.filter(f, f.type == 'URLRewrite').size() + <= 1 + group: + default: "" + description: Group is the group of the referent. For example, + "gateway.networking.k8s.io". When unspecified or empty + string, core API group is inferred. + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Service + description: "Kind is the Kubernetes resource kind of + the referent. 
For example \"Service\". \n Defaults to + \"Service\" when not specified. \n ExternalName services + can refer to CNAME DNS records that may live outside + of the cluster and as such are difficult to reason about + in terms of conformance. They also may not be safe to + forward to (see CVE-2021-25740 for more information). + Implementations SHOULD NOT support ExternalName Services. + \n Support: Core (Services with a type other than ExternalName) + \n Support: Implementation-specific (Services with type + ExternalName)" + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: Name is the name of the referent. + maxLength: 253 + minLength: 1 + type: string + namespace: + description: "Namespace is the namespace of the backend. + When unspecified, the local namespace is inferred. \n + Note that when a namespace different than the local + namespace is specified, a ReferenceGrant object is required + in the referent namespace to allow that namespace's + owner to accept the reference. See the ReferenceGrant + documentation for details. \n Support: Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: Port specifies the destination port number + to use for this resource. Port is required when the + referent is a Kubernetes Service. In this case, the + port number is the service port number, not the target + port. For other resources, destination port might be + derived from the referent resource or this field. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + weight: + default: 1 + description: "Weight specifies the proportion of requests + forwarded to the referenced backend. This is computed + as weight/(sum of all weights in this BackendRefs list). + For non-zero values, there may be some epsilon from + the exact proportion defined here depending on the precision + an implementation supports. Weight is not a percentage + and the sum of weights does not need to equal 100. \n + If only one backend is specified and it has a weight + greater than 0, 100% of the traffic is forwarded to + that backend. If weight is set to 0, no traffic should + be forwarded for this entry. If unspecified, weight + defaults to 1. \n Support for this field varies based + on the context where used." + format: int32 + maximum: 1000000 + minimum: 0 + type: integer + required: + - name + type: object + x-kubernetes-validations: + - message: Must have port for Service reference + rule: '(size(self.group) == 0 && self.kind == ''Service'') + ? has(self.port) : true' + maxItems: 16 + type: array + filters: + description: "Filters define the filters that are applied to + requests that match this rule. \n The effects of ordering + of multiple behaviors are currently unspecified. This can + change in the future based on feedback during the alpha stage. + \n Conformance-levels at this level are defined based on the + type of filter: \n - ALL core filters MUST be supported by + all implementations. - Implementers are encouraged to support + extended filters. - Implementation-specific custom filters + have no API guarantees across implementations. \n Specifying + the same filter multiple times is not supported unless explicitly + indicated in the filter. \n All filters are expected to be + compatible with each other except for the URLRewrite and RequestRedirect + filters, which may not be combined. 
If an implementation can + not support other combinations of filters, they must clearly + document that limitation. In cases where incompatible or unsupported + filters are specified and cause the `Accepted` condition to + be set to status `False`, implementations may use the `IncompatibleFilters` + reason to specify this configuration error. \n Support: Core" + items: + description: HTTPRouteFilter defines processing steps that + must be completed during the request or response lifecycle. + HTTPRouteFilters are meant as an extension point to express + processing that may be done in Gateway implementations. + Some examples include request or response modification, + implementing authentication strategies, rate-limiting, and + traffic shaping. API guarantee/conformance is defined based + on the type of the filter. + properties: + extensionRef: + description: "ExtensionRef is an optional, implementation-specific + extension to the \"filter\" behavior. For example, + resource \"myroutefilter\" in group \"networking.example.net\"). + ExtensionRef MUST NOT be used for core and extended + filters. \n This filter can be used multiple times within + the same rule. \n Support: Implementation-specific" + properties: + group: + description: Group is the group of the referent. For + example, "gateway.networking.k8s.io". When unspecified + or empty string, core API group is inferred. + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + description: Kind is kind of the referent. For example + "HTTPRoute" or "Service". + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: Name is the name of the referent. + maxLength: 253 + minLength: 1 + type: string + required: + - group + - kind + - name + type: object + requestHeaderModifier: + description: "RequestHeaderModifier defines a schema for + a filter that modifies request headers. \n Support: + Core" + properties: + add: + description: "Add adds the given header(s) (name, + value) to the request before the action. It appends + to any existing values associated with the header + name. \n Input: GET /foo HTTP/1.1 my-header: foo + \n Config: add: - name: \"my-header\" value: \"bar,baz\" + \n Output: GET /foo HTTP/1.1 my-header: foo,bar,baz" + items: + description: HTTPHeader represents an HTTP Header + name and value as defined by RFC 7230. + properties: + name: + description: "Name is the name of the HTTP Header + to be matched. Name matching MUST be case + insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an equivalent + name MUST be considered for a match. Subsequent + entries with an equivalent header name MUST + be ignored. Due to the case-insensitivity + of header names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP Header + to be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + remove: + description: "Remove the given header(s) from the + HTTP request before the action. The value of Remove + is a list of HTTP header names. 
Note that the header + names are case-insensitive (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + \n Input: GET /foo HTTP/1.1 my-header1: foo my-header2: + bar my-header3: baz \n Config: remove: [\"my-header1\", + \"my-header3\"] \n Output: GET /foo HTTP/1.1 my-header2: + bar" + items: + type: string + maxItems: 16 + type: array + x-kubernetes-list-type: set + set: + description: "Set overwrites the request with the + given header (name, value) before the action. \n + Input: GET /foo HTTP/1.1 my-header: foo \n Config: + set: - name: \"my-header\" value: \"bar\" \n Output: + GET /foo HTTP/1.1 my-header: bar" + items: + description: HTTPHeader represents an HTTP Header + name and value as defined by RFC 7230. + properties: + name: + description: "Name is the name of the HTTP Header + to be matched. Name matching MUST be case + insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an equivalent + name MUST be considered for a match. Subsequent + entries with an equivalent header name MUST + be ignored. Due to the case-insensitivity + of header names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP Header + to be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + requestMirror: + description: "RequestMirror defines a schema for a filter + that mirrors requests. Requests are sent to the specified + destination, but responses from that destination are + ignored. \n This filter can be used multiple times within + the same rule. Note that not all implementations will + be able to support mirroring to multiple backends. \n + Support: Extended" + properties: + backendRef: + description: "BackendRef references a resource where + mirrored requests are sent. \n Mirrored requests + must be sent only to a single destination endpoint + within this BackendRef, irrespective of how many + endpoints are present within this BackendRef. \n + If the referent cannot be found, this BackendRef + is invalid and must be dropped from the Gateway. + The controller must ensure the \"ResolvedRefs\" + condition on the Route status is set to `status: + False` and not configure this backend in the underlying + implementation. \n If there is a cross-namespace + reference to an *existing* object that is not allowed + by a ReferenceGrant, the controller must ensure + the \"ResolvedRefs\" condition on the Route is + set to `status: False`, with the \"RefNotPermitted\" + reason and not configure this backend in the underlying + implementation. \n In either error case, the Message + of the `ResolvedRefs` Condition should be used to + provide more detail about the problem. \n Support: + Extended for Kubernetes Service \n Support: Implementation-specific + for any other resource" + properties: + group: + default: "" + description: Group is the group of the referent. + For example, "gateway.networking.k8s.io". When + unspecified or empty string, core API group + is inferred. + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Service + description: "Kind is the Kubernetes resource + kind of the referent. For example \"Service\". 
+ \n Defaults to \"Service\" when not specified. + \n ExternalName services can refer to CNAME + DNS records that may live outside of the cluster + and as such are difficult to reason about in + terms of conformance. They also may not be safe + to forward to (see CVE-2021-25740 for more information). + Implementations SHOULD NOT support ExternalName + Services. \n Support: Core (Services with a + type other than ExternalName) \n Support: Implementation-specific + (Services with type ExternalName)" + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: Name is the name of the referent. + maxLength: 253 + minLength: 1 + type: string + namespace: + description: "Namespace is the namespace of the + backend. When unspecified, the local namespace + is inferred. \n Note that when a namespace different + than the local namespace is specified, a ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept the + reference. See the ReferenceGrant documentation + for details. \n Support: Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: Port specifies the destination port + number to use for this resource. Port is required + when the referent is a Kubernetes Service. In + this case, the port number is the service port + number, not the target port. For other resources, + destination port might be derived from the referent + resource or this field. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - name + type: object + x-kubernetes-validations: + - message: Must have port for Service reference + rule: '(size(self.group) == 0 && self.kind == ''Service'') + ? has(self.port) : true' + required: + - backendRef + type: object + requestRedirect: + description: "RequestRedirect defines a schema for a filter + that responds to the request with an HTTP redirection. + \n Support: Core" + properties: + hostname: + description: "Hostname is the hostname to be used + in the value of the `Location` header in the response. + When empty, the hostname in the `Host` header of + the request is used. \n Support: Core" + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + path: + description: "Path defines parameters used to modify + the path of the incoming request. The modified path + is then used to construct the `Location` header. + When empty, the request path is used as-is. \n Support: + Extended" + properties: + replaceFullPath: + description: ReplaceFullPath specifies the value + with which to replace the full path of a request + during a rewrite or redirect. + maxLength: 1024 + type: string + replacePrefixMatch: + description: "ReplacePrefixMatch specifies the + value with which to replace the prefix match + of a request during a rewrite or redirect. For + example, a request to \"/foo/bar\" with a prefix + match of \"/foo\" and a ReplacePrefixMatch of + \"/xyz\" would be modified to \"/xyz/bar\". + \n Note that this matches the behavior of the + PathPrefix match type. This matches full path + elements. A path element refers to the list + of labels in the path split by the `/` separator. + When specified, a trailing `/` is ignored. For + example, the paths `/abc`, `/abc/`, and `/abc/def` + would all match the prefix `/abc`, but the path + `/abcd` would not. \n ReplacePrefixMatch is + only compatible with a `PathPrefix` HTTPRouteMatch. 
+ Using any other HTTPRouteMatch type on the same + HTTPRouteRule will result in the implementation + setting the Accepted Condition for the Route + to `status: False`. \n Request Path | Prefix + Match | Replace Prefix | Modified Path -------------|--------------|----------------|---------- + /foo/bar | /foo | /xyz | + /xyz/bar /foo/bar | /foo | /xyz/ + \ | /xyz/bar /foo/bar | /foo/ | + /xyz | /xyz/bar /foo/bar | /foo/ + \ | /xyz/ | /xyz/bar /foo | + /foo | /xyz | /xyz /foo/ | + /foo | /xyz | /xyz/ /foo/bar + \ | /foo | | /bar + /foo/ | /foo | + | / /foo | /foo | + | / /foo/ | /foo | / | + / /foo | /foo | / | + /" + maxLength: 1024 + type: string + type: + description: "Type defines the type of path modifier. + Additional types may be added in a future release + of the API. \n Note that values may be added + to this enum, implementations must ensure that + unknown values will not cause a crash. \n Unknown + values here must result in the implementation + setting the Accepted Condition for the Route + to `status: False`, with a Reason of `UnsupportedValue`." + enum: + - ReplaceFullPath + - ReplacePrefixMatch + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: replaceFullPath must be specified when + type is set to 'ReplaceFullPath' + rule: 'self.type == ''ReplaceFullPath'' ? has(self.replaceFullPath) + : true' + - message: type must be 'ReplaceFullPath' when replaceFullPath + is set + rule: 'has(self.replaceFullPath) ? self.type == + ''ReplaceFullPath'' : true' + - message: replacePrefixMatch must be specified when + type is set to 'ReplacePrefixMatch' + rule: 'self.type == ''ReplacePrefixMatch'' ? has(self.replacePrefixMatch) + : true' + - message: type must be 'ReplacePrefixMatch' when + replacePrefixMatch is set + rule: 'has(self.replacePrefixMatch) ? self.type + == ''ReplacePrefixMatch'' : true' + port: + description: "Port is the port to be used in the value + of the `Location` header in the response. \n If + no port is specified, the redirect port MUST be + derived using the following rules: \n * If redirect + scheme is not-empty, the redirect port MUST be the + well-known port associated with the redirect scheme. + Specifically \"http\" to port 80 and \"https\" to + port 443. If the redirect scheme does not have a + well-known port, the listener port of the Gateway + SHOULD be used. * If redirect scheme is empty, the + redirect port MUST be the Gateway Listener port. + \n Implementations SHOULD NOT add the port number + in the 'Location' header in the following cases: + \n * A Location header that will use HTTP (whether + that is determined via the Listener protocol or + the Scheme field) _and_ use port 80. * A Location + header that will use HTTPS (whether that is determined + via the Listener protocol or the Scheme field) _and_ + use port 443. \n Support: Extended" + format: int32 + maximum: 65535 + minimum: 1 + type: integer + scheme: + description: "Scheme is the scheme to be used in the + value of the `Location` header in the response. + When empty, the scheme of the request is used. \n + Scheme redirects can affect the port of the redirect, + for more information, refer to the documentation + for the port field of this filter. \n Note that + values may be added to this enum, implementations + must ensure that unknown values will not cause a + crash. \n Unknown values here must result in the + implementation setting the Accepted Condition for + the Route to `status: False`, with a Reason of `UnsupportedValue`. 
+ \n Support: Extended" + enum: + - http + - https + type: string + statusCode: + default: 302 + description: "StatusCode is the HTTP status code to + be used in response. \n Note that values may be + added to this enum, implementations must ensure + that unknown values will not cause a crash. \n Unknown + values here must result in the implementation setting + the Accepted Condition for the Route to `status: + False`, with a Reason of `UnsupportedValue`. \n + Support: Core" + enum: + - 301 + - 302 + type: integer + type: object + responseHeaderModifier: + description: "ResponseHeaderModifier defines a schema + for a filter that modifies response headers. \n Support: + Extended" + properties: + add: + description: "Add adds the given header(s) (name, + value) to the request before the action. It appends + to any existing values associated with the header + name. \n Input: GET /foo HTTP/1.1 my-header: foo + \n Config: add: - name: \"my-header\" value: \"bar,baz\" + \n Output: GET /foo HTTP/1.1 my-header: foo,bar,baz" + items: + description: HTTPHeader represents an HTTP Header + name and value as defined by RFC 7230. + properties: + name: + description: "Name is the name of the HTTP Header + to be matched. Name matching MUST be case + insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an equivalent + name MUST be considered for a match. Subsequent + entries with an equivalent header name MUST + be ignored. Due to the case-insensitivity + of header names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP Header + to be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + remove: + description: "Remove the given header(s) from the + HTTP request before the action. The value of Remove + is a list of HTTP header names. Note that the header + names are case-insensitive (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + \n Input: GET /foo HTTP/1.1 my-header1: foo my-header2: + bar my-header3: baz \n Config: remove: [\"my-header1\", + \"my-header3\"] \n Output: GET /foo HTTP/1.1 my-header2: + bar" + items: + type: string + maxItems: 16 + type: array + x-kubernetes-list-type: set + set: + description: "Set overwrites the request with the + given header (name, value) before the action. \n + Input: GET /foo HTTP/1.1 my-header: foo \n Config: + set: - name: \"my-header\" value: \"bar\" \n Output: + GET /foo HTTP/1.1 my-header: bar" + items: + description: HTTPHeader represents an HTTP Header + name and value as defined by RFC 7230. + properties: + name: + description: "Name is the name of the HTTP Header + to be matched. Name matching MUST be case + insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an equivalent + name MUST be considered for a match. Subsequent + entries with an equivalent header name MUST + be ignored. Due to the case-insensitivity + of header names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP Header + to be matched. 
+ maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + type: + description: "Type identifies the type of filter to apply. + As with other API fields, types are classified into + three conformance levels: \n - Core: Filter types and + their corresponding configuration defined by \"Support: + Core\" in this package, e.g. \"RequestHeaderModifier\". + All implementations must support core filters. \n - + Extended: Filter types and their corresponding configuration + defined by \"Support: Extended\" in this package, e.g. + \"RequestMirror\". Implementers are encouraged to support + extended filters. \n - Implementation-specific: Filters + that are defined and supported by specific vendors. + In the future, filters showing convergence in behavior + across multiple implementations will be considered for + inclusion in extended or core conformance levels. Filter-specific + configuration for such filters is specified using the + ExtensionRef field. `Type` should be set to \"ExtensionRef\" + for custom filters. \n Implementers are encouraged to + define custom implementation types to extend the core + API with implementation-specific behavior. \n If a reference + to a custom filter type cannot be resolved, the filter + MUST NOT be skipped. Instead, requests that would have + been processed by that filter MUST receive a HTTP error + response. \n Note that values may be added to this enum, + implementations must ensure that unknown values will + not cause a crash. \n Unknown values here must result + in the implementation setting the Accepted Condition + for the Route to `status: False`, with a Reason of `UnsupportedValue`." + enum: + - RequestHeaderModifier + - ResponseHeaderModifier + - RequestMirror + - RequestRedirect + - URLRewrite + - ExtensionRef + type: string + urlRewrite: + description: "URLRewrite defines a schema for a filter + that modifies a request during forwarding. \n Support: + Extended" + properties: + hostname: + description: "Hostname is the value to be used to + replace the Host header value during forwarding. + \n Support: Extended" + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + path: + description: "Path defines a path rewrite. \n Support: + Extended" + properties: + replaceFullPath: + description: ReplaceFullPath specifies the value + with which to replace the full path of a request + during a rewrite or redirect. + maxLength: 1024 + type: string + replacePrefixMatch: + description: "ReplacePrefixMatch specifies the + value with which to replace the prefix match + of a request during a rewrite or redirect. For + example, a request to \"/foo/bar\" with a prefix + match of \"/foo\" and a ReplacePrefixMatch of + \"/xyz\" would be modified to \"/xyz/bar\". + \n Note that this matches the behavior of the + PathPrefix match type. This matches full path + elements. A path element refers to the list + of labels in the path split by the `/` separator. + When specified, a trailing `/` is ignored. For + example, the paths `/abc`, `/abc/`, and `/abc/def` + would all match the prefix `/abc`, but the path + `/abcd` would not. \n ReplacePrefixMatch is + only compatible with a `PathPrefix` HTTPRouteMatch. 
+ Using any other HTTPRouteMatch type on the same + HTTPRouteRule will result in the implementation + setting the Accepted Condition for the Route + to `status: False`. \n Request Path | Prefix + Match | Replace Prefix | Modified Path -------------|--------------|----------------|---------- + /foo/bar | /foo | /xyz | + /xyz/bar /foo/bar | /foo | /xyz/ + \ | /xyz/bar /foo/bar | /foo/ | + /xyz | /xyz/bar /foo/bar | /foo/ + \ | /xyz/ | /xyz/bar /foo | + /foo | /xyz | /xyz /foo/ | + /foo | /xyz | /xyz/ /foo/bar + \ | /foo | | /bar + /foo/ | /foo | + | / /foo | /foo | + | / /foo/ | /foo | / | + / /foo | /foo | / | + /" + maxLength: 1024 + type: string + type: + description: "Type defines the type of path modifier. + Additional types may be added in a future release + of the API. \n Note that values may be added + to this enum, implementations must ensure that + unknown values will not cause a crash. \n Unknown + values here must result in the implementation + setting the Accepted Condition for the Route + to `status: False`, with a Reason of `UnsupportedValue`." + enum: + - ReplaceFullPath + - ReplacePrefixMatch + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: replaceFullPath must be specified when + type is set to 'ReplaceFullPath' + rule: 'self.type == ''ReplaceFullPath'' ? has(self.replaceFullPath) + : true' + - message: type must be 'ReplaceFullPath' when replaceFullPath + is set + rule: 'has(self.replaceFullPath) ? self.type == + ''ReplaceFullPath'' : true' + - message: replacePrefixMatch must be specified when + type is set to 'ReplacePrefixMatch' + rule: 'self.type == ''ReplacePrefixMatch'' ? has(self.replacePrefixMatch) + : true' + - message: type must be 'ReplacePrefixMatch' when + replacePrefixMatch is set + rule: 'has(self.replacePrefixMatch) ? 
self.type + == ''ReplacePrefixMatch'' : true' + type: object + required: + - type + type: object + x-kubernetes-validations: + - message: filter.requestHeaderModifier must be nil if the + filter.type is not RequestHeaderModifier + rule: '!(has(self.requestHeaderModifier) && self.type != + ''RequestHeaderModifier'')' + - message: filter.requestHeaderModifier must be specified + for RequestHeaderModifier filter.type + rule: '!(!has(self.requestHeaderModifier) && self.type == + ''RequestHeaderModifier'')' + - message: filter.responseHeaderModifier must be nil if the + filter.type is not ResponseHeaderModifier + rule: '!(has(self.responseHeaderModifier) && self.type != + ''ResponseHeaderModifier'')' + - message: filter.responseHeaderModifier must be specified + for ResponseHeaderModifier filter.type + rule: '!(!has(self.responseHeaderModifier) && self.type + == ''ResponseHeaderModifier'')' + - message: filter.requestMirror must be nil if the filter.type + is not RequestMirror + rule: '!(has(self.requestMirror) && self.type != ''RequestMirror'')' + - message: filter.requestMirror must be specified for RequestMirror + filter.type + rule: '!(!has(self.requestMirror) && self.type == ''RequestMirror'')' + - message: filter.requestRedirect must be nil if the filter.type + is not RequestRedirect + rule: '!(has(self.requestRedirect) && self.type != ''RequestRedirect'')' + - message: filter.requestRedirect must be specified for RequestRedirect + filter.type + rule: '!(!has(self.requestRedirect) && self.type == ''RequestRedirect'')' + - message: filter.urlRewrite must be nil if the filter.type + is not URLRewrite + rule: '!(has(self.urlRewrite) && self.type != ''URLRewrite'')' + - message: filter.urlRewrite must be specified for URLRewrite + filter.type + rule: '!(!has(self.urlRewrite) && self.type == ''URLRewrite'')' + - message: filter.extensionRef must be nil if the filter.type + is not ExtensionRef + rule: '!(has(self.extensionRef) && self.type != ''ExtensionRef'')' + - message: filter.extensionRef must be specified for ExtensionRef + filter.type + rule: '!(!has(self.extensionRef) && self.type == ''ExtensionRef'')' + maxItems: 16 + type: array + x-kubernetes-validations: + - message: May specify either httpRouteFilterRequestRedirect + or httpRouteFilterRequestRewrite, but not both + rule: '!(self.exists(f, f.type == ''RequestRedirect'') && + self.exists(f, f.type == ''URLRewrite''))' + - message: RequestHeaderModifier filter cannot be repeated + rule: self.filter(f, f.type == 'RequestHeaderModifier').size() + <= 1 + - message: ResponseHeaderModifier filter cannot be repeated + rule: self.filter(f, f.type == 'ResponseHeaderModifier').size() + <= 1 + - message: RequestRedirect filter cannot be repeated + rule: self.filter(f, f.type == 'RequestRedirect').size() <= + 1 + - message: URLRewrite filter cannot be repeated + rule: self.filter(f, f.type == 'URLRewrite').size() <= 1 + matches: + default: + - path: + type: PathPrefix + value: / + description: "Matches define conditions used for matching the + rule against incoming HTTP requests. Each match is independent, + i.e. this rule will be matched if **any** one of the matches + is satisfied. 
\n For example, take the following matches configuration: + \n ``` matches: - path: value: \"/foo\" headers: - name: \"version\" + value: \"v2\" - path: value: \"/v2/foo\" ``` \n For a request + to match against this rule, a request must satisfy EITHER + of the two conditions: \n - path prefixed with `/foo` AND + contains the header `version: v2` - path prefix of `/v2/foo` + \n See the documentation for HTTPRouteMatch on how to specify + multiple match conditions that should be ANDed together. \n + If no matches are specified, the default is a prefix path + match on \"/\", which has the effect of matching every HTTP + request. \n Proxy or Load Balancer routing configuration generated + from HTTPRoutes MUST prioritize matches based on the following + criteria, continuing on ties. Across all rules specified on + applicable Routes, precedence must be given to the match having: + \n * \"Exact\" path match. * \"Prefix\" path match with largest + number of characters. * Method match. * Largest number of + header matches. * Largest number of query param matches. \n + Note: The precedence of RegularExpression path matches are + implementation-specific. \n If ties still exist across multiple + Routes, matching precedence MUST be determined in order of + the following criteria, continuing on ties: \n * The oldest + Route based on creation timestamp. * The Route appearing first + in alphabetical order by \"{namespace}/{name}\". \n If ties + still exist within an HTTPRoute, matching precedence MUST + be granted to the FIRST matching rule (in list order) with + a match meeting the above criteria. \n When no rules matching + a request have been successfully attached to the parent a + request is coming from, a HTTP 404 status code MUST be returned." + items: + description: "HTTPRouteMatch defines the predicate used to + match requests to a given action. Multiple match types are + ANDed together, i.e. the match will evaluate to true only + if all conditions are satisfied. \n For example, the match + below will match a HTTP request only if its path starts + with `/foo` AND it contains the `version: v1` header: \n + ``` match: \n path: value: \"/foo\" headers: - name: \"version\" + value \"v1\" \n ```" + properties: + headers: + description: Headers specifies HTTP request header matchers. + Multiple match values are ANDed together, meaning, a + request must match all the specified headers to select + the route. + items: + description: HTTPHeaderMatch describes how to select + a HTTP route by matching HTTP request headers. + properties: + name: + description: "Name is the name of the HTTP Header + to be matched. Name matching MUST be case insensitive. + (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent header + names, only the first entry with an equivalent + name MUST be considered for a match. Subsequent + entries with an equivalent header name MUST be + ignored. Due to the case-insensitivity of header + names, \"foo\" and \"Foo\" are considered equivalent. + \n When a header is repeated in an HTTP request, + it is implementation-specific behavior as to how + this is represented. Generally, proxies should + follow the guidance from the RFC: https://www.rfc-editor.org/rfc/rfc7230.html#section-3.2.2 + regarding processing a repeated header, with special + handling for \"Set-Cookie\"." 
+ maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + type: + default: Exact + description: "Type specifies how to match against + the value of the header. \n Support: Core (Exact) + \n Support: Implementation-specific (RegularExpression) + \n Since RegularExpression HeaderMatchType has + implementation-specific conformance, implementations + can support POSIX, PCRE or any other dialects + of regular expressions. Please read the implementation's + documentation to determine the supported dialect." + enum: + - Exact + - RegularExpression + type: string + value: + description: Value is the value of HTTP Header to + be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + method: + description: "Method specifies HTTP method matcher. When + specified, this route will be matched only if the request + has the specified method. \n Support: Extended" + enum: + - GET + - HEAD + - POST + - PUT + - DELETE + - CONNECT + - OPTIONS + - TRACE + - PATCH + type: string + path: + default: + type: PathPrefix + value: / + description: Path specifies a HTTP request path matcher. + If this field is not specified, a default prefix match + on the "/" path is provided. + properties: + type: + default: PathPrefix + description: "Type specifies how to match against + the path Value. \n Support: Core (Exact, PathPrefix) + \n Support: Implementation-specific (RegularExpression)" + enum: + - Exact + - PathPrefix + - RegularExpression + type: string + value: + default: / + description: Value of the HTTP path to match against. + maxLength: 1024 + type: string + type: object + x-kubernetes-validations: + - message: value must be an absolute path and start with + '/' when type one of ['Exact', 'PathPrefix'] + rule: '(self.type in [''Exact'',''PathPrefix'']) ? self.value.startsWith(''/'') + : true' + - message: must not contain '//' when type one of ['Exact', + 'PathPrefix'] + rule: '(self.type in [''Exact'',''PathPrefix'']) ? !self.value.contains(''//'') + : true' + - message: must not contain '/./' when type one of ['Exact', + 'PathPrefix'] + rule: '(self.type in [''Exact'',''PathPrefix'']) ? !self.value.contains(''/./'') + : true' + - message: must not contain '/../' when type one of ['Exact', + 'PathPrefix'] + rule: '(self.type in [''Exact'',''PathPrefix'']) ? !self.value.contains(''/../'') + : true' + - message: must not contain '%2f' when type one of ['Exact', + 'PathPrefix'] + rule: '(self.type in [''Exact'',''PathPrefix'']) ? !self.value.contains(''%2f'') + : true' + - message: must not contain '%2F' when type one of ['Exact', + 'PathPrefix'] + rule: '(self.type in [''Exact'',''PathPrefix'']) ? !self.value.contains(''%2F'') + : true' + - message: must not contain '#' when type one of ['Exact', + 'PathPrefix'] + rule: '(self.type in [''Exact'',''PathPrefix'']) ? !self.value.contains(''#'') + : true' + - message: must not end with '/..' when type one of ['Exact', + 'PathPrefix'] + rule: '(self.type in [''Exact'',''PathPrefix'']) ? !self.value.endsWith(''/..'') + : true' + - message: must not end with '/.' when type one of ['Exact', + 'PathPrefix'] + rule: '(self.type in [''Exact'',''PathPrefix'']) ? 
!self.value.endsWith(''/.'') + : true' + - message: type must be one of ['Exact', 'PathPrefix', + 'RegularExpression'] + rule: self.type in ['Exact','PathPrefix'] || self.type + == 'RegularExpression' + - message: must only contain valid characters (matching + ^(?:[-A-Za-z0-9/._~!$&'()*+,;=:@]|[%][0-9a-fA-F]{2})+$) + for types ['Exact', 'PathPrefix'] + rule: '(self.type in [''Exact'',''PathPrefix'']) ? self.value.matches(r"""^(?:[-A-Za-z0-9/._~!$&''()*+,;=:@]|[%][0-9a-fA-F]{2})+$""") + : true' + queryParams: + description: "QueryParams specifies HTTP query parameter + matchers. Multiple match values are ANDed together, + meaning, a request must match all the specified query + parameters to select the route. \n Support: Extended" + items: + description: HTTPQueryParamMatch describes how to select + a HTTP route by matching HTTP query parameters. + properties: + name: + description: "Name is the name of the HTTP query + param to be matched. This must be an exact string + match. (See https://tools.ietf.org/html/rfc7230#section-2.7.3). + \n If multiple entries specify equivalent query + param names, only the first entry with an equivalent + name MUST be considered for a match. Subsequent + entries with an equivalent query param name MUST + be ignored. \n If a query param is repeated in + an HTTP request, the behavior is purposely left + undefined, since different data planes have different + capabilities. However, it is *recommended* that + implementations should match against the first + value of the param if the data plane supports + it, as this behavior is expected in other load + balancing contexts outside of the Gateway API. + \n Users SHOULD NOT route traffic based on repeated + query params to guard themselves against potential + differences in the implementations." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + type: + default: Exact + description: "Type specifies how to match against + the value of the query parameter. \n Support: + Extended (Exact) \n Support: Implementation-specific + (RegularExpression) \n Since RegularExpression + QueryParamMatchType has Implementation-specific + conformance, implementations can support POSIX, + PCRE or any other dialects of regular expressions. + Please read the implementation's documentation + to determine the supported dialect." + enum: + - Exact + - RegularExpression + type: string + value: + description: Value is the value of HTTP query param + to be matched. + maxLength: 1024 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + maxItems: 8 + type: array + timeouts: + description: "Timeouts defines the timeouts that can be configured + for an HTTP request. \n Support: Extended \n " + properties: + backendRequest: + description: "BackendRequest specifies a timeout for an + individual request from the gateway to a backend. This + covers the time from when the request first starts being + sent from the gateway to when the full response has been + received from the backend. \n An entire client HTTP transaction + with a gateway, covered by the Request timeout, may result + in more than one call from the gateway to the destination + backend, for example, if automatic retries are supported. + \n Because the Request timeout encompasses the BackendRequest + timeout, the value of BackendRequest must be <= the value + of Request timeout. 
\n Support: Extended" + pattern: ^([0-9]{1,5}(h|m|s|ms)){1,4}$ + type: string + request: + description: "Request specifies the maximum duration for + a gateway to respond to an HTTP request. If the gateway + has not been able to respond before this deadline is met, + the gateway MUST return a timeout error. \n For example, + setting the `rules.timeouts.request` field to the value + `10s` in an `HTTPRoute` will cause a timeout if a client + request is taking longer than 10 seconds to complete. + \n This timeout is intended to cover as close to the whole + request-response transaction as possible although an implementation + MAY choose to start the timeout after the entire request + stream has been received instead of immediately after + the transaction is initiated by the client. \n When this + field is unspecified, request timeout behavior is implementation-specific. + \n Support: Extended" + pattern: ^([0-9]{1,5}(h|m|s|ms)){1,4}$ + type: string + type: object + x-kubernetes-validations: + - message: backendRequest timeout cannot be longer than request + timeout + rule: '!(has(self.request) && has(self.backendRequest) && + duration(self.request) != duration(''0s'') && duration(self.backendRequest) + > duration(self.request))' + type: object + x-kubernetes-validations: + - message: RequestRedirect filter must not be used together with + backendRefs + rule: '(has(self.backendRefs) && size(self.backendRefs) > 0) ? + (!has(self.filters) || self.filters.all(f, !has(f.requestRedirect))): + true' + - message: When using RequestRedirect filter with path.replacePrefixMatch, + exactly one PathPrefix match must be specified + rule: '(has(self.filters) && self.filters.exists_one(f, has(f.requestRedirect) + && has(f.requestRedirect.path) && f.requestRedirect.path.type + == ''ReplacePrefixMatch'' && has(f.requestRedirect.path.replacePrefixMatch))) + ? ((size(self.matches) != 1 || !has(self.matches[0].path) || + self.matches[0].path.type != ''PathPrefix'') ? false : true) + : true' + - message: When using URLRewrite filter with path.replacePrefixMatch, + exactly one PathPrefix match must be specified + rule: '(has(self.filters) && self.filters.exists_one(f, has(f.urlRewrite) + && has(f.urlRewrite.path) && f.urlRewrite.path.type == ''ReplacePrefixMatch'' + && has(f.urlRewrite.path.replacePrefixMatch))) ? ((size(self.matches) + != 1 || !has(self.matches[0].path) || self.matches[0].path.type + != ''PathPrefix'') ? false : true) : true' + - message: Within backendRefs, when using RequestRedirect filter + with path.replacePrefixMatch, exactly one PathPrefix match must + be specified + rule: '(has(self.backendRefs) && self.backendRefs.exists_one(b, + (has(b.filters) && b.filters.exists_one(f, has(f.requestRedirect) + && has(f.requestRedirect.path) && f.requestRedirect.path.type + == ''ReplacePrefixMatch'' && has(f.requestRedirect.path.replacePrefixMatch))) + )) ? ((size(self.matches) != 1 || !has(self.matches[0].path) + || self.matches[0].path.type != ''PathPrefix'') ? false : true) + : true' + - message: Within backendRefs, When using URLRewrite filter with + path.replacePrefixMatch, exactly one PathPrefix match must be + specified + rule: '(has(self.backendRefs) && self.backendRefs.exists_one(b, + (has(b.filters) && b.filters.exists_one(f, has(f.urlRewrite) + && has(f.urlRewrite.path) && f.urlRewrite.path.type == ''ReplacePrefixMatch'' + && has(f.urlRewrite.path.replacePrefixMatch))) )) ? ((size(self.matches) + != 1 || !has(self.matches[0].path) || self.matches[0].path.type + != ''PathPrefix'') ? 
false : true) : true' + maxItems: 16 + type: array + type: object + status: + description: Status defines the current state of HTTPRoute. + properties: + parents: + description: "Parents is a list of parent resources (usually Gateways) + that are associated with the route, and the status of the route + with respect to each parent. When this route attaches to a parent, + the controller that manages the parent must add an entry to this + list when the controller first sees the route and should update + the entry as appropriate when the route or gateway is modified. + \n Note that parent references that cannot be resolved by an implementation + of this API will not be added to this list. Implementations of this + API can only populate Route status for the Gateways/parent resources + they are responsible for. \n A maximum of 32 Gateways will be represented + in this list. An empty list means the route has not been attached + to any Gateway." + items: + description: RouteParentStatus describes the status of a route with + respect to an associated Parent. + properties: + conditions: + description: "Conditions describes the status of the route with + respect to the Gateway. Note that the route's availability + is also subject to the Gateway's own status conditions and + listener status. \n If the Route's ParentRef specifies an + existing Gateway that supports Routes of this kind AND that + Gateway's controller has sufficient access, then that Gateway's + controller MUST set the \"Accepted\" condition on the Route, + to indicate whether the route has been accepted or rejected + by the Gateway, and why. \n A Route MUST be considered \"Accepted\" + if at least one of the Route's rules is implemented by the + Gateway. \n There are a number of cases where the \"Accepted\" + condition may not be set due to lack of controller visibility, + that includes when: \n * The Route refers to a non-existent + parent. * The Route is of a type that the controller does + not support. * The Route is in a namespace the controller + does not have access to." + items: + description: "Condition contains details for one aspect of + the current state of this API Resource. --- This struct + is intended for direct use as an array at the field path + .status.conditions. For example, \n type FooStatus struct{ + // Represents the observations of a foo's current state. + // Known .status.conditions.type are: \"Available\", \"Progressing\", + and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields + }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should + be when the underlying condition changed. If that is + not known, then using the time when the API field changed + is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, + if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the + current state of the instance. 
+ format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier + indicating the reason for the condition's last transition. + Producers of specific condition types may define expected + values and meanings for this field, and whether the + values are considered a guaranteed API. The value should + be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across + resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability + to deconflict is important. The regex it matches is + (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + controllerName: + description: "ControllerName is a domain/path string that indicates + the name of the controller that wrote this status. This corresponds + with the controllerName field on GatewayClass. \n Example: + \"example.net/gateway-controller\". \n The format of this + field is DOMAIN \"/\" PATH, where DOMAIN and PATH are valid + Kubernetes names (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + \n Controllers MUST populate this field when writing status. + Controllers should ensure that entries to status populated + with their ControllerName are cleaned up when they are no + longer necessary." + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9\/\-._~%!$&'()*+,;=:]+$ + type: string + parentRef: + description: ParentRef corresponds with a ParentRef in the spec + that this RouteParentStatus struct describes the status of. + properties: + group: + default: gateway.networking.k8s.io + description: "Group is the group of the referent. When unspecified, + \"gateway.networking.k8s.io\" is inferred. To set the + core API group (such as for a \"Service\" kind referent), + Group must be explicitly set to \"\" (empty string). \n + Support: Core" + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Gateway + description: "Kind is kind of the referent. \n There are + two kinds of parent resources with \"Core\" support: \n + * Gateway (Gateway conformance profile) * Service (Mesh + conformance profile, experimental, ClusterIP Services + only) \n Support for other resources is Implementation-Specific." + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: "Name is the name of the referent. \n Support: + Core" + maxLength: 253 + minLength: 1 + type: string + namespace: + description: "Namespace is the namespace of the referent. + When unspecified, this refers to the local namespace of + the Route. \n Note that there are specific rules for ParentRefs + which cross namespace boundaries. 
Cross-namespace references + are only valid if they are explicitly allowed by something + in the namespace they are referring to. For example: Gateway + has the AllowedRoutes field, and ReferenceGrant provides + a generic way to enable any other kind of cross-namespace + reference. \n ParentRefs from a Route to a Service in + the same namespace are \"producer\" routes, which apply + default routing rules to inbound connections from any + namespace to the Service. \n ParentRefs from a Route to + a Service in a different namespace are \"consumer\" routes, + and these routing rules are only applied to outbound connections + originating from the same namespace as the Route, for + which the intended destination of the connections are + a Service targeted as a ParentRef of the Route. \n Support: + Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: "Port is the network port this Route targets. + It can be interpreted differently based on the type of + parent resource. \n When the parent resource is a Gateway, + this targets all listeners listening on the specified + port that also support this kind of Route(and select this + Route). It's not recommended to set `Port` unless the + networking behaviors specified in a Route must apply to + a specific port as opposed to a listener(s) whose port(s) + may be changed. When both Port and SectionName are specified, + the name and port of the selected listener must match + both specified values. \n When the parent resource is + a Service, this targets a specific port in the Service + spec. When both Port (experimental) and SectionName are + specified, the name and port of the selected port must + match both specified values. \n Implementations MAY choose + to support other parent resources. Implementations supporting + other types of parent resources MUST clearly document + how/if Port is interpreted. \n For the purpose of status, + an attachment is considered successful as long as the + parent resource accepts it partially. For example, Gateway + listeners can restrict which Routes can attach to them + by Route kind, namespace, or hostname. If 1 of 2 Gateway + listeners accept attachment from the referencing Route, + the Route MUST be considered successfully attached. If + no Gateway listeners accept attachment from this Route, + the Route MUST be considered detached from the Gateway. + \n Support: Extended \n " + format: int32 + maximum: 65535 + minimum: 1 + type: integer + sectionName: + description: "SectionName is the name of a section within + the target resource. In the following resources, SectionName + is interpreted as the following: \n * Gateway: Listener + Name. When both Port (experimental) and SectionName are + specified, the name and port of the selected listener + must match both specified values. * Service: Port Name. + When both Port (experimental) and SectionName are specified, + the name and port of the selected listener must match + both specified values. Note that attaching Routes to Services + as Parents is part of experimental Mesh support and is + not supported for any other purpose. \n Implementations + MAY choose to support attaching Routes to other resources. + If that is the case, they MUST clearly document how SectionName + is interpreted. \n When unspecified (empty string), this + will reference the entire resource. For the purpose of + status, an attachment is considered successful if at least + one section in the parent resource accepts it. 
For example, + Gateway listeners can restrict which Routes can attach + to them by Route kind, namespace, or hostname. If 1 of + 2 Gateway listeners accept attachment from the referencing + Route, the Route MUST be considered successfully attached. + If no Gateway listeners accept attachment from this Route, + the Route MUST be considered detached from the Gateway. + \n Support: Core" + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - name + type: object + required: + - controllerName + - parentRef + type: object + maxItems: 32 + type: array + required: + - parents + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.hostnames + name: Hostnames + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: HTTPRoute provides a way to route HTTP requests. This includes + the capability to match requests by hostname, path, header, or query param. + Filters can be used to specify additional processing steps. Backends specify + where matching requests should be routed. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the desired state of HTTPRoute. + properties: + hostnames: + description: "Hostnames defines a set of hostnames that should match + against the HTTP Host header to select a HTTPRoute used to process + the request. Implementations MUST ignore any port value specified + in the HTTP Host header while performing a match and (absent of + any applicable header modification configuration) MUST forward this + header unmodified to the backend. \n Valid values for Hostnames + are determined by RFC 1123 definition of a hostname with 2 notable + exceptions: \n 1. IPs are not allowed. 2. A hostname may be prefixed + with a wildcard label (`*.`). The wildcard label must appear by + itself as the first label. \n If a hostname is specified by both + the Listener and HTTPRoute, there must be at least one intersecting + hostname for the HTTPRoute to be attached to the Listener. For example: + \n * A Listener with `test.example.com` as the hostname matches + HTTPRoutes that have either not specified any hostnames, or have + specified at least one of `test.example.com` or `*.example.com`. + * A Listener with `*.example.com` as the hostname matches HTTPRoutes + that have either not specified any hostnames or have specified at + least one hostname that matches the Listener hostname. For example, + `*.example.com`, `test.example.com`, and `foo.test.example.com` + would all match. On the other hand, `example.com` and `test.example.net` + would not match. 
\n Hostnames that are prefixed with a wildcard + label (`*.`) are interpreted as a suffix match. That means that + a match for `*.example.com` would match both `test.example.com`, + and `foo.test.example.com`, but not `example.com`. \n If both the + Listener and HTTPRoute have specified hostnames, any HTTPRoute hostnames + that do not match the Listener hostname MUST be ignored. For example, + if a Listener specified `*.example.com`, and the HTTPRoute specified + `test.example.com` and `test.example.net`, `test.example.net` must + not be considered for a match. \n If both the Listener and HTTPRoute + have specified hostnames, and none match with the criteria above, + then the HTTPRoute is not accepted. The implementation must raise + an 'Accepted' Condition with a status of `False` in the corresponding + RouteParentStatus. \n In the event that multiple HTTPRoutes specify + intersecting hostnames (e.g. overlapping wildcard matching and exact + matching hostnames), precedence must be given to rules from the + HTTPRoute with the largest number of: \n * Characters in a matching + non-wildcard hostname. * Characters in a matching hostname. \n If + ties exist across multiple Routes, the matching precedence rules + for HTTPRouteMatches takes over. \n Support: Core" + items: + description: "Hostname is the fully qualified domain name of a network + host. This matches the RFC 1123 definition of a hostname with + 2 notable exceptions: \n 1. IPs are not allowed. 2. A hostname + may be prefixed with a wildcard label (`*.`). The wildcard label + must appear by itself as the first label. \n Hostname can be \"precise\" + which is a domain name without the terminating dot of a network + host (e.g. \"foo.example.com\") or \"wildcard\", which is a domain + name prefixed with a single wildcard label (e.g. `*.example.com`). + \n Note that as per RFC1035 and RFC1123, a *label* must consist + of lower case alphanumeric characters or '-', and must start and + end with an alphanumeric character. No other punctuation is allowed." + maxLength: 253 + minLength: 1 + pattern: ^(\*\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + maxItems: 16 + type: array + parentRefs: + description: "ParentRefs references the resources (usually Gateways) + that a Route wants to be attached to. Note that the referenced parent + resource needs to allow this for the attachment to be complete. + For Gateways, that means the Gateway needs to allow attachment from + Routes of this kind and namespace. For Services, that means the + Service must either be in the same namespace for a \"producer\" + route, or the mesh implementation must support and allow \"consumer\" + routes for the referenced Service. ReferenceGrant is not applicable + for governing ParentRefs to Services - it is not possible to create + a \"producer\" route for a Service in a different namespace from + the Route. \n There are two kinds of parent resources with \"Core\" + support: \n * Gateway (Gateway conformance profile) * Service (Mesh + conformance profile, experimental, ClusterIP Services only) This + API may be extended in the future to support additional kinds of + parent resources. \n ParentRefs must be _distinct_. This means either + that: \n * They select different objects. If this is the case, + then parentRef entries are distinct. In terms of fields, this means + that the multi-part key defined by `group`, `kind`, `namespace`, + and `name` must be unique across all parentRef entries in the Route. 
+ * They do not select different objects, but for each optional field + used, each ParentRef that selects the same object must set the same + set of optional fields to different values. If one ParentRef sets + a combination of optional fields, all must set the same combination. + \n Some examples: \n * If one ParentRef sets `sectionName`, all + ParentRefs referencing the same object must also set `sectionName`. + * If one ParentRef sets `port`, all ParentRefs referencing the same + object must also set `port`. * If one ParentRef sets `sectionName` + and `port`, all ParentRefs referencing the same object must also + set `sectionName` and `port`. \n It is possible to separately reference + multiple distinct objects that may be collapsed by an implementation. + For example, some implementations may choose to merge compatible + Gateway Listeners together. If that is the case, the list of routes + attached to those resources should also be merged. \n Note that + for ParentRefs that cross namespace boundaries, there are specific + rules. Cross-namespace references are only valid if they are explicitly + allowed by something in the namespace they are referring to. For + example, Gateway has the AllowedRoutes field, and ReferenceGrant + provides a generic way to enable other kinds of cross-namespace + reference. \n ParentRefs from a Route to a Service in the same + namespace are \"producer\" routes, which apply default routing rules + to inbound connections from any namespace to the Service. \n ParentRefs + from a Route to a Service in a different namespace are \"consumer\" + routes, and these routing rules are only applied to outbound connections + originating from the same namespace as the Route, for which the + intended destination of the connections are a Service targeted as + a ParentRef of the Route. \n " + items: + description: "ParentReference identifies an API object (usually + a Gateway) that can be considered a parent of this resource (usually + a route). There are two kinds of parent resources with \"Core\" + support: \n * Gateway (Gateway conformance profile) * Service + (Mesh conformance profile, experimental, ClusterIP Services only) + \n This API may be extended in the future to support additional + kinds of parent resources. \n The API object must be valid in + the cluster; the Group and Kind must be registered in the cluster + for this reference to be valid." + properties: + group: + default: gateway.networking.k8s.io + description: "Group is the group of the referent. When unspecified, + \"gateway.networking.k8s.io\" is inferred. To set the core + API group (such as for a \"Service\" kind referent), Group + must be explicitly set to \"\" (empty string). \n Support: + Core" + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Gateway + description: "Kind is kind of the referent. \n There are two + kinds of parent resources with \"Core\" support: \n * Gateway + (Gateway conformance profile) * Service (Mesh conformance + profile, experimental, ClusterIP Services only) \n Support + for other resources is Implementation-Specific." + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: "Name is the name of the referent. \n Support: + Core" + maxLength: 253 + minLength: 1 + type: string + namespace: + description: "Namespace is the namespace of the referent. When + unspecified, this refers to the local namespace of the Route. 
+ \n Note that there are specific rules for ParentRefs which + cross namespace boundaries. Cross-namespace references are + only valid if they are explicitly allowed by something in + the namespace they are referring to. For example: Gateway + has the AllowedRoutes field, and ReferenceGrant provides a + generic way to enable any other kind of cross-namespace reference. + \n ParentRefs from a Route to a Service in the same namespace + are \"producer\" routes, which apply default routing rules + to inbound connections from any namespace to the Service. + \n ParentRefs from a Route to a Service in a different namespace + are \"consumer\" routes, and these routing rules are only + applied to outbound connections originating from the same + namespace as the Route, for which the intended destination + of the connections are a Service targeted as a ParentRef of + the Route. \n Support: Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: "Port is the network port this Route targets. It + can be interpreted differently based on the type of parent + resource. \n When the parent resource is a Gateway, this targets + all listeners listening on the specified port that also support + this kind of Route(and select this Route). It's not recommended + to set `Port` unless the networking behaviors specified in + a Route must apply to a specific port as opposed to a listener(s) + whose port(s) may be changed. When both Port and SectionName + are specified, the name and port of the selected listener + must match both specified values. \n When the parent resource + is a Service, this targets a specific port in the Service + spec. When both Port (experimental) and SectionName are specified, + the name and port of the selected port must match both specified + values. \n Implementations MAY choose to support other parent + resources. Implementations supporting other types of parent + resources MUST clearly document how/if Port is interpreted. + \n For the purpose of status, an attachment is considered + successful as long as the parent resource accepts it partially. + For example, Gateway listeners can restrict which Routes can + attach to them by Route kind, namespace, or hostname. If 1 + of 2 Gateway listeners accept attachment from the referencing + Route, the Route MUST be considered successfully attached. + If no Gateway listeners accept attachment from this Route, + the Route MUST be considered detached from the Gateway. \n + Support: Extended \n " + format: int32 + maximum: 65535 + minimum: 1 + type: integer + sectionName: + description: "SectionName is the name of a section within the + target resource. In the following resources, SectionName is + interpreted as the following: \n * Gateway: Listener Name. + When both Port (experimental) and SectionName are specified, + the name and port of the selected listener must match both + specified values. * Service: Port Name. When both Port (experimental) + and SectionName are specified, the name and port of the selected + listener must match both specified values. Note that attaching + Routes to Services as Parents is part of experimental Mesh + support and is not supported for any other purpose. \n Implementations + MAY choose to support attaching Routes to other resources. + If that is the case, they MUST clearly document how SectionName + is interpreted. \n When unspecified (empty string), this will + reference the entire resource. 
For the purpose of status, + an attachment is considered successful if at least one section + in the parent resource accepts it. For example, Gateway listeners + can restrict which Routes can attach to them by Route kind, + namespace, or hostname. If 1 of 2 Gateway listeners accept + attachment from the referencing Route, the Route MUST be considered + successfully attached. If no Gateway listeners accept attachment + from this Route, the Route MUST be considered detached from + the Gateway. \n Support: Core" + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - name + type: object + maxItems: 32 + type: array + x-kubernetes-validations: + - message: sectionName or port must be specified when parentRefs includes + 2 or more references to the same parent + rule: 'self.all(p1, self.all(p2, p1.group == p2.group && p1.kind + == p2.kind && p1.name == p2.name && (((!has(p1.__namespace__) + || p1.__namespace__ == '''') && (!has(p2.__namespace__) || p2.__namespace__ + == '''')) || (has(p1.__namespace__) && has(p2.__namespace__) && + p1.__namespace__ == p2.__namespace__)) ? ((!has(p1.sectionName) + || p1.sectionName == '''') == (!has(p2.sectionName) || p2.sectionName + == '''') && (!has(p1.port) || p1.port == 0) == (!has(p2.port) + || p2.port == 0)): true))' + - message: sectionName or port must be unique when parentRefs includes + 2 or more references to the same parent + rule: self.all(p1, self.exists_one(p2, p1.group == p2.group && p1.kind + == p2.kind && p1.name == p2.name && (((!has(p1.__namespace__) + || p1.__namespace__ == '') && (!has(p2.__namespace__) || p2.__namespace__ + == '')) || (has(p1.__namespace__) && has(p2.__namespace__) && + p1.__namespace__ == p2.__namespace__ )) && (((!has(p1.sectionName) + || p1.sectionName == '') && (!has(p2.sectionName) || p2.sectionName + == '')) || ( has(p1.sectionName) && has(p2.sectionName) && p1.sectionName + == p2.sectionName)) && (((!has(p1.port) || p1.port == 0) && (!has(p2.port) + || p2.port == 0)) || (has(p1.port) && has(p2.port) && p1.port + == p2.port)))) + rules: + default: + - matches: + - path: + type: PathPrefix + value: / + description: Rules are a list of HTTP matchers, filters and actions. + items: + description: HTTPRouteRule defines semantics for matching an HTTP + request based on conditions (matches), processing it (filters), + and forwarding the request to an API object (backendRefs). + properties: + backendRefs: + description: "BackendRefs defines the backend(s) where matching + requests should be sent. \n Failure behavior here depends + on how many BackendRefs are specified and how many are invalid. + \n If *all* entries in BackendRefs are invalid, and there + are also no filters specified in this route rule, *all* traffic + which matches this rule MUST receive a 500 status code. \n + See the HTTPBackendRef definition for the rules about what + makes a single HTTPBackendRef invalid. \n When a HTTPBackendRef + is invalid, 500 status codes MUST be returned for requests + that would have otherwise been routed to an invalid backend. + If multiple backends are specified, and some are invalid, + the proportion of requests that would otherwise have been + routed to an invalid backend MUST receive a 500 status code. + \n For example, if two backends are specified with equal weights, + and one is invalid, 50 percent of traffic must receive a 500. + Implementations may choose how that 50 percent is determined. 
+ \n Support: Core for Kubernetes Service \n Support: Extended + for Kubernetes ServiceImport \n Support: Implementation-specific + for any other resource \n Support for weight: Core" + items: + description: "HTTPBackendRef defines how a HTTPRoute forwards + a HTTP request. \n Note that when a namespace different + than the local namespace is specified, a ReferenceGrant + object is required in the referent namespace to allow that + namespace's owner to accept the reference. See the ReferenceGrant + documentation for details. \n + \n When the BackendRef points to a Kubernetes Service, implementations + SHOULD honor the appProtocol field if it is set for the + target Service Port. \n Implementations supporting appProtocol + SHOULD recognize the Kubernetes Standard Application Protocols + defined in KEP-3726. \n If a Service appProtocol isn't specified, + an implementation MAY infer the backend protocol through + its own means. Implementations MAY infer the protocol from + the Route type referring to the backend Service. \n If a + Route is not able to send traffic to the backend using the + specified protocol then the backend is considered invalid. + Implementations MUST set the \"ResolvedRefs\" condition + to \"False\" with the \"UnsupportedProtocol\" reason. \n + " + properties: + filters: + description: "Filters defined at this level should be + executed if and only if the request is being forwarded + to the backend defined here. \n Support: Implementation-specific + (For broader support of filters, use the Filters field + in HTTPRouteRule.)" + items: + description: HTTPRouteFilter defines processing steps + that must be completed during the request or response + lifecycle. HTTPRouteFilters are meant as an extension + point to express processing that may be done in Gateway + implementations. Some examples include request or + response modification, implementing authentication + strategies, rate-limiting, and traffic shaping. API + guarantee/conformance is defined based on the type + of the filter. + properties: + extensionRef: + description: "ExtensionRef is an optional, implementation-specific + extension to the \"filter\" behavior. For example, + resource \"myroutefilter\" in group \"networking.example.net\"). + ExtensionRef MUST NOT be used for core and extended + filters. \n This filter can be used multiple times + within the same rule. \n Support: Implementation-specific" + properties: + group: + description: Group is the group of the referent. + For example, "gateway.networking.k8s.io". + When unspecified or empty string, core API + group is inferred. + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + description: Kind is kind of the referent. For + example "HTTPRoute" or "Service". + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: Name is the name of the referent. + maxLength: 253 + minLength: 1 + type: string + required: + - group + - kind + - name + type: object + requestHeaderModifier: + description: "RequestHeaderModifier defines a schema + for a filter that modifies request headers. \n + Support: Core" + properties: + add: + description: "Add adds the given header(s) (name, + value) to the request before the action. It + appends to any existing values associated + with the header name. 
\n Input: GET /foo HTTP/1.1 + my-header: foo \n Config: add: - name: \"my-header\" + value: \"bar,baz\" \n Output: GET /foo HTTP/1.1 + my-header: foo,bar,baz" + items: + description: HTTPHeader represents an HTTP + Header name and value as defined by RFC + 7230. + properties: + name: + description: "Name is the name of the + HTTP Header to be matched. Name matching + MUST be case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an + equivalent name MUST be considered for + a match. Subsequent entries with an + equivalent header name MUST be ignored. + Due to the case-insensitivity of header + names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP + Header to be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + remove: + description: "Remove the given header(s) from + the HTTP request before the action. The value + of Remove is a list of HTTP header names. + Note that the header names are case-insensitive + (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + \n Input: GET /foo HTTP/1.1 my-header1: foo + my-header2: bar my-header3: baz \n Config: + remove: [\"my-header1\", \"my-header3\"] \n + Output: GET /foo HTTP/1.1 my-header2: bar" + items: + type: string + maxItems: 16 + type: array + x-kubernetes-list-type: set + set: + description: "Set overwrites the request with + the given header (name, value) before the + action. \n Input: GET /foo HTTP/1.1 my-header: + foo \n Config: set: - name: \"my-header\" + value: \"bar\" \n Output: GET /foo HTTP/1.1 + my-header: bar" + items: + description: HTTPHeader represents an HTTP + Header name and value as defined by RFC + 7230. + properties: + name: + description: "Name is the name of the + HTTP Header to be matched. Name matching + MUST be case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an + equivalent name MUST be considered for + a match. Subsequent entries with an + equivalent header name MUST be ignored. + Due to the case-insensitivity of header + names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP + Header to be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + requestMirror: + description: "RequestMirror defines a schema for + a filter that mirrors requests. Requests are sent + to the specified destination, but responses from + that destination are ignored. \n This filter can + be used multiple times within the same rule. Note + that not all implementations will be able to support + mirroring to multiple backends. \n Support: Extended" + properties: + backendRef: + description: "BackendRef references a resource + where mirrored requests are sent. \n Mirrored + requests must be sent only to a single destination + endpoint within this BackendRef, irrespective + of how many endpoints are present within this + BackendRef. 
\n If the referent cannot be found, + this BackendRef is invalid and must be dropped + from the Gateway. The controller must ensure + the \"ResolvedRefs\" condition on the Route + status is set to `status: False` and not configure + this backend in the underlying implementation. + \n If there is a cross-namespace reference + to an *existing* object that is not allowed + by a ReferenceGrant, the controller must ensure + the \"ResolvedRefs\" condition on the Route + is set to `status: False`, with the \"RefNotPermitted\" + reason and not configure this backend in the + underlying implementation. \n In either error + case, the Message of the `ResolvedRefs` Condition + should be used to provide more detail about + the problem. \n Support: Extended for Kubernetes + Service \n Support: Implementation-specific + for any other resource" + properties: + group: + default: "" + description: Group is the group of the referent. + For example, "gateway.networking.k8s.io". + When unspecified or empty string, core + API group is inferred. + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Service + description: "Kind is the Kubernetes resource + kind of the referent. For example \"Service\". + \n Defaults to \"Service\" when not specified. + \n ExternalName services can refer to + CNAME DNS records that may live outside + of the cluster and as such are difficult + to reason about in terms of conformance. + They also may not be safe to forward to + (see CVE-2021-25740 for more information). + Implementations SHOULD NOT support ExternalName + Services. \n Support: Core (Services with + a type other than ExternalName) \n Support: + Implementation-specific (Services with + type ExternalName)" + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: Name is the name of the referent. + maxLength: 253 + minLength: 1 + type: string + namespace: + description: "Namespace is the namespace + of the backend. When unspecified, the + local namespace is inferred. \n Note that + when a namespace different than the local + namespace is specified, a ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept + the reference. See the ReferenceGrant + documentation for details. \n Support: + Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: Port specifies the destination + port number to use for this resource. + Port is required when the referent is + a Kubernetes Service. In this case, the + port number is the service port number, + not the target port. For other resources, + destination port might be derived from + the referent resource or this field. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - name + type: object + x-kubernetes-validations: + - message: Must have port for Service reference + rule: '(size(self.group) == 0 && self.kind + == ''Service'') ? has(self.port) : true' + required: + - backendRef + type: object + requestRedirect: + description: "RequestRedirect defines a schema for + a filter that responds to the request with an + HTTP redirection. \n Support: Core" + properties: + hostname: + description: "Hostname is the hostname to be + used in the value of the `Location` header + in the response. When empty, the hostname + in the `Host` header of the request is used. 
+ \n Support: Core" + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + path: + description: "Path defines parameters used to + modify the path of the incoming request. The + modified path is then used to construct the + `Location` header. When empty, the request + path is used as-is. \n Support: Extended" + properties: + replaceFullPath: + description: ReplaceFullPath specifies the + value with which to replace the full path + of a request during a rewrite or redirect. + maxLength: 1024 + type: string + replacePrefixMatch: + description: "ReplacePrefixMatch specifies + the value with which to replace the prefix + match of a request during a rewrite or + redirect. For example, a request to \"/foo/bar\" + with a prefix match of \"/foo\" and a + ReplacePrefixMatch of \"/xyz\" would be + modified to \"/xyz/bar\". \n Note that + this matches the behavior of the PathPrefix + match type. This matches full path elements. + A path element refers to the list of labels + in the path split by the `/` separator. + When specified, a trailing `/` is ignored. + For example, the paths `/abc`, `/abc/`, + and `/abc/def` would all match the prefix + `/abc`, but the path `/abcd` would not. + \n ReplacePrefixMatch is only compatible + with a `PathPrefix` HTTPRouteMatch. Using + any other HTTPRouteMatch type on the same + HTTPRouteRule will result in the implementation + setting the Accepted Condition for the + Route to `status: False`. \n Request Path + | Prefix Match | Replace Prefix | Modified + Path -------------|--------------|----------------|---------- + /foo/bar | /foo | /xyz | + /xyz/bar /foo/bar | /foo | + /xyz/ | /xyz/bar /foo/bar | + /foo/ | /xyz | /xyz/bar + /foo/bar | /foo/ | /xyz/ | + /xyz/bar /foo | /foo | + /xyz | /xyz /foo/ | /foo + \ | /xyz | /xyz/ /foo/bar + \ | /foo | | + /bar /foo/ | /foo | | / /foo | /foo | + | / /foo/ | /foo + \ | / | / /foo | + /foo | / | /" + maxLength: 1024 + type: string + type: + description: "Type defines the type of path + modifier. Additional types may be added + in a future release of the API. \n Note + that values may be added to this enum, + implementations must ensure that unknown + values will not cause a crash. \n Unknown + values here must result in the implementation + setting the Accepted Condition for the + Route to `status: False`, with a Reason + of `UnsupportedValue`." + enum: + - ReplaceFullPath + - ReplacePrefixMatch + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: replaceFullPath must be specified + when type is set to 'ReplaceFullPath' + rule: 'self.type == ''ReplaceFullPath'' ? + has(self.replaceFullPath) : true' + - message: type must be 'ReplaceFullPath' when + replaceFullPath is set + rule: 'has(self.replaceFullPath) ? self.type + == ''ReplaceFullPath'' : true' + - message: replacePrefixMatch must be specified + when type is set to 'ReplacePrefixMatch' + rule: 'self.type == ''ReplacePrefixMatch'' + ? has(self.replacePrefixMatch) : true' + - message: type must be 'ReplacePrefixMatch' + when replacePrefixMatch is set + rule: 'has(self.replacePrefixMatch) ? self.type + == ''ReplacePrefixMatch'' : true' + port: + description: "Port is the port to be used in + the value of the `Location` header in the + response. \n If no port is specified, the + redirect port MUST be derived using the following + rules: \n * If redirect scheme is not-empty, + the redirect port MUST be the well-known port + associated with the redirect scheme. 
Specifically + \"http\" to port 80 and \"https\" to port + 443. If the redirect scheme does not have + a well-known port, the listener port of the + Gateway SHOULD be used. * If redirect scheme + is empty, the redirect port MUST be the Gateway + Listener port. \n Implementations SHOULD NOT + add the port number in the 'Location' header + in the following cases: \n * A Location header + that will use HTTP (whether that is determined + via the Listener protocol or the Scheme field) + _and_ use port 80. * A Location header that + will use HTTPS (whether that is determined + via the Listener protocol or the Scheme field) + _and_ use port 443. \n Support: Extended" + format: int32 + maximum: 65535 + minimum: 1 + type: integer + scheme: + description: "Scheme is the scheme to be used + in the value of the `Location` header in the + response. When empty, the scheme of the request + is used. \n Scheme redirects can affect the + port of the redirect, for more information, + refer to the documentation for the port field + of this filter. \n Note that values may be + added to this enum, implementations must ensure + that unknown values will not cause a crash. + \n Unknown values here must result in the + implementation setting the Accepted Condition + for the Route to `status: False`, with a Reason + of `UnsupportedValue`. \n Support: Extended" + enum: + - http + - https + type: string + statusCode: + default: 302 + description: "StatusCode is the HTTP status + code to be used in response. \n Note that + values may be added to this enum, implementations + must ensure that unknown values will not cause + a crash. \n Unknown values here must result + in the implementation setting the Accepted + Condition for the Route to `status: False`, + with a Reason of `UnsupportedValue`. \n Support: + Core" + enum: + - 301 + - 302 + type: integer + type: object + responseHeaderModifier: + description: "ResponseHeaderModifier defines a schema + for a filter that modifies response headers. \n + Support: Extended" + properties: + add: + description: "Add adds the given header(s) (name, + value) to the request before the action. It + appends to any existing values associated + with the header name. \n Input: GET /foo HTTP/1.1 + my-header: foo \n Config: add: - name: \"my-header\" + value: \"bar,baz\" \n Output: GET /foo HTTP/1.1 + my-header: foo,bar,baz" + items: + description: HTTPHeader represents an HTTP + Header name and value as defined by RFC + 7230. + properties: + name: + description: "Name is the name of the + HTTP Header to be matched. Name matching + MUST be case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an + equivalent name MUST be considered for + a match. Subsequent entries with an + equivalent header name MUST be ignored. + Due to the case-insensitivity of header + names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP + Header to be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + remove: + description: "Remove the given header(s) from + the HTTP request before the action. The value + of Remove is a list of HTTP header names. 
+ Note that the header names are case-insensitive + (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + \n Input: GET /foo HTTP/1.1 my-header1: foo + my-header2: bar my-header3: baz \n Config: + remove: [\"my-header1\", \"my-header3\"] \n + Output: GET /foo HTTP/1.1 my-header2: bar" + items: + type: string + maxItems: 16 + type: array + x-kubernetes-list-type: set + set: + description: "Set overwrites the request with + the given header (name, value) before the + action. \n Input: GET /foo HTTP/1.1 my-header: + foo \n Config: set: - name: \"my-header\" + value: \"bar\" \n Output: GET /foo HTTP/1.1 + my-header: bar" + items: + description: HTTPHeader represents an HTTP + Header name and value as defined by RFC + 7230. + properties: + name: + description: "Name is the name of the + HTTP Header to be matched. Name matching + MUST be case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an + equivalent name MUST be considered for + a match. Subsequent entries with an + equivalent header name MUST be ignored. + Due to the case-insensitivity of header + names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP + Header to be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + type: + description: "Type identifies the type of filter + to apply. As with other API fields, types are + classified into three conformance levels: \n - + Core: Filter types and their corresponding configuration + defined by \"Support: Core\" in this package, + e.g. \"RequestHeaderModifier\". All implementations + must support core filters. \n - Extended: Filter + types and their corresponding configuration defined + by \"Support: Extended\" in this package, e.g. + \"RequestMirror\". Implementers are encouraged + to support extended filters. \n - Implementation-specific: + Filters that are defined and supported by specific + vendors. In the future, filters showing convergence + in behavior across multiple implementations will + be considered for inclusion in extended or core + conformance levels. Filter-specific configuration + for such filters is specified using the ExtensionRef + field. `Type` should be set to \"ExtensionRef\" + for custom filters. \n Implementers are encouraged + to define custom implementation types to extend + the core API with implementation-specific behavior. + \n If a reference to a custom filter type cannot + be resolved, the filter MUST NOT be skipped. Instead, + requests that would have been processed by that + filter MUST receive a HTTP error response. \n + Note that values may be added to this enum, implementations + must ensure that unknown values will not cause + a crash. \n Unknown values here must result in + the implementation setting the Accepted Condition + for the Route to `status: False`, with a Reason + of `UnsupportedValue`." + enum: + - RequestHeaderModifier + - ResponseHeaderModifier + - RequestMirror + - RequestRedirect + - URLRewrite + - ExtensionRef + type: string + urlRewrite: + description: "URLRewrite defines a schema for a + filter that modifies a request during forwarding. 
+ \n Support: Extended" + properties: + hostname: + description: "Hostname is the value to be used + to replace the Host header value during forwarding. + \n Support: Extended" + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + path: + description: "Path defines a path rewrite. \n + Support: Extended" + properties: + replaceFullPath: + description: ReplaceFullPath specifies the + value with which to replace the full path + of a request during a rewrite or redirect. + maxLength: 1024 + type: string + replacePrefixMatch: + description: "ReplacePrefixMatch specifies + the value with which to replace the prefix + match of a request during a rewrite or + redirect. For example, a request to \"/foo/bar\" + with a prefix match of \"/foo\" and a + ReplacePrefixMatch of \"/xyz\" would be + modified to \"/xyz/bar\". \n Note that + this matches the behavior of the PathPrefix + match type. This matches full path elements. + A path element refers to the list of labels + in the path split by the `/` separator. + When specified, a trailing `/` is ignored. + For example, the paths `/abc`, `/abc/`, + and `/abc/def` would all match the prefix + `/abc`, but the path `/abcd` would not. + \n ReplacePrefixMatch is only compatible + with a `PathPrefix` HTTPRouteMatch. Using + any other HTTPRouteMatch type on the same + HTTPRouteRule will result in the implementation + setting the Accepted Condition for the + Route to `status: False`. \n Request Path + | Prefix Match | Replace Prefix | Modified + Path -------------|--------------|----------------|---------- + /foo/bar | /foo | /xyz | + /xyz/bar /foo/bar | /foo | + /xyz/ | /xyz/bar /foo/bar | + /foo/ | /xyz | /xyz/bar + /foo/bar | /foo/ | /xyz/ | + /xyz/bar /foo | /foo | + /xyz | /xyz /foo/ | /foo + \ | /xyz | /xyz/ /foo/bar + \ | /foo | | + /bar /foo/ | /foo | | / /foo | /foo | + | / /foo/ | /foo + \ | / | / /foo | + /foo | / | /" + maxLength: 1024 + type: string + type: + description: "Type defines the type of path + modifier. Additional types may be added + in a future release of the API. \n Note + that values may be added to this enum, + implementations must ensure that unknown + values will not cause a crash. \n Unknown + values here must result in the implementation + setting the Accepted Condition for the + Route to `status: False`, with a Reason + of `UnsupportedValue`." + enum: + - ReplaceFullPath + - ReplacePrefixMatch + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: replaceFullPath must be specified + when type is set to 'ReplaceFullPath' + rule: 'self.type == ''ReplaceFullPath'' ? + has(self.replaceFullPath) : true' + - message: type must be 'ReplaceFullPath' when + replaceFullPath is set + rule: 'has(self.replaceFullPath) ? self.type + == ''ReplaceFullPath'' : true' + - message: replacePrefixMatch must be specified + when type is set to 'ReplacePrefixMatch' + rule: 'self.type == ''ReplacePrefixMatch'' + ? has(self.replacePrefixMatch) : true' + - message: type must be 'ReplacePrefixMatch' + when replacePrefixMatch is set + rule: 'has(self.replacePrefixMatch) ? 
self.type + == ''ReplacePrefixMatch'' : true' + type: object + required: + - type + type: object + x-kubernetes-validations: + - message: filter.requestHeaderModifier must be nil + if the filter.type is not RequestHeaderModifier + rule: '!(has(self.requestHeaderModifier) && self.type + != ''RequestHeaderModifier'')' + - message: filter.requestHeaderModifier must be specified + for RequestHeaderModifier filter.type + rule: '!(!has(self.requestHeaderModifier) && self.type + == ''RequestHeaderModifier'')' + - message: filter.responseHeaderModifier must be nil + if the filter.type is not ResponseHeaderModifier + rule: '!(has(self.responseHeaderModifier) && self.type + != ''ResponseHeaderModifier'')' + - message: filter.responseHeaderModifier must be specified + for ResponseHeaderModifier filter.type + rule: '!(!has(self.responseHeaderModifier) && self.type + == ''ResponseHeaderModifier'')' + - message: filter.requestMirror must be nil if the filter.type + is not RequestMirror + rule: '!(has(self.requestMirror) && self.type != ''RequestMirror'')' + - message: filter.requestMirror must be specified for + RequestMirror filter.type + rule: '!(!has(self.requestMirror) && self.type == + ''RequestMirror'')' + - message: filter.requestRedirect must be nil if the + filter.type is not RequestRedirect + rule: '!(has(self.requestRedirect) && self.type != + ''RequestRedirect'')' + - message: filter.requestRedirect must be specified + for RequestRedirect filter.type + rule: '!(!has(self.requestRedirect) && self.type == + ''RequestRedirect'')' + - message: filter.urlRewrite must be nil if the filter.type + is not URLRewrite + rule: '!(has(self.urlRewrite) && self.type != ''URLRewrite'')' + - message: filter.urlRewrite must be specified for URLRewrite + filter.type + rule: '!(!has(self.urlRewrite) && self.type == ''URLRewrite'')' + - message: filter.extensionRef must be nil if the filter.type + is not ExtensionRef + rule: '!(has(self.extensionRef) && self.type != ''ExtensionRef'')' + - message: filter.extensionRef must be specified for + ExtensionRef filter.type + rule: '!(!has(self.extensionRef) && self.type == ''ExtensionRef'')' + maxItems: 16 + type: array + x-kubernetes-validations: + - message: May specify either httpRouteFilterRequestRedirect + or httpRouteFilterRequestRewrite, but not both + rule: '!(self.exists(f, f.type == ''RequestRedirect'') + && self.exists(f, f.type == ''URLRewrite''))' + - message: May specify either httpRouteFilterRequestRedirect + or httpRouteFilterRequestRewrite, but not both + rule: '!(self.exists(f, f.type == ''RequestRedirect'') + && self.exists(f, f.type == ''URLRewrite''))' + - message: RequestHeaderModifier filter cannot be repeated + rule: self.filter(f, f.type == 'RequestHeaderModifier').size() + <= 1 + - message: ResponseHeaderModifier filter cannot be repeated + rule: self.filter(f, f.type == 'ResponseHeaderModifier').size() + <= 1 + - message: RequestRedirect filter cannot be repeated + rule: self.filter(f, f.type == 'RequestRedirect').size() + <= 1 + - message: URLRewrite filter cannot be repeated + rule: self.filter(f, f.type == 'URLRewrite').size() + <= 1 + group: + default: "" + description: Group is the group of the referent. For example, + "gateway.networking.k8s.io". When unspecified or empty + string, core API group is inferred. + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Service + description: "Kind is the Kubernetes resource kind of + the referent. 
For example \"Service\". \n Defaults to + \"Service\" when not specified. \n ExternalName services + can refer to CNAME DNS records that may live outside + of the cluster and as such are difficult to reason about + in terms of conformance. They also may not be safe to + forward to (see CVE-2021-25740 for more information). + Implementations SHOULD NOT support ExternalName Services. + \n Support: Core (Services with a type other than ExternalName) + \n Support: Implementation-specific (Services with type + ExternalName)" + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: Name is the name of the referent. + maxLength: 253 + minLength: 1 + type: string + namespace: + description: "Namespace is the namespace of the backend. + When unspecified, the local namespace is inferred. \n + Note that when a namespace different than the local + namespace is specified, a ReferenceGrant object is required + in the referent namespace to allow that namespace's + owner to accept the reference. See the ReferenceGrant + documentation for details. \n Support: Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: Port specifies the destination port number + to use for this resource. Port is required when the + referent is a Kubernetes Service. In this case, the + port number is the service port number, not the target + port. For other resources, destination port might be + derived from the referent resource or this field. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + weight: + default: 1 + description: "Weight specifies the proportion of requests + forwarded to the referenced backend. This is computed + as weight/(sum of all weights in this BackendRefs list). + For non-zero values, there may be some epsilon from + the exact proportion defined here depending on the precision + an implementation supports. Weight is not a percentage + and the sum of weights does not need to equal 100. \n + If only one backend is specified and it has a weight + greater than 0, 100% of the traffic is forwarded to + that backend. If weight is set to 0, no traffic should + be forwarded for this entry. If unspecified, weight + defaults to 1. \n Support for this field varies based + on the context where used." + format: int32 + maximum: 1000000 + minimum: 0 + type: integer + required: + - name + type: object + x-kubernetes-validations: + - message: Must have port for Service reference + rule: '(size(self.group) == 0 && self.kind == ''Service'') + ? has(self.port) : true' + maxItems: 16 + type: array + filters: + description: "Filters define the filters that are applied to + requests that match this rule. \n The effects of ordering + of multiple behaviors are currently unspecified. This can + change in the future based on feedback during the alpha stage. + \n Conformance-levels at this level are defined based on the + type of filter: \n - ALL core filters MUST be supported by + all implementations. - Implementers are encouraged to support + extended filters. - Implementation-specific custom filters + have no API guarantees across implementations. \n Specifying + the same filter multiple times is not supported unless explicitly + indicated in the filter. \n All filters are expected to be + compatible with each other except for the URLRewrite and RequestRedirect + filters, which may not be combined. 
If an implementation can + not support other combinations of filters, they must clearly + document that limitation. In cases where incompatible or unsupported + filters are specified and cause the `Accepted` condition to + be set to status `False`, implementations may use the `IncompatibleFilters` + reason to specify this configuration error. \n Support: Core" + items: + description: HTTPRouteFilter defines processing steps that + must be completed during the request or response lifecycle. + HTTPRouteFilters are meant as an extension point to express + processing that may be done in Gateway implementations. + Some examples include request or response modification, + implementing authentication strategies, rate-limiting, and + traffic shaping. API guarantee/conformance is defined based + on the type of the filter. + properties: + extensionRef: + description: "ExtensionRef is an optional, implementation-specific + extension to the \"filter\" behavior. For example, + resource \"myroutefilter\" in group \"networking.example.net\"). + ExtensionRef MUST NOT be used for core and extended + filters. \n This filter can be used multiple times within + the same rule. \n Support: Implementation-specific" + properties: + group: + description: Group is the group of the referent. For + example, "gateway.networking.k8s.io". When unspecified + or empty string, core API group is inferred. + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + description: Kind is kind of the referent. For example + "HTTPRoute" or "Service". + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: Name is the name of the referent. + maxLength: 253 + minLength: 1 + type: string + required: + - group + - kind + - name + type: object + requestHeaderModifier: + description: "RequestHeaderModifier defines a schema for + a filter that modifies request headers. \n Support: + Core" + properties: + add: + description: "Add adds the given header(s) (name, + value) to the request before the action. It appends + to any existing values associated with the header + name. \n Input: GET /foo HTTP/1.1 my-header: foo + \n Config: add: - name: \"my-header\" value: \"bar,baz\" + \n Output: GET /foo HTTP/1.1 my-header: foo,bar,baz" + items: + description: HTTPHeader represents an HTTP Header + name and value as defined by RFC 7230. + properties: + name: + description: "Name is the name of the HTTP Header + to be matched. Name matching MUST be case + insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an equivalent + name MUST be considered for a match. Subsequent + entries with an equivalent header name MUST + be ignored. Due to the case-insensitivity + of header names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP Header + to be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + remove: + description: "Remove the given header(s) from the + HTTP request before the action. The value of Remove + is a list of HTTP header names. 
Note that the header + names are case-insensitive (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + \n Input: GET /foo HTTP/1.1 my-header1: foo my-header2: + bar my-header3: baz \n Config: remove: [\"my-header1\", + \"my-header3\"] \n Output: GET /foo HTTP/1.1 my-header2: + bar" + items: + type: string + maxItems: 16 + type: array + x-kubernetes-list-type: set + set: + description: "Set overwrites the request with the + given header (name, value) before the action. \n + Input: GET /foo HTTP/1.1 my-header: foo \n Config: + set: - name: \"my-header\" value: \"bar\" \n Output: + GET /foo HTTP/1.1 my-header: bar" + items: + description: HTTPHeader represents an HTTP Header + name and value as defined by RFC 7230. + properties: + name: + description: "Name is the name of the HTTP Header + to be matched. Name matching MUST be case + insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an equivalent + name MUST be considered for a match. Subsequent + entries with an equivalent header name MUST + be ignored. Due to the case-insensitivity + of header names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP Header + to be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + requestMirror: + description: "RequestMirror defines a schema for a filter + that mirrors requests. Requests are sent to the specified + destination, but responses from that destination are + ignored. \n This filter can be used multiple times within + the same rule. Note that not all implementations will + be able to support mirroring to multiple backends. \n + Support: Extended" + properties: + backendRef: + description: "BackendRef references a resource where + mirrored requests are sent. \n Mirrored requests + must be sent only to a single destination endpoint + within this BackendRef, irrespective of how many + endpoints are present within this BackendRef. \n + If the referent cannot be found, this BackendRef + is invalid and must be dropped from the Gateway. + The controller must ensure the \"ResolvedRefs\" + condition on the Route status is set to `status: + False` and not configure this backend in the underlying + implementation. \n If there is a cross-namespace + reference to an *existing* object that is not allowed + by a ReferenceGrant, the controller must ensure + the \"ResolvedRefs\" condition on the Route is + set to `status: False`, with the \"RefNotPermitted\" + reason and not configure this backend in the underlying + implementation. \n In either error case, the Message + of the `ResolvedRefs` Condition should be used to + provide more detail about the problem. \n Support: + Extended for Kubernetes Service \n Support: Implementation-specific + for any other resource" + properties: + group: + default: "" + description: Group is the group of the referent. + For example, "gateway.networking.k8s.io". When + unspecified or empty string, core API group + is inferred. + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Service + description: "Kind is the Kubernetes resource + kind of the referent. For example \"Service\". 
+ \n Defaults to \"Service\" when not specified. + \n ExternalName services can refer to CNAME + DNS records that may live outside of the cluster + and as such are difficult to reason about in + terms of conformance. They also may not be safe + to forward to (see CVE-2021-25740 for more information). + Implementations SHOULD NOT support ExternalName + Services. \n Support: Core (Services with a + type other than ExternalName) \n Support: Implementation-specific + (Services with type ExternalName)" + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: Name is the name of the referent. + maxLength: 253 + minLength: 1 + type: string + namespace: + description: "Namespace is the namespace of the + backend. When unspecified, the local namespace + is inferred. \n Note that when a namespace different + than the local namespace is specified, a ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept the + reference. See the ReferenceGrant documentation + for details. \n Support: Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: Port specifies the destination port + number to use for this resource. Port is required + when the referent is a Kubernetes Service. In + this case, the port number is the service port + number, not the target port. For other resources, + destination port might be derived from the referent + resource or this field. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - name + type: object + x-kubernetes-validations: + - message: Must have port for Service reference + rule: '(size(self.group) == 0 && self.kind == ''Service'') + ? has(self.port) : true' + required: + - backendRef + type: object + requestRedirect: + description: "RequestRedirect defines a schema for a filter + that responds to the request with an HTTP redirection. + \n Support: Core" + properties: + hostname: + description: "Hostname is the hostname to be used + in the value of the `Location` header in the response. + When empty, the hostname in the `Host` header of + the request is used. \n Support: Core" + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + path: + description: "Path defines parameters used to modify + the path of the incoming request. The modified path + is then used to construct the `Location` header. + When empty, the request path is used as-is. \n Support: + Extended" + properties: + replaceFullPath: + description: ReplaceFullPath specifies the value + with which to replace the full path of a request + during a rewrite or redirect. + maxLength: 1024 + type: string + replacePrefixMatch: + description: "ReplacePrefixMatch specifies the + value with which to replace the prefix match + of a request during a rewrite or redirect. For + example, a request to \"/foo/bar\" with a prefix + match of \"/foo\" and a ReplacePrefixMatch of + \"/xyz\" would be modified to \"/xyz/bar\". + \n Note that this matches the behavior of the + PathPrefix match type. This matches full path + elements. A path element refers to the list + of labels in the path split by the `/` separator. + When specified, a trailing `/` is ignored. For + example, the paths `/abc`, `/abc/`, and `/abc/def` + would all match the prefix `/abc`, but the path + `/abcd` would not. \n ReplacePrefixMatch is + only compatible with a `PathPrefix` HTTPRouteMatch. 
+ Using any other HTTPRouteMatch type on the same + HTTPRouteRule will result in the implementation + setting the Accepted Condition for the Route + to `status: False`. \n Request Path | Prefix + Match | Replace Prefix | Modified Path -------------|--------------|----------------|---------- + /foo/bar | /foo | /xyz | + /xyz/bar /foo/bar | /foo | /xyz/ + \ | /xyz/bar /foo/bar | /foo/ | + /xyz | /xyz/bar /foo/bar | /foo/ + \ | /xyz/ | /xyz/bar /foo | + /foo | /xyz | /xyz /foo/ | + /foo | /xyz | /xyz/ /foo/bar + \ | /foo | | /bar + /foo/ | /foo | + | / /foo | /foo | + | / /foo/ | /foo | / | + / /foo | /foo | / | + /" + maxLength: 1024 + type: string + type: + description: "Type defines the type of path modifier. + Additional types may be added in a future release + of the API. \n Note that values may be added + to this enum, implementations must ensure that + unknown values will not cause a crash. \n Unknown + values here must result in the implementation + setting the Accepted Condition for the Route + to `status: False`, with a Reason of `UnsupportedValue`." + enum: + - ReplaceFullPath + - ReplacePrefixMatch + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: replaceFullPath must be specified when + type is set to 'ReplaceFullPath' + rule: 'self.type == ''ReplaceFullPath'' ? has(self.replaceFullPath) + : true' + - message: type must be 'ReplaceFullPath' when replaceFullPath + is set + rule: 'has(self.replaceFullPath) ? self.type == + ''ReplaceFullPath'' : true' + - message: replacePrefixMatch must be specified when + type is set to 'ReplacePrefixMatch' + rule: 'self.type == ''ReplacePrefixMatch'' ? has(self.replacePrefixMatch) + : true' + - message: type must be 'ReplacePrefixMatch' when + replacePrefixMatch is set + rule: 'has(self.replacePrefixMatch) ? self.type + == ''ReplacePrefixMatch'' : true' + port: + description: "Port is the port to be used in the value + of the `Location` header in the response. \n If + no port is specified, the redirect port MUST be + derived using the following rules: \n * If redirect + scheme is not-empty, the redirect port MUST be the + well-known port associated with the redirect scheme. + Specifically \"http\" to port 80 and \"https\" to + port 443. If the redirect scheme does not have a + well-known port, the listener port of the Gateway + SHOULD be used. * If redirect scheme is empty, the + redirect port MUST be the Gateway Listener port. + \n Implementations SHOULD NOT add the port number + in the 'Location' header in the following cases: + \n * A Location header that will use HTTP (whether + that is determined via the Listener protocol or + the Scheme field) _and_ use port 80. * A Location + header that will use HTTPS (whether that is determined + via the Listener protocol or the Scheme field) _and_ + use port 443. \n Support: Extended" + format: int32 + maximum: 65535 + minimum: 1 + type: integer + scheme: + description: "Scheme is the scheme to be used in the + value of the `Location` header in the response. + When empty, the scheme of the request is used. \n + Scheme redirects can affect the port of the redirect, + for more information, refer to the documentation + for the port field of this filter. \n Note that + values may be added to this enum, implementations + must ensure that unknown values will not cause a + crash. \n Unknown values here must result in the + implementation setting the Accepted Condition for + the Route to `status: False`, with a Reason of `UnsupportedValue`. 
+ \n Support: Extended" + enum: + - http + - https + type: string + statusCode: + default: 302 + description: "StatusCode is the HTTP status code to + be used in response. \n Note that values may be + added to this enum, implementations must ensure + that unknown values will not cause a crash. \n Unknown + values here must result in the implementation setting + the Accepted Condition for the Route to `status: + False`, with a Reason of `UnsupportedValue`. \n + Support: Core" + enum: + - 301 + - 302 + type: integer + type: object + responseHeaderModifier: + description: "ResponseHeaderModifier defines a schema + for a filter that modifies response headers. \n Support: + Extended" + properties: + add: + description: "Add adds the given header(s) (name, + value) to the request before the action. It appends + to any existing values associated with the header + name. \n Input: GET /foo HTTP/1.1 my-header: foo + \n Config: add: - name: \"my-header\" value: \"bar,baz\" + \n Output: GET /foo HTTP/1.1 my-header: foo,bar,baz" + items: + description: HTTPHeader represents an HTTP Header + name and value as defined by RFC 7230. + properties: + name: + description: "Name is the name of the HTTP Header + to be matched. Name matching MUST be case + insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an equivalent + name MUST be considered for a match. Subsequent + entries with an equivalent header name MUST + be ignored. Due to the case-insensitivity + of header names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP Header + to be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + remove: + description: "Remove the given header(s) from the + HTTP request before the action. The value of Remove + is a list of HTTP header names. Note that the header + names are case-insensitive (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). + \n Input: GET /foo HTTP/1.1 my-header1: foo my-header2: + bar my-header3: baz \n Config: remove: [\"my-header1\", + \"my-header3\"] \n Output: GET /foo HTTP/1.1 my-header2: + bar" + items: + type: string + maxItems: 16 + type: array + x-kubernetes-list-type: set + set: + description: "Set overwrites the request with the + given header (name, value) before the action. \n + Input: GET /foo HTTP/1.1 my-header: foo \n Config: + set: - name: \"my-header\" value: \"bar\" \n Output: + GET /foo HTTP/1.1 my-header: bar" + items: + description: HTTPHeader represents an HTTP Header + name and value as defined by RFC 7230. + properties: + name: + description: "Name is the name of the HTTP Header + to be matched. Name matching MUST be case + insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent + header names, the first entry with an equivalent + name MUST be considered for a match. Subsequent + entries with an equivalent header name MUST + be ignored. Due to the case-insensitivity + of header names, \"foo\" and \"Foo\" are considered + equivalent." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + value: + description: Value is the value of HTTP Header + to be matched. 
+ maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + type: + description: "Type identifies the type of filter to apply. + As with other API fields, types are classified into + three conformance levels: \n - Core: Filter types and + their corresponding configuration defined by \"Support: + Core\" in this package, e.g. \"RequestHeaderModifier\". + All implementations must support core filters. \n - + Extended: Filter types and their corresponding configuration + defined by \"Support: Extended\" in this package, e.g. + \"RequestMirror\". Implementers are encouraged to support + extended filters. \n - Implementation-specific: Filters + that are defined and supported by specific vendors. + In the future, filters showing convergence in behavior + across multiple implementations will be considered for + inclusion in extended or core conformance levels. Filter-specific + configuration for such filters is specified using the + ExtensionRef field. `Type` should be set to \"ExtensionRef\" + for custom filters. \n Implementers are encouraged to + define custom implementation types to extend the core + API with implementation-specific behavior. \n If a reference + to a custom filter type cannot be resolved, the filter + MUST NOT be skipped. Instead, requests that would have + been processed by that filter MUST receive a HTTP error + response. \n Note that values may be added to this enum, + implementations must ensure that unknown values will + not cause a crash. \n Unknown values here must result + in the implementation setting the Accepted Condition + for the Route to `status: False`, with a Reason of `UnsupportedValue`." + enum: + - RequestHeaderModifier + - ResponseHeaderModifier + - RequestMirror + - RequestRedirect + - URLRewrite + - ExtensionRef + type: string + urlRewrite: + description: "URLRewrite defines a schema for a filter + that modifies a request during forwarding. \n Support: + Extended" + properties: + hostname: + description: "Hostname is the value to be used to + replace the Host header value during forwarding. + \n Support: Extended" + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + path: + description: "Path defines a path rewrite. \n Support: + Extended" + properties: + replaceFullPath: + description: ReplaceFullPath specifies the value + with which to replace the full path of a request + during a rewrite or redirect. + maxLength: 1024 + type: string + replacePrefixMatch: + description: "ReplacePrefixMatch specifies the + value with which to replace the prefix match + of a request during a rewrite or redirect. For + example, a request to \"/foo/bar\" with a prefix + match of \"/foo\" and a ReplacePrefixMatch of + \"/xyz\" would be modified to \"/xyz/bar\". + \n Note that this matches the behavior of the + PathPrefix match type. This matches full path + elements. A path element refers to the list + of labels in the path split by the `/` separator. + When specified, a trailing `/` is ignored. For + example, the paths `/abc`, `/abc/`, and `/abc/def` + would all match the prefix `/abc`, but the path + `/abcd` would not. \n ReplacePrefixMatch is + only compatible with a `PathPrefix` HTTPRouteMatch. 
+ Using any other HTTPRouteMatch type on the same + HTTPRouteRule will result in the implementation + setting the Accepted Condition for the Route + to `status: False`. \n Request Path | Prefix + Match | Replace Prefix | Modified Path -------------|--------------|----------------|---------- + /foo/bar | /foo | /xyz | + /xyz/bar /foo/bar | /foo | /xyz/ + \ | /xyz/bar /foo/bar | /foo/ | + /xyz | /xyz/bar /foo/bar | /foo/ + \ | /xyz/ | /xyz/bar /foo | + /foo | /xyz | /xyz /foo/ | + /foo | /xyz | /xyz/ /foo/bar + \ | /foo | | /bar + /foo/ | /foo | + | / /foo | /foo | + | / /foo/ | /foo | / | + / /foo | /foo | / | + /" + maxLength: 1024 + type: string + type: + description: "Type defines the type of path modifier. + Additional types may be added in a future release + of the API. \n Note that values may be added + to this enum, implementations must ensure that + unknown values will not cause a crash. \n Unknown + values here must result in the implementation + setting the Accepted Condition for the Route + to `status: False`, with a Reason of `UnsupportedValue`." + enum: + - ReplaceFullPath + - ReplacePrefixMatch + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: replaceFullPath must be specified when + type is set to 'ReplaceFullPath' + rule: 'self.type == ''ReplaceFullPath'' ? has(self.replaceFullPath) + : true' + - message: type must be 'ReplaceFullPath' when replaceFullPath + is set + rule: 'has(self.replaceFullPath) ? self.type == + ''ReplaceFullPath'' : true' + - message: replacePrefixMatch must be specified when + type is set to 'ReplacePrefixMatch' + rule: 'self.type == ''ReplacePrefixMatch'' ? has(self.replacePrefixMatch) + : true' + - message: type must be 'ReplacePrefixMatch' when + replacePrefixMatch is set + rule: 'has(self.replacePrefixMatch) ? 
self.type + == ''ReplacePrefixMatch'' : true' + type: object + required: + - type + type: object + x-kubernetes-validations: + - message: filter.requestHeaderModifier must be nil if the + filter.type is not RequestHeaderModifier + rule: '!(has(self.requestHeaderModifier) && self.type != + ''RequestHeaderModifier'')' + - message: filter.requestHeaderModifier must be specified + for RequestHeaderModifier filter.type + rule: '!(!has(self.requestHeaderModifier) && self.type == + ''RequestHeaderModifier'')' + - message: filter.responseHeaderModifier must be nil if the + filter.type is not ResponseHeaderModifier + rule: '!(has(self.responseHeaderModifier) && self.type != + ''ResponseHeaderModifier'')' + - message: filter.responseHeaderModifier must be specified + for ResponseHeaderModifier filter.type + rule: '!(!has(self.responseHeaderModifier) && self.type + == ''ResponseHeaderModifier'')' + - message: filter.requestMirror must be nil if the filter.type + is not RequestMirror + rule: '!(has(self.requestMirror) && self.type != ''RequestMirror'')' + - message: filter.requestMirror must be specified for RequestMirror + filter.type + rule: '!(!has(self.requestMirror) && self.type == ''RequestMirror'')' + - message: filter.requestRedirect must be nil if the filter.type + is not RequestRedirect + rule: '!(has(self.requestRedirect) && self.type != ''RequestRedirect'')' + - message: filter.requestRedirect must be specified for RequestRedirect + filter.type + rule: '!(!has(self.requestRedirect) && self.type == ''RequestRedirect'')' + - message: filter.urlRewrite must be nil if the filter.type + is not URLRewrite + rule: '!(has(self.urlRewrite) && self.type != ''URLRewrite'')' + - message: filter.urlRewrite must be specified for URLRewrite + filter.type + rule: '!(!has(self.urlRewrite) && self.type == ''URLRewrite'')' + - message: filter.extensionRef must be nil if the filter.type + is not ExtensionRef + rule: '!(has(self.extensionRef) && self.type != ''ExtensionRef'')' + - message: filter.extensionRef must be specified for ExtensionRef + filter.type + rule: '!(!has(self.extensionRef) && self.type == ''ExtensionRef'')' + maxItems: 16 + type: array + x-kubernetes-validations: + - message: May specify either httpRouteFilterRequestRedirect + or httpRouteFilterRequestRewrite, but not both + rule: '!(self.exists(f, f.type == ''RequestRedirect'') && + self.exists(f, f.type == ''URLRewrite''))' + - message: RequestHeaderModifier filter cannot be repeated + rule: self.filter(f, f.type == 'RequestHeaderModifier').size() + <= 1 + - message: ResponseHeaderModifier filter cannot be repeated + rule: self.filter(f, f.type == 'ResponseHeaderModifier').size() + <= 1 + - message: RequestRedirect filter cannot be repeated + rule: self.filter(f, f.type == 'RequestRedirect').size() <= + 1 + - message: URLRewrite filter cannot be repeated + rule: self.filter(f, f.type == 'URLRewrite').size() <= 1 + matches: + default: + - path: + type: PathPrefix + value: / + description: "Matches define conditions used for matching the + rule against incoming HTTP requests. Each match is independent, + i.e. this rule will be matched if **any** one of the matches + is satisfied. 
\n For example, take the following matches configuration: + \n ``` matches: - path: value: \"/foo\" headers: - name: \"version\" + value: \"v2\" - path: value: \"/v2/foo\" ``` \n For a request + to match against this rule, a request must satisfy EITHER + of the two conditions: \n - path prefixed with `/foo` AND + contains the header `version: v2` - path prefix of `/v2/foo` + \n See the documentation for HTTPRouteMatch on how to specify + multiple match conditions that should be ANDed together. \n + If no matches are specified, the default is a prefix path + match on \"/\", which has the effect of matching every HTTP + request. \n Proxy or Load Balancer routing configuration generated + from HTTPRoutes MUST prioritize matches based on the following + criteria, continuing on ties. Across all rules specified on + applicable Routes, precedence must be given to the match having: + \n * \"Exact\" path match. * \"Prefix\" path match with largest + number of characters. * Method match. * Largest number of + header matches. * Largest number of query param matches. \n + Note: The precedence of RegularExpression path matches are + implementation-specific. \n If ties still exist across multiple + Routes, matching precedence MUST be determined in order of + the following criteria, continuing on ties: \n * The oldest + Route based on creation timestamp. * The Route appearing first + in alphabetical order by \"{namespace}/{name}\". \n If ties + still exist within an HTTPRoute, matching precedence MUST + be granted to the FIRST matching rule (in list order) with + a match meeting the above criteria. \n When no rules matching + a request have been successfully attached to the parent a + request is coming from, a HTTP 404 status code MUST be returned." + items: + description: "HTTPRouteMatch defines the predicate used to + match requests to a given action. Multiple match types are + ANDed together, i.e. the match will evaluate to true only + if all conditions are satisfied. \n For example, the match + below will match a HTTP request only if its path starts + with `/foo` AND it contains the `version: v1` header: \n + ``` match: \n path: value: \"/foo\" headers: - name: \"version\" + value \"v1\" \n ```" + properties: + headers: + description: Headers specifies HTTP request header matchers. + Multiple match values are ANDed together, meaning, a + request must match all the specified headers to select + the route. + items: + description: HTTPHeaderMatch describes how to select + a HTTP route by matching HTTP request headers. + properties: + name: + description: "Name is the name of the HTTP Header + to be matched. Name matching MUST be case insensitive. + (See https://tools.ietf.org/html/rfc7230#section-3.2). + \n If multiple entries specify equivalent header + names, only the first entry with an equivalent + name MUST be considered for a match. Subsequent + entries with an equivalent header name MUST be + ignored. Due to the case-insensitivity of header + names, \"foo\" and \"Foo\" are considered equivalent. + \n When a header is repeated in an HTTP request, + it is implementation-specific behavior as to how + this is represented. Generally, proxies should + follow the guidance from the RFC: https://www.rfc-editor.org/rfc/rfc7230.html#section-3.2.2 + regarding processing a repeated header, with special + handling for \"Set-Cookie\"." 
+ maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + type: + default: Exact + description: "Type specifies how to match against + the value of the header. \n Support: Core (Exact) + \n Support: Implementation-specific (RegularExpression) + \n Since RegularExpression HeaderMatchType has + implementation-specific conformance, implementations + can support POSIX, PCRE or any other dialects + of regular expressions. Please read the implementation's + documentation to determine the supported dialect." + enum: + - Exact + - RegularExpression + type: string + value: + description: Value is the value of HTTP Header to + be matched. + maxLength: 4096 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + method: + description: "Method specifies HTTP method matcher. When + specified, this route will be matched only if the request + has the specified method. \n Support: Extended" + enum: + - GET + - HEAD + - POST + - PUT + - DELETE + - CONNECT + - OPTIONS + - TRACE + - PATCH + type: string + path: + default: + type: PathPrefix + value: / + description: Path specifies a HTTP request path matcher. + If this field is not specified, a default prefix match + on the "/" path is provided. + properties: + type: + default: PathPrefix + description: "Type specifies how to match against + the path Value. \n Support: Core (Exact, PathPrefix) + \n Support: Implementation-specific (RegularExpression)" + enum: + - Exact + - PathPrefix + - RegularExpression + type: string + value: + default: / + description: Value of the HTTP path to match against. + maxLength: 1024 + type: string + type: object + x-kubernetes-validations: + - message: value must be an absolute path and start with + '/' when type one of ['Exact', 'PathPrefix'] + rule: '(self.type in [''Exact'',''PathPrefix'']) ? self.value.startsWith(''/'') + : true' + - message: must not contain '//' when type one of ['Exact', + 'PathPrefix'] + rule: '(self.type in [''Exact'',''PathPrefix'']) ? !self.value.contains(''//'') + : true' + - message: must not contain '/./' when type one of ['Exact', + 'PathPrefix'] + rule: '(self.type in [''Exact'',''PathPrefix'']) ? !self.value.contains(''/./'') + : true' + - message: must not contain '/../' when type one of ['Exact', + 'PathPrefix'] + rule: '(self.type in [''Exact'',''PathPrefix'']) ? !self.value.contains(''/../'') + : true' + - message: must not contain '%2f' when type one of ['Exact', + 'PathPrefix'] + rule: '(self.type in [''Exact'',''PathPrefix'']) ? !self.value.contains(''%2f'') + : true' + - message: must not contain '%2F' when type one of ['Exact', + 'PathPrefix'] + rule: '(self.type in [''Exact'',''PathPrefix'']) ? !self.value.contains(''%2F'') + : true' + - message: must not contain '#' when type one of ['Exact', + 'PathPrefix'] + rule: '(self.type in [''Exact'',''PathPrefix'']) ? !self.value.contains(''#'') + : true' + - message: must not end with '/..' when type one of ['Exact', + 'PathPrefix'] + rule: '(self.type in [''Exact'',''PathPrefix'']) ? !self.value.endsWith(''/..'') + : true' + - message: must not end with '/.' when type one of ['Exact', + 'PathPrefix'] + rule: '(self.type in [''Exact'',''PathPrefix'']) ? 
!self.value.endsWith(''/.'') + : true' + - message: type must be one of ['Exact', 'PathPrefix', + 'RegularExpression'] + rule: self.type in ['Exact','PathPrefix'] || self.type + == 'RegularExpression' + - message: must only contain valid characters (matching + ^(?:[-A-Za-z0-9/._~!$&'()*+,;=:@]|[%][0-9a-fA-F]{2})+$) + for types ['Exact', 'PathPrefix'] + rule: '(self.type in [''Exact'',''PathPrefix'']) ? self.value.matches(r"""^(?:[-A-Za-z0-9/._~!$&''()*+,;=:@]|[%][0-9a-fA-F]{2})+$""") + : true' + queryParams: + description: "QueryParams specifies HTTP query parameter + matchers. Multiple match values are ANDed together, + meaning, a request must match all the specified query + parameters to select the route. \n Support: Extended" + items: + description: HTTPQueryParamMatch describes how to select + a HTTP route by matching HTTP query parameters. + properties: + name: + description: "Name is the name of the HTTP query + param to be matched. This must be an exact string + match. (See https://tools.ietf.org/html/rfc7230#section-2.7.3). + \n If multiple entries specify equivalent query + param names, only the first entry with an equivalent + name MUST be considered for a match. Subsequent + entries with an equivalent query param name MUST + be ignored. \n If a query param is repeated in + an HTTP request, the behavior is purposely left + undefined, since different data planes have different + capabilities. However, it is *recommended* that + implementations should match against the first + value of the param if the data plane supports + it, as this behavior is expected in other load + balancing contexts outside of the Gateway API. + \n Users SHOULD NOT route traffic based on repeated + query params to guard themselves against potential + differences in the implementations." + maxLength: 256 + minLength: 1 + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + type: + default: Exact + description: "Type specifies how to match against + the value of the query parameter. \n Support: + Extended (Exact) \n Support: Implementation-specific + (RegularExpression) \n Since RegularExpression + QueryParamMatchType has Implementation-specific + conformance, implementations can support POSIX, + PCRE or any other dialects of regular expressions. + Please read the implementation's documentation + to determine the supported dialect." + enum: + - Exact + - RegularExpression + type: string + value: + description: Value is the value of HTTP query param + to be matched. + maxLength: 1024 + minLength: 1 + type: string + required: + - name + - value + type: object + maxItems: 16 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + maxItems: 8 + type: array + timeouts: + description: "Timeouts defines the timeouts that can be configured + for an HTTP request. \n Support: Extended \n " + properties: + backendRequest: + description: "BackendRequest specifies a timeout for an + individual request from the gateway to a backend. This + covers the time from when the request first starts being + sent from the gateway to when the full response has been + received from the backend. \n An entire client HTTP transaction + with a gateway, covered by the Request timeout, may result + in more than one call from the gateway to the destination + backend, for example, if automatic retries are supported. + \n Because the Request timeout encompasses the BackendRequest + timeout, the value of BackendRequest must be <= the value + of Request timeout. 
\n Support: Extended" + pattern: ^([0-9]{1,5}(h|m|s|ms)){1,4}$ + type: string + request: + description: "Request specifies the maximum duration for + a gateway to respond to an HTTP request. If the gateway + has not been able to respond before this deadline is met, + the gateway MUST return a timeout error. \n For example, + setting the `rules.timeouts.request` field to the value + `10s` in an `HTTPRoute` will cause a timeout if a client + request is taking longer than 10 seconds to complete. + \n This timeout is intended to cover as close to the whole + request-response transaction as possible although an implementation + MAY choose to start the timeout after the entire request + stream has been received instead of immediately after + the transaction is initiated by the client. \n When this + field is unspecified, request timeout behavior is implementation-specific. + \n Support: Extended" + pattern: ^([0-9]{1,5}(h|m|s|ms)){1,4}$ + type: string + type: object + x-kubernetes-validations: + - message: backendRequest timeout cannot be longer than request + timeout + rule: '!(has(self.request) && has(self.backendRequest) && + duration(self.request) != duration(''0s'') && duration(self.backendRequest) + > duration(self.request))' + type: object + x-kubernetes-validations: + - message: RequestRedirect filter must not be used together with + backendRefs + rule: '(has(self.backendRefs) && size(self.backendRefs) > 0) ? + (!has(self.filters) || self.filters.all(f, !has(f.requestRedirect))): + true' + - message: When using RequestRedirect filter with path.replacePrefixMatch, + exactly one PathPrefix match must be specified + rule: '(has(self.filters) && self.filters.exists_one(f, has(f.requestRedirect) + && has(f.requestRedirect.path) && f.requestRedirect.path.type + == ''ReplacePrefixMatch'' && has(f.requestRedirect.path.replacePrefixMatch))) + ? ((size(self.matches) != 1 || !has(self.matches[0].path) || + self.matches[0].path.type != ''PathPrefix'') ? false : true) + : true' + - message: When using URLRewrite filter with path.replacePrefixMatch, + exactly one PathPrefix match must be specified + rule: '(has(self.filters) && self.filters.exists_one(f, has(f.urlRewrite) + && has(f.urlRewrite.path) && f.urlRewrite.path.type == ''ReplacePrefixMatch'' + && has(f.urlRewrite.path.replacePrefixMatch))) ? ((size(self.matches) + != 1 || !has(self.matches[0].path) || self.matches[0].path.type + != ''PathPrefix'') ? false : true) : true' + - message: Within backendRefs, when using RequestRedirect filter + with path.replacePrefixMatch, exactly one PathPrefix match must + be specified + rule: '(has(self.backendRefs) && self.backendRefs.exists_one(b, + (has(b.filters) && b.filters.exists_one(f, has(f.requestRedirect) + && has(f.requestRedirect.path) && f.requestRedirect.path.type + == ''ReplacePrefixMatch'' && has(f.requestRedirect.path.replacePrefixMatch))) + )) ? ((size(self.matches) != 1 || !has(self.matches[0].path) + || self.matches[0].path.type != ''PathPrefix'') ? false : true) + : true' + - message: Within backendRefs, When using URLRewrite filter with + path.replacePrefixMatch, exactly one PathPrefix match must be + specified + rule: '(has(self.backendRefs) && self.backendRefs.exists_one(b, + (has(b.filters) && b.filters.exists_one(f, has(f.urlRewrite) + && has(f.urlRewrite.path) && f.urlRewrite.path.type == ''ReplacePrefixMatch'' + && has(f.urlRewrite.path.replacePrefixMatch))) )) ? ((size(self.matches) + != 1 || !has(self.matches[0].path) || self.matches[0].path.type + != ''PathPrefix'') ? 
false : true) : true' + maxItems: 16 + type: array + type: object + status: + description: Status defines the current state of HTTPRoute. + properties: + parents: + description: "Parents is a list of parent resources (usually Gateways) + that are associated with the route, and the status of the route + with respect to each parent. When this route attaches to a parent, + the controller that manages the parent must add an entry to this + list when the controller first sees the route and should update + the entry as appropriate when the route or gateway is modified. + \n Note that parent references that cannot be resolved by an implementation + of this API will not be added to this list. Implementations of this + API can only populate Route status for the Gateways/parent resources + they are responsible for. \n A maximum of 32 Gateways will be represented + in this list. An empty list means the route has not been attached + to any Gateway." + items: + description: RouteParentStatus describes the status of a route with + respect to an associated Parent. + properties: + conditions: + description: "Conditions describes the status of the route with + respect to the Gateway. Note that the route's availability + is also subject to the Gateway's own status conditions and + listener status. \n If the Route's ParentRef specifies an + existing Gateway that supports Routes of this kind AND that + Gateway's controller has sufficient access, then that Gateway's + controller MUST set the \"Accepted\" condition on the Route, + to indicate whether the route has been accepted or rejected + by the Gateway, and why. \n A Route MUST be considered \"Accepted\" + if at least one of the Route's rules is implemented by the + Gateway. \n There are a number of cases where the \"Accepted\" + condition may not be set due to lack of controller visibility, + that includes when: \n * The Route refers to a non-existent + parent. * The Route is of a type that the controller does + not support. * The Route is in a namespace the controller + does not have access to." + items: + description: "Condition contains details for one aspect of + the current state of this API Resource. --- This struct + is intended for direct use as an array at the field path + .status.conditions. For example, \n type FooStatus struct{ + // Represents the observations of a foo's current state. + // Known .status.conditions.type are: \"Available\", \"Progressing\", + and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields + }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should + be when the underlying condition changed. If that is + not known, then using the time when the API field changed + is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, + if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the + current state of the instance. 
+ format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier + indicating the reason for the condition's last transition. + Producers of specific condition types may define expected + values and meanings for this field, and whether the + values are considered a guaranteed API. The value should + be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across + resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability + to deconflict is important. The regex it matches is + (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + controllerName: + description: "ControllerName is a domain/path string that indicates + the name of the controller that wrote this status. This corresponds + with the controllerName field on GatewayClass. \n Example: + \"example.net/gateway-controller\". \n The format of this + field is DOMAIN \"/\" PATH, where DOMAIN and PATH are valid + Kubernetes names (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + \n Controllers MUST populate this field when writing status. + Controllers should ensure that entries to status populated + with their ControllerName are cleaned up when they are no + longer necessary." + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9\/\-._~%!$&'()*+,;=:]+$ + type: string + parentRef: + description: ParentRef corresponds with a ParentRef in the spec + that this RouteParentStatus struct describes the status of. + properties: + group: + default: gateway.networking.k8s.io + description: "Group is the group of the referent. When unspecified, + \"gateway.networking.k8s.io\" is inferred. To set the + core API group (such as for a \"Service\" kind referent), + Group must be explicitly set to \"\" (empty string). \n + Support: Core" + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Gateway + description: "Kind is kind of the referent. \n There are + two kinds of parent resources with \"Core\" support: \n + * Gateway (Gateway conformance profile) * Service (Mesh + conformance profile, experimental, ClusterIP Services + only) \n Support for other resources is Implementation-Specific." + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: "Name is the name of the referent. \n Support: + Core" + maxLength: 253 + minLength: 1 + type: string + namespace: + description: "Namespace is the namespace of the referent. + When unspecified, this refers to the local namespace of + the Route. \n Note that there are specific rules for ParentRefs + which cross namespace boundaries. 
Cross-namespace references + are only valid if they are explicitly allowed by something + in the namespace they are referring to. For example: Gateway + has the AllowedRoutes field, and ReferenceGrant provides + a generic way to enable any other kind of cross-namespace + reference. \n ParentRefs from a Route to a Service in + the same namespace are \"producer\" routes, which apply + default routing rules to inbound connections from any + namespace to the Service. \n ParentRefs from a Route to + a Service in a different namespace are \"consumer\" routes, + and these routing rules are only applied to outbound connections + originating from the same namespace as the Route, for + which the intended destination of the connections are + a Service targeted as a ParentRef of the Route. \n Support: + Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: "Port is the network port this Route targets. + It can be interpreted differently based on the type of + parent resource. \n When the parent resource is a Gateway, + this targets all listeners listening on the specified + port that also support this kind of Route(and select this + Route). It's not recommended to set `Port` unless the + networking behaviors specified in a Route must apply to + a specific port as opposed to a listener(s) whose port(s) + may be changed. When both Port and SectionName are specified, + the name and port of the selected listener must match + both specified values. \n When the parent resource is + a Service, this targets a specific port in the Service + spec. When both Port (experimental) and SectionName are + specified, the name and port of the selected port must + match both specified values. \n Implementations MAY choose + to support other parent resources. Implementations supporting + other types of parent resources MUST clearly document + how/if Port is interpreted. \n For the purpose of status, + an attachment is considered successful as long as the + parent resource accepts it partially. For example, Gateway + listeners can restrict which Routes can attach to them + by Route kind, namespace, or hostname. If 1 of 2 Gateway + listeners accept attachment from the referencing Route, + the Route MUST be considered successfully attached. If + no Gateway listeners accept attachment from this Route, + the Route MUST be considered detached from the Gateway. + \n Support: Extended \n " + format: int32 + maximum: 65535 + minimum: 1 + type: integer + sectionName: + description: "SectionName is the name of a section within + the target resource. In the following resources, SectionName + is interpreted as the following: \n * Gateway: Listener + Name. When both Port (experimental) and SectionName are + specified, the name and port of the selected listener + must match both specified values. * Service: Port Name. + When both Port (experimental) and SectionName are specified, + the name and port of the selected listener must match + both specified values. Note that attaching Routes to Services + as Parents is part of experimental Mesh support and is + not supported for any other purpose. \n Implementations + MAY choose to support attaching Routes to other resources. + If that is the case, they MUST clearly document how SectionName + is interpreted. \n When unspecified (empty string), this + will reference the entire resource. For the purpose of + status, an attachment is considered successful if at least + one section in the parent resource accepts it. 
For example, + Gateway listeners can restrict which Routes can attach + to them by Route kind, namespace, or hostname. If 1 of + 2 Gateway listeners accept attachment from the referencing + Route, the Route MUST be considered successfully attached. + If no Gateway listeners accept attachment from this Route, + the Route MUST be considered detached from the Gateway. + \n Support: Core" + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - name + type: object + required: + - controllerName + - parentRef + type: object + maxItems: 32 + type: array + required: + - parents + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/2466 + gateway.networking.k8s.io/bundle-version: v1.0.0 + gateway.networking.k8s.io/channel: experimental + creationTimestamp: null + name: referencegrants.gateway.networking.k8s.io +spec: + group: gateway.networking.k8s.io + names: + categories: + - gateway-api + kind: ReferenceGrant + listKind: ReferenceGrantList + plural: referencegrants + shortNames: + - refgrant + singular: referencegrant + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + deprecated: true + deprecationWarning: The v1alpha2 version of ReferenceGrant has been deprecated + and will be removed in a future release of the API. Please upgrade to v1beta1. + name: v1alpha2 + schema: + openAPIV3Schema: + description: "ReferenceGrant identifies kinds of resources in other namespaces + that are trusted to reference the specified kinds of resources in the same + namespace as the policy. \n Each ReferenceGrant can be used to represent + a unique trust relationship. Additional Reference Grants can be used to + add to the set of trusted sources of inbound references for the namespace + they are defined within. \n A ReferenceGrant is required for all cross-namespace + references in Gateway API (with the exception of cross-namespace Route-Gateway + attachment, which is governed by the AllowedRoutes configuration on the + Gateway, and cross-namespace Service ParentRefs on a \"consumer\" mesh Route, + which defines routing rules applicable only to workloads in the Route namespace). + ReferenceGrants allowing a reference from a Route to a Service are only + applicable to BackendRefs. \n ReferenceGrant is a form of runtime verification + allowing users to assert which cross-namespace object references are permitted. + Implementations that support ReferenceGrant MUST NOT permit cross-namespace + references which have no grant, and MUST respond to the removal of a grant + by revoking the access that the grant allowed." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the desired state of ReferenceGrant. + properties: + from: + description: "From describes the trusted namespaces and kinds that + can reference the resources described in \"To\". Each entry in this + list MUST be considered to be an additional place that references + can be valid from, or to put this another way, entries MUST be combined + using OR. \n Support: Core" + items: + description: ReferenceGrantFrom describes trusted namespaces and + kinds. + properties: + group: + description: "Group is the group of the referent. When empty, + the Kubernetes core API group is inferred. \n Support: Core" + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + description: "Kind is the kind of the referent. Although implementations + may support additional resources, the following types are + part of the \"Core\" support level for this field. \n When + used to permit a SecretObjectReference: \n * Gateway \n When + used to permit a BackendObjectReference: \n * GRPCRoute * + HTTPRoute * TCPRoute * TLSRoute * UDPRoute" + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + namespace: + description: "Namespace is the namespace of the referent. \n + Support: Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + required: + - group + - kind + - namespace + type: object + maxItems: 16 + minItems: 1 + type: array + to: + description: "To describes the resources that may be referenced by + the resources described in \"From\". Each entry in this list MUST + be considered to be an additional place that references can be valid + to, or to put this another way, entries MUST be combined using OR. + \n Support: Core" + items: + description: ReferenceGrantTo describes what Kinds are allowed as + targets of the references. + properties: + group: + description: "Group is the group of the referent. When empty, + the Kubernetes core API group is inferred. \n Support: Core" + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + description: "Kind is the kind of the referent. Although implementations + may support additional resources, the following types are + part of the \"Core\" support level for this field: \n * Secret + when used to permit a SecretObjectReference * Service when + used to permit a BackendObjectReference" + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: Name is the name of the referent. When unspecified, + this policy refers to all resources of the specified Group + and Kind in the local namespace. 
+ maxLength: 253 + minLength: 1 + type: string + required: + - group + - kind + type: object + maxItems: 16 + minItems: 1 + type: array + required: + - from + - to + type: object + type: object + served: true + storage: false + subresources: {} + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: "ReferenceGrant identifies kinds of resources in other namespaces + that are trusted to reference the specified kinds of resources in the same + namespace as the policy. \n Each ReferenceGrant can be used to represent + a unique trust relationship. Additional Reference Grants can be used to + add to the set of trusted sources of inbound references for the namespace + they are defined within. \n All cross-namespace references in Gateway API + (with the exception of cross-namespace Gateway-route attachment) require + a ReferenceGrant. \n ReferenceGrant is a form of runtime verification allowing + users to assert which cross-namespace object references are permitted. Implementations + that support ReferenceGrant MUST NOT permit cross-namespace references which + have no grant, and MUST respond to the removal of a grant by revoking the + access that the grant allowed." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the desired state of ReferenceGrant. + properties: + from: + description: "From describes the trusted namespaces and kinds that + can reference the resources described in \"To\". Each entry in this + list MUST be considered to be an additional place that references + can be valid from, or to put this another way, entries MUST be combined + using OR. \n Support: Core" + items: + description: ReferenceGrantFrom describes trusted namespaces and + kinds. + properties: + group: + description: "Group is the group of the referent. When empty, + the Kubernetes core API group is inferred. \n Support: Core" + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + description: "Kind is the kind of the referent. Although implementations + may support additional resources, the following types are + part of the \"Core\" support level for this field. \n When + used to permit a SecretObjectReference: \n * Gateway \n When + used to permit a BackendObjectReference: \n * GRPCRoute * + HTTPRoute * TCPRoute * TLSRoute * UDPRoute" + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + namespace: + description: "Namespace is the namespace of the referent. 
\n + Support: Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + required: + - group + - kind + - namespace + type: object + maxItems: 16 + minItems: 1 + type: array + to: + description: "To describes the resources that may be referenced by + the resources described in \"From\". Each entry in this list MUST + be considered to be an additional place that references can be valid + to, or to put this another way, entries MUST be combined using OR. + \n Support: Core" + items: + description: ReferenceGrantTo describes what Kinds are allowed as + targets of the references. + properties: + group: + description: "Group is the group of the referent. When empty, + the Kubernetes core API group is inferred. \n Support: Core" + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + description: "Kind is the kind of the referent. Although implementations + may support additional resources, the following types are + part of the \"Core\" support level for this field: \n * Secret + when used to permit a SecretObjectReference * Service when + used to permit a BackendObjectReference" + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: Name is the name of the referent. When unspecified, + this policy refers to all resources of the specified Group + and Kind in the local namespace. + maxLength: 253 + minLength: 1 + type: string + required: + - group + - kind + type: object + maxItems: 16 + minItems: 1 + type: array + required: + - from + - to + type: object + type: object + served: true + storage: true + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/2466 + gateway.networking.k8s.io/bundle-version: v1.0.0 + gateway.networking.k8s.io/channel: experimental + creationTimestamp: null + name: tcproutes.gateway.networking.k8s.io +spec: + group: gateway.networking.k8s.io + names: + categories: + - gateway-api + kind: TCPRoute + listKind: TCPRouteList + plural: tcproutes + singular: tcproute + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha2 + schema: + openAPIV3Schema: + description: TCPRoute provides a way to route TCP requests. When combined + with a Gateway listener, it can be used to forward connections on the port + specified by the listener to a set of backends specified by the TCPRoute. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the desired state of TCPRoute. 
+ properties: + parentRefs: + description: "ParentRefs references the resources (usually Gateways) + that a Route wants to be attached to. Note that the referenced parent + resource needs to allow this for the attachment to be complete. + For Gateways, that means the Gateway needs to allow attachment from + Routes of this kind and namespace. For Services, that means the + Service must either be in the same namespace for a \"producer\" + route, or the mesh implementation must support and allow \"consumer\" + routes for the referenced Service. ReferenceGrant is not applicable + for governing ParentRefs to Services - it is not possible to create + a \"producer\" route for a Service in a different namespace from + the Route. \n There are two kinds of parent resources with \"Core\" + support: \n * Gateway (Gateway conformance profile) * Service (Mesh + conformance profile, experimental, ClusterIP Services only) This + API may be extended in the future to support additional kinds of + parent resources. \n ParentRefs must be _distinct_. This means either + that: \n * They select different objects. If this is the case, + then parentRef entries are distinct. In terms of fields, this means + that the multi-part key defined by `group`, `kind`, `namespace`, + and `name` must be unique across all parentRef entries in the Route. + * They do not select different objects, but for each optional field + used, each ParentRef that selects the same object must set the same + set of optional fields to different values. If one ParentRef sets + a combination of optional fields, all must set the same combination. + \n Some examples: \n * If one ParentRef sets `sectionName`, all + ParentRefs referencing the same object must also set `sectionName`. + * If one ParentRef sets `port`, all ParentRefs referencing the same + object must also set `port`. * If one ParentRef sets `sectionName` + and `port`, all ParentRefs referencing the same object must also + set `sectionName` and `port`. \n It is possible to separately reference + multiple distinct objects that may be collapsed by an implementation. + For example, some implementations may choose to merge compatible + Gateway Listeners together. If that is the case, the list of routes + attached to those resources should also be merged. \n Note that + for ParentRefs that cross namespace boundaries, there are specific + rules. Cross-namespace references are only valid if they are explicitly + allowed by something in the namespace they are referring to. For + example, Gateway has the AllowedRoutes field, and ReferenceGrant + provides a generic way to enable other kinds of cross-namespace + reference. \n ParentRefs from a Route to a Service in the same + namespace are \"producer\" routes, which apply default routing rules + to inbound connections from any namespace to the Service. \n ParentRefs + from a Route to a Service in a different namespace are \"consumer\" + routes, and these routing rules are only applied to outbound connections + originating from the same namespace as the Route, for which the + intended destination of the connections are a Service targeted as + a ParentRef of the Route. \n " + items: + description: "ParentReference identifies an API object (usually + a Gateway) that can be considered a parent of this resource (usually + a route). 
There are two kinds of parent resources with \"Core\" + support: \n * Gateway (Gateway conformance profile) * Service + (Mesh conformance profile, experimental, ClusterIP Services only) + \n This API may be extended in the future to support additional + kinds of parent resources. \n The API object must be valid in + the cluster; the Group and Kind must be registered in the cluster + for this reference to be valid." + properties: + group: + default: gateway.networking.k8s.io + description: "Group is the group of the referent. When unspecified, + \"gateway.networking.k8s.io\" is inferred. To set the core + API group (such as for a \"Service\" kind referent), Group + must be explicitly set to \"\" (empty string). \n Support: + Core" + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Gateway + description: "Kind is kind of the referent. \n There are two + kinds of parent resources with \"Core\" support: \n * Gateway + (Gateway conformance profile) * Service (Mesh conformance + profile, experimental, ClusterIP Services only) \n Support + for other resources is Implementation-Specific." + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: "Name is the name of the referent. \n Support: + Core" + maxLength: 253 + minLength: 1 + type: string + namespace: + description: "Namespace is the namespace of the referent. When + unspecified, this refers to the local namespace of the Route. + \n Note that there are specific rules for ParentRefs which + cross namespace boundaries. Cross-namespace references are + only valid if they are explicitly allowed by something in + the namespace they are referring to. For example: Gateway + has the AllowedRoutes field, and ReferenceGrant provides a + generic way to enable any other kind of cross-namespace reference. + \n ParentRefs from a Route to a Service in the same namespace + are \"producer\" routes, which apply default routing rules + to inbound connections from any namespace to the Service. + \n ParentRefs from a Route to a Service in a different namespace + are \"consumer\" routes, and these routing rules are only + applied to outbound connections originating from the same + namespace as the Route, for which the intended destination + of the connections are a Service targeted as a ParentRef of + the Route. \n Support: Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: "Port is the network port this Route targets. It + can be interpreted differently based on the type of parent + resource. \n When the parent resource is a Gateway, this targets + all listeners listening on the specified port that also support + this kind of Route(and select this Route). It's not recommended + to set `Port` unless the networking behaviors specified in + a Route must apply to a specific port as opposed to a listener(s) + whose port(s) may be changed. When both Port and SectionName + are specified, the name and port of the selected listener + must match both specified values. \n When the parent resource + is a Service, this targets a specific port in the Service + spec. When both Port (experimental) and SectionName are specified, + the name and port of the selected port must match both specified + values. \n Implementations MAY choose to support other parent + resources. 
Implementations supporting other types of parent + resources MUST clearly document how/if Port is interpreted. + \n For the purpose of status, an attachment is considered + successful as long as the parent resource accepts it partially. + For example, Gateway listeners can restrict which Routes can + attach to them by Route kind, namespace, or hostname. If 1 + of 2 Gateway listeners accept attachment from the referencing + Route, the Route MUST be considered successfully attached. + If no Gateway listeners accept attachment from this Route, + the Route MUST be considered detached from the Gateway. \n + Support: Extended \n " + format: int32 + maximum: 65535 + minimum: 1 + type: integer + sectionName: + description: "SectionName is the name of a section within the + target resource. In the following resources, SectionName is + interpreted as the following: \n * Gateway: Listener Name. + When both Port (experimental) and SectionName are specified, + the name and port of the selected listener must match both + specified values. * Service: Port Name. When both Port (experimental) + and SectionName are specified, the name and port of the selected + listener must match both specified values. Note that attaching + Routes to Services as Parents is part of experimental Mesh + support and is not supported for any other purpose. \n Implementations + MAY choose to support attaching Routes to other resources. + If that is the case, they MUST clearly document how SectionName + is interpreted. \n When unspecified (empty string), this will + reference the entire resource. For the purpose of status, + an attachment is considered successful if at least one section + in the parent resource accepts it. For example, Gateway listeners + can restrict which Routes can attach to them by Route kind, + namespace, or hostname. If 1 of 2 Gateway listeners accept + attachment from the referencing Route, the Route MUST be considered + successfully attached. If no Gateway listeners accept attachment + from this Route, the Route MUST be considered detached from + the Gateway. \n Support: Core" + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - name + type: object + maxItems: 32 + type: array + x-kubernetes-validations: + - message: sectionName or port must be specified when parentRefs includes + 2 or more references to the same parent + rule: 'self.all(p1, self.all(p2, p1.group == p2.group && p1.kind + == p2.kind && p1.name == p2.name && (((!has(p1.__namespace__) + || p1.__namespace__ == '''') && (!has(p2.__namespace__) || p2.__namespace__ + == '''')) || (has(p1.__namespace__) && has(p2.__namespace__) && + p1.__namespace__ == p2.__namespace__)) ? 
((!has(p1.sectionName) + || p1.sectionName == '''') == (!has(p2.sectionName) || p2.sectionName + == '''') && (!has(p1.port) || p1.port == 0) == (!has(p2.port) + || p2.port == 0)): true))' + - message: sectionName or port must be unique when parentRefs includes + 2 or more references to the same parent + rule: self.all(p1, self.exists_one(p2, p1.group == p2.group && p1.kind + == p2.kind && p1.name == p2.name && (((!has(p1.__namespace__) + || p1.__namespace__ == '') && (!has(p2.__namespace__) || p2.__namespace__ + == '')) || (has(p1.__namespace__) && has(p2.__namespace__) && + p1.__namespace__ == p2.__namespace__ )) && (((!has(p1.sectionName) + || p1.sectionName == '') && (!has(p2.sectionName) || p2.sectionName + == '')) || ( has(p1.sectionName) && has(p2.sectionName) && p1.sectionName + == p2.sectionName)) && (((!has(p1.port) || p1.port == 0) && (!has(p2.port) + || p2.port == 0)) || (has(p1.port) && has(p2.port) && p1.port + == p2.port)))) + rules: + description: Rules are a list of TCP matchers and actions. + items: + description: TCPRouteRule is the configuration for a given rule. + properties: + backendRefs: + description: "BackendRefs defines the backend(s) where matching + requests should be sent. If unspecified or invalid (refers + to a non-existent resource or a Service with no endpoints), + the underlying implementation MUST actively reject connection + attempts to this backend. Connection rejections must respect + weight; if an invalid backend is requested to have 80% of + connections, then 80% of connections must be rejected instead. + \n Support: Core for Kubernetes Service \n Support: Extended + for Kubernetes ServiceImport \n Support: Implementation-specific + for any other resource \n Support for weight: Extended" + items: + description: "BackendRef defines how a Route should forward + a request to a Kubernetes resource. \n Note that when a + namespace different than the local namespace is specified, + a ReferenceGrant object is required in the referent namespace + to allow that namespace's owner to accept the reference. + See the ReferenceGrant documentation for details. \n + \n When the BackendRef points to a Kubernetes Service, implementations + SHOULD honor the appProtocol field if it is set for the + target Service Port. \n Implementations supporting appProtocol + SHOULD recognize the Kubernetes Standard Application Protocols + defined in KEP-3726. \n If a Service appProtocol isn't specified, + an implementation MAY infer the backend protocol through + its own means. Implementations MAY infer the protocol from + the Route type referring to the backend Service. \n If a + Route is not able to send traffic to the backend using the + specified protocol then the backend is considered invalid. + Implementations MUST set the \"ResolvedRefs\" condition + to \"False\" with the \"UnsupportedProtocol\" reason. \n + \n Note that when the + BackendTLSPolicy object is enabled by the implementation, + there are some extra rules about validity to consider here. + See the fields where this struct is used for more information + about the exact behavior." + properties: + group: + default: "" + description: Group is the group of the referent. For example, + "gateway.networking.k8s.io". When unspecified or empty + string, core API group is inferred. + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Service + description: "Kind is the Kubernetes resource kind of + the referent. For example \"Service\". 
\n Defaults to + \"Service\" when not specified. \n ExternalName services + can refer to CNAME DNS records that may live outside + of the cluster and as such are difficult to reason about + in terms of conformance. They also may not be safe to + forward to (see CVE-2021-25740 for more information). + Implementations SHOULD NOT support ExternalName Services. + \n Support: Core (Services with a type other than ExternalName) + \n Support: Implementation-specific (Services with type + ExternalName)" + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: Name is the name of the referent. + maxLength: 253 + minLength: 1 + type: string + namespace: + description: "Namespace is the namespace of the backend. + When unspecified, the local namespace is inferred. \n + Note that when a namespace different than the local + namespace is specified, a ReferenceGrant object is required + in the referent namespace to allow that namespace's + owner to accept the reference. See the ReferenceGrant + documentation for details. \n Support: Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: Port specifies the destination port number + to use for this resource. Port is required when the + referent is a Kubernetes Service. In this case, the + port number is the service port number, not the target + port. For other resources, destination port might be + derived from the referent resource or this field. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + weight: + default: 1 + description: "Weight specifies the proportion of requests + forwarded to the referenced backend. This is computed + as weight/(sum of all weights in this BackendRefs list). + For non-zero values, there may be some epsilon from + the exact proportion defined here depending on the precision + an implementation supports. Weight is not a percentage + and the sum of weights does not need to equal 100. \n + If only one backend is specified and it has a weight + greater than 0, 100% of the traffic is forwarded to + that backend. If weight is set to 0, no traffic should + be forwarded for this entry. If unspecified, weight + defaults to 1. \n Support for this field varies based + on the context where used." + format: int32 + maximum: 1000000 + minimum: 0 + type: integer + required: + - name + type: object + x-kubernetes-validations: + - message: Must have port for Service reference + rule: '(size(self.group) == 0 && self.kind == ''Service'') + ? has(self.port) : true' + maxItems: 16 + minItems: 1 + type: array + type: object + maxItems: 16 + minItems: 1 + type: array + required: + - rules + type: object + status: + description: Status defines the current state of TCPRoute. + properties: + parents: + description: "Parents is a list of parent resources (usually Gateways) + that are associated with the route, and the status of the route + with respect to each parent. When this route attaches to a parent, + the controller that manages the parent must add an entry to this + list when the controller first sees the route and should update + the entry as appropriate when the route or gateway is modified. + \n Note that parent references that cannot be resolved by an implementation + of this API will not be added to this list. Implementations of this + API can only populate Route status for the Gateways/parent resources + they are responsible for. \n A maximum of 32 Gateways will be represented + in this list. 
An empty list means the route has not been attached + to any Gateway." + items: + description: RouteParentStatus describes the status of a route with + respect to an associated Parent. + properties: + conditions: + description: "Conditions describes the status of the route with + respect to the Gateway. Note that the route's availability + is also subject to the Gateway's own status conditions and + listener status. \n If the Route's ParentRef specifies an + existing Gateway that supports Routes of this kind AND that + Gateway's controller has sufficient access, then that Gateway's + controller MUST set the \"Accepted\" condition on the Route, + to indicate whether the route has been accepted or rejected + by the Gateway, and why. \n A Route MUST be considered \"Accepted\" + if at least one of the Route's rules is implemented by the + Gateway. \n There are a number of cases where the \"Accepted\" + condition may not be set due to lack of controller visibility, + that includes when: \n * The Route refers to a non-existent + parent. * The Route is of a type that the controller does + not support. * The Route is in a namespace the controller + does not have access to." + items: + description: "Condition contains details for one aspect of + the current state of this API Resource. --- This struct + is intended for direct use as an array at the field path + .status.conditions. For example, \n type FooStatus struct{ + // Represents the observations of a foo's current state. + // Known .status.conditions.type are: \"Available\", \"Progressing\", + and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields + }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should + be when the underlying condition changed. If that is + not known, then using the time when the API field changed + is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, + if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the + current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier + indicating the reason for the condition's last transition. + Producers of specific condition types may define expected + values and meanings for this field, and whether the + values are considered a guaranteed API. The value should + be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ --- Many .condition.type values are consistent across + resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability + to deconflict is important. The regex it matches is + (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + controllerName: + description: "ControllerName is a domain/path string that indicates + the name of the controller that wrote this status. This corresponds + with the controllerName field on GatewayClass. \n Example: + \"example.net/gateway-controller\". \n The format of this + field is DOMAIN \"/\" PATH, where DOMAIN and PATH are valid + Kubernetes names (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + \n Controllers MUST populate this field when writing status. + Controllers should ensure that entries to status populated + with their ControllerName are cleaned up when they are no + longer necessary." + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9\/\-._~%!$&'()*+,;=:]+$ + type: string + parentRef: + description: ParentRef corresponds with a ParentRef in the spec + that this RouteParentStatus struct describes the status of. + properties: + group: + default: gateway.networking.k8s.io + description: "Group is the group of the referent. When unspecified, + \"gateway.networking.k8s.io\" is inferred. To set the + core API group (such as for a \"Service\" kind referent), + Group must be explicitly set to \"\" (empty string). \n + Support: Core" + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Gateway + description: "Kind is kind of the referent. \n There are + two kinds of parent resources with \"Core\" support: \n + * Gateway (Gateway conformance profile) * Service (Mesh + conformance profile, experimental, ClusterIP Services + only) \n Support for other resources is Implementation-Specific." + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: "Name is the name of the referent. \n Support: + Core" + maxLength: 253 + minLength: 1 + type: string + namespace: + description: "Namespace is the namespace of the referent. + When unspecified, this refers to the local namespace of + the Route. \n Note that there are specific rules for ParentRefs + which cross namespace boundaries. Cross-namespace references + are only valid if they are explicitly allowed by something + in the namespace they are referring to. For example: Gateway + has the AllowedRoutes field, and ReferenceGrant provides + a generic way to enable any other kind of cross-namespace + reference. \n ParentRefs from a Route to a Service in + the same namespace are \"producer\" routes, which apply + default routing rules to inbound connections from any + namespace to the Service. 
\n ParentRefs from a Route to + a Service in a different namespace are \"consumer\" routes, + and these routing rules are only applied to outbound connections + originating from the same namespace as the Route, for + which the intended destination of the connections are + a Service targeted as a ParentRef of the Route. \n Support: + Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: "Port is the network port this Route targets. + It can be interpreted differently based on the type of + parent resource. \n When the parent resource is a Gateway, + this targets all listeners listening on the specified + port that also support this kind of Route(and select this + Route). It's not recommended to set `Port` unless the + networking behaviors specified in a Route must apply to + a specific port as opposed to a listener(s) whose port(s) + may be changed. When both Port and SectionName are specified, + the name and port of the selected listener must match + both specified values. \n When the parent resource is + a Service, this targets a specific port in the Service + spec. When both Port (experimental) and SectionName are + specified, the name and port of the selected port must + match both specified values. \n Implementations MAY choose + to support other parent resources. Implementations supporting + other types of parent resources MUST clearly document + how/if Port is interpreted. \n For the purpose of status, + an attachment is considered successful as long as the + parent resource accepts it partially. For example, Gateway + listeners can restrict which Routes can attach to them + by Route kind, namespace, or hostname. If 1 of 2 Gateway + listeners accept attachment from the referencing Route, + the Route MUST be considered successfully attached. If + no Gateway listeners accept attachment from this Route, + the Route MUST be considered detached from the Gateway. + \n Support: Extended \n " + format: int32 + maximum: 65535 + minimum: 1 + type: integer + sectionName: + description: "SectionName is the name of a section within + the target resource. In the following resources, SectionName + is interpreted as the following: \n * Gateway: Listener + Name. When both Port (experimental) and SectionName are + specified, the name and port of the selected listener + must match both specified values. * Service: Port Name. + When both Port (experimental) and SectionName are specified, + the name and port of the selected listener must match + both specified values. Note that attaching Routes to Services + as Parents is part of experimental Mesh support and is + not supported for any other purpose. \n Implementations + MAY choose to support attaching Routes to other resources. + If that is the case, they MUST clearly document how SectionName + is interpreted. \n When unspecified (empty string), this + will reference the entire resource. For the purpose of + status, an attachment is considered successful if at least + one section in the parent resource accepts it. For example, + Gateway listeners can restrict which Routes can attach + to them by Route kind, namespace, or hostname. If 1 of + 2 Gateway listeners accept attachment from the referencing + Route, the Route MUST be considered successfully attached. + If no Gateway listeners accept attachment from this Route, + the Route MUST be considered detached from the Gateway. 
+ \n Support: Core" + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - name + type: object + required: + - controllerName + - parentRef + type: object + maxItems: 32 + type: array + required: + - parents + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/2466 + gateway.networking.k8s.io/bundle-version: v1.0.0 + gateway.networking.k8s.io/channel: experimental + creationTimestamp: null + name: tlsroutes.gateway.networking.k8s.io +spec: + group: gateway.networking.k8s.io + names: + categories: + - gateway-api + kind: TLSRoute + listKind: TLSRouteList + plural: tlsroutes + singular: tlsroute + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha2 + schema: + openAPIV3Schema: + description: "The TLSRoute resource is similar to TCPRoute, but can be configured + to match against TLS-specific metadata. This allows more flexibility in + matching streams for a given TLS listener. \n If you need to forward traffic + to a single target for a TLS listener, you could choose to use a TCPRoute + with a TLS listener." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the desired state of TLSRoute. + properties: + hostnames: + description: "Hostnames defines a set of SNI names that should match + against the SNI attribute of TLS ClientHello message in TLS handshake. + This matches the RFC 1123 definition of a hostname with 2 notable + exceptions: \n 1. IPs are not allowed in SNI names per RFC 6066. + 2. A hostname may be prefixed with a wildcard label (`*.`). The + wildcard label must appear by itself as the first label. \n If a + hostname is specified by both the Listener and TLSRoute, there must + be at least one intersecting hostname for the TLSRoute to be attached + to the Listener. For example: \n * A Listener with `test.example.com` + as the hostname matches TLSRoutes that have either not specified + any hostnames, or have specified at least one of `test.example.com` + or `*.example.com`. * A Listener with `*.example.com` as the hostname + matches TLSRoutes that have either not specified any hostnames or + have specified at least one hostname that matches the Listener hostname. + For example, `test.example.com` and `*.example.com` would both match. + On the other hand, `example.com` and `test.example.net` would not + match. 
\n If both the Listener and TLSRoute have specified hostnames, + any TLSRoute hostnames that do not match the Listener hostname MUST + be ignored. For example, if a Listener specified `*.example.com`, + and the TLSRoute specified `test.example.com` and `test.example.net`, + `test.example.net` must not be considered for a match. \n If both + the Listener and TLSRoute have specified hostnames, and none match + with the criteria above, then the TLSRoute is not accepted. The + implementation must raise an 'Accepted' Condition with a status + of `False` in the corresponding RouteParentStatus. \n Support: Core" + items: + description: "Hostname is the fully qualified domain name of a network + host. This matches the RFC 1123 definition of a hostname with + 2 notable exceptions: \n 1. IPs are not allowed. 2. A hostname + may be prefixed with a wildcard label (`*.`). The wildcard label + must appear by itself as the first label. \n Hostname can be \"precise\" + which is a domain name without the terminating dot of a network + host (e.g. \"foo.example.com\") or \"wildcard\", which is a domain + name prefixed with a single wildcard label (e.g. `*.example.com`). + \n Note that as per RFC1035 and RFC1123, a *label* must consist + of lower case alphanumeric characters or '-', and must start and + end with an alphanumeric character. No other punctuation is allowed." + maxLength: 253 + minLength: 1 + pattern: ^(\*\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + maxItems: 16 + type: array + parentRefs: + description: "ParentRefs references the resources (usually Gateways) + that a Route wants to be attached to. Note that the referenced parent + resource needs to allow this for the attachment to be complete. + For Gateways, that means the Gateway needs to allow attachment from + Routes of this kind and namespace. For Services, that means the + Service must either be in the same namespace for a \"producer\" + route, or the mesh implementation must support and allow \"consumer\" + routes for the referenced Service. ReferenceGrant is not applicable + for governing ParentRefs to Services - it is not possible to create + a \"producer\" route for a Service in a different namespace from + the Route. \n There are two kinds of parent resources with \"Core\" + support: \n * Gateway (Gateway conformance profile) * Service (Mesh + conformance profile, experimental, ClusterIP Services only) This + API may be extended in the future to support additional kinds of + parent resources. \n ParentRefs must be _distinct_. This means either + that: \n * They select different objects. If this is the case, + then parentRef entries are distinct. In terms of fields, this means + that the multi-part key defined by `group`, `kind`, `namespace`, + and `name` must be unique across all parentRef entries in the Route. + * They do not select different objects, but for each optional field + used, each ParentRef that selects the same object must set the same + set of optional fields to different values. If one ParentRef sets + a combination of optional fields, all must set the same combination. + \n Some examples: \n * If one ParentRef sets `sectionName`, all + ParentRefs referencing the same object must also set `sectionName`. + * If one ParentRef sets `port`, all ParentRefs referencing the same + object must also set `port`. * If one ParentRef sets `sectionName` + and `port`, all ParentRefs referencing the same object must also + set `sectionName` and `port`. 
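The parentRefs distinctness rules spelled out in the generated description above are easier to follow with a concrete manifest. The sketch below is illustrative only and not part of this chart: the Gateway name, listener names, and backend Service are assumptions. It shows a TLSRoute that attaches twice to the same Gateway, so both parentRefs must set `sectionName` (to different listeners) to remain distinct.

```yaml
# Illustrative sketch only: a TLSRoute referencing the same Gateway twice.
# Because both parentRefs select the same object, each must set sectionName
# (here, two different listeners) for the entries to stay distinct.
apiVersion: gateway.networking.k8s.io/v1alpha2
kind: TLSRoute
metadata:
  name: example-tls-route          # assumed name
  namespace: default
spec:
  parentRefs:
  - name: example-gateway          # assumed Gateway in the same namespace
    sectionName: tls-listener-a    # assumed listener name
  - name: example-gateway
    sectionName: tls-listener-b    # assumed listener name
  hostnames:
  - "*.example.com"                # wildcard SNI match, per the hostnames rules above
  rules:
  - backendRefs:
    - name: tls-backend            # assumed Service; port is required for Service refs
      port: 443
```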
\n It is possible to separately reference + multiple distinct objects that may be collapsed by an implementation. + For example, some implementations may choose to merge compatible + Gateway Listeners together. If that is the case, the list of routes + attached to those resources should also be merged. \n Note that + for ParentRefs that cross namespace boundaries, there are specific + rules. Cross-namespace references are only valid if they are explicitly + allowed by something in the namespace they are referring to. For + example, Gateway has the AllowedRoutes field, and ReferenceGrant + provides a generic way to enable other kinds of cross-namespace + reference. \n ParentRefs from a Route to a Service in the same + namespace are \"producer\" routes, which apply default routing rules + to inbound connections from any namespace to the Service. \n ParentRefs + from a Route to a Service in a different namespace are \"consumer\" + routes, and these routing rules are only applied to outbound connections + originating from the same namespace as the Route, for which the + intended destination of the connections are a Service targeted as + a ParentRef of the Route. \n " + items: + description: "ParentReference identifies an API object (usually + a Gateway) that can be considered a parent of this resource (usually + a route). There are two kinds of parent resources with \"Core\" + support: \n * Gateway (Gateway conformance profile) * Service + (Mesh conformance profile, experimental, ClusterIP Services only) + \n This API may be extended in the future to support additional + kinds of parent resources. \n The API object must be valid in + the cluster; the Group and Kind must be registered in the cluster + for this reference to be valid." + properties: + group: + default: gateway.networking.k8s.io + description: "Group is the group of the referent. When unspecified, + \"gateway.networking.k8s.io\" is inferred. To set the core + API group (such as for a \"Service\" kind referent), Group + must be explicitly set to \"\" (empty string). \n Support: + Core" + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Gateway + description: "Kind is kind of the referent. \n There are two + kinds of parent resources with \"Core\" support: \n * Gateway + (Gateway conformance profile) * Service (Mesh conformance + profile, experimental, ClusterIP Services only) \n Support + for other resources is Implementation-Specific." + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: "Name is the name of the referent. \n Support: + Core" + maxLength: 253 + minLength: 1 + type: string + namespace: + description: "Namespace is the namespace of the referent. When + unspecified, this refers to the local namespace of the Route. + \n Note that there are specific rules for ParentRefs which + cross namespace boundaries. Cross-namespace references are + only valid if they are explicitly allowed by something in + the namespace they are referring to. For example: Gateway + has the AllowedRoutes field, and ReferenceGrant provides a + generic way to enable any other kind of cross-namespace reference. + \n ParentRefs from a Route to a Service in the same namespace + are \"producer\" routes, which apply default routing rules + to inbound connections from any namespace to the Service. 
+ \n ParentRefs from a Route to a Service in a different namespace + are \"consumer\" routes, and these routing rules are only + applied to outbound connections originating from the same + namespace as the Route, for which the intended destination + of the connections are a Service targeted as a ParentRef of + the Route. \n Support: Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: "Port is the network port this Route targets. It + can be interpreted differently based on the type of parent + resource. \n When the parent resource is a Gateway, this targets + all listeners listening on the specified port that also support + this kind of Route(and select this Route). It's not recommended + to set `Port` unless the networking behaviors specified in + a Route must apply to a specific port as opposed to a listener(s) + whose port(s) may be changed. When both Port and SectionName + are specified, the name and port of the selected listener + must match both specified values. \n When the parent resource + is a Service, this targets a specific port in the Service + spec. When both Port (experimental) and SectionName are specified, + the name and port of the selected port must match both specified + values. \n Implementations MAY choose to support other parent + resources. Implementations supporting other types of parent + resources MUST clearly document how/if Port is interpreted. + \n For the purpose of status, an attachment is considered + successful as long as the parent resource accepts it partially. + For example, Gateway listeners can restrict which Routes can + attach to them by Route kind, namespace, or hostname. If 1 + of 2 Gateway listeners accept attachment from the referencing + Route, the Route MUST be considered successfully attached. + If no Gateway listeners accept attachment from this Route, + the Route MUST be considered detached from the Gateway. \n + Support: Extended \n " + format: int32 + maximum: 65535 + minimum: 1 + type: integer + sectionName: + description: "SectionName is the name of a section within the + target resource. In the following resources, SectionName is + interpreted as the following: \n * Gateway: Listener Name. + When both Port (experimental) and SectionName are specified, + the name and port of the selected listener must match both + specified values. * Service: Port Name. When both Port (experimental) + and SectionName are specified, the name and port of the selected + listener must match both specified values. Note that attaching + Routes to Services as Parents is part of experimental Mesh + support and is not supported for any other purpose. \n Implementations + MAY choose to support attaching Routes to other resources. + If that is the case, they MUST clearly document how SectionName + is interpreted. \n When unspecified (empty string), this will + reference the entire resource. For the purpose of status, + an attachment is considered successful if at least one section + in the parent resource accepts it. For example, Gateway listeners + can restrict which Routes can attach to them by Route kind, + namespace, or hostname. If 1 of 2 Gateway listeners accept + attachment from the referencing Route, the Route MUST be considered + successfully attached. If no Gateway listeners accept attachment + from this Route, the Route MUST be considered detached from + the Gateway. 
\n Support: Core" + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - name + type: object + maxItems: 32 + type: array + x-kubernetes-validations: + - message: sectionName or port must be specified when parentRefs includes + 2 or more references to the same parent + rule: 'self.all(p1, self.all(p2, p1.group == p2.group && p1.kind + == p2.kind && p1.name == p2.name && (((!has(p1.__namespace__) + || p1.__namespace__ == '''') && (!has(p2.__namespace__) || p2.__namespace__ + == '''')) || (has(p1.__namespace__) && has(p2.__namespace__) && + p1.__namespace__ == p2.__namespace__)) ? ((!has(p1.sectionName) + || p1.sectionName == '''') == (!has(p2.sectionName) || p2.sectionName + == '''') && (!has(p1.port) || p1.port == 0) == (!has(p2.port) + || p2.port == 0)): true))' + - message: sectionName or port must be unique when parentRefs includes + 2 or more references to the same parent + rule: self.all(p1, self.exists_one(p2, p1.group == p2.group && p1.kind + == p2.kind && p1.name == p2.name && (((!has(p1.__namespace__) + || p1.__namespace__ == '') && (!has(p2.__namespace__) || p2.__namespace__ + == '')) || (has(p1.__namespace__) && has(p2.__namespace__) && + p1.__namespace__ == p2.__namespace__ )) && (((!has(p1.sectionName) + || p1.sectionName == '') && (!has(p2.sectionName) || p2.sectionName + == '')) || ( has(p1.sectionName) && has(p2.sectionName) && p1.sectionName + == p2.sectionName)) && (((!has(p1.port) || p1.port == 0) && (!has(p2.port) + || p2.port == 0)) || (has(p1.port) && has(p2.port) && p1.port + == p2.port)))) + rules: + description: Rules are a list of TLS matchers and actions. + items: + description: TLSRouteRule is the configuration for a given rule. + properties: + backendRefs: + description: "BackendRefs defines the backend(s) where matching + requests should be sent. If unspecified or invalid (refers + to a non-existent resource or a Service with no endpoints), + the rule performs no forwarding; if no filters are specified + that would result in a response being sent, the underlying + implementation must actively reject request attempts to this + backend, by rejecting the connection or returning a 500 status + code. Request rejections must respect weight; if an invalid + backend is requested to have 80% of requests, then 80% of + requests must be rejected instead. \n Support: Core for Kubernetes + Service \n Support: Extended for Kubernetes ServiceImport + \n Support: Implementation-specific for any other resource + \n Support for weight: Extended" + items: + description: "BackendRef defines how a Route should forward + a request to a Kubernetes resource. \n Note that when a + namespace different than the local namespace is specified, + a ReferenceGrant object is required in the referent namespace + to allow that namespace's owner to accept the reference. + See the ReferenceGrant documentation for details. \n + \n When the BackendRef points to a Kubernetes Service, implementations + SHOULD honor the appProtocol field if it is set for the + target Service Port. \n Implementations supporting appProtocol + SHOULD recognize the Kubernetes Standard Application Protocols + defined in KEP-3726. \n If a Service appProtocol isn't specified, + an implementation MAY infer the backend protocol through + its own means. Implementations MAY infer the protocol from + the Route type referring to the backend Service. 
\n If a + Route is not able to send traffic to the backend using the + specified protocol then the backend is considered invalid. + Implementations MUST set the \"ResolvedRefs\" condition + to \"False\" with the \"UnsupportedProtocol\" reason. \n + \n Note that when the + BackendTLSPolicy object is enabled by the implementation, + there are some extra rules about validity to consider here. + See the fields where this struct is used for more information + about the exact behavior." + properties: + group: + default: "" + description: Group is the group of the referent. For example, + "gateway.networking.k8s.io". When unspecified or empty + string, core API group is inferred. + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Service + description: "Kind is the Kubernetes resource kind of + the referent. For example \"Service\". \n Defaults to + \"Service\" when not specified. \n ExternalName services + can refer to CNAME DNS records that may live outside + of the cluster and as such are difficult to reason about + in terms of conformance. They also may not be safe to + forward to (see CVE-2021-25740 for more information). + Implementations SHOULD NOT support ExternalName Services. + \n Support: Core (Services with a type other than ExternalName) + \n Support: Implementation-specific (Services with type + ExternalName)" + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: Name is the name of the referent. + maxLength: 253 + minLength: 1 + type: string + namespace: + description: "Namespace is the namespace of the backend. + When unspecified, the local namespace is inferred. \n + Note that when a namespace different than the local + namespace is specified, a ReferenceGrant object is required + in the referent namespace to allow that namespace's + owner to accept the reference. See the ReferenceGrant + documentation for details. \n Support: Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: Port specifies the destination port number + to use for this resource. Port is required when the + referent is a Kubernetes Service. In this case, the + port number is the service port number, not the target + port. For other resources, destination port might be + derived from the referent resource or this field. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + weight: + default: 1 + description: "Weight specifies the proportion of requests + forwarded to the referenced backend. This is computed + as weight/(sum of all weights in this BackendRefs list). + For non-zero values, there may be some epsilon from + the exact proportion defined here depending on the precision + an implementation supports. Weight is not a percentage + and the sum of weights does not need to equal 100. \n + If only one backend is specified and it has a weight + greater than 0, 100% of the traffic is forwarded to + that backend. If weight is set to 0, no traffic should + be forwarded for this entry. If unspecified, weight + defaults to 1. \n Support for this field varies based + on the context where used." + format: int32 + maximum: 1000000 + minimum: 0 + type: integer + required: + - name + type: object + x-kubernetes-validations: + - message: Must have port for Service reference + rule: '(size(self.group) == 0 && self.kind == ''Service'') + ? 
has(self.port) : true' + maxItems: 16 + minItems: 1 + type: array + type: object + maxItems: 16 + minItems: 1 + type: array + required: + - rules + type: object + status: + description: Status defines the current state of TLSRoute. + properties: + parents: + description: "Parents is a list of parent resources (usually Gateways) + that are associated with the route, and the status of the route + with respect to each parent. When this route attaches to a parent, + the controller that manages the parent must add an entry to this + list when the controller first sees the route and should update + the entry as appropriate when the route or gateway is modified. + \n Note that parent references that cannot be resolved by an implementation + of this API will not be added to this list. Implementations of this + API can only populate Route status for the Gateways/parent resources + they are responsible for. \n A maximum of 32 Gateways will be represented + in this list. An empty list means the route has not been attached + to any Gateway." + items: + description: RouteParentStatus describes the status of a route with + respect to an associated Parent. + properties: + conditions: + description: "Conditions describes the status of the route with + respect to the Gateway. Note that the route's availability + is also subject to the Gateway's own status conditions and + listener status. \n If the Route's ParentRef specifies an + existing Gateway that supports Routes of this kind AND that + Gateway's controller has sufficient access, then that Gateway's + controller MUST set the \"Accepted\" condition on the Route, + to indicate whether the route has been accepted or rejected + by the Gateway, and why. \n A Route MUST be considered \"Accepted\" + if at least one of the Route's rules is implemented by the + Gateway. \n There are a number of cases where the \"Accepted\" + condition may not be set due to lack of controller visibility, + that includes when: \n * The Route refers to a non-existent + parent. * The Route is of a type that the controller does + not support. * The Route is in a namespace the controller + does not have access to." + items: + description: "Condition contains details for one aspect of + the current state of this API Resource. --- This struct + is intended for direct use as an array at the field path + .status.conditions. For example, \n type FooStatus struct{ + // Represents the observations of a foo's current state. + // Known .status.conditions.type are: \"Available\", \"Progressing\", + and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields + }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should + be when the underlying condition changed. If that is + not known, then using the time when the API field changed + is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. 
For instance, + if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the + current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier + indicating the reason for the condition's last transition. + Producers of specific condition types may define expected + values and meanings for this field, and whether the + values are considered a guaranteed API. The value should + be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across + resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability + to deconflict is important. The regex it matches is + (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + controllerName: + description: "ControllerName is a domain/path string that indicates + the name of the controller that wrote this status. This corresponds + with the controllerName field on GatewayClass. \n Example: + \"example.net/gateway-controller\". \n The format of this + field is DOMAIN \"/\" PATH, where DOMAIN and PATH are valid + Kubernetes names (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + \n Controllers MUST populate this field when writing status. + Controllers should ensure that entries to status populated + with their ControllerName are cleaned up when they are no + longer necessary." + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9\/\-._~%!$&'()*+,;=:]+$ + type: string + parentRef: + description: ParentRef corresponds with a ParentRef in the spec + that this RouteParentStatus struct describes the status of. + properties: + group: + default: gateway.networking.k8s.io + description: "Group is the group of the referent. When unspecified, + \"gateway.networking.k8s.io\" is inferred. To set the + core API group (such as for a \"Service\" kind referent), + Group must be explicitly set to \"\" (empty string). \n + Support: Core" + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Gateway + description: "Kind is kind of the referent. \n There are + two kinds of parent resources with \"Core\" support: \n + * Gateway (Gateway conformance profile) * Service (Mesh + conformance profile, experimental, ClusterIP Services + only) \n Support for other resources is Implementation-Specific." + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: "Name is the name of the referent. \n Support: + Core" + maxLength: 253 + minLength: 1 + type: string + namespace: + description: "Namespace is the namespace of the referent. 
+ When unspecified, this refers to the local namespace of + the Route. \n Note that there are specific rules for ParentRefs + which cross namespace boundaries. Cross-namespace references + are only valid if they are explicitly allowed by something + in the namespace they are referring to. For example: Gateway + has the AllowedRoutes field, and ReferenceGrant provides + a generic way to enable any other kind of cross-namespace + reference. \n ParentRefs from a Route to a Service in + the same namespace are \"producer\" routes, which apply + default routing rules to inbound connections from any + namespace to the Service. \n ParentRefs from a Route to + a Service in a different namespace are \"consumer\" routes, + and these routing rules are only applied to outbound connections + originating from the same namespace as the Route, for + which the intended destination of the connections are + a Service targeted as a ParentRef of the Route. \n Support: + Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: "Port is the network port this Route targets. + It can be interpreted differently based on the type of + parent resource. \n When the parent resource is a Gateway, + this targets all listeners listening on the specified + port that also support this kind of Route(and select this + Route). It's not recommended to set `Port` unless the + networking behaviors specified in a Route must apply to + a specific port as opposed to a listener(s) whose port(s) + may be changed. When both Port and SectionName are specified, + the name and port of the selected listener must match + both specified values. \n When the parent resource is + a Service, this targets a specific port in the Service + spec. When both Port (experimental) and SectionName are + specified, the name and port of the selected port must + match both specified values. \n Implementations MAY choose + to support other parent resources. Implementations supporting + other types of parent resources MUST clearly document + how/if Port is interpreted. \n For the purpose of status, + an attachment is considered successful as long as the + parent resource accepts it partially. For example, Gateway + listeners can restrict which Routes can attach to them + by Route kind, namespace, or hostname. If 1 of 2 Gateway + listeners accept attachment from the referencing Route, + the Route MUST be considered successfully attached. If + no Gateway listeners accept attachment from this Route, + the Route MUST be considered detached from the Gateway. + \n Support: Extended \n " + format: int32 + maximum: 65535 + minimum: 1 + type: integer + sectionName: + description: "SectionName is the name of a section within + the target resource. In the following resources, SectionName + is interpreted as the following: \n * Gateway: Listener + Name. When both Port (experimental) and SectionName are + specified, the name and port of the selected listener + must match both specified values. * Service: Port Name. + When both Port (experimental) and SectionName are specified, + the name and port of the selected listener must match + both specified values. Note that attaching Routes to Services + as Parents is part of experimental Mesh support and is + not supported for any other purpose. \n Implementations + MAY choose to support attaching Routes to other resources. + If that is the case, they MUST clearly document how SectionName + is interpreted. 
\n When unspecified (empty string), this + will reference the entire resource. For the purpose of + status, an attachment is considered successful if at least + one section in the parent resource accepts it. For example, + Gateway listeners can restrict which Routes can attach + to them by Route kind, namespace, or hostname. If 1 of + 2 Gateway listeners accept attachment from the referencing + Route, the Route MUST be considered successfully attached. + If no Gateway listeners accept attachment from this Route, + the Route MUST be considered detached from the Gateway. + \n Support: Core" + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - name + type: object + required: + - controllerName + - parentRef + type: object + maxItems: 32 + type: array + required: + - parents + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/2466 + gateway.networking.k8s.io/bundle-version: v1.0.0 + gateway.networking.k8s.io/channel: experimental + creationTimestamp: null + name: udproutes.gateway.networking.k8s.io +spec: + group: gateway.networking.k8s.io + names: + categories: + - gateway-api + kind: UDPRoute + listKind: UDPRouteList + plural: udproutes + singular: udproute + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha2 + schema: + openAPIV3Schema: + description: UDPRoute provides a way to route UDP traffic. When combined with + a Gateway listener, it can be used to forward traffic on the port specified + by the listener to a set of backends specified by the UDPRoute. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the desired state of UDPRoute. + properties: + parentRefs: + description: "ParentRefs references the resources (usually Gateways) + that a Route wants to be attached to. Note that the referenced parent + resource needs to allow this for the attachment to be complete. + For Gateways, that means the Gateway needs to allow attachment from + Routes of this kind and namespace. For Services, that means the + Service must either be in the same namespace for a \"producer\" + route, or the mesh implementation must support and allow \"consumer\" + routes for the referenced Service. ReferenceGrant is not applicable + for governing ParentRefs to Services - it is not possible to create + a \"producer\" route for a Service in a different namespace from + the Route. 
\n There are two kinds of parent resources with \"Core\" + support: \n * Gateway (Gateway conformance profile) * Service (Mesh + conformance profile, experimental, ClusterIP Services only) This + API may be extended in the future to support additional kinds of + parent resources. \n ParentRefs must be _distinct_. This means either + that: \n * They select different objects. If this is the case, + then parentRef entries are distinct. In terms of fields, this means + that the multi-part key defined by `group`, `kind`, `namespace`, + and `name` must be unique across all parentRef entries in the Route. + * They do not select different objects, but for each optional field + used, each ParentRef that selects the same object must set the same + set of optional fields to different values. If one ParentRef sets + a combination of optional fields, all must set the same combination. + \n Some examples: \n * If one ParentRef sets `sectionName`, all + ParentRefs referencing the same object must also set `sectionName`. + * If one ParentRef sets `port`, all ParentRefs referencing the same + object must also set `port`. * If one ParentRef sets `sectionName` + and `port`, all ParentRefs referencing the same object must also + set `sectionName` and `port`. \n It is possible to separately reference + multiple distinct objects that may be collapsed by an implementation. + For example, some implementations may choose to merge compatible + Gateway Listeners together. If that is the case, the list of routes + attached to those resources should also be merged. \n Note that + for ParentRefs that cross namespace boundaries, there are specific + rules. Cross-namespace references are only valid if they are explicitly + allowed by something in the namespace they are referring to. For + example, Gateway has the AllowedRoutes field, and ReferenceGrant + provides a generic way to enable other kinds of cross-namespace + reference. \n ParentRefs from a Route to a Service in the same + namespace are \"producer\" routes, which apply default routing rules + to inbound connections from any namespace to the Service. \n ParentRefs + from a Route to a Service in a different namespace are \"consumer\" + routes, and these routing rules are only applied to outbound connections + originating from the same namespace as the Route, for which the + intended destination of the connections are a Service targeted as + a ParentRef of the Route. \n " + items: + description: "ParentReference identifies an API object (usually + a Gateway) that can be considered a parent of this resource (usually + a route). There are two kinds of parent resources with \"Core\" + support: \n * Gateway (Gateway conformance profile) * Service + (Mesh conformance profile, experimental, ClusterIP Services only) + \n This API may be extended in the future to support additional + kinds of parent resources. \n The API object must be valid in + the cluster; the Group and Kind must be registered in the cluster + for this reference to be valid." + properties: + group: + default: gateway.networking.k8s.io + description: "Group is the group of the referent. When unspecified, + \"gateway.networking.k8s.io\" is inferred. To set the core + API group (such as for a \"Service\" kind referent), Group + must be explicitly set to \"\" (empty string). \n Support: + Core" + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Gateway + description: "Kind is kind of the referent. 
\n There are two + kinds of parent resources with \"Core\" support: \n * Gateway + (Gateway conformance profile) * Service (Mesh conformance + profile, experimental, ClusterIP Services only) \n Support + for other resources is Implementation-Specific." + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: "Name is the name of the referent. \n Support: + Core" + maxLength: 253 + minLength: 1 + type: string + namespace: + description: "Namespace is the namespace of the referent. When + unspecified, this refers to the local namespace of the Route. + \n Note that there are specific rules for ParentRefs which + cross namespace boundaries. Cross-namespace references are + only valid if they are explicitly allowed by something in + the namespace they are referring to. For example: Gateway + has the AllowedRoutes field, and ReferenceGrant provides a + generic way to enable any other kind of cross-namespace reference. + \n ParentRefs from a Route to a Service in the same namespace + are \"producer\" routes, which apply default routing rules + to inbound connections from any namespace to the Service. + \n ParentRefs from a Route to a Service in a different namespace + are \"consumer\" routes, and these routing rules are only + applied to outbound connections originating from the same + namespace as the Route, for which the intended destination + of the connections are a Service targeted as a ParentRef of + the Route. \n Support: Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: "Port is the network port this Route targets. It + can be interpreted differently based on the type of parent + resource. \n When the parent resource is a Gateway, this targets + all listeners listening on the specified port that also support + this kind of Route(and select this Route). It's not recommended + to set `Port` unless the networking behaviors specified in + a Route must apply to a specific port as opposed to a listener(s) + whose port(s) may be changed. When both Port and SectionName + are specified, the name and port of the selected listener + must match both specified values. \n When the parent resource + is a Service, this targets a specific port in the Service + spec. When both Port (experimental) and SectionName are specified, + the name and port of the selected port must match both specified + values. \n Implementations MAY choose to support other parent + resources. Implementations supporting other types of parent + resources MUST clearly document how/if Port is interpreted. + \n For the purpose of status, an attachment is considered + successful as long as the parent resource accepts it partially. + For example, Gateway listeners can restrict which Routes can + attach to them by Route kind, namespace, or hostname. If 1 + of 2 Gateway listeners accept attachment from the referencing + Route, the Route MUST be considered successfully attached. + If no Gateway listeners accept attachment from this Route, + the Route MUST be considered detached from the Gateway. \n + Support: Extended \n " + format: int32 + maximum: 65535 + minimum: 1 + type: integer + sectionName: + description: "SectionName is the name of a section within the + target resource. In the following resources, SectionName is + interpreted as the following: \n * Gateway: Listener Name. 
+ When both Port (experimental) and SectionName are specified, + the name and port of the selected listener must match both + specified values. * Service: Port Name. When both Port (experimental) + and SectionName are specified, the name and port of the selected + listener must match both specified values. Note that attaching + Routes to Services as Parents is part of experimental Mesh + support and is not supported for any other purpose. \n Implementations + MAY choose to support attaching Routes to other resources. + If that is the case, they MUST clearly document how SectionName + is interpreted. \n When unspecified (empty string), this will + reference the entire resource. For the purpose of status, + an attachment is considered successful if at least one section + in the parent resource accepts it. For example, Gateway listeners + can restrict which Routes can attach to them by Route kind, + namespace, or hostname. If 1 of 2 Gateway listeners accept + attachment from the referencing Route, the Route MUST be considered + successfully attached. If no Gateway listeners accept attachment + from this Route, the Route MUST be considered detached from + the Gateway. \n Support: Core" + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - name + type: object + maxItems: 32 + type: array + x-kubernetes-validations: + - message: sectionName or port must be specified when parentRefs includes + 2 or more references to the same parent + rule: 'self.all(p1, self.all(p2, p1.group == p2.group && p1.kind + == p2.kind && p1.name == p2.name && (((!has(p1.__namespace__) + || p1.__namespace__ == '''') && (!has(p2.__namespace__) || p2.__namespace__ + == '''')) || (has(p1.__namespace__) && has(p2.__namespace__) && + p1.__namespace__ == p2.__namespace__)) ? ((!has(p1.sectionName) + || p1.sectionName == '''') == (!has(p2.sectionName) || p2.sectionName + == '''') && (!has(p1.port) || p1.port == 0) == (!has(p2.port) + || p2.port == 0)): true))' + - message: sectionName or port must be unique when parentRefs includes + 2 or more references to the same parent + rule: self.all(p1, self.exists_one(p2, p1.group == p2.group && p1.kind + == p2.kind && p1.name == p2.name && (((!has(p1.__namespace__) + || p1.__namespace__ == '') && (!has(p2.__namespace__) || p2.__namespace__ + == '')) || (has(p1.__namespace__) && has(p2.__namespace__) && + p1.__namespace__ == p2.__namespace__ )) && (((!has(p1.sectionName) + || p1.sectionName == '') && (!has(p2.sectionName) || p2.sectionName + == '')) || ( has(p1.sectionName) && has(p2.sectionName) && p1.sectionName + == p2.sectionName)) && (((!has(p1.port) || p1.port == 0) && (!has(p2.port) + || p2.port == 0)) || (has(p1.port) && has(p2.port) && p1.port + == p2.port)))) + rules: + description: Rules are a list of UDP matchers and actions. + items: + description: UDPRouteRule is the configuration for a given rule. + properties: + backendRefs: + description: "BackendRefs defines the backend(s) where matching + requests should be sent. If unspecified or invalid (refers + to a non-existent resource or a Service with no endpoints), + the underlying implementation MUST actively reject connection + attempts to this backend. Packet drops must respect weight; + if an invalid backend is requested to have 80% of the packets, + then 80% of packets must be dropped instead. 
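To make the weight semantics of the UDPRoute backendRefs description concrete, here is a minimal, hypothetical example (the Gateway and Service names are assumptions). Traffic is split 80/20 between two backends; per the rule above, if the weight-80 backend were invalid, roughly 80% of packets would have to be dropped rather than shifted to the other backend.

```yaml
# Illustrative sketch only: weighted backends for a UDPRoute.
apiVersion: gateway.networking.k8s.io/v1alpha2
kind: UDPRoute
metadata:
  name: example-udp-route          # assumed name
  namespace: default
spec:
  parentRefs:
  - name: udp-gateway              # assumed Gateway
  rules:
  - backendRefs:
    - name: media-server-v1        # assumed Service
      port: 3478
      weight: 80                   # ~80% of packets
    - name: media-server-v2        # assumed Service
      port: 3478
      weight: 20                   # ~20% of packets
```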
\n Support: Core + for Kubernetes Service \n Support: Extended for Kubernetes + ServiceImport \n Support: Implementation-specific for any + other resource \n Support for weight: Extended" + items: + description: "BackendRef defines how a Route should forward + a request to a Kubernetes resource. \n Note that when a + namespace different than the local namespace is specified, + a ReferenceGrant object is required in the referent namespace + to allow that namespace's owner to accept the reference. + See the ReferenceGrant documentation for details. \n + \n When the BackendRef points to a Kubernetes Service, implementations + SHOULD honor the appProtocol field if it is set for the + target Service Port. \n Implementations supporting appProtocol + SHOULD recognize the Kubernetes Standard Application Protocols + defined in KEP-3726. \n If a Service appProtocol isn't specified, + an implementation MAY infer the backend protocol through + its own means. Implementations MAY infer the protocol from + the Route type referring to the backend Service. \n If a + Route is not able to send traffic to the backend using the + specified protocol then the backend is considered invalid. + Implementations MUST set the \"ResolvedRefs\" condition + to \"False\" with the \"UnsupportedProtocol\" reason. \n + \n Note that when the + BackendTLSPolicy object is enabled by the implementation, + there are some extra rules about validity to consider here. + See the fields where this struct is used for more information + about the exact behavior." + properties: + group: + default: "" + description: Group is the group of the referent. For example, + "gateway.networking.k8s.io". When unspecified or empty + string, core API group is inferred. + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Service + description: "Kind is the Kubernetes resource kind of + the referent. For example \"Service\". \n Defaults to + \"Service\" when not specified. \n ExternalName services + can refer to CNAME DNS records that may live outside + of the cluster and as such are difficult to reason about + in terms of conformance. They also may not be safe to + forward to (see CVE-2021-25740 for more information). + Implementations SHOULD NOT support ExternalName Services. + \n Support: Core (Services with a type other than ExternalName) + \n Support: Implementation-specific (Services with type + ExternalName)" + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: Name is the name of the referent. + maxLength: 253 + minLength: 1 + type: string + namespace: + description: "Namespace is the namespace of the backend. + When unspecified, the local namespace is inferred. \n + Note that when a namespace different than the local + namespace is specified, a ReferenceGrant object is required + in the referent namespace to allow that namespace's + owner to accept the reference. See the ReferenceGrant + documentation for details. \n Support: Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: Port specifies the destination port number + to use for this resource. Port is required when the + referent is a Kubernetes Service. In this case, the + port number is the service port number, not the target + port. For other resources, destination port might be + derived from the referent resource or this field. 
+ format: int32 + maximum: 65535 + minimum: 1 + type: integer + weight: + default: 1 + description: "Weight specifies the proportion of requests + forwarded to the referenced backend. This is computed + as weight/(sum of all weights in this BackendRefs list). + For non-zero values, there may be some epsilon from + the exact proportion defined here depending on the precision + an implementation supports. Weight is not a percentage + and the sum of weights does not need to equal 100. \n + If only one backend is specified and it has a weight + greater than 0, 100% of the traffic is forwarded to + that backend. If weight is set to 0, no traffic should + be forwarded for this entry. If unspecified, weight + defaults to 1. \n Support for this field varies based + on the context where used." + format: int32 + maximum: 1000000 + minimum: 0 + type: integer + required: + - name + type: object + x-kubernetes-validations: + - message: Must have port for Service reference + rule: '(size(self.group) == 0 && self.kind == ''Service'') + ? has(self.port) : true' + maxItems: 16 + minItems: 1 + type: array + type: object + maxItems: 16 + minItems: 1 + type: array + required: + - rules + type: object + status: + description: Status defines the current state of UDPRoute. + properties: + parents: + description: "Parents is a list of parent resources (usually Gateways) + that are associated with the route, and the status of the route + with respect to each parent. When this route attaches to a parent, + the controller that manages the parent must add an entry to this + list when the controller first sees the route and should update + the entry as appropriate when the route or gateway is modified. + \n Note that parent references that cannot be resolved by an implementation + of this API will not be added to this list. Implementations of this + API can only populate Route status for the Gateways/parent resources + they are responsible for. \n A maximum of 32 Gateways will be represented + in this list. An empty list means the route has not been attached + to any Gateway." + items: + description: RouteParentStatus describes the status of a route with + respect to an associated Parent. + properties: + conditions: + description: "Conditions describes the status of the route with + respect to the Gateway. Note that the route's availability + is also subject to the Gateway's own status conditions and + listener status. \n If the Route's ParentRef specifies an + existing Gateway that supports Routes of this kind AND that + Gateway's controller has sufficient access, then that Gateway's + controller MUST set the \"Accepted\" condition on the Route, + to indicate whether the route has been accepted or rejected + by the Gateway, and why. \n A Route MUST be considered \"Accepted\" + if at least one of the Route's rules is implemented by the + Gateway. \n There are a number of cases where the \"Accepted\" + condition may not be set due to lack of controller visibility, + that includes when: \n * The Route refers to a non-existent + parent. * The Route is of a type that the controller does + not support. * The Route is in a namespace the controller + does not have access to." + items: + description: "Condition contains details for one aspect of + the current state of this API Resource. --- This struct + is intended for direct use as an array at the field path + .status.conditions. For example, \n type FooStatus struct{ + // Represents the observations of a foo's current state. 
+ // Known .status.conditions.type are: \"Available\", \"Progressing\", + and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields + }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should + be when the underlying condition changed. If that is + not known, then using the time when the API field changed + is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, + if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the + current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier + indicating the reason for the condition's last transition. + Producers of specific condition types may define expected + values and meanings for this field, and whether the + values are considered a guaranteed API. The value should + be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across + resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability + to deconflict is important. The regex it matches is + (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + controllerName: + description: "ControllerName is a domain/path string that indicates + the name of the controller that wrote this status. This corresponds + with the controllerName field on GatewayClass. \n Example: + \"example.net/gateway-controller\". \n The format of this + field is DOMAIN \"/\" PATH, where DOMAIN and PATH are valid + Kubernetes names (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + \n Controllers MUST populate this field when writing status. + Controllers should ensure that entries to status populated + with their ControllerName are cleaned up when they are no + longer necessary." + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9\/\-._~%!$&'()*+,;=:]+$ + type: string + parentRef: + description: ParentRef corresponds with a ParentRef in the spec + that this RouteParentStatus struct describes the status of. 
+ properties: + group: + default: gateway.networking.k8s.io + description: "Group is the group of the referent. When unspecified, + \"gateway.networking.k8s.io\" is inferred. To set the + core API group (such as for a \"Service\" kind referent), + Group must be explicitly set to \"\" (empty string). \n + Support: Core" + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Gateway + description: "Kind is kind of the referent. \n There are + two kinds of parent resources with \"Core\" support: \n + * Gateway (Gateway conformance profile) * Service (Mesh + conformance profile, experimental, ClusterIP Services + only) \n Support for other resources is Implementation-Specific." + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: "Name is the name of the referent. \n Support: + Core" + maxLength: 253 + minLength: 1 + type: string + namespace: + description: "Namespace is the namespace of the referent. + When unspecified, this refers to the local namespace of + the Route. \n Note that there are specific rules for ParentRefs + which cross namespace boundaries. Cross-namespace references + are only valid if they are explicitly allowed by something + in the namespace they are referring to. For example: Gateway + has the AllowedRoutes field, and ReferenceGrant provides + a generic way to enable any other kind of cross-namespace + reference. \n ParentRefs from a Route to a Service in + the same namespace are \"producer\" routes, which apply + default routing rules to inbound connections from any + namespace to the Service. \n ParentRefs from a Route to + a Service in a different namespace are \"consumer\" routes, + and these routing rules are only applied to outbound connections + originating from the same namespace as the Route, for + which the intended destination of the connections are + a Service targeted as a ParentRef of the Route. \n Support: + Core" + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: "Port is the network port this Route targets. + It can be interpreted differently based on the type of + parent resource. \n When the parent resource is a Gateway, + this targets all listeners listening on the specified + port that also support this kind of Route(and select this + Route). It's not recommended to set `Port` unless the + networking behaviors specified in a Route must apply to + a specific port as opposed to a listener(s) whose port(s) + may be changed. When both Port and SectionName are specified, + the name and port of the selected listener must match + both specified values. \n When the parent resource is + a Service, this targets a specific port in the Service + spec. When both Port (experimental) and SectionName are + specified, the name and port of the selected port must + match both specified values. \n Implementations MAY choose + to support other parent resources. Implementations supporting + other types of parent resources MUST clearly document + how/if Port is interpreted. \n For the purpose of status, + an attachment is considered successful as long as the + parent resource accepts it partially. For example, Gateway + listeners can restrict which Routes can attach to them + by Route kind, namespace, or hostname. If 1 of 2 Gateway + listeners accept attachment from the referencing Route, + the Route MUST be considered successfully attached. 
If + no Gateway listeners accept attachment from this Route, + the Route MUST be considered detached from the Gateway. + \n Support: Extended \n " + format: int32 + maximum: 65535 + minimum: 1 + type: integer + sectionName: + description: "SectionName is the name of a section within + the target resource. In the following resources, SectionName + is interpreted as the following: \n * Gateway: Listener + Name. When both Port (experimental) and SectionName are + specified, the name and port of the selected listener + must match both specified values. * Service: Port Name. + When both Port (experimental) and SectionName are specified, + the name and port of the selected listener must match + both specified values. Note that attaching Routes to Services + as Parents is part of experimental Mesh support and is + not supported for any other purpose. \n Implementations + MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of @@ -2385,5 +11233,5 @@ status: acceptedNames: kind: "" plural: "" - conditions: [] - storedVersions: [] \ No newline at end of file + conditions: null + storedVersions: null diff --git a/deploy/manifests/static/stunner-crd.yaml b/deploy/manifests/static/stunner-crd.yaml index 04457aed..db856cb4 100644 --- a/deploy/manifests/static/stunner-crd.yaml +++ b/deploy/manifests/static/stunner-crd.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.14.0 name: gatewayconfigs.stunner.l7mp.io spec: group: stunner.l7mp.io @@ -15,10 +14,160 @@ spec: listKind: GatewayConfigList plural: gatewayconfigs shortNames: - - gtwconf + - gwconf singular: gatewayconfig scope: Namespaced versions: + - additionalPrinterColumns: + - jsonPath: .spec.realm + name: Realm + type: string + - jsonPath: .spec.dataplane + name: Dataplane + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: GatewayConfig is the Schema for the gatewayconfigs API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: GatewayConfigSpec defines the desired state of GatewayConfig + properties: + authLifetime: + description: AuthLifetime defines the lifetime of "longterm" authentication + credentials in seconds. 
+ format: int32 + type: integer + authRef: + description: |- + Note that externally set credentials override any inline auth credentials (AuthType, + AuthUsername, etc.): if AuthRef is nonempty then it is expected that the referenced + Secret exists and *all* authentication credentials are correctly set in the referenced + Secret (username/password or shared secret). Mixing of credential sources + (inline/external) is not supported. + properties: + group: + default: "" + description: |- + Group is the group of the referent. For example, "gateway.networking.k8s.io". + When unspecified or empty string, core API group is inferred. + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Secret + description: Kind is kind of the referent. For example "Secret". + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: Name is the name of the referent. + maxLength: 253 + minLength: 1 + type: string + namespace: + description: |- + Namespace is the namespace of the referenced object. When unspecified, the local + namespace is inferred. + + + Note that when a namespace different than the local namespace is specified, + a ReferenceGrant object is required in the referent namespace to allow that + namespace's owner to accept the reference. See the ReferenceGrant + documentation for details. + + + Support: Core + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + required: + - name + type: object + authType: + default: plaintext + description: AuthType is the type of the STUN/TURN authentication + mechanism. + pattern: ^plaintext|static|longterm|ephemeral|timewindowed$ + type: string + dataplane: + default: default + description: |- + Dataplane defines the dataplane (stunnerd image, version, etc) for STUNner gateways + using this GatewayConfig. + type: string + loadBalancerServiceAnnotations: + additionalProperties: + type: string + description: |- + LoadBalancerServiceAnnotations is a list of annotations that will go into the + LoadBalancer services created automatically by the operator to wrap Gateways. + + + NOTE: removing annotations from a GatewayConfig will not result in the removal of the + corresponding annotations from the LoadBalancer service, in order to prevent the + accidental removal of an annotation installed there by Kubernetes or the cloud + provider. If you really want to remove an annotation, do this manually or simply remove + all Gateways (which will remove the corresponding LoadBalancer services), update the + GatewayConfig and then recreate the Gateways, so that the newly created LoadBalancer + services will contain the required annotations. + type: object + logLevel: + description: LogLevel specifies the default loglevel for the STUNner + daemon. + type: string + password: + description: Password defines the `password` credential for "plaintext" + authentication. + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + realm: + default: stunner.l7mp.io + description: |- + Realm defines the STUN/TURN authentication realm to be used for clients toauthenticate + with STUNner. + + + The realm must consist of lower case alphanumeric characters or '-', and must start and + end with an alphanumeric character. No other punctuation is allowed. 
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + sharedSecret: + description: SharedSecret defines the shared secret to be used for + "longterm" authentication. + type: string + userName: + description: Username defines the `username` credential for "plaintext" + authentication. + pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$ + type: string + type: object + type: object + served: true + storage: true + subresources: {} - additionalPrinterColumns: - jsonPath: .spec.realm name: Realm @@ -35,14 +184,19 @@ spec: description: GatewayConfig is the Schema for the gatewayconfigs API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -55,25 +209,24 @@ spec: format: int32 type: integer authRef: - description: 'Note that externally set credentials override any inline - auth credentials (AuthType, AuthUsername, etc.): if AuthRef is nonempty - then it is expected that the referenced Secret exists and *all* - authentication credentials are correctly set in the referenced Secret - (username/password or shared secret). Mixing of credential sources - (inline/external) is not supported.' + description: |- + Note that externally set credentials override any inline auth credentials (AuthType, + AuthUsername, etc.): if AuthRef is nonempty then it is expected that the referenced + Secret exists and *all* authentication credentials are correctly set in the referenced + Secret (username/password or shared secret). Mixing of credential sources + (inline/external) is not supported. properties: group: default: "" - description: Group is the group of the referent. For example, - "gateway.networking.k8s.io". When unspecified or empty string, - core API group is inferred. + description: |- + Group is the group of the referent. For example, "gateway.networking.k8s.io". + When unspecified or empty string, core API group is inferred. maxLength: 253 pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ type: string kind: default: Secret - description: Kind is kind of the referent. For example "HTTPRoute" - or "Service". + description: Kind is kind of the referent. For example "Secret". 
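# Illustrative example (not part of the diff): a minimal GatewayConfig using the new v1
# schema shown above (realm, authType, userName/password, dataplane,
# loadBalancerServiceAnnotations). All names, namespaces, credentials and annotation
# keys below are hypothetical placeholders, not values mandated by the CRD.
apiVersion: stunner.l7mp.io/v1
kind: GatewayConfig
metadata:
  name: stunner-gatewayconfig          # hypothetical name
  namespace: stunner                   # hypothetical namespace
spec:
  realm: stunner.l7mp.io               # schema default
  authType: static                     # must match plaintext|static|longterm|ephemeral|timewindowed
  userName: my-user                    # placeholder credential
  password: my-password                # placeholder credential
  dataplane: default                   # selects the Dataplane to use for Gateways of this class
  loadBalancerServiceAnnotations:
    example.com/load-balancer-type: "internal"   # hypothetical annotation copied onto LB Services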
maxLength: 63 minLength: 1 pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ @@ -84,12 +237,18 @@ spec: minLength: 1 type: string namespace: - description: "Namespace is the namespace of the backend. When - unspecified, the local namespace is inferred. \n Note that when - a namespace is specified, a ReferenceGrant object is required - in the referent namespace to allow that namespace's owner to - accept the reference. See the ReferenceGrant documentation for - details. \n Support: Core" + description: |- + Namespace is the namespace of the referenced object. When unspecified, the local + namespace is inferred. + + + Note that when a namespace different than the local namespace is specified, + a ReferenceGrant object is required in the referent namespace to allow that + namespace's owner to accept the reference. See the ReferenceGrant + documentation for details. + + + Support: Core maxLength: 63 minLength: 1 pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ @@ -103,27 +262,35 @@ spec: mechanism. pattern: ^plaintext|static|longterm|ephemeral|timewindowed$ type: string + dataplane: + default: default + description: |- + Dataplane defines the TURN server to set up for the STUNner Gateways using this + GatewayConfig. Can be used to select the stunnerd image repo and version or deploy into + the host-network namespace. + type: string healthCheckEndpoint: - description: HealthCheckEndpoint is the URI of the form `http://address:port` - exposed for external HTTP health-checking. A liveness probe responder - will be exposed on path `/live` and readiness probe on path `/ready`. - The scheme (`http://`) is mandatory, default is to enable health-checking - at "http://0.0.0.0:8086". + description: |- + HealthCheckEndpoint is the URI of the form `http://address:port` exposed for external + HTTP health-checking. A liveness probe responder will be exposed on path `/live` and + readiness probe on path `/ready`. The scheme (`http://`) is mandatory, default is to + enable health-checking at "http://0.0.0.0:8086". type: string loadBalancerServiceAnnotations: additionalProperties: type: string - description: "LoadBalancerServiceAnnotations is a list of annotations - that will go into the LoadBalancer services created automatically - by the operator to wrap Gateways. \n NOTE: removing annotations - from a GatewayConfig will not result in the removal of the corresponding - annotations from the LoadBalancer service, in order to prevent the - accidental removal of an annotation installed there by Kubernetes - or the cloud provider. If you really want to remove an annotation, - do this manually or simply remove all Gateways (which will remove - the corresponding LoadBalancer services), update the GatewayConfig - and then recreate the Gateways, so that the newly created LoadBalancer - services will contain the required annotations." + description: |- + LoadBalancerServiceAnnotations is a list of annotations that will go into the + LoadBalancer services created automatically by the operator to wrap Gateways. + + + NOTE: removing annotations from a GatewayConfig will not result in the removal of the + corresponding annotations from the LoadBalancer service, in order to prevent the + accidental removal of an annotation installed there by Kubernetes or the cloud + provider. 
If you really want to remove an annotation, do this manually or simply remove + all Gateways (which will remove the corresponding LoadBalancer services), update the + GatewayConfig and then recreate the Gateways, so that the newly created LoadBalancer + services will contain the required annotations. type: object logLevel: description: LogLevel specifies the default loglevel for the STUNner @@ -135,9 +302,10 @@ spec: format: int32 type: integer metricsEndpoint: - description: MetricsEndpoint is the URI in the form `http://address:port/path` - exposed for metric scraping (Prometheus). The scheme (`http://`) - is mandatory. Default is to expose no metric endpoint. + description: |- + MetricsEndpoint is the URI in the form `http://address:port/path` exposed for metric + scraping (Prometheus). The scheme (`http://`) is mandatory. Default is to expose no + metric endpoint. type: string minPort: description: MinRelayPort is the smallest relay port assigned for @@ -151,11 +319,13 @@ spec: type: string realm: default: stunner.l7mp.io - description: "Realm defines the STUN/TURN authentication realm to - be used for clients toauthenticate with STUNner. \n The realm must - consist of lower case alphanumeric characters or '-', and must start - and end with an alphanumeric character. No other punctuation is - allowed." + description: |- + Realm defines the STUN/TURN authentication realm to be used for clients toauthenticate + with STUNner. + + + The realm must consist of lower case alphanumeric characters or '-', and must start and + end with an alphanumeric character. No other punctuation is allowed. pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ type: string sharedSecret: @@ -164,8 +334,9 @@ spec: type: string stunnerConfig: default: stunnerd-config - description: StunnerConfig specifies the name of the ConfigMap into - which the operator renders the stunnerd configfile. + description: |- + StunnerConfig specifies the name of the ConfigMap into which the operator renders the + stunnerd configfile. maxLength: 64 pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ type: string @@ -177,11 +348,4024 @@ spec: type: object type: object served: true - storage: true + storage: false subresources: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] \ No newline at end of file +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: staticservices.stunner.l7mp.io +spec: + group: stunner.l7mp.io + names: + categories: + - stunner + kind: StaticService + listKind: StaticServiceList + plural: staticservices + shortNames: + - ssvc + singular: staticservice + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + StaticService is a set of static IP address prefixes STUNner allows access to via a UDPRoute (or + TCPRoute in the future). In contrast to Kubernetes Services, StaticServices expose all ports on + the given IPs. See also https://github.com/kubernetes/enhancements/pull/2611. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Spec defines the behavior of a service. + properties: + prefixes: + description: Prefixes is a list of IP address prefixes reachable via + this route. + items: + type: string + type: array + required: + - prefixes + type: object + type: object + served: true + storage: true + - name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + StaticService is a set of static IP address prefixes STUNner allows access to via a Route. The + purpose is to allow a Service-like CRD containing a set of static IP address prefixes to be set + as the backend of a UDPRoute (or TCPRoute). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Spec defines the behavior of a service. + properties: + ports: + description: The list of ports reachable via this service (currently + omitted). + items: + description: ServicePort contains information on service's port. + properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. 
If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + prefixes: + description: Prefixes is a list of IP address prefixes reachable via + this route. + items: + type: string + type: array + required: + - prefixes + type: object + type: object + served: true + storage: false +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: dataplanes.stunner.l7mp.io +spec: + group: stunner.l7mp.io + names: + categories: + - stunner + kind: Dataplane + listKind: DataplaneList + plural: dataplanes + shortNames: + - dps + singular: dataplane + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + Dataplane is a collection of configuration parameters that can be used for spawning a `stunnerd` + instance for a Gateway. Labels and annotations on the Dataplane object will be copied verbatim + into the target Deployment. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Spec defines the behavior of a Dataplane resource. + properties: + affinity: + description: Scheduling constraints. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. 
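# Illustrative example (not part of the diff): a minimal v1 StaticService as defined by
# the schema above. Per the CRD description, all ports on the listed prefixes become
# reachable once the object is referenced as a UDPRoute backend; the name and CIDR
# prefixes below are hypothetical.
apiVersion: stunner.l7mp.io/v1
kind: StaticService
metadata:
  name: media-plane                    # hypothetical name
  namespace: default
spec:
  prefixes:
    - "10.11.12.0/24"                  # hypothetical CIDR prefixes
    - "10.11.13.0/24"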
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. 
+ If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. 
+ The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. 
+ The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+                              items:
+                                type: string
+                              type: array
+                              x-kubernetes-list-type: atomic
+                            namespaceSelector:
+                              description: |-
+                                A label query over the set of namespaces that the term applies to.
+                                The term is applied to the union of the namespaces selected by this field
+                                and the ones listed in the namespaces field.
+                                null selector and null or empty namespaces list means "this pod's namespace".
+                                An empty selector ({}) matches all namespaces.
+                              properties:
+                                matchExpressions:
+                                  description: matchExpressions is a list of label
+                                    selector requirements. The requirements are ANDed.
+                                  items:
+                                    description: |-
+                                      A label selector requirement is a selector that contains values, a key, and an operator that
+                                      relates the key and values.
+                                    properties:
+                                      key:
+                                        description: key is the label key that the
+                                          selector applies to.
+                                        type: string
+                                      operator:
+                                        description: |-
+                                          operator represents a key's relationship to a set of values.
+                                          Valid operators are In, NotIn, Exists and DoesNotExist.
+                                        type: string
+                                      values:
+                                        description: |-
+                                          values is an array of string values. If the operator is In or NotIn,
+                                          the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                          the values array must be empty. This array is replaced during a strategic
+                                          merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                matchLabels:
+                                  additionalProperties:
+                                    type: string
+                                  description: |-
+                                    matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                    map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                    operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                  type: object
+                              type: object
+                              x-kubernetes-map-type: atomic
+                            namespaces:
+                              description: |-
+                                namespaces specifies a static list of namespace names that the term applies to.
+                                The term is applied to the union of the namespaces listed in this field
+                                and the ones selected by namespaceSelector.
+                                null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+                              items:
+                                type: string
+                              type: array
+                            topologyKey:
+                              description: |-
+                                This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+                                the labelSelector in the specified namespaces, where co-located is defined as running on a node
+                                whose value of the label with key topologyKey matches that of any node on which any of the
+                                selected pods is running.
+                                Empty topologyKey is not allowed.
+                              type: string
+                          required:
+                          - topologyKey
+                          type: object
+                        type: array
+                    type: object
+                type: object
+              annotations:
+                additionalProperties:
+                  type: string
+                description: |-
+                  Custom annotations to add to dataplane pods. Note that this does not affect the
+                  annotations added to the Deployment (these come from the corresponding Gateway), just the
+                  pods. Note also that mandatory pod annotations override whatever you set here on
+                  conflict, and the annotations set here override annotations manually added to the pods.
+                type: object
+              args:
+                description: Arguments to the entrypoint.
+                items:
+                  type: string
+                type: array
+              command:
+                description: 'Entrypoint array. Defaults: "stunnerd".'
+                items:
+                  type: string
+                type: array
+              containerSecurityContext:
+                description: |-
+                  ContainerSecurityContext holds container-level security attributes specifically for the
+                  stunnerd container.
+ properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. + type: string + type: + description: Type is a SELinux type label that applies to + the container. + type: string + user: + description: User is a SELinux user label that applies to + the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + disableHealthCheck: + description: |- + Disable health-checking. Default is to enable HTTP health-checks on port 8086: a + liveness probe responder will be exposed on path `/live` and readiness probe on path + `/ready`. + type: boolean + enableMetricsEndpoint: + description: |- + EnableMetricsEnpoint can be used to enable metrics scraping (Prometheus). If enabled, a + metrics endpoint will be available at http://0.0.0.0:8080 at all dataplane pods. Default + is no metrics collection. + type: boolean + env: + description: List of environment variables to set in the stunnerd + container. 
+ items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in + the stunnerd container. 
+                items:
+                  description: EnvFromSource represents the source of a set of ConfigMaps
+                  properties:
+                    configMapRef:
+                      description: The ConfigMap to select from
+                      properties:
+                        name:
+                          description: |-
+                            Name of the referent.
+                            More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            TODO: Add other useful fields. apiVersion, kind, uid?
+                          type: string
+                        optional:
+                          description: Specify whether the ConfigMap must be defined
+                          type: boolean
+                      type: object
+                      x-kubernetes-map-type: atomic
+                    prefix:
+                      description: An optional identifier to prepend to each key in
+                        the ConfigMap. Must be a C_IDENTIFIER.
+                      type: string
+                    secretRef:
+                      description: The Secret to select from
+                      properties:
+                        name:
+                          description: |-
+                            Name of the referent.
+                            More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            TODO: Add other useful fields. apiVersion, kind, uid?
+                          type: string
+                        optional:
+                          description: Specify whether the Secret must be defined
+                          type: boolean
+                      type: object
+                      x-kubernetes-map-type: atomic
+                  type: object
+                type: array
+              hostNetwork:
+                description: |-
+                  Host networking requested for the stunnerd pod to use the host's network namespace.
+                  Can be used to implement public TURN servers with Kubernetes. Defaults to false.
+                type: boolean
+              image:
+                description: Container image name.
+                type: string
+              imagePullPolicy:
+                description: Image pull policy. One of Always, Never, IfNotPresent.
+                type: string
+              imagePullSecrets:
+                description: |-
+                  ImagePullSecrets is an optional list of references to secrets to use for pulling the
+                  stunnerd image. Note that the referenced secrets are not watched by the operator, so
+                  modifications will take effect only for newly created pods. Also note that the Secret is
+                  always searched in the same namespace as the Gateway, which allows using separate pull
+                  secrets per namespace.
+                items:
+                  description: |-
+                    LocalObjectReference contains enough information to let you locate the
+                    referenced object inside the same namespace.
+                  properties:
+                    name:
+                      description: |-
+                        Name of the referent.
+                        More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                        TODO: Add other useful fields. apiVersion, kind, uid?
+                      type: string
+                  type: object
+                  x-kubernetes-map-type: atomic
+                type: array
+              labels:
+                additionalProperties:
+                  type: string
+                description: |-
+                  Custom labels to add to dataplane pods. Note that this does not affect the labels added
+                  to the Deployment (those come from the Gateway), just the pods. Note also that mandatory
+                  pod labels override whatever you set here on conflict. The only way to set pod labels is
+                  here: whatever you set manually on the dataplane pod will be reset by the operator.
+                type: object
+              replicas:
+                description: |-
+                  Number of desired pods. If empty or set to 1, use whatever is in the target Deployment.
+                  Otherwise, enforce this setting, overwriting whatever is set in the Deployment (this may
+                  block autoscaling the dataplane though). Defaults to 1.
+                format: int32
+                type: integer
+              resources:
+                description: Resources required by stunnerd.
+                properties:
+                  claims:
+                    description: |-
+                      Claims lists the names of resources, defined in spec.resourceClaims,
+                      that are used by this container.
+
+
+                      This is an alpha field and requires enabling the
+                      DynamicResourceAllocation feature gate.
+
+
+                      This field is immutable. It can only be set for containers.
+                    items:
+                      description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + securityContext: + description: SecurityContext holds pod-level security attributes and + common container settings. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. + type: string + type: + description: Type is a SELinux type label that applies to + the container. + type: string + user: + description: User is a SELinux user label that applies to + the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. 
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the stunnerd needs to terminate + gracefully. Defaults to 3600 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how stunnerd pods ought to spread across topology + domains. + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. 
+ And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. 
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + type: object + served: true + storage: true + - name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + Dataplane is a collection of configuration parameters that can be used for spawning a `stunnerd` + instance for a Gateway. Labels and annotations on the Dataplane object will be copied verbatim + into the target Deployment. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Spec defines the behavior of a Dataplane resource. + properties: + affinity: + description: Scheduling constraints. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. 
+ items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
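+# NOTE (editor's illustration, not part of the generated schema): the affinity block above mirrors
+# the upstream Kubernetes PodAffinity/PodAntiAffinity API. A minimal sketch of how the CR carrying
+# this schema (its other fields reference the stunnerd container) might spread its pods across
+# nodes could look as follows; the exact field path and the "app: stunner" selector are assumptions:
+#
+#   spec:
+#     affinity:
+#       podAntiAffinity:
+#         preferredDuringSchedulingIgnoredDuringExecution:
+#         - weight: 100
+#           podAffinityTerm:
+#             labelSelector:
+#               matchLabels:
+#                 app: stunner
+#             topologyKey: kubernetes.io/hostname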
+ items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + args: + description: Arguments to the entrypoint. + items: + type: string + type: array + command: + description: 'Entrypoint array. Defaults: "stunnerd".' + items: + type: string + type: array + env: + description: List of environment variables to set in the stunnerd + container. + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
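+# NOTE (editor's illustration): the env schema above is the standard Kubernetes EnvVar API, so the
+# stunnerd container's environment can be extended with downward-API references. A sketch, assuming
+# the list sits directly under the CR's spec; the STUNNER_ADDR/status.podIP pairing is the same one
+# used by the standalone TLS template removed later in this diff, while STUNNERD_CPU_REQUEST is a
+# hypothetical variable name:
+#
+#   spec:
+#     env:
+#     - name: STUNNER_ADDR
+#       valueFrom:
+#         fieldRef:
+#           fieldPath: status.podIP
+#     - name: STUNNERD_CPU_REQUEST
+#       valueFrom:
+#         resourceFieldRef:
+#           containerName: stunnerd
+#           resource: requests.cpu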
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + healthCheckPort: + description: If specified, the health-check port. + type: integer + hostNetwork: + description: |- + Host networking requested for the stunnerd pod to use the host's network namespace. + Can be used to implement public TURN servers with Kubernetes. Defaults to false. + type: boolean + image: + description: Container image name. + type: string + imagePullPolicy: + description: Image pull policy. One of Always, Never, IfNotPresent. + type: string + replicas: + description: |- + Number of desired pods. This is a pointer to distinguish between explicit zero and not + specified. Defaults to 1. + format: int32 + type: integer + resources: + description: Resources required by stunnerd. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + securityContext: + description: SecurityContext holds pod-level security attributes and + common container settings. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. + type: string + type: + description: Type is a SELinux type label that applies to + the container. 
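+# NOTE (editor's illustration): the resources and securityContext fields above follow the usual
+# Kubernetes pod/container conventions. A conservative sketch for the stunnerd pods; all values
+# are assumptions to be tuned per deployment:
+#
+#   spec:
+#     resources:
+#       requests:
+#         cpu: 100m
+#         memory: 64Mi
+#       limits:
+#         cpu: "1"
+#         memory: 256Mi
+#     securityContext:
+#       runAsNonRoot: true
+#       seccompProfile:
+#         type: RuntimeDefault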
+ type: string + user: + description: User is a SELinux user label that applies to + the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the stunnerd needs to terminate + gracefully. Defaults to 3600 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + type: object + served: true + storage: false +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: udproutes.stunner.l7mp.io +spec: + group: stunner.l7mp.io + names: + categories: + - stunner + kind: UDPRoute + listKind: UDPRouteList + plural: udproutes + singular: udproute + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: |- + UDPRoute provides a way to route UDP traffic. When combined with a Gateway listener, it can be + used to forward traffic on the port specified by the listener to a set of backends specified by + the UDPRoute. + + + Differences from Gateway API UDPRoutes + - port-ranges are correctly handled ([port, endPort]) + - port is not mandatory + - backend weight is not supported + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Spec defines the desired state of UDPRoute. + properties: + parentRefs: + description: |- + ParentRefs references the resources (usually Gateways) that a Route wants + to be attached to. Note that the referenced parent resource needs to + allow this for the attachment to be complete. For Gateways, that means + the Gateway needs to allow attachment from Routes of this kind and + namespace. For Services, that means the Service must either be in the same + namespace for a "producer" route, or the mesh implementation must support + and allow "consumer" routes for the referenced Service. ReferenceGrant is + not applicable for governing ParentRefs to Services - it is not possible to + create a "producer" route for a Service in a different namespace from the + Route. + + + There are two kinds of parent resources with "Core" support: + + + * Gateway (Gateway conformance profile) + + * Service (Mesh conformance profile, experimental, ClusterIP Services only) + + This API may be extended in the future to support additional kinds of parent + resources. + + + ParentRefs must be _distinct_. This means either that: + + + * They select different objects. If this is the case, then parentRef + entries are distinct. In terms of fields, this means that the + multi-part key defined by `group`, `kind`, `namespace`, and `name` must + be unique across all parentRef entries in the Route. + * They do not select different objects, but for each optional field used, + each ParentRef that selects the same object must set the same set of + optional fields to different values. If one ParentRef sets a + combination of optional fields, all must set the same combination. + + + Some examples: + + + * If one ParentRef sets `sectionName`, all ParentRefs referencing the + same object must also set `sectionName`. + * If one ParentRef sets `port`, all ParentRefs referencing the same + object must also set `port`. + * If one ParentRef sets `sectionName` and `port`, all ParentRefs + referencing the same object must also set `sectionName` and `port`. + + + It is possible to separately reference multiple distinct objects that may + be collapsed by an implementation. For example, some implementations may + choose to merge compatible Gateway Listeners together. If that is the + case, the list of routes attached to those resources should also be + merged. + + + Note that for ParentRefs that cross namespace boundaries, there are specific + rules. Cross-namespace references are only valid if they are explicitly + allowed by something in the namespace they are referring to. For example, + Gateway has the AllowedRoutes field, and ReferenceGrant provides a + generic way to enable other kinds of cross-namespace reference. + + + + ParentRefs from a Route to a Service in the same namespace are "producer" + routes, which apply default routing rules to inbound connections from + any namespace to the Service. + + + ParentRefs from a Route to a Service in a different namespace are + "consumer" routes, and these routing rules are only applied to outbound + connections originating from the same namespace as the Route, for which + the intended destination of the connections are a Service targeted as a + ParentRef of the Route. 
+ + + + + + + + items: + description: |- + ParentReference identifies an API object (usually a Gateway) that can be considered + a parent of this resource (usually a route). There are two kinds of parent resources + with "Core" support: + + + * Gateway (Gateway conformance profile) + * Service (Mesh conformance profile, experimental, ClusterIP Services only) + + + This API may be extended in the future to support additional kinds of parent + resources. + + + The API object must be valid in the cluster; the Group and Kind must + be registered in the cluster for this reference to be valid. + properties: + group: + default: gateway.networking.k8s.io + description: |- + Group is the group of the referent. + When unspecified, "gateway.networking.k8s.io" is inferred. + To set the core API group (such as for a "Service" kind referent), + Group must be explicitly set to "" (empty string). + + + Support: Core + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Gateway + description: |- + Kind is kind of the referent. + + + There are two kinds of parent resources with "Core" support: + + + * Gateway (Gateway conformance profile) + * Service (Mesh conformance profile, experimental, ClusterIP Services only) + + + Support for other resources is Implementation-Specific. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: |- + Name is the name of the referent. + + + Support: Core + maxLength: 253 + minLength: 1 + type: string + namespace: + description: |- + Namespace is the namespace of the referent. When unspecified, this refers + to the local namespace of the Route. + + + Note that there are specific rules for ParentRefs which cross namespace + boundaries. Cross-namespace references are only valid if they are explicitly + allowed by something in the namespace they are referring to. For example: + Gateway has the AllowedRoutes field, and ReferenceGrant provides a + generic way to enable any other kind of cross-namespace reference. + + + + ParentRefs from a Route to a Service in the same namespace are "producer" + routes, which apply default routing rules to inbound connections from + any namespace to the Service. + + + ParentRefs from a Route to a Service in a different namespace are + "consumer" routes, and these routing rules are only applied to outbound + connections originating from the same namespace as the Route, for which + the intended destination of the connections are a Service targeted as a + ParentRef of the Route. + + + + Support: Core + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: |- + Port is the network port this Route targets. It can be interpreted + differently based on the type of parent resource. + + + When the parent resource is a Gateway, this targets all listeners + listening on the specified port that also support this kind of Route(and + select this Route). It's not recommended to set `Port` unless the + networking behaviors specified in a Route must apply to a specific port + as opposed to a listener(s) whose port(s) may be changed. When both Port + and SectionName are specified, the name and port of the selected listener + must match both specified values. + + + + When the parent resource is a Service, this targets a specific port in the + Service spec. 
When both Port (experimental) and SectionName are specified, + the name and port of the selected port must match both specified values. + + + + Implementations MAY choose to support other parent resources. + Implementations supporting other types of parent resources MUST clearly + document how/if Port is interpreted. + + + For the purpose of status, an attachment is considered successful as + long as the parent resource accepts it partially. For example, Gateway + listeners can restrict which Routes can attach to them by Route kind, + namespace, or hostname. If 1 of 2 Gateway listeners accept attachment + from the referencing Route, the Route MUST be considered successfully + attached. If no Gateway listeners accept attachment from this Route, + the Route MUST be considered detached from the Gateway. + + + Support: Extended + + + + format: int32 + maximum: 65535 + minimum: 1 + type: integer + sectionName: + description: |- + SectionName is the name of a section within the target resource. In the + following resources, SectionName is interpreted as the following: + + + * Gateway: Listener Name. When both Port (experimental) and SectionName + are specified, the name and port of the selected listener must match + both specified values. + * Service: Port Name. When both Port (experimental) and SectionName + are specified, the name and port of the selected listener must match + both specified values. Note that attaching Routes to Services as Parents + is part of experimental Mesh support and is not supported for any other + purpose. + + + Implementations MAY choose to support attaching Routes to other resources. + If that is the case, they MUST clearly document how SectionName is + interpreted. + + + When unspecified (empty string), this will reference the entire resource. + For the purpose of status, an attachment is considered successful if at + least one section in the parent resource accepts it. For example, Gateway + listeners can restrict which Routes can attach to them by Route kind, + namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from + the referencing Route, the Route MUST be considered successfully + attached. If no Gateway listeners accept attachment from this Route, the + Route MUST be considered detached from the Gateway. + + + Support: Core + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - name + type: object + maxItems: 32 + type: array + rules: + description: Rules are a list of UDP matchers and actions. + items: + description: UDPRouteRule is the configuration for a given rule. + properties: + backendRefs: + description: |- + BackendRefs defines the backend(s) where matching requests should be + sent. UDPRouteRules correctly handle port ranges. + items: + description: BackendRef defines how a Route should forward + a request to a Kubernetes resource. + properties: + endPort: + description: EndPort specifies the upper threshold of + the port-range. Only considered of port is also specified. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + group: + default: "" + description: |- + Group is the group of the referent. For example, "gateway.networking.k8s.io". + When unspecified or empty string, core API group is inferred. + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Service + description: |- + Kind is the Kubernetes resource kind of the referent. For example + "Service". 
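+# NOTE (editor's illustration): a minimal UDPRoute exercising the port-range handling this CRD adds
+# over the Gateway API version (port plus optional endPort on a backendRef). The gateway name matches
+# the udp-gateway used elsewhere in this changeset; the backend Service name, namespace and port
+# range are assumptions:
+#
+#   apiVersion: stunner.l7mp.io/v1
+#   kind: UDPRoute
+#   metadata:
+#     name: media-plane-route
+#   spec:
+#     parentRefs:
+#     - name: udp-gateway
+#     rules:
+#     - backendRefs:
+#       - name: media-server
+#         namespace: media
+#         port: 10000
+#         endPort: 20000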
+ maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: Name is the name of the referent. + maxLength: 253 + minLength: 1 + type: string + namespace: + description: |- + Namespace is the namespace of the backend. When unspecified, the local + namespace is inferred. + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: |- + Port specifies the destination port number to use for this resource. If port is not + specified, all ports are allowed. If port is defined but endPort is not, allow only + access to the given port. If both are specified, allows access in the port-range [port, + endPort] inclusive. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - name + type: object + maxItems: 16 + minItems: 1 + type: array + type: object + maxItems: 16 + minItems: 1 + type: array + required: + - rules + type: object + status: + description: Status defines the current state of UDPRoute. + properties: + parents: + description: |- + Parents is a list of parent resources (usually Gateways) that are + associated with the route, and the status of the route with respect to + each parent. When this route attaches to a parent, the controller that + manages the parent must add an entry to this list when the controller + first sees the route and should update the entry as appropriate when the + route or gateway is modified. + + + Note that parent references that cannot be resolved by an implementation + of this API will not be added to this list. Implementations of this API + can only populate Route status for the Gateways/parent resources they are + responsible for. + + + A maximum of 32 Gateways will be represented in this list. An empty list + means the route has not been attached to any Gateway. + items: + description: |- + RouteParentStatus describes the status of a route with respect to an + associated Parent. + properties: + conditions: + description: |- + Conditions describes the status of the route with respect to the Gateway. + Note that the route's availability is also subject to the Gateway's own + status conditions and listener status. + + + If the Route's ParentRef specifies an existing Gateway that supports + Routes of this kind AND that Gateway's controller has sufficient access, + then that Gateway's controller MUST set the "Accepted" condition on the + Route, to indicate whether the route has been accepted or rejected by the + Gateway, and why. + + + A Route MUST be considered "Accepted" if at least one of the Route's + rules is implemented by the Gateway. + + + There are a number of cases where the "Accepted" condition may not be set + due to lack of controller visibility, that includes when: + + + * The Route refers to a non-existent parent. + * The Route is of a type that the controller does not support. + * The Route is in a namespace the controller does not have access to. + items: + description: "Condition contains details for one aspect of + the current state of this API Resource.\n---\nThis struct + is intended for direct use as an array at the field path + .status.conditions. 
For example,\n\n\n\ttype FooStatus + struct{\n\t // Represents the observations of a foo's + current state.\n\t // Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // + +listType=map\n\t // +listMapKey=type\n\t Conditions + []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" + patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + controllerName: + description: |- + ControllerName is a domain/path string that indicates the name of the + controller that wrote this status. This corresponds with the + controllerName field on GatewayClass. + + + Example: "example.net/gateway-controller". + + + The format of this field is DOMAIN "/" PATH, where DOMAIN and PATH are + valid Kubernetes names + (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + + Controllers MUST populate this field when writing status. Controllers should ensure that + entries to status populated with their ControllerName are cleaned up when they are no + longer necessary. 
+ maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9\/\-._~%!$&'()*+,;=:]+$ + type: string + parentRef: + description: |- + ParentRef corresponds with a ParentRef in the spec that this + RouteParentStatus struct describes the status of. + properties: + group: + default: gateway.networking.k8s.io + description: |- + Group is the group of the referent. + When unspecified, "gateway.networking.k8s.io" is inferred. + To set the core API group (such as for a "Service" kind referent), + Group must be explicitly set to "" (empty string). + + + Support: Core + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + kind: + default: Gateway + description: |- + Kind is kind of the referent. + + + There are two kinds of parent resources with "Core" support: + + + * Gateway (Gateway conformance profile) + * Service (Mesh conformance profile, experimental, ClusterIP Services only) + + + Support for other resources is Implementation-Specific. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + name: + description: |- + Name is the name of the referent. + + + Support: Core + maxLength: 253 + minLength: 1 + type: string + namespace: + description: |- + Namespace is the namespace of the referent. When unspecified, this refers + to the local namespace of the Route. + + + Note that there are specific rules for ParentRefs which cross namespace + boundaries. Cross-namespace references are only valid if they are explicitly + allowed by something in the namespace they are referring to. For example: + Gateway has the AllowedRoutes field, and ReferenceGrant provides a + generic way to enable any other kind of cross-namespace reference. + + + + ParentRefs from a Route to a Service in the same namespace are "producer" + routes, which apply default routing rules to inbound connections from + any namespace to the Service. + + + ParentRefs from a Route to a Service in a different namespace are + "consumer" routes, and these routing rules are only applied to outbound + connections originating from the same namespace as the Route, for which + the intended destination of the connections are a Service targeted as a + ParentRef of the Route. + + + + Support: Core + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + port: + description: |- + Port is the network port this Route targets. It can be interpreted + differently based on the type of parent resource. + + + When the parent resource is a Gateway, this targets all listeners + listening on the specified port that also support this kind of Route(and + select this Route). It's not recommended to set `Port` unless the + networking behaviors specified in a Route must apply to a specific port + as opposed to a listener(s) whose port(s) may be changed. When both Port + and SectionName are specified, the name and port of the selected listener + must match both specified values. + + + + When the parent resource is a Service, this targets a specific port in the + Service spec. When both Port (experimental) and SectionName are specified, + the name and port of the selected port must match both specified values. + + + + Implementations MAY choose to support other parent resources. + Implementations supporting other types of parent resources MUST clearly + document how/if Port is interpreted. 
+ + + For the purpose of status, an attachment is considered successful as + long as the parent resource accepts it partially. For example, Gateway + listeners can restrict which Routes can attach to them by Route kind, + namespace, or hostname. If 1 of 2 Gateway listeners accept attachment + from the referencing Route, the Route MUST be considered successfully + attached. If no Gateway listeners accept attachment from this Route, + the Route MUST be considered detached from the Gateway. + + + Support: Extended + + + + format: int32 + maximum: 65535 + minimum: 1 + type: integer + sectionName: + description: |- + SectionName is the name of a section within the target resource. In the + following resources, SectionName is interpreted as the following: + + + * Gateway: Listener Name. When both Port (experimental) and SectionName + are specified, the name and port of the selected listener must match + both specified values. + * Service: Port Name. When both Port (experimental) and SectionName + are specified, the name and port of the selected listener must match + both specified values. Note that attaching Routes to Services as Parents + is part of experimental Mesh support and is not supported for any other + purpose. + + + Implementations MAY choose to support attaching Routes to other resources. + If that is the case, they MUST clearly document how SectionName is + interpreted. + + + When unspecified (empty string), this will reference the entire resource. + For the purpose of status, an attachment is considered successful if at + least one section in the parent resource accepts it. For example, Gateway + listeners can restrict which Routes can attach to them by Route kind, + namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from + the referencing Route, the Route MUST be considered successfully + attached. If no Gateway listeners accept attachment from this Route, the + Route MUST be considered detached from the Gateway. 
+ + + Support: Core + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - name + type: object + required: + - controllerName + - parentRef + type: object + maxItems: 32 + type: array + required: + - parents + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/deploy/manifests/static/stunner-gateway-operator-manifests.yaml b/deploy/manifests/static/stunner-gateway-operator-manifests.yaml index 0fc55ce9..09b9b5c7 100644 --- a/deploy/manifests/static/stunner-gateway-operator-manifests.yaml +++ b/deploy/manifests/static/stunner-gateway-operator-manifests.yaml @@ -49,6 +49,18 @@ kind: ClusterRole metadata: name: stunner-gateway-operator-manager-role rules: +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - "" resources: @@ -70,9 +82,11 @@ rules: - apiGroups: - "" resources: - - endpoints - - nodes - - secrets + - deployments/finalizers + - deployments/status + - endpoints/status + - nodes/status + - services/status verbs: - get - list @@ -80,11 +94,14 @@ rules: - apiGroups: - "" resources: - - endpoints/status - - nodes/status - - services/status + - endpoints + - namespaces + - nodes + - secrets verbs: - get + - list + - watch - apiGroups: - "" resources: @@ -121,7 +138,10 @@ rules: - apiGroups: - stunner.l7mp.io resources: + - dataplanes - gatewayconfigs + - staticservices + - udproutes verbs: - get - list @@ -131,7 +151,11 @@ rules: - apiGroups: - stunner.l7mp.io resources: + - dataplanes/finalizers - gatewayconfigs/finalizers + - staticservices/finalizers + - udproutes/finalizers + - udproutes/status verbs: - update --- @@ -306,10 +330,12 @@ spec: terminationGracePeriodSeconds: 10 containers: - name: stunner-auth-server - image: l7mp/stunner-auth-server:dev + image: "l7mp/stunner-auth-server:0.16.0" imagePullPolicy: Always command: [ "./manager" ] - args: ["-zap-log-level","10", "-port", "8088"] + args: + - --zap-log-level=10 + - --port=8088 securityContext: allowPrivilegeEscalation: false capabilities: @@ -334,6 +360,10 @@ spec: requests: cpu: 10m memory: 64Mi + nodeSelector: + kubernetes.io/os: linux + tolerations: + [] --- apiVersion: apps/v1 kind: Deployment @@ -366,34 +396,53 @@ spec: - containerPort: 8443 name: https protocol: TCP + - containerPort: 13478 + name: cds + protocol: TCP resources: limits: - cpu: 500m - memory: 128Mi + cpu: 1000m + memory: 256Mi requests: - cpu: 5m - memory: 64Mi + cpu: 250m + memory: 128Mi - args: - - --health-probe-bind-address=:8081 - - --metrics-bind-address=127.0.0.1:8080 - - --leader-elect + - --health-probe-bind-address=:8081 + - --metrics-bind-address=127.0.0.1:8080 + - --leader-elect + - --zap-log-level=info + - --dataplane-mode=legacy command: - /manager - image: "l7mp/stunner-gateway-operator:0.15.0" + env: + - name: STUNNER_GATEWAY_OPERATOR_ADDRESS + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + image: "l7mp/stunner-gateway-operator:0.16.0" imagePullPolicy: IfNotPresent livenessProbe: + failureThreshold: 3 httpGet: path: /healthz port: 8081 + scheme: HTTP initialDelaySeconds: 15 periodSeconds: 20 + successThreshold: 1 + timeoutSeconds: 1 name: manager readinessProbe: + failureThreshold: 3 httpGet: path: /readyz port: 8081 + scheme: HTTP initialDelaySeconds: 5 periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 resources: limits: cpu: 1000m @@ -406,6 +455,7 @@ spec: 
securityContext: runAsNonRoot: true serviceAccountName: stunner-gateway-operator-controller-manager + serviceAccount: stunner-gateway-operator-controller-manager terminationGracePeriodSeconds: 10 nodeSelector: kubernetes.io/os: linux diff --git a/deploy/manifests/static/stunner-manifests.yaml b/deploy/manifests/static/stunner-manifests.yaml index 09a62ad6..b363f0e1 100644 --- a/deploy/manifests/static/stunner-manifests.yaml +++ b/deploy/manifests/static/stunner-manifests.yaml @@ -6,21 +6,23 @@ metadata: namespace: stunner --- apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole +kind: Role metadata: - name: stunner-config-watcher-clusterrole + name: stunner-config-watcher-role + namespace: stunner rules: - apiGroups: [""] resources: ["configmaps", "secrets"] verbs: ["get", "watch", "list"] --- -kind: ClusterRoleBinding +kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: stunner-config-watcher-clusterrolebind + name: stunner-config-watcher-rolebind + namespace: stunner roleRef: - kind: ClusterRole - name: stunner-config-watcher-clusterrole + kind: Role + name: stunner-config-watcher-role apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount @@ -50,8 +52,8 @@ metadata: namespace: stunner annotations: app: stunner - helm.sh/chart: stunner-0.15.0 - app.kubernetes.io/version: "0.15.0" + helm.sh/chart: stunner-0.16.0 + app.kubernetes.io/version: "0.16.0" spec: selector: matchLabels: @@ -75,7 +77,7 @@ spec: hostNetwork: false containers: - name: stunnerd - image: "l7mp/stunnerd:0.15.0" + image: "l7mp/stunnerd:0.16.0" imagePullPolicy: IfNotPresent command: ["stunnerd"] args: ["-w", "-c", "/etc/stunnerd/stunnerd.conf", "--udp-thread-num=16"] diff --git a/deploy/manifests/stunner-expose-kube-dns.yaml b/deploy/manifests/stunner-expose-kube-dns.yaml index 8065d8f0..35b1464f 100644 --- a/deploy/manifests/stunner-expose-kube-dns.yaml +++ b/deploy/manifests/stunner-expose-kube-dns.yaml @@ -1,4 +1,4 @@ -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: name: stunner-gatewayclass @@ -12,7 +12,7 @@ spec: description: "STUNner is a WebRTC ingress gateway for Kubernetes" --- -apiVersion: stunner.l7mp.io/v1alpha1 +apiVersion: stunner.l7mp.io/v1 kind: GatewayConfig metadata: name: stunner-gatewayconfig @@ -24,7 +24,7 @@ spec: password: "pass-1" --- -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: gateway.networking.k8s.io/v1 kind: Gateway metadata: name: udp-gateway @@ -36,7 +36,7 @@ spec: port: 3478 protocol: UDP --- -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: stunner.l7mp.io/v1 kind: UDPRoute metadata: name: stunner-udproute @@ -46,6 +46,5 @@ spec: - name: udp-gateway rules: - backendRefs: - - name: dummy - # - name: kube-dns - # namespace: kube-system + - name: kube-dns + namespace: kube-system diff --git a/deploy/manifests/stunner-standalone-tls.yaml.template b/deploy/manifests/stunner-standalone-tls.yaml.template deleted file mode 100644 index 22741af2..00000000 --- a/deploy/manifests/stunner-standalone-tls.yaml.template +++ /dev/null @@ -1,220 +0,0 @@ -# Stunner: An ingress gateway for WebRTC: TLS/DTLS in standalone mode ---- -# STUN/TURN server config: Make sure to customize the below configurations, see the README.pm for -# more details -apiVersion: v1 -kind: ConfigMap -metadata: - name: stunner-config -data: - # * `STUNNER_PUBLIC_ADDR` (no default): The public IP address clients can use to reach - # STUNner. 
By default, the public IP address will be dynamically assigned by the Kubernetes - # LoadBalancer service. The Helm installation script takes care of updating the configuration - # with the correct value. However, if installing from the static manifests then the external IP - # must be set manually. - STUNNER_PUBLIC_ADDR: "A.B.C.D" - - # * `STUNNER_PUBLIC_PORT` (default: 3478): The public port used by clients to reach STUNner. It - # is important that applications use the public port as found in the configuration, since the - # Helm installation scripts may overwrite this configuration. This occurs when the installation - # falls back to a NodePort service (i.e., when STUNner fails to obtain an external IP from the - # load-balancer). - STUNNER_PUBLIC_PORT: "443" - - # * `STUNNER_PORT` (default: 3478): The internal port used by STUNner for communication inside - # the cluster. It is safe to set this to the public port. - STUNNER_PORT: "443" - - # * `STUNNER_REALM` (default `stunner.l7mp.io`): the REALM used to guide the user agent in - # authenticating with STUNner. - STUNNER_REALM: "stunner.l7mp.io" - - # * `STUNNER_AUTH_TYPE` (default: `plaintext`): the STUN/TURN authentication mode, either - # "plaintext" over the username/password pair $STUNNER_USERNAME/$STUNNER_PASSWORD, or - # "longterm", using $STUNNER_SECRET. Make sure to customize! - STUNNER_AUTH_TYPE: "plaintext" - - # * `STUNNER_USERNAME` (default: `user`): the USERNAME attribute clients can use the authenticate - # with STUNner over plain-text authentication. Make sure to customize! - STUNNER_USERNAME: "user1" - - # * `STUNNER_PASSWORD` (default: `pass`): the password clients can use to authenticate with - # STUNner over plain-text authentication. Make sure to customize! - STUNNER_PASSWORD: "passwd1" - - # * `STUNNER_SHARED_SECRET`: the shared secret used for longterm authentication. - STUNNER_SHARED_SECRET: "my-shared-secret" - - # * `STUNNER_DURATION` (default: `86400`, i.e., one day): the lifetime of STUNner credentials - # * over longterm authentication. - STUNNER_DURATION: "86400" - - # * `STUNNER_LOGLEVEL` (default: `all:WARN`): the default log level used by the STUNner daemons. - STUNNER_LOGLEVEL: "all:INFO" - - # * `STUNNER_MIN_PORT` (default: 10000): smallest relay transport port assigned by STUNner. - STUNNER_MIN_PORT: "10000" - - # * `STUNNER_MAX_PORT` (default: 20000): highest relay transport port assigned by STUNner. 
- STUNNER_MAX_PORT: "20000" - - STUNNER_TLS_KEY: | - XXXXXXX - STUNNER_TLS_CERT: | - YYYYYYY - ---- -## custom static stunnerd conf for TLS/DTLS -apiVersion: v1 -kind: ConfigMap -metadata: - name: stunnerd-conf -data: - "stunnerd.conf" : | - version: v1alpha1 - admin: - name: stunnerd - loglevel: $STUNNER_LOGLEVEL - metrics_endpoint: "http://0.0.0.0:8080/metrics" - auth: - type: $STUNNER_AUTH_TYPE - realm: $STUNNER_REALM - credentials: - username: $STUNNER_USERNAME - password: $STUNNER_PASSWORD - secret: $STUNNER_SHARED_SECRET - clusters: - - name: media-plane - type: STRICT_DNS - endpoints: - - media-plane.default.svc.cluster.local - listeners: - - name: stunner-dtls - public_address: "$STUNNER_PUBLIC_ADDR" - public_port: $STUNNER_PUBLIC_PORT - address: "$STUNNER_ADDR" - port: $STUNNER_PORT - protocol: dtls - min_port: $STUNNER_MIN_PORT - max_port: $STUNNER_MAX_PORT - key: $STUNNER_TLS_KEY - cert: $STUNNER_TLS_CERT - routes: - - media-plane - - name: stunner-tls - public_address: "$STUNNER_PUBLIC_ADDR" - public_port: $STUNNER_PUBLIC_PORT - address: "$STUNNER_ADDR" - port: $STUNNER_PORT - protocol: tls - min_port: $STUNNER_MIN_PORT - max_port: $STUNNER_MAX_PORT - key: $STUNNER_TLS_KEY - cert: $STUNNER_TLS_CERT - routes: - - media-plane - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: stunner -spec: - selector: - matchLabels: - app: stunner - replicas: 1 - template: - metadata: - labels: - app: stunner - spec: - containers: - - name: stunnerd - image: l7mp/stunnerd:latest - imagePullPolicy: Always - command: ["stunnerd"] - args: ["-c", "/etc/stunnerd/stunnerd.conf"] - envFrom: - - configMapRef: - name: stunner-config - env: - - name: STUNNER_ADDR # we use the POD IP - valueFrom: - fieldRef: - fieldPath: status.podIP - volumeMounts: - - name: stunnerd-config-volume - mountPath: /etc/stunnerd - readOnly: true - - name: stunnerd-cert-volume - mountPath: /etc/ssl/certs - readOnly: true - # Uncomment this if you want to deploy a sidecar container with stunner to sniff traffic - # - name: net-debug - # image: l7mp/net-debug:latest - # command: ["/bin/sh"] - # args: ["-c", "while true; do echo hello; sleep 10;done"] - volumes: - - name: stunnerd-config-volume - configMap: - name: stunnerd-conf - optional: true - - name: stunnerd-cert-volume - secret: - secretName: stunner-tls - optional: true - ---- -apiVersion: v1 -kind: Service -metadata: - name: stunner-dtls - labels: - app: stunner -spec: - ports: - - port: 443 - targetPort: 443 - protocol: UDP - name: stunner-dtls - type: LoadBalancer - selector: - app: stunner - ---- -apiVersion: v1 -kind: Service -metadata: - name: stunner-tls - labels: - app: stunner -spec: - ports: - - port: 443 - targetPort: 443 - protocol: TCP - name: stunner-tls - type: LoadBalancer - selector: - app: stunner - ---- -# lock down access from the TURN server to anywhere! 
-apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: stunner-network-policy -spec: - podSelector: - matchLabels: - app: stunner - policyTypes: - - Egress - egress: - - to: - - podSelector: - matchLabels: - app: media-plane - ports: - - protocol: UDP diff --git a/deploy/manifests/stunner-standalone.yaml b/deploy/manifests/stunner-standalone.yaml index e73d1823..229a09f5 100644 --- a/deploy/manifests/stunner-standalone.yaml +++ b/deploy/manifests/stunner-standalone.yaml @@ -1,147 +1,91 @@ -# Stunner: An ingress gateway for WebRTC --- -# STUN/TURN server config: Make sure to customize the below configurations, see the README.pm for -# more details -apiVersion: v1 -kind: ConfigMap +apiVersion: stunner.l7mp.io/v1 +kind: Dataplane metadata: - name: stunner-config - namespace: default -data: - # * `STUNNER_PUBLIC_ADDR` (no default): The public IP address clients can use to reach - # STUNner. By default, the public IP address will be dynamically assigned by the Kubernetes - # LoadBalancer service. The Helm installation script takes care of updating the configuration - # with the correct value. However, if installing from the static manifests then the external IP - # must be set manually. - STUNNER_PUBLIC_ADDR: "A.B.C.D" - - # * `STUNNER_PUBLIC_PORT` (default: 3478): The public port used by clients to reach STUNner. It - # is important that applications use the public port as found in the configuration, since the - # Helm installation scripts may overwrite this configuration. This occurs when the installation - # falls back to a NodePort service (i.e., when STUNner fails to obtain an external IP from the - # load-balancer). - STUNNER_PUBLIC_PORT: "3478" - - # * `STUNNER_PORT` (default: 3478): The internal port used by STUNner for communication inside - # the cluster. It is safe to set this to the public port. - STUNNER_PORT: "3478" - - # * `STUNNER_REALM` (default `stunner.l7mp.io`): the REALM used to guide the user agent in - # authenticating with STUNner. - STUNNER_REALM: "stunner.l7mp.io" - - # * `STUNNER_AUTH_TYPE` (default: `plaintext`): the STUN/TURN authentication mode, either - # "plaintext" over the username/password pair $STUNNER_USERNAME/$STUNNER_PASSWORD, or - # "longterm", using $STUNNER_SECRET. Make sure to customize! - STUNNER_AUTH_TYPE: "plaintext" - - # * `STUNNER_USERNAME` (default: `user`): the USERNAME attribute clients can use the authenticate - # with STUNner over plain-text authentication. Make sure to customize! - STUNNER_USERNAME: "user1" - - # * `STUNNER_PASSWORD` (default: `pass`): the password clients can use to authenticate with - # STUNner over plain-text authentication. Make sure to customize! - STUNNER_PASSWORD: "passwd1" - - # * `STUNNER_SHARED_SECRET`: the shared secret used for longterm authentication. - STUNNER_SHARED_SECRET: "my-shared-secret" - - # * `STUNNER_DURATION` (default: `86400`, i.e., one day): the lifetime of STUNner credentials - # * over longterm authentication. - STUNNER_DURATION: "86400" - - # * `STUNNER_LOGLEVEL` (default: `all:WARN`): the default log level used by the STUNner daemons. - STUNNER_LOGLEVEL: "all:INFO" - - # * `STUNNER_MIN_PORT` (default: 10000): smallest relay transport port assigned by STUNner. - STUNNER_MIN_PORT: "10000" - - # * `STUNNER_MAX_PORT` (default: 20000): highest relay transport port assigned by STUNner. 
- STUNNER_MAX_PORT: "20000" - + name: host-net +spec: + command: + - stunnerd + args: + - -w + - --udp-thread-num=16 + image: l7mp/stunnerd:dev + hostNetwork: true + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 100m + memory: 128Mi --- -apiVersion: apps/v1 -kind: Deployment +apiVersion: gateway.networking.k8s.io/v1 +kind: GatewayClass metadata: - name: stunner - namespace: default + name: stunner-gatewayclass spec: - selector: - matchLabels: - app: stunner - replicas: 1 - template: - metadata: - labels: - app: stunner - spec: - containers: - - name: stunnerd - image: l7mp/stunnerd:latest - imagePullPolicy: Always - command: ["stunnerd"] - args: ["-c", "/stunnerd.conf"] - # args: ["-c", "/stunnerd.conf"] - envFrom: - - configMapRef: - name: stunner-config - env: - - name: STUNNER_ADDR # we use the POD IP - valueFrom: - fieldRef: - fieldPath: status.podIP - # Uncomment this if you want to deploy a sidecar container with stunner to sniff traffic - # - name: net-debug - # image: l7mp/net-debug:latest - # command: ["/bin/sh"] - # args: ["-c", "while true; do echo hello; sleep 10;done"] - + controllerName: "stunner.l7mp.io/gateway-operator" + parametersRef: + group: "stunner.l7mp.io" + kind: GatewayConfig + name: stunner-gatewayconfig + namespace: stunner + description: "STUNner is a WebRTC media gateway for Kubernetes" --- -apiVersion: v1 -kind: Service +apiVersion: stunner.l7mp.io/v1 +kind: GatewayConfig metadata: - name: stunner - namespace: default - labels: - app: stunner + name: stunner-gatewayconfig + namespace: stunner spec: - ports: - - port: 3478 - targetPort: 3478 - # nodePort: 30478 - protocol: UDP - name: stunner-udp - type: LoadBalancer - selector: - app: stunner - + dataplane: host-net + realm: stunner.l7mp.io + authRef: + name: stunner-auth-secret + namespace: stunner --- apiVersion: v1 -kind: Service +kind: Secret +metadata: + name: stunner-auth-secret + namespace: stunner +type: Opaque +stringData: + type: static + username: user-1 + password: pass-1 +--- +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: turn-gateway + namespace: stunner +spec: + gatewayClassName: stunner-gatewayclass + listeners: + - name: turn-listener + port: 3478 + protocol: TURN-UDP +--- +apiVersion: stunner.l7mp.io/v1 +kind: UDPRoute metadata: - name: stunner-tcp - labels: - app: stunner + name: open-route + namespace: stunner spec: - ports: - - port: 3478 - targetPort: 3478 - protocol: TCP - name: stunner-tcp - type: LoadBalancer - selector: - app: stunner - + parentRefs: + - name: turn-gateway + rules: + - backendRefs: + - group: stunner.l7mp.io + kind: StaticService + name: wildcard-backend --- -# lock down access from the TURN server to anywhere! 
-apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy +apiVersion: stunner.l7mp.io/v1 +kind: StaticService metadata: - name: stunner-network-policy - namespace: default + name: wildcard-backend + namespace: stunner spec: - podSelector: - matchLabels: - app: stunner - policyTypes: - - Egress + prefixes: + - "0.0.0.0/0" diff --git a/deploy/manifests/stunner-test.yaml b/deploy/manifests/stunner-test.yaml new file mode 100644 index 00000000..7749f08d --- /dev/null +++ b/deploy/manifests/stunner-test.yaml @@ -0,0 +1,63 @@ +apiVersion: gateway.networking.k8s.io/v1 +kind: GatewayClass +metadata: + name: stunner-gatewayclass +spec: + controllerName: "stunner.l7mp.io/gateway-operator" + parametersRef: + group: "stunner.l7mp.io" + kind: GatewayConfig + name: stunner-gatewayconfig + namespace: stunner + description: "STUNner is a WebRTC ingress gateway for Kubernetes" +--- + +apiVersion: stunner.l7mp.io/v1 +kind: GatewayConfig +metadata: + name: stunner-gatewayconfig + namespace: stunner +spec: + realm: stunner.l7mp.io + authType: plaintext + userName: "user-1" + password: "pass-1" + +--- +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: udp-gateway + namespace: stunner +spec: + gatewayClassName: stunner-gatewayclass + listeners: + - name: udp-listener + port: 3478 + protocol: TURN-UDP +--- +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: tcp-gateway + namespace: stunner +spec: + gatewayClassName: stunner-gatewayclass + listeners: + - name: tcp-listener + port: 3478 + protocol: TURN-TCP +--- +apiVersion: stunner.l7mp.io/v1 +kind: UDPRoute +metadata: + name: media-plane + namespace: stunner +spec: + parentRefs: + - name: udp-gateway + - name: tcp-gateway + rules: + - backendRefs: + - name: media-plane + namespace: default diff --git a/docs/AUTH.md b/docs/AUTH.md index 675975cd..329b4e1b 100644 --- a/docs/AUTH.md +++ b/docs/AUTH.md @@ -1,56 +1,25 @@ # Authentication -STUNner uses the IETF STUN/TURN protocol suite to ingest media traffic into the Kubernetes cluster, -which, [by design](https://datatracker.ietf.org/doc/html/rfc5766#section-17), provides -comprehensive security. In particular, STUNner provides message integrity and, if configured with -the TLS/TCP or DTLS/UDP listeners, complete confidentiality. To complete the CIA triad, this guide -shows how to configure user authentication with STUNner. +STUNner uses the IETF STUN/TURN protocol suite to ingest media traffic into a Kubernetes cluster, which, [by design](https://datatracker.ietf.org/doc/html/rfc5766#section-17), provides comprehensive security. In particular, STUNner provides message integrity and, if configured with the TURN-TLS or TURN-DTLS listeners, confidentiality. To complete the CIA triad, this guide shows how to configure user authentication with STUNner. ## The long-term credential mechanism STUNner relies on the STUN [long-term credential mechanism](https://www.rfc-editor.org/rfc/rfc8489.html#page-26) to provide user authentication. -The long-term credential mechanism assumes that, prior to the communication, STUNner and the WebRTC -clients agree on a username and password to be used for authentication. The credential is -considered long-term since it is assumed that it is provisioned for a user and remains in effect -until the user is no longer a subscriber of the system (STUNner's `static` authentication mode), -or until the predefined lifetime of the username/password pair passes and the credential expires -(`ephemeral` authentication mode in STUNner). 
+The long-term credential mechanism assumes that, prior to the communication, STUNner and the WebRTC clients agree on a username and password to be used for authentication. The credential is considered long-term since it is assumed to remain in effect until the user is no longer a subscriber of the system (STUNner's `static` authentication mode), or until the predefined lifetime of the credential expires (`ephemeral` authentication mode in STUNner). -STUNner secures the authentication process against replay attacks using a digest challenge. In -this mechanism, the server sends the user a realm (used to guide the user or agent in selection of -a username and password) and a nonce. The nonce provides replay protection. The client also -includes a message-integrity attribute in the authentication message, which provides an HMAC over -the entire request, including the nonce. The server validates the nonce and checks the message -integrity. If they match, the request is authenticated, otherwise the server rejects the request. +STUNner secures the authentication process against replay attacks using a digest challenge. In this mechanism, the server sends the user a realm (used to guide the user or agent in selection of a username and password) and a nonce. The nonce provides replay protection. The client also includes a message-integrity attribute in the authentication message, which provides an HMAC over the entire request, including the nonce. The server validates the nonce and checks the message integrity. If they match, the request is authenticated, otherwise the server rejects the request. ## Authentication workflow -The intended authentication workflow in STUNner is as follows. +The authentication workflow of STUNner is as follows. -1. *A username/password pair is generated.* This is outside the scope of STUNner; however, STUNner - comes with a comprehensive [authentication - service](https://github.com/l7mp/stunner-auth-service) that can be queried for a valid ICE - configuration for STUNner. The ICE configs returned by this service can be used by clients as - the [option field in the `PeerConnection` - call](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection/RTCPeerConnection#parameters), - so that the resultant PeerConnections will be opened via STUNner as the TURN server. +1. *A username/password pair is generated.* This is outside the scope of STUNner; however, STUNner comes with a custom [authentication service](https://github.com/l7mp/stunner-auth-service) that can be queried for a valid ICE configuration to be used by clients to authenticate with STUNner. The ICE configs returned by this service can be used by clients as the [option field in the `PeerConnection` call](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection/RTCPeerConnection#parameters), so that the resultant PeerConnections will be opened via STUNner as the TURN server. - > **Warning** - Clients should never query the STUNner authentication service directly to obtain an ICE - config. Instead, the WebRTC application server should retrieve the ICE config in the name of the - client during session establishment and return the generated ICE config to the client. + The ICE configs generated by the [STUNner authentication service](https://github.com/l7mp/stunner-auth-service) are always up to date with the most recent dataplane configuration. 
This makes sure that whenever you modify the STUNner Gateway API configuration (say, switch from `static` authentication to `ephemeral`), your clients will always receive an ICE config that reflects these changes (that is, the username/password pair will provide a time-windowed ephemeral credential). - The ICE configs generated by the [STUNner authentication - service](https://github.com/l7mp/stunner-auth-service) are always up to date with the most - recent dataplane configuration. This makes sure that whenever you modify the STUNner Gateway API - configuration (say, switch from `static` authentication to `ephemeral`), your clients will - always receive an ICE config that reflects these changes (that is, the username/password pair - will provide a time-windowed credential). - - For instance, the below will query the STUnner auth service, which is by default available at - the URL `http://stunner-auth.stunner-system:8088`, for a valid ICE config. + Below is a query to the STUNner auth service, by default available at the URL `http://stunner-auth.stunner-system:8088`, that returns a valid ICE config. ```console curl "http://stunner-auth.stunner-system:8088/ice?service=turn" @@ -69,59 +38,25 @@ The intended authentication workflow in STUNner is as follows. } ``` - Use the below to specify the lifetime of the generated credential to one hour (`ttl`, only makes sense when - STUNner uses `ephemeral` authentication credentials) for a user named `my-user`, and you want - the user to enter your cluster via the STUNner Gateway called `my-gateway` deployed into the - `my-namespace` namespace. + Use the below query to generate a valid STUNner credential to access the Gateway called `my-gateway` deployed into the `my-namespace` namespace: ```console - curl "http://stunner-auth.stunner-system:8088/ice?service=turn?ttl=3600&username=my-user&namespace=my-namespace&gateway=my-gateway" + curl "http://stunner-auth.stunner-system:8088/ice?service=turn&ttl=3600&username=my-user&namespace=my-namespace&gateway=my-gateway" ``` -2. The clients *receive the ICE configuration* (usually, from the application server) over a secure - channel. This is outside the context of STUNner; our advice is to return the ICE configuration - during the session setup process, say, along with the initial configuration returned for clients - before starting the call. +2. The clients *receive the ICE configuration* (usually, from the application server) over a secure channel. This is outside the context of STUNner. Our advice is to return the ICE configuration during the session setup process, say, along with the initial configuration returned for clients before starting the call. -3. WebRTC clients are *configured with the ICE configuration* obtained above. The below snippet - shows how to initialize a WebRTC - [`PeerConnection`](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection/RTCPeerConnection) - to use the above ICE server configuration in order to use STUNner as the default TURN service.
- ```javascript + ``` var iceConfig = var pc = new RTCPeerConnection(iceConfig); ``` ## Static authentication -In STUNner, `static` authentication is the simplest and least secure authentication mode, basically -corresponding to a traditional "log-in" username and password pair given to users. STUNner accepts -(and sometimes reports) the alias `plaintext` to mean the `static` authentication mode; the use of -`plaintext` is deprecated and will be removed in a later release. +In STUNner, `static` authentication is the simplest and least secure authentication mode, basically corresponding to a traditional "log-in" username and password pair given to users. -When STUNner is configured to use `static` authentication only a single username/password pair is -used for *all* clients. This makes configuration easy; e.g., the ICE server configuration can be -hardcoded into the static Javascript code served to clients. At the same time, `static` -authentication is prone to leaking the credentials: once an attacker learns a username/password -pair they can use it without limits to reach STUNner (until the administrator rolls the -credentials, see below). +When STUNner is configured to use `static` authentication only a single username/password pair is used for *all* clients. This makes configuration easy; e.g., the ICE server configuration can be hardcoded into the static Javascript code served to clients. At the same time, `static` authentication is prone to leaking credentials: once an attacker learns a username/password pair they can use it without limits to reach STUNner (until the administrator rolls the credentials, see below). -The first step of configuring STUNner for the `static` authentication mode is to create a -Kubernetes Secret to hold the username/password pair. The below will set the username to `my-user` -and the password to `my-password`. Note that if no `type` is set then STUNner defaults to `static` -authentication. +The first step of configuring STUNner for the `static` authentication mode is to create a Kubernetes Secret to hold the username/password pair. The below will set the username to `my-user` and the password to `my-password`. If no `type` is set then STUNner defaults to `static` authentication. ```console kubectl -n stunner create secret generic stunner-auth-secret --from-literal=type=static \ --from-literal=username=my-user --from-literal=password=my-password ``` -Then, we create or update the current [GatewayConfig](REFERENCE.md) to refer STUNner to this secret -for setting the authentication credentials. +Then, we update the [GatewayConfig](GATEWAY.md) to refer STUNner to this Secret for setting authentication credentials. ```yaml -apiVersion: stunner.l7mp.io/v1alpha1 +apiVersion: stunner.l7mp.io/v1 kind: GatewayConfig metadata: name: stunner-gatewayconfig @@ -133,53 +81,28 @@ spec: namespace: stunner ``` -The main use of static authentication is for testing. The reason for this is that static -authentication credentials are easily discoverable: since the WebRTC Javascript API uses the TURN -credentials unencrypted, an attacker can easily extract the STUNner credentials from the -client-side Javascript code. In order to mitigate the risk, it is a good security practice to reset -the username/password pair every once in a while. This can be done by simply updating the Secret -that holds the credentials. +It is a good security practice to reset the username/password pair every once in a while. This can be done by simply updating the Secret that holds the credentials. 
```yaml kubectl -n stunner edit secret stunner-auth-secret ``` -> **Warning** -Modifying STUNner's credentials goes *without* restarting the TURN server but may affect existing -sessions, in that existing sessions will not be able to refresh the active TURN allocation with the -old credentials. The application server may also need to be restarted to learn the new TURN -credentials. +> [!WARNING] +> +> Modifying STUNner's credentials goes *without* restarting the TURN server but may affect existing sessions, in that active sessions will not be able to refresh their TURN allocation any more. This will result in the disconnection of clients using the old credentials. ## Ephemeral authentication -For production use, STUNner provides the `ephemeral` authentication mode that uses per-client -time-limited STUN/TURN authentication credentials. Ephemeral credentials are dynamically generated -with a pre-configured lifetime and, once the lifetime expires, the credential cannot be used to -authenticate (or refresh) with STUNner any more. This authentication mode is more secure since -credentials are not shared between clients and come with a limited lifetime. Configuring -`ephemeral` authentication may be more complex though, since credentials must be dynamically -generated for each session and properly returned to clients. STUNner accepts (and sometimes -reports) the alias `longterm` to mean the `ephemeral` authentication mode; the use of `longterm` is -deprecated and will be removed in a later release. Note also that the alias `timewindowed` is also -accepted. - -To implement this mode, STUNner adopts the [quasi-standard time-windowed TURN authentication -credential format](https://datatracker.ietf.org/doc/html/draft-uberti-behave-turn-rest-00). In this -format, the TURN username consists of a colon-delimited combination of the expiration timestamp and -the user-id parameter, where the user-id is some application-specific id that is opaque to STUNner -and the timestamp specifies the date of expiry of the credential as a UNIX timestamp. The TURN -password is computed from the a secret key shared with the TURN server and the returned username -value, by performing `base64(HMAC-SHA1(secret key, username))`. STUNner extends this scheme -somewhat for maximizing interoperability with WebRTC apps, in that it allows the user-id and the -timestamp to appear in any order in the TURN username and it accepts usernames with a plain -timestamp, without the colon and/or the user-id. +STUNner provides the `ephemeral` authentication mode for production use, which uses per-client time-limited STUN/TURN authentication credentials. Ephemeral credentials are dynamically generated with a pre-configured lifetime and, once the lifetime expires, the credential cannot be used to authenticate (or refresh) with STUNner any more. This authentication mode is more secure since credentials are not shared between clients and come with a limited lifetime. Configuring `ephemeral` authentication may be more complex though, since credentials must be dynamically generated for each session and properly returned to clients. + +STUNner adopts the [quasi-standard time-windowed TURN authentication credential format](https://datatracker.ietf.org/doc/html/draft-uberti-behave-turn-rest-00) for ephemeral authentication. 
The TURN username consists of a colon-delimited combination of the expiration timestamp and the user-id parameter, where the user-id is some application-specific id that is opaque to STUNner and the timestamp specifies the date of expiry of the credential as a UNIX timestamp. The TURN password is computed from a secret key shared with the TURN server and the returned username value, by performing `base64(HMAC-SHA1(secret key, username))`. STUNner extends this scheme somewhat for maximizing interoperability with WebRTC apps, in that it allows the user-id and the timestamp to appear in any order in the TURN username and it accepts usernames with a plain timestamp, without the colon and/or the user-id. The advantage of this mechanism is that it is enough to know the shared secret for STUNner to be able to check the validity of a credential. -> **Warning** -The user-id is used only for the integrity check but STUNner in no way checks whether it identifies -a valid user-id in the system. +> [!WARNING] +> +> The user-id only serves to ensure that the password generated per user-id is unique, but STUNner in no way checks whether it identifies a valid user-id in the system. In order to switch from `static` mode to `ephemeral` authentication, it is enough to update the Secret that holds the credentials. The below will set the shared secret `my-shared-secret` for the diff --git a/docs/CONCEPTS.md b/docs/CONCEPTS.md index f09412fa..d8cc646a 100644 --- a/docs/CONCEPTS.md +++ b/docs/CONCEPTS.md @@ -1,27 +1,27 @@ # Concepts -In this guide we describe STUNner's architecture and the most important components of an operational STUNner installation. +This guide describes STUNner's architecture and the most important components of an operational installation. ## Architecture -A STUNner installation consists of two parts, a *control plane* and a *dataplane*. The control plane consists of declarative policies specifying the way STUNner should route WebRTC media traffic to the media servers, plus a gateway operator that renders the high-level policies into an actual dataplane configuration. The dataplane in turn comprises one or more `stunnerd` pods, responsible for actually ingesting media traffic into the cluster through a STUN/TURN server. Since the TURN service underlying STUNner is agnostic to NATs, STUNner can inject clients' media traffic into the private Kubernetes pod network, addressing all NAT traversal steps (client-side and server-side) in a single go. +A STUNner installation consists of two parts, a *control plane* and a *data plane*. The control plane consists of declarative policies specifying the way STUNner should route WebRTC media traffic to the media servers, plus a gateway operator that renders the high-level policies into an actual dataplane configuration. The data plane in turn comprises one or more `stunnerd` pods, which are responsible for actually ingesting media traffic into the cluster. The dataplane pods are automatically provisioned by the gateway operator so they should come and go as you add and remove STUNner gateways. ![STUNner architecture](img/stunner_arch_big.svg) -The unit of the STUNner configuration is a [designated Kubernetes namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces) that holds the control plane configuration and the dataplane pods. You can run multiple STUNner deployments side-by-side by installing a separate dataplane into a each namespace and defining a distinct gateway hierarchy to configure each dataplane separately.
- -### Control plane +## Control plane The STUNner control plane consists of the following components: -* **Gateway hierarchy:** A gateway hierarchy is a collection of [Kubernetes Custom Resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources) that together describe the way media traffic should enter the cluster, including public IP addresses and ports clients can use to reach STUNner, TURN credentials, forwarding rules, etc. The anchor of the gateway hierarchy is the GatewayClass object, and the rest of the resources form a complete hierarchy underneath it: the GatewayConfig describes general STUNner configuration, Gateways define the port and transport protocol for each TURN server listener, and UDPRoutes point to the backend services client traffic should be forwarded to. See [here](GATEWAY.md) for a full reference. +* **Gateway API resources:** The high-level STUNner configuration is a collection of [Gateway API](https://gateway-api.sigs.k8s.io) resources that together describe the way media traffic should enter the cluster. The anchor of the configuration hierarchy is the GatewayClass object, and the rest of the resources form a complete hierarchy underneath it: the GatewayConfig describes general STUNner configuration, Gateways define the port and transport protocol per each TURN server listener, and UDPRoutes point to the backend services client traffic should be forwarded to. See [here](GATEWAY.md) for a full reference. -* **Gateway operator:** The main purpose of the gateway operator is to watch gateway hierarchies for change and, once a custom resource is added or modified by the user, render a new dataplane configuration. This configuration is then mapped into the filesystem of the `stunnerd` pods running in the same namespace, so that each `stunnerd` instance will use the most recent configuration. The STUNner Helm chart [automatically installs](INSTALL.md) the gateway operator; more information can be found [here](https://github.com/l7mp/stunner-gateway-operator). +* **Gateway operator:** The main purpose of the gateway operator is to watch Gateway API resources and, once a Gateway API resource is added or modified by the user, update the dataplane accordingly (see below). -* **STUNner ConfigMap:** The STUNner ConfigMap contains the running dataplane configuration. Of course, we could let the `stunnerd` pods themselves to watch the control plane for changes, but this would run into scalability limitations for large deployments. Instead, we separate the control plane and the dataplane, which brings cool [benefits](https://en.wikipedia.org/wiki/Software-defined_networking). The STUNner ConfigMap is usually named as `stunnerd-config`, but you can override this from the GatewayConfig. +* **STUNner authentication service** (not shown on the figure): The auth service is an ancillary service that can be used to generate TURN credentials and complete [ICE server configurations](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection/RTCPeerConnection#iceservers) to bootstrap clients. See more info [here](AUTH.md). ## Dataplane -The STUNner dataplane is comprised of a fleet of `stunnerd` pods. These pods actually implement the TURN server, using the configuration available in the STUNner ConfigMap which is mapped into the pods' filesystem dynamically. 
Then, `stunnerd` will watch for changes in the config file and, once a change is detected, it [reconciles](https://kubernetes.io/docs/concepts/architecture/controller) the dataplane to match the new user policies. +The STUNner dataplane is comprised of a fleet of `stunnerd` pods implementing the TURN servers that can be used by clients to create WebRTC connections, plus some additional configuration to expose the TURN services to clients. The complete dataplane configuration per each Gateway is as follows: + +* **`stunnerd` Deployment:** Once you create a new Gateway the gateway operator will spawn a new dataplane for the Gateway automatically. For each Gateway there will be a `stunnerd` Deployment with the same name and namespace. The `stunnerd` daemon itself is a TURN server implemented on top of the [pion/turn](https://github.com/pion/turn) Go WebRTC framework. The daemon will instantiate a separate *TURN listener* for each Gateway listener in the gateway configuration to terminate clients' TURN sessions, a *cluster* per each UDPRoute to forward packets to the backend services (e.g., to the media servers), with some ancillary administrative and authentication mechanisms in place to check client credentials, logging, etc. Whenever you modify a Gateway (UDPRoute), the gateway operator renders a new dataplane configuration with the modified listener (cluster, respectively) specs and downloads it to the `stunnerd` pods, which in turn reconcile their internal state with respect to the new configuration. You are free to scale the dataplane to as many `stunnerd` pods as you wish: Kubernetes will make sure that new client connections are distributed evenly over the scaled-out STUNner dataplane. -The `stunnerd` daemon itself is essentially a simple TURN server on top of [pion/turn](https://github.com/pion/turn) written in Go. The daemon will instantiate a separate *TURN listener* for each Gateway listener in the gateway hierarchy to terminate clients' TURN sessions, a *cluster* per each UDPRoute to forward packets to the backend services (e.g., to the media servers), with some ancillary administrative and authentication mechanisms in place to check client credentials before admitting traffic into the cluster, logging, etc. There is a one-to-one mapping between the control-plane Gateway listeners and the `stunnerd` TURN listeners, as well as between the UDPRoute resources and `stunnerd`'s clusters. Whenever you modify a Gateway (UDPRoute), the gateway operator renders a new dataplane configuration with the modified listener (cluster, respectively) specs and the `stunnerd` pods reconcile their internal state to the new configuration. You are free to scale the dataplane to as many `stunnerd` pods as you like: Kubernetes will make sure that new client connections are distributed evenly over the scaled-out STUNner dataplane. +* **LoadBalancer Service:** STUNner creates a separate LoadBalancer Service per each Gateway to expose the TURN listeners of the `stunnerd` pods to the outside world. Similarly to the case of the `stunnerd` Deployment, there will be a separate LoadBalancer Service per each Gateway with the same name and namespace. diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index 66b9b808..24da4338 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -1,15 +1,14 @@ # Deployment models -STUNner can be deployed in many combinations to support a wide range of operational +STUNner can be deployed in many different ways, supporting a wide range of operational requirements.
First, it supports multiple [architectural models](#architectural-models) where it can act either as a simple headless STUN/TURN server or a fully fledged ingress gateway in front of an entire Kubernetes-based media server pool. Second, when STUNner is configured as an ingress gateway then there are multiple [ICE models](#ice-models), based on whether only the client connects via STUNner or both clients and media servers use STUNner to set up the media-plane -connection. Third, STUNner can run in one of two [control plane models](#control-plane-models), -based on whether the user manually supplies STUNner configuration or there is a separate STUNner -control plane that automatically reconciles the dataplane state based on a high-level [declarative -API](https://gateway-api.sigs.k8s.io). +connection. Third, STUNner can run in one of several [data plane models](#data-plane-models), based +on whether the dataplane is automatically provisioned or the user has to manually supply the +dataplane pods for STUNner. ## Architectural models @@ -26,11 +25,11 @@ this case the STUN/TURN servers are deployed into Kubernetes. ![STUNner headless deployment architecture](img/stunner_standalone_arch.svg) -> **Warning** -For STUNner to be able to connect WebRTC clients and servers in the headless model *all* the -clients and servers *must* use STUNner as the TURN server. This is because STUNner opens the -transport relay connections *inside* the cluster, on a private IP address, and this address is -reachable only to STUNner itself, but not for external STUN/TURN servers. + + + + + ### Media-plane deployment model @@ -52,12 +51,18 @@ for clients' UDP transport streams then STUNner can be scaled freely, otherwise result the [disconnection of a small number of client connections](https://cilium.io/blog/2020/11/10/cilium-19/#maglev). -#### Asymmetric ICE mode +## ICE models -The standard mode to supply an ICE server configuration for clients and media servers in the -media-plane deployment model of STUNner is the *asymmetric ICE mode*. In this model the client is -configured with STUNner as the TURN server and media servers run with no STUN or TURN servers -whatsoever. +The peers willing to create a connection via STUNner (e.g., two clients as per the headless model, +or a client and a media server in the media-plane deployment model) need to decide how to create +ICE candidates. + +### Asymmetric ICE mode + +In *asymmetric ICE mode*, one peer is configured with STUNner as the TURN server and the other peer +runs with no STUN or TURN servers whatsoever. The first peer will create a TURN transport relay +connection via STUNner to which the other peer can directly join. Asymmetric ICE mode is the +recommended way for the media-plane deployment model. ![STUNner asymmetric ICE mode](img/stunner_asymmetric_ice.svg) @@ -71,38 +76,34 @@ connection. In contrast, servers run without any STUN/TURN server whatsoever, so only. Due to servers being deployed into ordinary Kubernetes pods, the server's host candidate will likewise contain a private pod IP address. Then, since in the Kubernetes networking model ["pods can communicate with all other pods on any other node without a -NAT"](https://kubernetes.io/docs/concepts/services-networking), clients' relay candidates and the -servers' host candidates will have direct connectivity in the Kubernetes private container network -and the ICE connectivity check will succeed. 
See more explanation +NAT"](https://kubernetes.io/docs/concepts/services-networking), the client's relay candidate and +the server's host candidate will have direct connectivity in the Kubernetes private container +network and the ICE connectivity check will succeed. See more explanation [here](examples/kurento-one2one-call/README.md#what-is-going-on-here). -> **Warning** -Refrain from configuring additional public STUN/TURN servers, apart from STUNner itself. The rules -to follow in setting the [ICE server +Refrain from configuring additional public STUN/TURN servers apart from STUNner itself. The rules +to follow for setting the [ICE server configuration](https://github.com/l7mp/stunner#configuring-webrtc-clients) in asymmetric ICE mode are as below: -> - on the client, set STUNner as the *only* TURN server and configure *no* STUN servers, whereas -> - on the server do *not* configure *any* STUN or TURN servers whatsoever. +- on the client, set STUNner as the *only* TURN server and configure *no* STUN servers, and +- on the server do *not* configure *any* STUN or TURN server whatsoever. -Most users will want to deploy STUNner using the asymmetric ICE mode. In the rest of the docs, -unless noted otherwise we will assume the asymmetric ICE mode with the media plane deployment -model. +Deviating from these rules *might* work in certain cases, but may have uncanny and hard-to-debug +side-effects. For instance, configuring clients and servers with public STUN servers in certain +unlucky situations may allow them to connect via server-reflexive ICE candidates, completely +circumventing STUNner. This is on the one hand extremely fragile and, on the other hand, a security +vulnerability; remember, STUNner should be the *only* external access point to your media plane. It +is a good advice to set the `iceTransportPolicy` to `relay` on the clients to avoid side-effects: +this will prevent clients from generating host and server-reflexive ICE candidates, leaving STUNner +as the only option to obtain an ICE candidate from. -> **Warning** -Deviating from the above rules *might* work in certain cases, but may have uncanny and -hard-to-debug side-effects. For instance, configuring clients and servers with public STUN servers -in certain unlucky situations may allow them to connect via server-reflexive ICE candidates, -completely circumventing STUNner. This is on the one hand extremely fragile and, on the other hand, -a security vulnerability; remember, STUNner should be the *only* external access point to your -media plane. It is a good advice to set the `iceTransportPolicy` to `relay` on the clients to avoid -side-effects: this will prevent clients from generating host and server-reflexive ICE candidates, -leaving STUNner as the only option to obtain an ICE candidate from. - -#### Symmetric ICE mode +### Symmetric ICE mode In the symmetric ICE mode both the client and the server obtain an ICE [relay candidate](https://developer.mozilla.org/en-US/docs/Web/API/RTCIceCandidate/type) from STUNner and -the connection occurs directly via STUNner. +the connection occurs directly via STUNner. This is the simplest mode for the headless deployment +model, but symmetric mode can also be used for the media-plane model as well to connect clients to +media servers. 
![STUNner symmetric ICE mode](img/stunner_symmetric_ice.svg) @@ -119,7 +120,7 @@ priorities](https://www.ietf.org/rfc/rfc5245.txt) to different connection types) is a good practice to configure the STUNner TURN URI in the server-side ICE server configuration with the *internal* IP address and port used by STUNner (i.e., the ClusterIP of the `stunner` Kubernetes service and the corresponding port), otherwise the server might connect via the external -LoadBalancer IP causing an unnecessary roundtrip. +LoadBalancer IP causing an unnecessary roundtrip (hairpinning). The symmetric mode means more overhead compared to the asymmetric mode, since STUNner now performs TURN encapsulation/decapsulation for both sides. However, the symmetric mode comes with certain @@ -128,15 +129,10 @@ internal IP addresses in the ICE candidates from attackers; note that this is no but feel free to open an issue if [exposing internal IP addresses](SECURITY.md) is blocking you from adopting STUNner. -## Control plane models - -STUNner can run in one of two modes: in the default mode STUNner configuration is controlled by a -*gateway-operator* component based on high-level intent encoded in [Kubernetes Gateway API -resources](https://gateway-api.sigs.k8s.io), while in the *standalone model* the user configures -STUNner manually. The standalone mode provides perfect control over the way STUNner ingests media, -but at the same time it requires users to deal with the subtleties of internal STUNner APIs that -are subject to change between subsequent releases. As of v0.14, STUNner's operator-ful mode is -feature complete and the standalone model is considered obsolete. If still interested, -comprehensive documentation for the standalone can be found [here](OBSOLETE.md), but this mode -is no longer supported. +## Data plane models +STUNner supports two dataplane provisioning modes. In the default *managed* mode, the dataplane +pods (i.e., the `stunnerd` pods) are provisioned automatically per each Gateway existing in the +cluster. In the *legacy* mode, the dataplane is supposed to be deployed by the user manually by +installing the `stunner/stunner` Helm chart into the target namespaces. Legacy mode is considered +obsolete at this point and it will be removed in a later release. diff --git a/docs/GATEWAY.md b/docs/GATEWAY.md index 51efe6bf..57ef7415 100644 --- a/docs/GATEWAY.md +++ b/docs/GATEWAY.md @@ -1,23 +1,22 @@ # Reference -The [STUNner gateway operator](https://github.com/l7mp/stunner-gateway-operator) exposes the control plane configuration using the standard [Kubernetes Gateway API](https://gateway-api.sigs.k8s.io). This allows to configure STUNner in the familiar YAML-engineering style via Kubernetes manifests. The below reference gives a quick overview of the Gateway API. Note that STUNner implements only a subset of the full [spec](GATEWAY.md), see [here](https://github.com/l7mp/stunner-gateway-operator#caveats) for a list of the most important simplifications. +The [STUNner gateway operator](https://github.com/l7mp/stunner-gateway-operator) exposes the control plane configuration using the standard [Kubernetes Gateway API](https://gateway-api.sigs.k8s.io). This allows to configure STUNner in the familiar YAML-engineering style via Kubernetes manifests. The below reference gives an overview of the subset of the Gateway API supported by STUNner, see [here](https://github.com/l7mp/stunner-gateway-operator#caveats) for a list of the most important simplifications. 
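Before the per-resource reference, the following condensed sketch pulls together how the resources reference one another: the GatewayClass points to a GatewayConfig via `parametersRef`, Gateways attach to the GatewayClass via `gatewayClassName`, and UDPRoutes attach to Gateways via `parentRefs`. The resource names reuse the examples from this guide and are purely illustrative.

```yaml
apiVersion: gateway.networking.k8s.io/v1
kind: GatewayClass
metadata:
  name: stunner-gatewayclass
spec:
  controllerName: "stunner.l7mp.io/gateway-operator"
  parametersRef:                  # links the class to the GatewayConfig below
    group: "stunner.l7mp.io"
    kind: GatewayConfig
    name: stunner-gatewayconfig
    namespace: stunner
---
apiVersion: stunner.l7mp.io/v1
kind: GatewayConfig
metadata:
  name: stunner-gatewayconfig
  namespace: stunner
spec:
  realm: stunner.l7mp.io
  authRef:                        # credentials come from a Secret, see AUTH.md
    name: stunner-auth-secret
    namespace: stunner
---
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: udp-gateway
  namespace: stunner
spec:
  gatewayClassName: stunner-gatewayclass   # attaches the Gateway to the class
  listeners:
    - name: udp-listener
      port: 3478
      protocol: TURN-UDP
---
apiVersion: stunner.l7mp.io/v1
kind: UDPRoute
metadata:
  name: media-plane
  namespace: stunner
spec:
  parentRefs:
    - name: udp-gateway                    # attaches the route to the Gateway
  rules:
    - backendRefs:
        - name: media-plane                # backend Service receiving client traffic
          namespace: default
```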
-## Overview - -The main unit of the control plane configuration is the *gateway hierarchy*. Here, a Gateway hierarchy is a collection of [Kubernetes Custom Resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources) that together describe the way media traffic should enter the cluster via STUNner, including public IP addresses and ports clients can use to reach STUNner, TURN credentials, routing rules, etc. The anchor of the gateway hierarchy is the GatewayClass object, and the rest of the resources form a complete hierarchy underneath it. - -![Gateway hierarchy](img/gateway_api.svg) - -In general, the scope of a gateway hierarchy is a single namespace, but this is not strictly enforced: e.g., the GatewayClass is [cluster-scoped](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions) so it is outside the namespace, GatewayClasses can refer to GatewayConfigs across namespaces, Routes can attach to Gateways across a namespace boundary (if the Gateway allows this), etc. Still, it is a good practice to keep all control plane configuration, plus the actual dataplane pods, in a single namespace as much as possible. +1. [GatewayClass](#gatewayclass) +1. [GatewayConfig](#gatewayconfig) +1. [Gateway](#gateway) +1. [UDPRoute](#udproute) +1. [StaticService](#staticservice) +1. [Dataplane](#dataplane) ## GatewayClass -The GatewayClass resource provides the root of the gateway hierarchy. GatewayClass resources are cluster-scoped, so they can be attached to from any namespace, and we usually assume that each namespaced gateway hierarchy will have a separate global GatewayClass as the anchor. +The GatewayClass resource provides the root of a STUNner gateway configuration. GatewayClass resources are cluster-scoped, so they can be attached to from any namespace. -Below is a sample GatewayClass resource. Each GatewayClass must specify a controller that will manage the Gateway objects created under the hierarchy; this must be set to `stunner.l7mp.io/gateway-operator` for the STUNner gateway operator to pick up the GatewayClass. In addition, a GatewayClass can refer to further implementation-specific configuration via a `parametersRef`; in the case of STUNner this will always be a GatewayConfig object (see [below](#gatewayconfig)). +Below is a sample GatewayClass resource. Each GatewayClass specifies a controller that will manage the Gateway objects created under the class; this must be set to `stunner.l7mp.io/gateway-operator` for the STUNner gateway operator to pick up the GatewayClass. In addition, a GatewayClass can refer to further implementation-specific configuration via a `parametersRef`; in the case of STUNner this will always be a GatewayConfig object (see [below](#gatewayconfig)). ```yaml -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: name: stunner-gatewayclass @@ -31,7 +30,7 @@ spec: description: "STUNner is a WebRTC ingress gateway for Kubernetes" ``` -Below is a quick reference of the most important fields of the GatewayClass [`spec`](https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects) +Below is a quick reference of the most important fields of the GatewayClass [`spec`](https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects). 
| Field | Type | Description | Required | | :--- | :---: | :--- | :---: | @@ -41,12 +40,12 @@ Below is a quick reference of the most important fields of the GatewayClass [`sp ## GatewayConfig -The GatewayConfig resource provides general configuration for STUNner, most importantly the STUN/TURN authentication [credentials](AUTH.md) clients can use to connect to STUNner. GatewayClass resources attach a STUNner configuration to the hierarchy by specifying a particular GatewayConfig in the GatewayClass `parametersRef`. GatewayConfig resources are namespaced, and every hierarchy can contain at most one GatewayConfig. Failing to specify a GatewayConfig is an error because the authentication credentials cannot be learned by the dataplane otherwise. +The GatewayConfig resource provides general configuration for STUNner, most importantly the STUN/TURN authentication [credentials](AUTH.md) clients can use to connect to STUNner. GatewayClass resources attach a STUNner configuration to the hierarchy by specifying a particular GatewayConfig in the GatewayClass `parametersRef`. GatewayConfig resources are namespaced, and every hierarchy can contain at most one GatewayConfig. Failing to specify a GatewayConfig is an error because the authentication credentials cannot be learned otherwise. -The following example takes the [STUNner authentication settings](AUTH.md) from the Secret called `stunner-auth-secret` in the `stunner` namespace, sets the authentication realm to `stunner.l7mp.io`, sets the dataplane loglevel to `all:DEBUG,turn:INFO` (this will set all loggers to `DEBUG` level except the TURN protocol machinery's logger which is set to `INFO`), and sets the default URL for metric scraping. +The following example takes the [STUNner authentication settings](AUTH.md) from the Secret called `stunner-auth-secret` in the `stunner` namespace, sets the authentication realm to `stunner.l7mp.io`, and sets the dataplane loglevel to `all:DEBUG,turn:INFO` (this will set all loggers to `DEBUG` level except the TURN protocol machinery's logger which is set to `INFO`). ```yaml -apiVersion: stunner.l7mp.io/v1alpha1 +apiVersion: stunner.l7mp.io/v1 kind: GatewayConfig metadata: name: stunner-gatewayconfig @@ -54,42 +53,38 @@ metadata: spec: logLevel: "all:DEBUG,turn:INFO" realm: stunner.l7mp.io - authRef: + authRef: name: stunner-auth-secret namespace: stunner - metricsEndpoint: "http://0.0.0.0:8080/metrics" ``` -Below is a quick reference of the most important fields of the GatewayConfig [`spec`](https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects) +Below is a reference of the most important fields of the GatewayConfig [`spec`](https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects) | Field | Type | Description | Required | | :--- | :---: | :--- | :---: | -| `stunnerConfig` | `string` | The name of the ConfigMap into which the operator renders the `stunnerd` running configuration. Default: `stunnerd-config`. | No | -| `logLevel` | `string` | Logging level for the dataplane daemon pods (`stunnerd`). Default: `all:INFO`. | No | -| `realm` | `string` | The STUN/TURN authentication realm to be used for clients to authenticate with STUNner. The realm must consist of lower case alphanumeric characters or `-` and `-`, and must start and end with an alphanumeric character. Default: `stunner.l7mp.io`. | No | +| `dataplane` | `string` | The name of the Dataplane template to use for provisioning `stunnerd` pods. Default: `default`. 
| No | +| `logLevel` | `string` | Logging level for the dataplane pods. Default: `all:INFO`. | No | +| `realm` | `string` | The STUN/TURN authentication realm to be used for clients to authenticate with STUNner. The realm must consist of lower case alphanumeric characters or `-` and must start and end with an alphanumeric character. Default: `stunner.l7mp.io`. | No | | `authRef` | `reference` | Reference to a Secret (`namespace` and `name`) that defines the STUN/TURN authentication mechanism and the credentials. | No | | `authType` | `string` | Type of the STUN/TURN authentication mechanism. Valid only if `authRef` is not set. Default: `static`. | No | -| `username` | `string` | The username for [`static` authentication](AUTH.md). Valid only if `authRef` is not set. | No | +| `userName` | `string` | The username for [`static` authentication](AUTH.md). Valid only if `authRef` is not set. | No | | `password` | `string` | The password for [`static` authentication](AUTH.md). Valid only if `authRef` is not set. | No | | `sharedSecret` | `string` | The shared secret for [`ephemeral` authentication](AUTH.md). Valid only if `authRef` is not set. | No | -| `metricsEndpoint` | `string` | The metrics server (Prometheus) endpoint URL for the `stunnerd` pods.| No | -| `healthCheckEndpoint` | `string` | HTTP health-check endpoint exposed by `stunnerd`. Liveness check will be available on path `/live` and readiness check on path `/ready`. Default is to enable health-checking on `http://0.0.0.0:8086/ready` and `http://0.0.0.0:8086/live`, use an empty string to disable.| No | | `authLifetime` | `int` | The lifetime of [`ephemeral` authentication](AUTH.md) credentials in seconds. Not used by STUNner.| No | -| `loadBalancerServiceAnnotations` | `map[string]string` | A list of annotations that will go into the LoadBalancer services created automatically by STUNner to obtain a public IP addresses. See more detail [here](https://github.com/l7mp/stunner/issues/32). | No | +| `loadBalancerServiceAnnotations` | `map[string]string` | A list of annotations that will go into the LoadBalancer services created automatically by STUNner to obtain a public IP address. See more detail [here](https://github.com/l7mp/stunner/issues/32). | No | -> **Warning** At least a valid username/password pair *must* be supplied for `static` authentication, or a `sharedSecret` for the `ephemeral` mode, either via an external Secret or inline in the GatewayConfig. External authentication settings override inline settings. Missing both is an error. -Except the TURN authentication realm, all GatewayConfig resources are safe for modification. That is, the `stunnerd` daemons know how to reconcile a change in the GatewayConfig without restarting listeners/TURN servers. Changing the realm, however, induces a *full* TURN server restart (see below). +Except the TURN authentication realm, all GatewayConfig resources are safe for modification. That is, the `stunnerd` daemons know how to reconcile a change in the GatewayConfig without restarting listeners/TURN servers. Changing the realm, however, induces a *full* dataplane restart. ## Gateway Gateways describe the STUN/TURN server listeners exposed to clients. -The below Gateway will configure STUNner to open a STUN/TURN listener on the UDP port 3478 and automatically expose it on a public IP address and port by creating a [LoadBalancer service](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). 
The name and namespace of the automatically provisioned service are the same as those of the Gateway, and the service is automatically updated if the Gateway changes (e.g., a port changes). +The below Gateway resource will configure STUNner to open a STUN/TURN listener over the UDP port 3478 and make it available on a public IP address and port to clients. Each Gateway will have a `stunnerd` Deployment that will run the dataplane and a LoadBalancer Service that will expose the gateway to the Internet, both using the same name and namespace as the Gateway. Once the Gateway is removed, the corresponding resources are automatically garbage-collected. ```yaml -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: gateway.networking.k8s.io/v1 kind: Gateway metadata: name: udp-gateway @@ -99,13 +94,13 @@ spec: listeners: - name: udp-listener port: 3478 - protocol: UDP + protocol: TURN-UDP ``` -The below more complex example defines two TURN listeners: a UDP listener at port 3478 that accepts routes from any namespace, and a TLS/TCP listener at port 443 that accepts routes from all namespaces labeled as `app:dev`. +The below example defines two TURN listeners: a TURN listener at the UDP:3478 port that accepts routes from any namespace (see below), and a TURN listener at port TLS/TCP:443 that accepts routes only from namespaces labeled with `app=dev`. ```yaml -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: gateway.networking.k8s.io/v1 kind: Gateway metadata: name: complex-gateway @@ -121,13 +116,13 @@ spec: listeners: - name: udp-listener port: 3478 - protocol: UDP + protocol: TURN-UDP allowedRoutes: namespaces: from: All - name: tls-listener port: 443 - protocol: TLS + protocol: TURN-TLS tls: mode: Terminate certificateRefs: @@ -142,64 +137,149 @@ spec: app: dev ``` -Below is a quick reference of the most important fields of the Gateway [`spec`](https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects). +Below is a reference of the most important fields of the Gateway [`spec`](https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects). | Field | Type | Description | Required | | :--- | :---: | :--- | :---: | | `gatewayClassName` | `string` | The name of the GatewayClass that provides the root of the hierarchy the Gateway is attached to. | Yes | | `listeners` | `list` | The list of TURN listeners. | Yes | +| `addresses` | `list` | The list of manually hinted external IP addresses for the rendered service (only the first one is used). | No | + +> [!WARNING] +> +> Gateway resources are *not* safe for modification. This means that certain changes to a Gateway will restart the underlying TURN server listener, causing all active client sessions to terminate. The particular rules are as follows: +> - adding or removing a listener will start/stop *only* the TURN listener being created/removed, without affecting the rest of the listeners on the same Gateway; +> - changing the transport protocol, port or TLS keys/certs of an *existing* listener will restart the TURN listener but leave the rest of the listeners intact; +> - changing the TURN authentication realm will restart *all* TURN listeners. -Each TURN `listener` is defined by a unique name, a transport protocol and a port. In addition, a -`tls` configuration is required for TLS and DTLS listeners. +Manually hinted external address describes an address that can be bound to a Gateway. It is defined by an address type and an address value. Note that only the first address is used. 
Setting the `spec.addresses` field in the Gateway will result in the rendered Service's [loadBalancerIP](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#service-v1-core:~:text=non%20%27LoadBalancer%27%20type.-,loadBalancerIP,-string) and [externalIPs](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#service-v1-core:~:text=and%2Dservice%2Dproxies-,externalIPs,-string%20array) fields to be set. + +| Field | Type | Description | Required | +|:--------|:--------:|:--------------------------------------------------------------|:--------:| +| `type` | `string` | Type of the address. Currently only `IPAddress` is supported. | Yes | +| `value` | `string` | Address that should be bound to the Gateway's service. | Yes | + +> [!WARNING] +> +> Be careful when using this feature. Since Kubernetes v1.24 the `loadBalancerIP` field is deprecated and it will be ignored if your Kubernetes install does not support the feature. In addition, the `externalIPs` field is denied by some cloud-providers. + +### Listener configuration + +Each TURN `listener` is defined by a unique name, a transport protocol and a port. In addition, a `tls` configuration is required for TURN-TLS and TURN-DTLS listeners. Per-listener configuration is as follows. | Field | Type | Description | Required | | :--- | :---: | :--- | :---: | -| `name` | `string` | Name of the TURN listener. | Yes | +| `name` | `string` | Name of the TURN listener. Must be unique per Gateway. | Yes | | `port` | `int` | Network port for the TURN listener. | Yes | -| `protocol` | `string` | Transport protocol for the TURN listener. Either UDP, TCP, TLS or DTLS. | Yes | -| `tls` | `object` | [TLS configuration](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io%2fv1beta1.GatewayTLSConfig).| Yes (for TLS/DTLS) | -| `allowedRoutes.from` | `object` | [Route attachment policy](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1beta1.AllowedRoutes), either `All`, `Selector`, or `Same` (default is `Same`) | No | +| `protocol` | `string` | Transport protocol for the TURN listener. Either TURN-UDP, TURN-TCP, TURN-TLS or TURN-DTLS. | Yes | +| `tls` | `object` | [TLS configuration](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io%2fv1beta1.GatewayTLSConfig).| Yes (for TURN-TLS/TURN-DTLS) | +| `allowedRoutes.from` | `object` | [Route attachment policy](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1beta1.AllowedRoutes), either `All`, `Selector`, or `Same`. Default: `Same`. | No | + +For TURN-TLS/TURN-DTLS listeners, `tls.mode` must be set to `Terminate` or omitted (`Passthrough` does not make sense for TURN), and `tls.certificateRefs` must be a [reference to a Kubernetes Secret](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io%2fv1beta1.GatewayTLSConfig) of type `tls` or `opaque` with exactly two keys: `tls.crt` must hold the TLS PEM certificate and `tls.key` must hold the TLS PEM key. + +### Load balancer configuration + +STUNner will automatically generate a Kubernetes LoadBalancer Service to expose each Gateway to clients. All TURN listeners specified in the Gateway are wrapped by a single Service and will be assigned a single externally reachable IP address. If you want multiple TURN listeners on different public IPs, create multiple Gateways. TURN over UDP and TURN over DTLS listeners are exposed as UDP services, TURN-TCP and TURN-TLS listeners are exposed as TCP. 
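+
+   As an illustration, the Service rendered for the `udp-gateway` example above would look roughly like the sketch below. This is only a sketch: the selector labels and the exact set of fields are illustrative assumptions, not the operator's verbatim output.
+
+   ```yaml
+   apiVersion: v1
+   kind: Service
+   metadata:
+     name: udp-gateway        # same name and namespace as the Gateway
+     namespace: stunner
+   spec:
+     type: LoadBalancer
+     selector:
+       app: stunner           # illustrative label selector for the stunnerd pods
+     ports:
+       - name: udp-listener
+         protocol: UDP        # TURN-UDP/TURN-DTLS listeners map to UDP, TURN-TCP/TURN-TLS to TCP
+         port: 3478
+   ```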
+
+STUNner implements several ways to customize the automatically created Service, each involving certain pre-defined [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations). First, you can add global annotations to the `loadBalancerServiceAnnotations` field of the [GatewayConfig spec](#gatewayconfig), which affect the Service created for each Gateway that links to the GatewayConfig (via the GatewayClass `parametersRef`). To customize annotations on a per-Gateway basis, you can also add specific annotations to the Gateway itself. Gateway annotations override the global annotations on conflict. Finally, you can also add custom labels/annotations to the automatically created STUNner Services manually; these are retained on Service update (unless there is a conflict). Note that labels/annotations added via the Gateway and the GatewayConfig are also propagated to the corresponding `stunnerd` Deployments.
+
+The following rules apply:
+- Each annotation is copied verbatim into the Service created for any Gateway. This can be used, for instance, to specify health-check settings on the load-balancer Service (using the `service.beta.kubernetes.io/*-loadbalancer-healthcheck-*` annotations, see above).
+- Annotations with the prefix `stunner.l7mp.io/...` have special meaning: apart from being copied into the Service, these annotations also affect some specifics of the created Service, like the service type or the nodeports assigned to listeners.
+
+STUNner defines the following special annotations:
+
+1. **Service type:** The special annotation `stunner.l7mp.io/service-type` can be used to customize the type of the Service created by STUNner. The value can be either `ClusterIP`, `NodePort`, or `LoadBalancer` (this is the default); for instance, setting `stunner.l7mp.io/service-type: ClusterIP` will prevent STUNner from exposing a Gateway publicly (useful for testing).
+
+1. **Mixed-protocol support:** Currently, STUNner limits each Gateway to a single transport protocol, e.g., UDP or TCP. This is intended to improve the consistency across the Kubernetes services of different cloud providers, which provide varying support for [mixed multi-protocol LoadBalancer Services](https://kubernetes.io/docs/concepts/services-networking/service/#load-balancers-with-mixed-protocol-types). If you still want to expose a UDP and a TCP port on the same IP using a single Gateway, add the annotation `stunner.l7mp.io/enable-mixed-protocol-lb: true` to the Gateway. Since mixed-protocol LB support is missing in many popular Kubernetes offerings, STUNner currently defaults to disabling this feature.
+
+   The below Gateway will expose both ports with their respective protocols.
+
+   ```yaml
+   apiVersion: gateway.networking.k8s.io/v1
+   kind: Gateway
+   metadata:
+     name: mixed-protocol-gateway
+     annotations:
+       stunner.l7mp.io/enable-mixed-protocol-lb: true
+   spec:
+     gatewayClassName: stunner-gatewayclass
+     listeners:
+       - name: udp-listener
+         port: 3478
+         protocol: TURN-UDP
+       - name: tcp-listener
+         port: 3479
+         protocol: TURN-TCP
+   ```
+
+1. **Retaining the source IP:** Normally, Kubernetes load balancers apply source IP address translation when ingesting packets into the cluster. This replaces clients' original IP address with a private IP address. For STUNner's intended use case, as an ingress media gateway exposing the cluster's media services over the TURN protocol, this does not matter.
+However, STUNner can also act as a STUN server, which requires clients' source IP to be retained at the load balancer. This can be achieved by adding the annotation `stunner.l7mp.io/external-traffic-policy: local` to a Gateway, which will set the [`service.spec.externalTrafficPolicy`](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip) field in the Service created by STUNner for the Gateway to `Local`. Note that this Kubernetes feature comes with fairly complex [limitations](https://kubernetes.io/docs/tutorials/services/source-ip): if a STUN or TURN request hits a Kubernetes node that is not running a `stunnerd` pod, then the request will silently fail. This is required for Kubernetes to retain the client IP, which otherwise would be lost when passing packets between nodes. Use this setting at your own [risk](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#caveats-and-limitations-when-preserving-source-ips).
+
+1. **Manually provisioning the dataplane:** In some cases it may be useful to manually provision a dataplane for a Gateway, e.g., to deploy `stunnerd` in a DaemonSet instead of a Deployment. Adding the annotation `stunner.l7mp.io/disable-managed-dataplane: true` to a Gateway will prevent STUNner from spawning a dataplane for the Gateway. This then allows one to manually create a `stunnerd` dataplane and connect it to the CDS server exposed by the operator to obtain the dataplane configuration. Remove the annotation to revert to the default mode and let STUNner manage the dataplane for the Gateway. Manual dataplane provisioning requires intimate knowledge of the STUNner internals; use this feature only if you know what you are doing.
+
+1. **Selecting the NodePort:** By default, Kubernetes assigns a random external port from the range [32000-32767] to each listener of a Gateway exposed with a NodePort Service. This requires all ports in the [32000-32767] range to be opened on the external firewall, which may raise security concerns for hardened deployments. In order to assign specific nodeports to particular listeners, add the annotation `stunner.l7mp.io/nodeport:` `{listener_name_1:nodeport_1,listener_name_2:nodeport_2,...}` to the Gateway, where each key-value pair is the name of a listener and the selected (numeric) NodePort. The value itself must be a proper JSON map. Unknown listeners are silently ignored. Note that STUNner makes no specific effort to reconcile conflicting NodePorts: whenever the selected NodePort is unavailable, Kubernetes will silently reject the Service, which can lead to hard-to-debug failures. Use this feature at your own risk.
+
+1. **Selecting the target port:** Some hardened Kubernetes deployments prohibit containers from opening privileged ports (i.e., everything under 1024). This causes problems when one wants to ingest TURN over the standard TCP/TLS port 443. Kubernetes lets Services choose an arbitrary [target port](https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service) for each service port, which makes it possible to map a particular external port to an arbitrary (potentially non-privileged) port in the containers. In order to enforce a particular target port per listener, add the annotation `stunner.l7mp.io/targetport:` `{listener_name_1:targetport_1,...}` to the corresponding Gateway (the syntax and semantics are the same as those for the nodeport annotation).
+   For instance, the below Gateway would expose the TURN TLS/TCP listener on port 443, but map it to the target port 44321 in the STUNner dataplane container:
+
+   ```yaml
+   apiVersion: gateway.networking.k8s.io/v1
+   kind: Gateway
+   metadata:
+     name: tls-gateway
+     annotations:
+       stunner.l7mp.io/targetport: "{\"tls-listener\":44321}"
+   spec:
+     gatewayClassName: stunner-gatewayclass
+     listeners:
+       - name: tls-listener
+         port: 443
+         protocol: TURN-TLS
+         tls:
+           certificateRefs:
+             - kind: Secret
+               namespace: stunner
+               name: tls-secret
+   ```
+
+1. **Disabling the exposition of the health-check port:** Some older Kubernetes load-balancer providers required the exposition of the health-check port on LoadBalancer Services for UDP listeners to become externally reachable. Therefore, by default STUNner adds the health-check port (usually set via specific Gateway annotations) to the service-ports in automatically created LoadBalancer services. This has the unfortunate consequence that the health-check port becomes publicly reachable, which is considered a security issue by some, see https://github.com/l7mp/stunner-gateway-operator/issues/49. To prevent STUNner from exposing the health-check port, add the annotation `stunner.l7mp.io/disable-health-check-expose: true` to the corresponding Gateway. Note that this may leave TURN/UDP listeners unreachable on the Gateway, so use this only if you know that this setting will work with your Kubernetes provider.
+
+1. **Disabling session affinity:** By default STUNner applies the `sessionAffinity: ClientIP` setting on the LB services it creates to expose Gateways. Normally this setting improves stability by ensuring that each TURN session is safely pinned to the same dataplane pod for its entire lifetime. Certain hosted Kubernetes platforms, however, seem to reject UDP LB services that have this setting on, [breaking STUNner deployments](https://github.com/l7mp/stunner/issues/155) on these systems. In order to prevent STUNner from enforcing session affinity on the LB Service corresponding to a Gateway, just set the `stunner.l7mp.io/disable-session-affinity: true` annotation on the Gateway. Otherwise, session affinity is turned on.
+
+The below table summarizes the Gateway annotations supported by STUNner.
+
+| Key/value | Description | Default |
+|:----------------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------:|
+| `stunner.l7mp.io/service-type: `                     | [Type of the Service](https://kubernetes.io/docs/concepts/services-networking/service) per Gateway, either `ClusterIP`, `NodePort`, or `LoadBalancer`.       | `LoadBalancer` |
+| `stunner.l7mp.io/enable-mixed-protocol-lb: `         | [Mixed protocol load balancer service](https://kubernetes.io/docs/concepts/services-networking/service/#load-balancers-with-mixed-protocol-types) support.   | False          |
+| `stunner.l7mp.io/external-traffic-policy: `          | Set the value to `Local` to preserve clients' source IP at the load balancer.                                                                                 | `Cluster`      |
+| `stunner.l7mp.io/disable-managed-dataplane: `        | Switch managed-dataplane support off for a Gateway.                                                                                                           | False          |
+| `stunner.l7mp.io/nodeport: `                         | Request a specific NodePort for particular listeners. Value is a JSON map of listener-nodeport key-value pairs.                                               | None           |
+| `stunner.l7mp.io/targetport: `                       | Request a specific target port for particular listeners. Value is a JSON map of listener-targetport key-value pairs.
| None | +| `stunner.l7mp.io/disable-health-check-expose: true` | Disable the default exposition of the health-check port (if any). | False | +| `stunner.l7mp.io/disable-session-affinity: true` | Disable session affinity for a Gateway. | False | -For TLS/DTLS listeners, `tls.mode` must be set to `Terminate` or omitted (`Passthrough` does not make sense for TURN), and `tls.certificateRefs` must be a [reference to a Kubernetes Secret](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io%2fv1beta1.GatewayTLSConfig) of type `tls` or `opaque` with exactly two keys: `tls.crt` must hold the TLS PEM certificate and `tls.key` must hold the TLS PEM key. +## UDPRoute -STUNner will automatically generate a Kubernetes LoadBalancer service to expose each Gateway to clients. All TURN listeners specified in the Gateway are wrapped by a single Service and will be assigned a single externally reachable IP address. If you want multiple TURN listeners on different public IPs, create multiple Gateways. TURN listeners on UDP and DTLS protocols are exposed as UDP services, TCP and TLS listeners are exposed as TCP. +UDPRoute resources can be attached to Gateways in order to specify the backend services permitted to be reached via the Gateway. Multiple UDPRoutes can attach to the same Gateway, and each UDPRoute can specify multiple backend services; in this case access to *all* backends in *each* of the attached UDPRoutes is allowed. An UDPRoute can be attached to a Gateway by setting the `parentRef` to the Gateway's name and namespace. This is, however, contingent on whether the Gateway accepts routes from the given namespace: customize the `allowedRoutes` per each Gateway listener to control which namespaces the listener accepts routes from. -Mixed multi-protocol Gateways are supported: this means if you want to expose a UDP and a TCP port on the same LoadBalancer service you can do it with a single Gateway. By default, the STUNner gateway-operator disables the use of mixed-protocol LBs for compatibility reasons. However, it can be enabled by annotating a Gateway with the `stunner.l7mp.io/enable-mixed-protocol-lb: true` key-value pair. The below Gateway will expose both ports with their respective protocols. +The below UDPRoute will configure STUNner to route client connections received on the Gateway called `udp-gateway` to *any UDP port* on the pods of the media server pool identified by the Kubernetes service `media-server-pool` in the `media-plane` namespace. ```yaml -apiVersion: gateway.networking.k8s.io/v1alpha2 -kind: Gateway +apiVersion: stunner.l7mp.io/v1 +kind: UDPRoute metadata: - name: mixed-protocol-gateway - annotations: - stunner.l7mp.io/enable-mixed-protocol-lb: true + name: media-plane-route + namespace: stunner spec: - gatewayClassName: stunner-gatewayclass - listeners: - - name: udp-listener - port: 3478 - protocol: UDP - - name: tcp-listener - port: 3479 - protocol: TCP + parentRefs: + - name: udp-gateway + rules: + - backendRefs: + - name: media-server-pool + namespace: media-plane ``` -> **Warning** -> Note that the mixed-protocol LB feature might not be supported in your Kubernetes version. - -STUNner implements two ways to customize the automatically created Service, both involving setting certain [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations) to the Service. First, if any annotation is set in the GatewayConfig `loadBalancerServiceAnnotations` object then those will be copied verbatim into the Service. 
Note that `loadBalancerServiceAnnotations` affect *all* LoadBalancer Services created by STUNner. Second, Service annotations can be customized on a per-Gateway basis as well by adding the annotations to Gateway resources. STUNner then copies all annotations from the Gateway verbatim into the Service, overwriting the annotations specified in the GatewayConfig on conflict. This is useful to, e.g., specify health-check settings for the Kubernetes load-balancer controller. The special annotation `stunner.l7mp.io/service-type` can be used to customize the type of the Service created by STUNner. Value can be either `ClusterIP`, `NodePort`, or `LoadBalancer` (this is the default); for instance, setting `stunner.l7mp.io/service-type: ClusterIP` will prevent STUNner from exposing a Gateway publicly (useful for testing). - -> **Warning** -Gateway resources are *not* safe for modification. This means that certain changes to a Gateway will restart the underlying TURN server listener, causing all active client sessions to terminate. The particular rules are as follows: -> - adding or removing a listener will start/stop *only* the TURN server to be started/stopped, without affecting the rest of the listeners; -> - changing the transport protocol, port or TLS keys/certs of an *existing* listener will restart the TURN listener but leave the rest of the listeners intact; -> - changing the TURN authentication realm will restart *all* TURN listeners. - -## UDPRoute - -UDPRoute resources can be attached to Gateways in order to specify the backend services permitted to be reached via the Gateway. Multiple UDPRoutes can attach to the same Gateway, and each UDPRoute can specify multiple backend services; in this case access to *all* backends in *each* of the attached UDPRoutes is allowed. An UDPRoute can be attached only to a Gateway in any namespace by setting the `parentRef` to the Gateway's name and namespace. This is, however, contingent on whether the Gateway accepts routes from the given namespace: customize the `allowedRoutes` for each Gateway listener to control which namespaces the listener accepts routes from. - -The below UDPRoute will configure STUNner to route client connections received on the Gateway called `udp-gateway` to the media server pool identified by the Kubernetes service `media-server-pool` in the `media-plane` namespace. +Note that STUNner provides its own UDPRoute resource instead of the official UDPRoute resource available in the Gateway API. In contrast to the official version, still at version v1alpha2, STUNner's UDPRoutes can be considered stable and expected to be supported throughout the entire lifetime of STUNner v1. You can still use the official UDPRoute resource as well, by changing the API version and adding an arbitrary port to the backend references (this is required by the official API). Note that the port will be omitted. ```yaml apiVersion: gateway.networking.k8s.io/v1alpha2 @@ -214,24 +294,136 @@ spec: - backendRefs: - name: media-server-pool namespace: media-plane + port: 1 ``` -Below is a quick reference of the most important fields of the UDPRoute [`spec`](https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects). +Below is a reference of the most important fields of the STUNner UDPRoute `spec`. | Field | Type | Description | Required | | :--- | :---: | :--- | :---: | | `parentRefs` | `list` | Name/namespace of the Gateways to attach the route to. If no namespace is given, then the Gateway will be searched in the UDPRoute's namespace. 
| Yes |
-| `rules.backendRefs` | `list` | A list of `name`/`namespace` pairs specifying the backend Service(s) reachable through the UDPRoute. It is allowed to specify a service from a namespace other than the UDPRoute's own namespace. | No |
+| `rules.backendRefs` | `list` | A list of backends (Services or StaticServices) reachable through the UDPRoute. It is allowed to specify a service from a namespace other than the UDPRoute's own namespace. | No |
+
+Backend reference configuration is as follows:
+
+| Field | Type | Description | Required |
+| :--- | :---: | :--- | :---: |
+| `group` | `string` | API group for the backend, either empty string for Service backends or `stunner.l7mp.io` for StaticService backends. Default: `""`. | No |
+| `kind` | `string` | The kind of the backend resource, either `Service` or `StaticService`. Default: `Service`. | No |
+| `name` | `string` | Name of the backend Service or StaticService. | Yes |
+| `namespace` | `string` | Namespace of the backend Service or StaticService. | Yes |
+| `port` | `int` | Port to use to reach the backend. If empty, make all ports available on the backend. Default: empty. | No |
+| `endPort` | `int` | If port is also specified, then access to the backend is restricted to the port range [port, endPort] inclusive. If port and endPort are empty, make all ports available on the backend. If port is given but endPort is not, admit the singleton port range [port,port]. Default: empty. | No |
 
 UDPRoute resources are safe for modification: `stunnerd` knows how to reconcile modified routes without restarting any listeners/TURN servers.
 
-## Status
+## StaticService
+
+When the target backend of a UDPRoute is running *inside* Kubernetes then the backend is always a proper Kubernetes Service. However, when the target is deployed *outside* Kubernetes then there is no Kubernetes Service that could be configured as a backend. This is particularly problematic in the cases when STUNner is used as a public TURN service. For such deployments, the StaticService resource provides a way to assign a routable IP address range to a UDPRoute.
 
-Most Kubernetes resources contain a `status` subresource that describes the current state of the resource, supplied and updated by the Kubernetes system and its components. The Kubernetes control plane continually and actively manages every object's actual state to match the desired state you supplied and updates the status field to indicate whether any error was encountered during the reconciliation process.
+The below StaticService represents a hypothetical Kubernetes Service backing a set of pods with IP addresses in the range `192.0.2.0/24` or `198.51.100.0/24`.
 
-If you are not sure about whether the STUNner gateway operator successfully picked up your Gateways or UDPRoutes, it is worth checking the status to see what went wrong.
+```yaml
+apiVersion: stunner.l7mp.io/v1
+kind: StaticService
+metadata:
+  name: static-svc
+  namespace: stunner
+spec:
+  prefixes:
+    - "192.0.2.0/24"
+    - "198.51.100.0/24"
```
-```console
-kubectl get -n -o jsonpath='{.status}'
+Assigning this StaticService to a UDPRoute will allow access to *any* IP address in the specified ranges.
+ +```yaml +apiVersion: stunner.l7mp.io/v1 +kind: UDPRoute +metadata: + name: media-plane-route + namespace: stunner +spec: + parentRefs: + - name: udp-gateway + rules: + - backendRefs: + - group: stunner.l7mp.io + kind: StaticService + name: static-svc +``` + +The StaticService `spec.prefixes` must be a list of proper IPv4 prefixes: any IP address in any of the listed prefixes will be whitelisted. Use the single prefix `0.0.0.0/0` to provide wildcard access via an UDPRoute. + +> [!WARNING] +> +> Never use StaticServices to access Services running *inside* Kubernetes, this may open up an unintended backdoor to your cluster. Use StaticServices only with *external* target backends. + +## Dataplane + +The Dataplane resource is used as a template for provisioning [`stunnerd`](/cmd/stunnerd/README.md) dataplane pods that implement TURN media ingestion. This is useful to choose the `stunnerd` image origin and version, set custom command line arguments and environment variables, configure resource requests/limits, etc. + +Below is the `default` Dataplane installed by STUNner. + +```yaml +apiVersion: stunner.l7mp.io/v1 +kind: Dataplane +metadata: + name: default +spec: + command: + - stunnerd + args: + - -w + - --udp-thread-num=16 + image: l7mp/stunnerd:latest + resources: + limits: + cpu: 2 + memory: 512Mi + requests: + cpu: 500m + memory: 128Mi + terminationGracePeriodSeconds: 3600 ``` +The following fields can be set in the Dataplane `spec` to customize the provisioning of `stunnerd` pods. + +| Field | Type | Description | Required | +|:--------------------------------|:----------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------:| +| `image` | `string` | The container image. | Yes | +| `imagePullPolicy` | `string` | Policy for if/when to pull the [`stunnerd`](/cmd/stunnerd/README.md), either `Always`, `Never`, or `IfNotPresent`. Default: `Always` if the `latest` tag is specified on the image, `IfNotPresent` otherwise. | No | +| `imagePullSecrets` | `list` | List of Secret references to use for pulling the `stunnerd` image. Each ref is a secret name, namespace is the same as that of the Gateway on behalf of which the dataplane is deployed. | No | +| `labels` | `map` | Custom labels added to `stunnerd` pods. Mandatory labels override whatever is set here. Changing pod labels triggers full dataplane restart for the affected Gateways. | No | +| `annotations` | `map` | Custom annotations added to `stunnerd` pods. Mandatory annotations override whatever is set here, which in turn override manually added annotations. Changes trigger full dataplane restart for the affected Gateways. | No | +| `command` | `list` | Entrypoints for the [dataplane container](https://pkg.go.dev/k8s.io/api/core/v1#Container). | No | +| `args` | `list` | Command line arguments for the [dataplane container](https://pkg.go.dev/k8s.io/api/core/v1#Container). | No | +| `envFrom` | `list` | List of sources to populate environment variables for the [dataplane container](https://pkg.go.dev/k8s.io/api/core/v1#Container). Default: empty. | No | +| `env` | `list` | List of environment variables for the [dataplane container](https://pkg.go.dev/k8s.io/api/core/v1#Container). Default: empty. | No | +| `replicas` | `int` | Number of dataplane pods per Gateway to provision. 
Not enforced if the [dataplane](/cmd/stunnerd/README.md) Deployment replica count is overwritten manually or by an autoscaler. Default: 1. | No |
+| `resources` | `object` | Compute resources per [dataplane](/cmd/stunnerd/README.md) pod. Default: none. | No |
+| `affinity` | `object` | Scheduling constraints for the [dataplane](/cmd/stunnerd/README.md) pods. Default: none. | No |
+| `tolerations` | `object` | Tolerations for the [dataplane](/cmd/stunnerd/README.md) pods. Default: none. | No |
+| `securityContext` | `object` | Pod-level security attributes for the [dataplane](/cmd/stunnerd/README.md) pods. Default: none. | No |
+| `containerSecurityContext` | `object` | Container-level security attributes for the [dataplane](/cmd/stunnerd/README.md) pods. Default: none. | No |
+| `topologySpreadConstraints` | `object` | Description of how the [dataplane](/cmd/stunnerd/README.md) pods for a Gateway ought to spread across topology domains. Default: none. | No |
+| `terminationGracePeriodSeconds` | `duration` | Optional duration in seconds for `stunnerd` to terminate gracefully. Default: 30 seconds. | No |
+| `hostNetwork` | `bool` | Deploy the [dataplane](/cmd/stunnerd/README.md) into the host network namespace of Kubernetes nodes. Useful for implementing headless TURN services. May require elevated privileges. Default: false. | No |
+| `disableHealthCheck` | `bool` | Disable health-checking. The default is to enable HTTP health-checks on port 8086: the liveness probe responder will be exposed on path `/live` and the readiness probe on path `/ready`. Default: false. | No |
+| `enableMetricsEndpoint` | `bool` | Enable Prometheus metrics scraping. If true, a metrics endpoint will be available at `http://0.0.0.0:8080`. Default: false. | No |
+
+There can be multiple Dataplane resources defined in a cluster, say, one for the production workload and one for development. Use the `spec.dataplane` field in the GatewayConfig to choose the Dataplane per each STUNner install.
+
+> [!WARNING]
+>
+> A Dataplane resource called `default` must always be available in the cluster, otherwise the operator will not know how to provision dataplane pods. Removing the `default` template will break your STUNner installation.
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/INSTALL.md b/docs/INSTALL.md
index b9cc4077..5a1f1f09 100644
--- a/docs/INSTALL.md
+++ b/docs/INSTALL.md
@@ -2,74 +2,117 @@
 ## Prerequisites
 
-You need a Kubernetes cluster (>1.22), and the `kubectl` command-line tool must be installed and
-configured to communicate with your cluster. STUNner should be compatible with *any* major hosted
-Kubernetes service or any on-prem Kubernetes cluster; if not, please file an issue.
+You need a Kubernetes cluster (>1.22), and the `kubectl` command-line tool must be installed and configured to communicate with your cluster. STUNner should be compatible with *any* major hosted Kubernetes service or any on-prem Kubernetes cluster; if not, please file an issue.
 
-The simplest way to expose STUNner to clients is through Kubernetes [LoadBalancer
-services](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer);
-these are automatically managed by STUNner. This depends on a functional LoadBalancer integration
-in your cluster (if using Minikube, try `minikube tunnel` to get an idea of how this
-works).
-STUNner automatically detects if LoadBalancer service integration is functional and falls
-back to using NodePorts when it is not; however, this may require manual tweaking of the firewall
-rules to admit the UDP NodePort range into the cluster.
+The simplest way to expose STUNner to clients is through Kubernetes [LoadBalancer services](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer); these are automatically managed by STUNner. This depends on a functional LoadBalancer integration in your cluster (if using Minikube, try `minikube tunnel` to get an idea of how this works). STUNner automatically detects if LoadBalancer service integration is functional and falls back to using NodePorts when it is not; however, this may require manual tweaking of the firewall rules to admit the UDP NodePort range into the cluster.
 
-To recompile STUNner, at least Go v1.19 is required. Building the container images requires
-[Docker](https://docker.io) or [Podman](https://podman.io).
+To compile STUNner, at least Go v1.19 is required. Building the container images requires [Docker](https://docker.io) or [Podman](https://podman.io).
 
-## Basic installation
+## Installation
 
-The simplest way to deploy the full STUNner distro, with the dataplane and the controller
-automatically installed, is through [Helm](https://helm.sh). STUNner configuration parameters are
-available for customization as [Helm
-Values](https://helm.sh/docs/chart_template_guide/values_files). We recommend deploying each
-STUNner dataplane into a separate Kubernetes namespace (e.g., `stunner`), while the gateway
-operator should go into the `stunner-system` namespace (but effectively any namespace would work).
+The simplest way to deploy STUNner is through [Helm](https://helm.sh). STUNner configuration parameters are available for customization as [Helm Values](https://helm.sh/docs/chart_template_guide/values_files); see the [STUNner-helm](https://github.com/l7mp/stunner-helm) repository for a list of the available customizations.
 
-First, register the STUNner repository with Helm.
+The first step is to register the STUNner repository with Helm.
 
 ```console
 helm repo add stunner https://l7mp.io/stunner
 helm repo update
 ```
 
-Install the control plane:
+### Stable version
+
+The below will install the stable version of STUNner. In particular, this will install only the STUNner control plane, i.e., the gateway operator and the authentication service; the dataplane will be automatically provisioned by the operator when needed (but see below). We recommend using the `stunner-system` namespace to keep the full STUNner control plane in a single scope.
 
 ```console
-helm install stunner-gateway-operator stunner/stunner-gateway-operator --create-namespace --namespace=stunner-system
+helm install stunner-gateway-operator stunner/stunner-gateway-operator --create-namespace \
+    --namespace=stunner-system
 ```
 
-Install the dataplane:
+And that's all: you don't need to install the dataplane separately; this is handled automatically by the operator. The `stunnerd` pods created by the operator can be customized using the Dataplane custom resource: you can specify the `stunnerd` container image version, provision resources per each `stunnerd` pod, deploy into the host network namespace, etc.; see the documentation [here](https://pkg.go.dev/github.com/l7mp/stunner-gateway-operator/api/v1#DataplaneSpec).
+
+### Development version
+
+By default, the Helm chart installs the stable version of STUNner.
To track the bleeding edge, STUNner provides a `dev` release channel that tracks the latest development version. Use it at your own risk: we do not promise any stability for the dev-channel. ```console -helm install stunner stunner/stunner --create-namespace --namespace=stunner +helm install stunner-gateway-operator stunner/stunner-gateway-operator-dev --create-namespace \ + --namespace=stunner-system +``` + +After upgrading the operator from the dev channel you may need to manually restart the dataplane +for each of your Gateways: + +```console +kubectl -n rollout restart deployment ``` -## Parallel deployments +### Legacy mode -You can install multiple STUNner dataplanes side-by-side, provided that the corresponding -namespaces are different. For instance, to create a `prod` dataplane installation for your -production workload and a `dev` installation for experimentation, the below commands will install -two dataplanes, one into the `stunner-prod` and another one into the `stunner-dev` namespace. +In the default *managed dataplane mode*, the STUNner gateway operator automatically provisions the dataplane, which substantially simplifies operations and removes lot of manual and repetitive work. For compatibility reasons the traditional operational model, called the *legacy mode*, is still available. In this mode the user is responsible for provisioning both the control plane, by installing the `stunner-gateway-operator` Helm chart, and the dataplane(s), by helm-installing the `stunner` chart possibly multiple times. + +```console +helm install stunner-gateway-operator stunner/stunner-gateway-operator --create-namespace \ + --namespace=stunner-system --set stunnerGatewayOperator.dataplane.mode=legacy +helm install stunner stunner/stunner --create-namespace --namespace=stunner +``` + +You can install multiple legacy STUNner dataplanes side-by-side, provided that the corresponding namespaces are different. For instance, to create a `prod` dataplane installation for your production workload and a `dev` installation for experimentation, the below commands will install two dataplanes, one into the `stunner-prod` and another one into the `stunner-dev` namespace. ```console helm install stunner-prod stunner/stunner --create-namespace --namespace=stunner-prod helm install stunner-dev stunner/stunner --create-namespace --namespace=stunner-dev ``` -Now, you can build a separate [gateway hierarchy](CONCEPTS.md) per each namespace to supply a -distinct ingress gateway configuration per dataplane. +### Skip install CRDs -For the list of available customizations, see the -[STUNner-helm](https://github.com/l7mp/stunner-helm) repository. For installing STUNner in the -standalone mode, consult the documentation [here](OBSOLETE.md). +You can install the STUNner chart without the Gateway API CRDs and STUNner CRDs with the `--skip-crds` flag. However, ensure that the CRDs are already present in the cluster, as the STUNner Gateway Operator will fail to start without them. -## Development version +```console +helm install stunner-gateway-operator stunner/stunner-gateway-operator --create-namespace \ + --namespace=stunner-system --skip-crds +``` -STUNner provides a `dev` release channel, which allows to track the latest development version. Use -it at your own risk: we do not promise any stability for STUNner installed from the dev-channel. 
+To manually install the CRDs:
 
 ```console
-helm install stunner-gateway-operator stunner/stunner-gateway-operator-dev --create-namespace --namespace=stunner-system
-helm install stunner stunner/stunner-dev --create-namespace --namespace=stunner
+kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.0.0/standard-install.yaml
+kubectl apply -f https://raw.githubusercontent.com/l7mp/stunner-helm/refs/heads/main/helm/stunner-gateway-operator/crds/stunner-crd.yaml
 ```
+
+## Customization
+
+The Helm charts let you fine-tune STUNner features, including [compute resources](#resources) provisioned for `stunnerd` pods, [UDP multithreading](#udp-multithreading), and [graceful shutdown](#graceful-shutdown).
+
+### Resource requests/limits
+
+It is important to manage the [amount of CPU and memory resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers) available for each `stunnerd` pod. The [default](https://github.com/l7mp/stunner-helm/blob/main/helm/stunner-gateway-operator/values.yaml) resource request and limit is set as follows:
+
+```yaml
+resources:
+  limits:
+    cpu: 2
+    memory: 512Mi
+  requests:
+    cpu: 500m
+    memory: 128Mi
+```
+
+This means that every `stunnerd` pod will request 0.5 CPU cores and 128 MiB of memory. Note that the pods will start only if Kubernetes can successfully allocate the given amount of resources. In order to avoid stressing the Kubernetes scheduler, it is advised to keep the limits at the bare minimum and scale out by [increasing the number of running `stunnerd` pods](SCALING.md) if needed.
+
+### UDP multithreading
+
+STUNner can run multiple UDP listeners over multiple parallel readloops for load balancing. Namely, each `stunnerd` pod can create a configurable number of UDP server sockets using `SO_REUSEPORT` and then spawn a separate goroutine to run a parallel readloop for each. The kernel will load-balance allocations across the sockets/readloops per the IP 5-tuple, so the same allocation will always stay at the same CPU. This allows UDP listeners to scale to multiple CPUs, improving performance. Note that this is required only for UDP: TCP, TLS and DTLS listeners spawn a per-client readloop anyway. Also note that `SO_REUSEPORT` is not portable, so currently we enable this only for UNIX architectures.
+
+The feature is exposed via the command line flag `--udp-thread-num=<num>` in `stunnerd`. In the Helm chart, it can be enabled or disabled with the `--set stunner.deployment.container.stunnerd.udpMultithreading.enabled=true` flag. By default, UDP multithreading is enabled with 16 separate readloops per each UDP listener.
+
+```yaml
+udpMultithreading:
+  enabled: true
+  readLoopsPerUDPListener: 16
+```
+
+### Graceful shutdown
+
+STUNner has full support for [graceful shutdown](SCALING.md). This means that `stunnerd` pods will remain alive as long as there are active allocations in the embedded TURN server, and a pod will automatically remove itself once all allocations are deleted or time out. This enables full support for graceful scale-down: the user can scale the number of `stunnerd` instances up and down, and active client connections are not harmed in the meantime.
+
+The default termination period is set to 3600 seconds (1 hour). To modify it, use the `--set stunner.deployment.container.terminationGracePeriodSeconds=<seconds>` flag.
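+
+For example, a single Helm command combining these settings on the legacy `stunner` chart could look like the sketch below. The value paths are taken from the sections above, except `readLoopsPerUDPListener`, whose exact Helm path is an assumption inferred from the YAML snippet; double-check them against the chart's `values.yaml`.
+
+```console
+helm upgrade --install stunner stunner/stunner --create-namespace --namespace=stunner \
+    --set stunner.deployment.container.stunnerd.udpMultithreading.enabled=true \
+    --set stunner.deployment.container.stunnerd.udpMultithreading.readLoopsPerUDPListener=8 \
+    --set stunner.deployment.container.terminationGracePeriodSeconds=600
+```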
diff --git a/docs/MONITORING.md b/docs/MONITORING.md index 73ff4740..ba830169 100644 --- a/docs/MONITORING.md +++ b/docs/MONITORING.md @@ -1,43 +1,18 @@ # Monitoring -STUNner can export various statistics into an external timeseries database like -[Prometheus](https://prometheus.io). This allows one to observe the state of the STUNner media -gateway instances, like CPU or memory use, as well as the amount of data received and sent, in -quasi-real-time. These statistics can then be presented to the operator in easy-to-use monitoring -dashboards in [Grafana](https://grafana.com). +STUNner can export various statistics into an external timeseries database like [Prometheus](https://prometheus.io). This allows one to observe the state of the STUNner media gateway instances, like CPU or memory use or the amount of data received and sent in quasi-real-time. These statistics can then be presented to the operator in a monitoring dashboard using, e.g., [Grafana](https://grafana.com). ## Configuration -Metrics collection is *not* enabled in the default installation. In order to open the -metrics-collection endpoint for a [gateway hierarchy](GATEWAY.md#overview), configure an -appropriate HTTP URL in the `metricsEndpoint` field of corresponding the -[GatewayConfig](GATEWAY.md#gatewayconfig) resource. - -For instance, the below GatewayConfig will expose the metrics-collection server on the URL -`http://:8080/metrics` in all the STUNner media gateway instances of the current gateway hierarchy. - -```yaml -apiVersion: stunner.l7mp.io/v1alpha1 -kind: GatewayConfig -metadata: - name: stunner-gatewayconfig - namespace: stunner -spec: - userName: "my-user" - password: "my-password" - metricsEndpoint: "http://:8080/metrics" -``` +Metrics collection is *not* enabled by default. To enable it, set the `enableMetricsEndpoint` field to true in the [Dataplane](GATEWAY.md#dataplane) template. This will configure the `stunnerd` dataplane pods to expose a HTTP metrics endpoint at port 8080 that Prometheus can scrape for metrics. ## Metrics -STUNner exports two types of metrics: the *Go collector metrics* describe the state of the Go -runtime, while the *Connection statistics* expose traffic monitoring data. +STUNner exports two types of metrics: the *Go collector metrics* describe the state of the Go runtime, while the *Connection statistics* expose traffic monitoring data. ### Go collector metrics -Each STUNner gateway instance exports a number of standard metrics that describe the state of the -current Go process runtime. Some notable metrics as listed below, see more in the -[documentation](https://github.com/prometheus/client_golang). +Each STUNner gateway instance exports a number of standard metrics that describe the state of the current Go process. Some notable metrics as listed below, see more in the [documentation](https://github.com/prometheus/client_golang). | Metric | Description | | :--- | :--- | @@ -46,66 +21,58 @@ current Go process runtime. Some notable metrics as listed below, see more in th | `go_goroutines` | Number of goroutines that currently exist. | | `go_threads` | Number of OS threads created. | | `process_open_fds` | Number of open file descriptors.| +| `process_resident_memory_bytes` | Resident memory size in bytes. | | `process_virtual_memory_bytes` | Virtual memory size in bytes. | ### Connection statistics -STUNner provides deep visibility into the amount of traffic sent and received on each listener -(downstream connections) and cluster (upstream connections). 
The particular metrics are as follows. +STUNner provides deep visibility into the amount of traffic sent and received on each listener (downstream connections) and cluster (upstream connections). The particular metrics are as follows. | Metric | Description | Type | Labels | | :--- | :--- | :--- | :--- | -| `stunner_listener_connections` | Number of *active* downstream connections at a listener. | gauge | `name=` | +| `stunner_allocations_active` | Number of active allocations. | gauge | none | +| `stunner_listener_connections` | Number of *active* downstream connections at a listener. Stays constant when using only UDP listeners. | gauge | `name=` | | `stunner_listener_connections_total` | Number of downstream connections at a listener. | counter | `name=` | -| `stunner_listener_packets_total` | Number of datagrams sent or received at a listener. Unreliable for listeners running on a connection-oriented a protocol (TCP/TLS). | counter | `direction=`, `name=`| +| `stunner_listener_packets_total` | Number of datagrams sent or received at a listener. Unreliable for listeners running on a connection-oriented transport protocol (TCP/TLS). | counter | `direction=`, `name=`| | `stunner_listener_bytes_total` | Number of bytes sent or received at a listener. | counter | `direction=`, `name=` | -| `stunner_cluster_connections` | Number of *active* upstream connections on behalf of a listener. | gauge | `name=` | -| `stunner_cluster_connections_total` | Number of upstream connections on behalf of a listener. | counter | `name=` | -| `stunner_cluster_packets_total` | Number of datagrams sent to backends or received from backends on behalf of a listener. Unreliable for clusters running on a connection-oriented a protocol (TCP/TLS).| counter | `direction=`, `name=` | -| `stunner_cluster_bytes_total` | Number of bytes sent to backends or received from backends on behalf of a listener. | counter | `direction=`, `name=` | +| `stunner_cluster_packets_total` | Number of datagrams sent to backends or received from backends of a cluster. Unreliable for clusters running on a connection-oriented transport protocol (TCP/TLS).| counter | `direction=`, `name=` | +| `stunner_cluster_bytes_total` | Number of bytes sent to backends or received from backends of a cluster. | counter | `direction=`, `name=` | ## Integration with Prometheus and Grafana -Collection and visualization of STUNner relies on Prometheus and Grafana services. The STUNer helm repository provides a ready-to-use Prometheus and Grafana stack. See [Installation](#installation) for installation steps. Metrics visualization requires user input on configuring the plots. Refer to [Configuration and Usage](#configuration-and-usage) for details. +Collection and visualization of STUNner relies on Prometheus and Grafana services. The STUNer helm repository provides a way to [install](https://github.com/l7mp/stunner-helm#monitoring) a ready-to-use Prometheus and Grafana stack. In addition, metrics visualization requires [user input](#configuration) on configuring the plots; see below. ### Installation -A full-fledged Prometheus+Grafana helm chart is available in the STUNner helm repo. To use this chart, the installation steps involve enabling monitoring in STUNner, and installing the Prometheus+Grafana stack with helm. +A full-fledged Prometheus+Grafana helm chart is available in the [STUNner helm repo](https://github.com/l7mp/stunner-helm#monitoring). 
To use this chart, the installation steps involve enabling monitoring in STUNner and installing the Prometheus+Grafana stack with helm.
 
-1. **Configure STUNner to expose the metrics**
+1. Install stunner-gateway-operator with Prometheus support:
 
-- Deploy STUNner with monitoring enabled to enable the monitoring port of STUNner pods
 ```console
-helm install stunner stunner/stunner --create-namespace --namespace=stunner --set stunner.deployment.monitoring.enabled=true
+helm install stunner-gateway-operator stunner/stunner-gateway-operator --create-namespace --namespace=stunner-system --set stunnerGatewayOperator.dataplane.spec.enableMetricsEndpoint=true
 ```
 
-- [Expose the STUNner metrics-collection server in the GatewayConfig](#configuration)
+Alternatively, you can enable it on existing installations by setting `enableMetricsEndpoint: true` in your [Dataplane](GATEWAY.md#dataplane) objects.
+
+> [!NOTE]
+> Metrics are exposed at `http://<pod-IP>:8080/metrics` on each STUNner pod.
+
-2. **Install the Prometheus+Grafana stack with a helm chart**
+2. Install the Prometheus+Grafana stack with a helm chart.
 
-This helm chart creates a ready-to-use Prometheus+Grafana stack in the `monitoring` namespace: installs Prometheus along with the prometheus-operator, and Grafana; configures PodMonitor for monitoring STUNner pods, and sets up Prometheus as a datasource for Grafana.
+The below creates a ready-to-use Prometheus+Grafana stack in the `monitoring` namespace: Prometheus, along with the prometheus-operator, is installed for metrics scraping, Grafana is set up for visualization, and Prometheus is configured as a datasource for Grafana.
 
 ```console
 helm repo add stunner https://l7mp.io/stunner
 helm repo update
-
 helm install prometheus stunner/stunner-prometheus
 ```
-
-### Configuration and Usage
+### Configuration
 
 The helm chart deploys a ready-to-use Prometheus and Grafana stack, but leaves the Grafana dashboard empty to let the user pick metrics and configure their visualization. An interactive way to visualize STUNner metrics is to use the Grafana dashboard.
 
-#### Access the Grafana dashboard
-
-To open the Grafana dashboard navigate a web browser to `grafana` NodePort service IP and port 80.
-
-The default username is **admin** with the password **admin**.
-
-At the first login you can change the password or leave as it is (use the *Skip* button).
-
-#### Visualize STUNner metrics
+To open the Grafana dashboard, navigate a web browser to the `grafana` NodePort service IP and port 80. The default username is **admin** with the password **admin**. At the first login you can change the password or leave it as it is (use the *Skip* button).
 
 As an example, let us plot the STUNner metric `stunner_listener_connections`. First step is to create a new panel, then to configure the plot parameters.
 
@@ -115,11 +82,11 @@ Click on *Add panel* (1), then *Add a new panel* (2):
 
 The *Add a new panel* will open the panel configuration. The configuration steps are the following.
 
-1. Set the datasource: **prometheus**
+1. Set the datasource: **prometheus**.
 2. Choose a metric. In this example, this is the `stunner_listener_connections`.
-3. Click on *Run queries* (this will update the figure)
+3. Click on *Run queries* (this will update the figure).
 4. Fine-tune plot parameters. For example, set the title.
-5. Click *Apply*
+5. Click *Apply*.
![Grafana Panel Configuration](img/grafana-add-panel-config_0.png) @@ -129,27 +96,21 @@ Below is an example dashboard with data collected from the [simple-tunnel](examp ![Grafana Dashboard with the New Panel](img/grafana-add-panel-dashboard_1.png) - ### Troubleshooting Prometheus and Grafana both provide a dashboard to troubleshoot a running system, and to check the flow of metrics from STUNner to Prometheus, and from Prometheus to Grafana. -### Check Prometheus operations via its dashboard -The Prometheus dashboard is available as the `prometheus` NodePort service (use the node IP and node port to connect with a web browser). - -The dashboard enables checking running Prometheus configuration and testing the metrics collection. +The Prometheus dashboard is available as the `prometheus` NodePort service (use the node IP and node port to connect with a web browser). The dashboard enables checking running Prometheus configuration and testing the metrics collection. For example, to observe the `stunner_listener_connections` metric on the Prometheus dashboard: -1. Write `stunner_listener_connections` to the marked field (next to the looking glass icon) -2. Click on the `Execute` button +1. Write `stunner_listener_connections` to the marked field (next to the looking glass icon). +2. Click on the `Execute` button. 3. Switch to `Graph` view tab. ![Prometheus Dashboard](img/prometheus-dashboard.png) -Note: some STUNner metrics are not available when they are inactive (e.g., there is no active cluster). - -#### Check Prometheus data source in Grafana +Note that some STUNner metrics may not be available when they are inactive (e.g., there is no active cluster). To configure/check the Prometheus data source in Grafana, first click on *Configuration* (1), then *Data sources* (2), as shown here: diff --git a/docs/OBSOLETE.md b/docs/OBSOLETE.md deleted file mode 100644 index 6beaa545..00000000 --- a/docs/OBSOLETE.md +++ /dev/null @@ -1,501 +0,0 @@ -# Standalone mode - -In order to gain full control over media ingestion, STUNner can be deployed without the gateway -operator component. In this standalone mode, the user is fully in charge of creating and -maintaining the configuration of the `stunnerd` pods. With the introduction of the STUNner gateway -operator *the standalone mode is considered obsolete* as of STUNner v0.11. The below documentation -is provided only for historical reference; before the gateway operator existed this was *the* -recommended way to interact with STUNner. - -## Table of contents - -- [Standalone mode](#standalone-mode) - - [Table of contents](#table-of-contents) - - [Prerequisites](#prerequisites) - - [Installation](#installation) - - [Installation with Helm](#installation-with-helm) - - [Manual installation](#manual-installation) - - [Configuration](#configuration) - - [Learning the external IP and port](#learning-the-external-ip-and-port) - - [Configuring WebRTC clients](#configuring-webrtc-clients) - - [Authentication](#authentication) - - [Access control](#access-control) - - [Enabling TURN transport over TCP](#enabling-turn-transport-over-tcp) - - [Enabling TURN transport over TLS and DTLS](#enabling-turn-transport-over-tls-and-dtls) - -## Prerequisites - -The below installation instructions require an operational cluster running a supported version of -Kubernetes (>1.22). 
Make sure that the cluster comes with a functional [load-balancer -integration](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer), -otherwise STUNner will not be able to allocate a public IP address for clients to reach your WebRTC -infra. In the standalone mode STUNner relies on Kubernetes ACLs (`NetworkPolicy`) with [port -ranges](https://kubernetes.io/docs/concepts/services-networking/network-policies/#targeting-a-range-of-ports) -to block malicious access; make sure your Kubernetes installation supports these. - -## Installation - -### Installation with Helm - -Use the [Helm charts](https://github.com/l7mp/stunner-helm) for installing STUNner, setting the -`standalone.enabled` feature gate to `true`: - -```console -helm repo add stunner https://l7mp.io/stunner -helm repo update -helm install stunner stunner/stunner --set stunner.standalone.enabled=true -``` - -The below will create a new namespace named `stunner` and install the STUNner dataplane pods into that -namespace. - -```console -helm install stunner stunner/stunner --set stunner.standalone.enabled=true --create-namespace --namespace=stunner -``` - -Note that we do not install the usual control plane: in this mode we ourselves need to manually -provide the dataplane configuration for STUNner. - -### Manual installation - -If Helm is not an option, you can perform a manual installation using the static Kubernetes -manifests packaged with STUNner. - -First, clone the STUNner repository. - -```console -git clone https://github.com/l7mp/stunner.git -cd stunner -``` - -Then, customize the default settings in the STUNner service -[manifest](https://github.com/l7mp/stunner/blob/main/deploy/manifests/stunner-standalone.yaml) and deploy it via `kubectl`. - -```console -kubectl apply -f deploy/manifests/stunner-standalone.yaml -``` - -By default, all resources are created in the `default` namespace. - -## Configuration - -The default STUNner installation will create the below Kubernetes resources: - -1. a ConfigMap that stores STUNner local configuration, -2. a Deployment running one or more STUNner daemon replicas, -3. a LoadBalancer service to expose the STUNner deployment on a public IP address and UDP port - (by default, the port is UDP 3478), and finally -4. a NetworkPolicy, i.e., an ACL/firewall policy to control network communication from STUNner to - the rest of the Kubernetes workload. - -The installation scripts packaged with STUNner will use hard-coded configuration defaults that must -be customized prior to deployment. In particular, make absolutely sure to customize the access -tokens (`STUNNER_USERNAME` and `STUNNER_PASSWORD` for `plaintext` authentication, and -`STUNNER_SHARED_SECRET` and possibly `STUNNER_DURATION` for the `longterm` authentication mode), -otherwise STUNner will use hard-coded STUN/TURN credentials. This should not pose a major security -risk (see [here](SECURITY.md) for more info), but it is still safer to customize the access -tokens before exposing STUNner to the Internet. - -The most recent STUNner configuration is always available in the Kubernetes ConfigMap named -`stunnerd-config`. This configuration is made available to the `stunnerd` pods by -[mapping](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#define-container-environment-variables-using-configmap-data) -the `stunnerd-config` ConfigMap into the pods as environment variables. Note that changes to this -ConfigMap will take effect only once STUNner is restarted. 
- -The most important STUNner configuration settings are as follows. -* `STUNNER_PUBLIC_ADDR` (no default): The public IP address clients can use to reach STUNner. By - default, the public IP address will be dynamically assigned during installation. The installation - scripts take care of querying the external IP address from Kubernetes and automatically setting - `STUNNER_PUBLIC_ADDR`; for manual installation the external IP must be set by hand (see - [details](#learning-the-external-ip-and-port) below). -* `STUNNER_PUBLIC_PORT` (default: 3478): The public port used by clients to reach STUNner. Note - that the Helm installation scripts may overwrite this configuration if the installation falls - back to the `NodePort` service (i.e., when STUNner fails to obtain an external IP from the - Kubernetes ingress load balancer), see [details](#learning-the-external-ip-and-port) below. -* `STUNNER_PORT` (default: 3478): The internal port used by STUNner for communication inside the - cluster. It is safe to set this to the public port. -* `STUNNER_TRANSPORT_UDP_ENABLE` (default: "1", enabled): Enable UDP TURN transport. -* `STUNNER_TRANSPORT_TCP_ENABLE` (default: "", disabled): Enable TCP TURN transport. -* `STUNNER_REALM` (default: `stunner.l7mp.io`): the - [`REALM`](https://www.rfc-editor.org/rfc/rfc8489.html#section-14.9) used to guide the user agent - in authenticating with STUNner. -* `STUNNER_AUTH_TYPE` (default: `plaintext`): the STUN/TURN authentication mode, either `plaintext` - using the username/password pair `$STUNNER_USERNAME`/`$STUNNER_PASSWORD`, or `longterm`, using - the [STUN/TURN long-term credential](https://www.rfc-editor.org/rfc/rfc8489.html#section-9.2) - mechanism with the secret `$STUNNER_SHARED_SECRET`. -* `STUNNER_USERNAME` (default: `user`): the - [username](https://www.rfc-editor.org/rfc/rfc8489.html#section-14.3) attribute clients can use to - authenticate with STUNner over `plaintext` authentication. Make sure to customize! -* `STUNNER_PASSWORD` (default: `pass`): the password clients can use to authenticate with STUNner - in `plaintext` authentication. Make sure to customize! -* `STUNNER_SHARED_SECRET`: the shared secret used for `longterm` authentication mode. Make sure to - customize! -* `STUNNER_DURATION` (default: `86400` sec, i.e., one day): the lifetime of STUNner credentials in - `longterm` authentication. -* `STUNNER_LOGLEVEL` (default: `all:WARN`): the default log level used by the STUNner daemons. -* `STUNNER_MIN_PORT` (default: 10000): smallest relay transport port assigned by STUNner. -* `STUNNER_MAX_PORT` (default: 20000): highest relay transport port assigned by STUNner. - -The default configuration can be overridden by setting custom command line arguments when -[launching the STUNner pods](cmd/stunnerd.md). All examples below assume that STUNner is -deployed into the `default` namespace; see the installation notes below on how to override this. - -Note that changing in the configuration values becomes valid only once STUNner is restarted (see -below). - -## Learning the external IP and port - -There are two ways to expose the STUN/TURN ingress gateway service with STUNner: through a standard -Kubernetes [`LoadBalancer` -service](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer) (the -default) or as a [`NodePort` -service](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport), used as a -fallback if an ingress load-balancer is not available. 
In both cases the external IP address and -port that WebRTC clients can use to reach STUNner may be set dynamically by Kubernetes. (Kubernetes -lets you use your own [fix IP address and domain -name](https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address), -but the default installation scripts do not support this.) - -In general, WebRTC clients will need to learn STUNner's external IP and port somehow. In order to -simplify the integration of STUNner into the WebRTC application server, STUNner stores the dynamic -IP address/port assigned by Kubernetes into the `stunnerd-config` ConfigMap under the key -`STUNNER_PUBLIC_IP` and `STUNNER_PUBLIC_PORT`. Then, WebRTC application pods can map this ConfigMap -as environment variables and communicate the IP address and port back to the clients (see an -[example](#configuring-webrtc-clients) below). - -The [Helm installation](#helm) scripts should take care of setting the IP address and port -automatically in the ConfigMap during installation. However, if later the LoadBalancer services -change for some reason then the new external IP address and port will need to be configured -manually in the ConfigMap. Similar is the case when using the static Kubernetes manifests to deploy -STUNner. The below instructions simplify this process. - -After a successful installation, you should see something similar to the below: - -```console -kubectl get all -NAME READY STATUS RESTARTS AGE -pod/stunner-XXXXXXXXXX-YYYYY 1/1 Running 0 8s - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/kubernetes ClusterIP 10.72.128.1 443/TCP 6d4h -service/stunner ClusterIP 10.72.130.61 3478/UDP 81s -service/stunner-standalone-lb LoadBalancer 10.72.128.166 A.B.C.D 3478:30630/UDP 81s - -NAME READY UP-TO-DATE AVAILABLE AGE -deployment.apps/stunner 1/1 1 1 8s -``` - -Note the external IP address allocated by Kubernetes for the `stunner-standalone-lb` service -(`EXTERNAL-IP` marked with a placeholder `A.B.C.D` in the above): this will be the public STUN/TURN -access point that your WebRTC clients will need to use in order to access the WebRTC media service -via STUNner. - -Wait until Kubernetes assigns a valid external IP to STUNner and query the public IP address and -port used by STUNner from Kubernetes. - -```console -until [ -n "$(kubectl get svc stunner-standalone-lb -o jsonpath='{.status.loadBalancer.ingress[0].ip}')" ]; do sleep 1; done -export STUNNER_PUBLIC_ADDR=$(kubectl get svc stunner-standalone-lb -o jsonpath='{.status.loadBalancer.ingress[0].ip}') -export STUNNER_PUBLIC_PORT=$(kubectl get svc stunner-standalone-lb -o jsonpath='{.spec.ports[0].port}') -``` - -If this hangs for minutes, then your Kubernetes load-balancer integration is not working (if using -[Minikube](https://github.com/kubernetes/minikube), make sure `minikube tunnel` is -[running](https://minikube.sigs.k8s.io/docs/handbook/accessing)). This may still allow STUNner to -be reached externally, using a Kubernetes `NodePort` service (provided that your [Kubernetes -supports -NodePorts](https://cloud.google.com/kubernetes-engine/docs/concepts/autopilot-overview#no_direct_external_inbound_connections_for_private_clusters)). 
In -this case, but only in this case!, set the IP address and port from the NodePort: - -```console -export STUNNER_PUBLIC_ADDR=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="ExternalIP")].address}') -export STUNNER_PUBLIC_PORT=$(kubectl get svc stunner-standalone-lb -o jsonpath='{.spec.ports[0].nodePort}') -``` - -Check that the IP address/port `${STUNNER_PUBLIC_ADDR}:${STUNNER_PUBLIC_PORT}` is reachable by your -WebRTC clients; some Kubernetes clusters are installed with private node IP addresses that may -prevent NodePort services to be reachable from the Internet. - -If all goes well, the STUNner service is now exposed on the IP address `$STUNNER_PUBLIC_ADDR` and -UDP port `$STUNNER_PUBLIC_PORT`. Finally, store the public IP address and port back into STUNner's -configuration, so that the WebRTC application server can learn this information and forward it to -the clients. - -```console -kubectl patch configmap/stunnerd-config --type merge \ - -p "{\"data\":{\"STUNNER_PUBLIC_ADDR\":\"${STUNNER_PUBLIC_ADDR}\",\"STUNNER_PUBLIC_PORT\":\"${STUNNER_PUBLIC_PORT}\"}}" -``` - -## Configuring WebRTC clients - -The last step is to configure your WebRTC clients to use STUNner as the TURN server. The below -JavaScript snippet will direct WebRTC clients to use STUNner; make sure to substitute the -placeholders (like ``) with the correct configuration from the above. - -```javascript -var ICE_config = { - 'iceServers': [ - { - 'url': "turn::?transport=udp', - 'username': , - 'credential': , - }, - ], -}; -var pc = new RTCPeerConnection(ICE_config); -``` - -## Authentication - -STUNner relies on the STUN [long-term credential -mechanism](https://www.rfc-editor.org/rfc/rfc8489.html#page-26) to provide user authentication. See -[here](AUTH.md) for more detail on STUNner's authentication modes. - -The below commands will configure STUNner to use `plaintext` authentication using the -username/password pair `my-user/my-password` and restart STUNner for the new configuration to take -effect. - -```console -kubectl patch configmap/stunnerd-config --type merge \ - -p "{\"data\":{\"STUNNER_AUTH_TYPE\":\"plaintext\",\"STUNNER_USERNAME\":\"my-user\",\"STUNNER_PASSWORD\":\"my-password\"}}" -kubectl rollout restart deployment/stunner -``` - -The below commands will configure STUNner to use `longterm` authentication mode, using the shared -secret `my-secret`. By default, STUNner credentials are valid for one day. - -```console -kubectl patch configmap/stunnerd-config --type merge \ - -p "{\"data\":{\"STUNNER_AUTH_TYPE\":\"longterm\",\"STUNNER_SHARED_SECRET\":\"my-secret\"}}" -kubectl rollout restart deployment/stunner -``` - -## Access control - -The security risks and best practices associated with STUNner are described -[here](SECURITY.md), below we summarize the only step that is specific to the standalone mode: -configuring access control. - -By default, a standalone STUNner installation comes with an open route: this essentially means -that, possessing a valid TURN credential, an attacker can reach *any* UDP service inside the -Kubernetes cluster via STUNner. This is because, without an operator, there is no control plane to -supply [endpoint-discovery -service](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/service_discovery#endpoint-discovery-service-eds) -for the dataplane and therefore `stunnerd` does not know whether the peer address a client wished -to reach belongs to the legitimate backend service or not. 
In order to prevent open access through -STUNner, the default standalone installation comes with a default-deny Kubernetes NetworkPolicy -that locks down *all* access from the STUNner pods to the rest of the workload. - -```yaml -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: stunner-network-policy -spec: - podSelector: - matchLabels: - app: stunner - policyTypes: - - Egress -``` - -In order for clients to reach a media server pod via STUNner the user must explicitly whitelist the -target service in this access control rule. Suppose that we want STUNner to reach the media server -pods labeled as `app=media-server` over the UDP port range `[10000:20000]`, but we don't want -connections via STUNner to succeed to any other pod. This will be enough to support WebRTC media, -but will not allow clients to, e.g., reach the Kubernetes DNS service. - -Assuming that the entire workload is deployed into the `default` namespace, the below -`NetworkPolicy` ensures that all access from any STUNner pod to any media server pod is allowed -over any UDP port between 10000 and 20000, and all other network access from STUNner is denied. - -```yaml -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: stunner-network-policy -spec: -# Choose the STUNner pods as source - podSelector: - matchLabels: - app: stunner - policyTypes: - - Egress - egress: - # Allow only this rule, everything else is denied - - to: - # Choose the media server pods as destination - - podSelector: - matchLabels: - app: media-server - ports: - # Only UDP ports 10000-20000 are allowed between - # the source-destination pairs - - protocol: UDP - port: 10000 - endPort: 20000 -``` - -If your Kubernetes CNIs does not support [network policies with port -ranges](https://kubernetes.io/docs/concepts/services-networking/network-policies/#targeting-a-range-of-ports), -then the below will provide an access control rule similar to the above, except that it opens up -*all* UDP ports on the media server instead of limiting access to the UDP port range -`[10000:20000]`. - -```yaml -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: stunner-network-policy -spec: - podSelector: - matchLabels: - app: stunner - policyTypes: - - Egress - egress: - - to: - - podSelector: - matchLabels: - app: media-server - ports: - - protocol: UDP -``` - -## Enabling TURN transport over TCP - -Some corporate firewalls block all UDP access from the private network, except DNS. To make sure -that clients can still reach STUNner, you can expose STUNner over a [TCP-based TURN -transport](https://www.rfc-editor.org/rfc/rfc6062). To maximize the chances of getting through a -zealous firewall, below we expose STUNner over the default HTTPS port 443. - -First, enable TURN transport over TCP in STUNner. - -```console -kubectl patch configmap/stunnerd-config --type merge -p "{\"data\":{\"STUNNER_TRANSPORT_TCP_ENABLE\":\"1\"}}" -``` - -Then, delete the default Kubernetes service that exposes STUNner over UDP and re-expose it over the -TCP port 443. -```console -kubectl delete service stunner-standalone-lb -kubectl expose deployment stunner-standalone-lb --protocol=TCP --port=443 --type=LoadBalancer -``` - -Wait until Kubernetes assigns a public IP address. 
-```console -until [ -n "$(kubectl get svc stunner-standalone-lb -o jsonpath='{.status.loadBalancer.ingress[0].ip}')" ]; do sleep 1; done -export STUNNER_PUBLIC_ADDR=$(kubectl get svc stunner-standalone-lb -o jsonpath='{.status.loadBalancer.ingress[0].ip}') -export STUNNER_PUBLIC_PORT=$(kubectl get svc stunner-standalone-lb -o jsonpath='{.spec.ports[0].port}') -kubectl patch configmap/stunnerd-config --type merge \ - -p "{\"data\":{\"STUNNER_PUBLIC_ADDR\":\"${STUNNER_PUBLIC_ADDR}\",\"STUNNER_PUBLIC_PORT\":\"${STUNNER_PUBLIC_PORT}\"}}" -``` - -Restart STUNner with the new configuration. -```console -kubectl rollout restart deployment/stunner -``` - -Finally, direct your clients to the re-exposed STUNner TCP service with the below `PeerConnection` configuration; don't -forget to rewrite the TURN transport to TCP by adding the query `transport=tcp` to the -STUNner URI. -```javascript -var ICE_config = { - 'iceServers': [ - { - 'url': "turn::?transport=tcp", - 'username': , - 'credential': , - }, - ], -}; -var pc = new RTCPeerConnection(ICE_config); -``` - -## Enabling TURN transport over TLS and DTLS - -The ultimate tool to work around aggressive firewalls and middleboxes is exposing STUNner via TLS -and/or DTLS. Fixing the TLS listener port at 443 will make it impossible for the corporate firewall -to block TURN/TLS connections without blocking all external HTTPS access, so most probably at least -the TCP/443 port will be open to encrypted connections. - -Start with a fresh Kubernetes install. Below we create a self-signed certificate for testing; make -sure to replace the cert/key pair below with your own trusted credentials. - -```console -openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /tmp/tls.key -out /tmp/tls.crt -subj "/CN=example.domain.com" -kubectl create secret tls stunner-tls --key /tmp/tls.key --cert /tmp/tls.crt -``` - -Patch the TLS cert/key into the pre-configured static manifest and deploy the STUNner gateway. - -```console -cd stunner -cat deploy/manifests/stunner-standalone-tls.yaml.template | \ - perl -pe "s%XXXXXXX%`cat /tmp/tls.key | base64 -w 0`%g" | - perl -pe "s%YYYYYYY%`cat /tmp/tls.crt | base64 -w 0`%g" | - kubectl apply -f - -``` - -This will fire up STUNner with two TURN listeners, a TLS/TCP and a DTLS/UDP listener, both at port -443, and create two LoadBalancer services to expose these to clients. - -Wait until Kubernetes assigns a public IP address and learn the new public addresses. -```console -until [ -n "$(kubectl get svc stunner-tls -o jsonpath='{.status.loadBalancer.ingress[0].ip}')" ]; do sleep 1; done -until [ -n "$(kubectl get svc stunner-dtls -o jsonpath='{.status.loadBalancer.ingress[0].ip}')" ]; do sleep 1; done -export STUNNER_PUBLIC_ADDR_TLS=$(kubectl get svc stunner-tls -o jsonpath='{.status.loadBalancer.ingress[0].ip}') -export STUNNER_PUBLIC_ADDR_DTLS=$(kubectl get svc stunner-dtls -o jsonpath='{.status.loadBalancer.ingress[0].ip}') -``` - -Check your configuration with the handy [`turncat`](cmd/turncat.md) utility and the [UDP -greeter](https://github.com/l7mp/stunner#testing) service. First, query the UDP greeter service via TLS/TCP. Here, the -`turncat` command line argument `-i` puts `turncat` into insecure mode in order to accept our -self-signed TURN sever TLS certificate. 
- -```console -cd stunner -go build -o turncat cmd/turncat/main.go -kubectl apply -f deploy/manifests/udp-greeter.yaml -export PEER_IP=$(kubectl get svc media-plane -o jsonpath='{.spec.clusterIP}') -export STUNNER_USERNAME=$(kubectl get cm stunner-config -o yaml -o jsonpath='{.data.STUNNER_USERNAME}') -export STUNNER_PASSWORD=$(kubectl get cm stunner-config -o yaml -o jsonpath='{.data.STUNNER_PASSWORD}') -./turncat -i - turn://${STUNNER_USERNAME}:${STUNNER_PASSWORD}@${STUNNER_PUBLIC_ADDR_TLS}:443?transport=tls udp://${PEER_IP}:9001 -Hello STUNner via TLS -Greetings from STUNner! -``` - -Type anything once `turncat` is running to receive a nice greeting from STUNner. DTLS/UDP should -also work fine: - -```console -./turncat -i - turn://${STUNNER_USERNAME}:${STUNNER_PASSWORD}@${STUNNER_PUBLIC_ADDR_DTLS}:443?transport=dtls udp://${PEER_IP}:9001 -Another hello STUNner, now via DTLS! -Greetings from STUNner! -``` - -Remember, you can always direct your clients to your TURN listeners by setting the TURN URIs in the -ICE server configuration on your `PeerConnection`s. - -```javascript -var ICE_config = { - 'iceServers': [ - { - 'url': "turn::443?transport=tls", - 'username': , - 'credential': , - }, - { - 'url': "turn::443?transport=dtls", - 'username': , - 'credential': , - }, - ], -}; -var pc = new RTCPeerConnection(ICE_config); -``` - -Note that the default Kubernetes manifest -['stunner-standalone-tls.yaml'](https://github.com/l7mp/stunner/blob/main/deploy/manifests/stunner-standalone-tls.yaml.template) opens up the -NetworkPolicy for the `media-plane/default` service only, make sure to configure this to your own -setup. diff --git a/docs/README.md b/docs/README.md index 546d1cdc..cb81f66f 100644 --- a/docs/README.md +++ b/docs/README.md @@ -4,6 +4,8 @@ STUNner

+*Note: This page documents the latest development version of STUNner. See the documentation for the stable version [here](https://docs.l7mp.io/en/stable).* + ## Overview * [Why STUNner](WHY.md) @@ -16,16 +18,17 @@ ## User guides -* [Gateway API reference](GATEWAY.md) * [Authentication](AUTH.md) * [Monitoring](MONITORING.md) * [Scaling](SCALING.md) * [Security](SECURITY.md) +* [Reference](GATEWAY.md) ## Tutorials ### Basics +* [Deploying a UDP echo service behind STUNner](examples/udp-echo) * [Opening a UDP tunnel via STUNner](examples/simple-tunnel) ### Headless deployment model @@ -34,19 +37,19 @@ ### Media-plane deployment model -* [One to one video call with Kurento](examples/kurento-one2one-call) -* [Magic mirror with Kurento](examples/kurento-magic-mirror/README.md) * [Video-conferencing with LiveKit](examples/livekit/README.md) +* [Video-conferencing with mediasoup](examples/mediasoup/README.md) +* [Video-conferencing with Janus](examples/janus/README.md) +* [Video-conferencing with Elixir WebRTC](examples/elixir-webrtc/README.md) * [Video-conferencing with Jitsi](examples/jitsi/README.md) -* [Cloud-gaming with Cloudretro](examples/cloudretro/README.md) +* [Cloud-gaming with CloudRetro](examples/cloudretro/README.md) * [Remote desktop access with Neko](examples/neko/README.md) +* [One to one video call with Kurento](examples/kurento-one2one-call) +* [Magic mirror with Kurento](examples/kurento-magic-mirror/README.md) ## Manuals * [`stunnerd` manual](cmd/stunnerd.md) * [`turncat` manual](cmd/turncat.md) * [`stunnerctl` manual](cmd/stunnerctl.md) - -## Obsolete features - -* [Standalone mode](OBSOLETE.md) +* [Benchmarking](examples/benchmark) diff --git a/docs/RELEASE.md b/docs/RELEASE.md index 58c41c83..4e7ac876 100644 --- a/docs/RELEASE.md +++ b/docs/RELEASE.md @@ -4,6 +4,8 @@ Let the new version be vX.Y.Z. ## STUNner +- remove the doc disclaimers ("this doc is for the dev version, see RTD for stable docs") from the + README.md and docs/README.md - write release notes for vX.Y.Z - `git pull` - `go mod tidy` @@ -12,28 +14,31 @@ Let the new version be vX.Y.Z. - `git push` if there are local changes - release vX.Y.Z on github (so that we can publish the release notes) - wait until the CI/CD pipeline goes green (may take a while) +- restore doc disclaimers -## STUNner gateway operator +## STUNner auth service +- this must finish before releasing the operator, otherwise the Helm build will fail - `git pull` -- bump `github.com/l7mp/stunner` version to vX.Y.Z in the `go.mod` +- bump `github.com/l7mp/stunner` version to vX.Y.Z in the `go.mod` - `go mod tidy` -- make sure `make test` passes +- make sure `go test ./... 
-count 1` passes - make sure `golangci-lint run` passes -- `git push` if there are local changes +- `git push` to send the updated `go.mod/go.sum` files - release vX.Y.Z (try to have the same version as the main stunner repo, if possible) on github (so that we can publish the release notes) -## STUNner auth service +## STUNner gateway operator +- wait until both stunner and the auth-service releases finished building and the CI status goes + green on both repos (the gateway-operator helm chart CI build step needs the new artifact from + both, so it will fail until the other two finish the image push phase) - `git pull` -- bump `github.com/l7mp/stunner` version to vX.Y.Z in the `go.mod` -- bump `github.com/l7mp/stunner-gateway-operator` version to vX.Y.Z (or whatever you used when - releasing the operator) in the `go.mod` +- bump `github.com/l7mp/stunner` version to vX.Y.Z in the `go.mod` - `go mod tidy` - make sure `make test` passes - make sure `golangci-lint run` passes -- `git push` if there are local changes +- `git push` to send the updated `go.mod/go.sum` files - release vX.Y.Z (try to have the same version as the main stunner repo, if possible) on github (so that we can publish the release notes) diff --git a/docs/SCALING.md b/docs/SCALING.md index c591f42f..98e9cfdd 100644 --- a/docs/SCALING.md +++ b/docs/SCALING.md @@ -1,85 +1,33 @@ # Scaling -[Autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale) is one of -the key features in Kubernetes. This means that Kubernetes will automatically increase the number -of pods that run a service as the demand for the service increases, and reduce the number of pods -when the demand drops. This improves service quality, simplifies management, and reduces -operational costs by avoiding the need to over-provision services to the peak load. Most -importantly, autoscaling saves you from having to guess the number of nodes or pods needed to run -your workload: Kubernetes will automatically and dynamically resize your workload based on demand. +[Autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale) is one of the key features in Kubernetes. This means that Kubernetes will automatically increase the number of pods that run a service as the demand for the service increases, and reduce the number of pods when the demand drops. This improves service quality, simplifies management, and reduces operational costs by avoiding the need to over-provision services to the peak load. Most importantly, autoscaling saves you from having to guess the number of nodes or pods needed to run your workload: Kubernetes will automatically and dynamically resize your workload based on demand. Further factors to autoscale your WebRTC workload are: - smaller load on each instance: this might result in better and more stable performance; - smaller blast radius: less calls will be affected if a pod fails for some reason. -Autoscaling a production service, especially one as sensitive to latency and performance as WebRTC, -can be challenging. This guide will provide the basics on autoscaling; see the [official -docs](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale) for more detail. +Autoscaling a production service, especially one as sensitive to latency and performance as WebRTC, can be challenging. This guide will provide the basics on autoscaling; see the [official docs](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale) for more detail. 
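Since the HorizontalPodAutoscaler examples below scale on CPU utilization, it is worth verifying up front that the Kubernetes metrics API is available in your cluster. A quick check (the `stunner` namespace is an assumption; use whatever namespace your dataplane pods run in):

```console
# the metrics.k8s.io API group is only served when a metrics server is installed
kubectl api-resources --api-group=metrics.k8s.io
# if the above lists pod/node metrics, per-pod CPU and memory usage should also be available
kubectl top pods -n stunner
```

If these commands fail, install the [Kubernetes metric server](https://github.com/kubernetes-sigs/metrics-server#installation) before configuring autoscaling.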
## Horizontal scaling -It is a good practice to scale Kubernetes workloads -[horizontally](https://openmetal.io/docs/edu/openstack/horizontal-scaling-vs-vertical-scaling) -(that is, by adding or removing service pods) instead of vertically (that is, by migrating to a -more powerful server) when demand increases. Correspondingly it is a good advice to set the -[resource limits and -requests](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) to the -bare minimum and let Kubernetes to automatically scale out the service by adding more pods if -needed. Note that that HPA [uses the requested amount of -resources](https://pauldally.medium.com/horizontalpodautoscaler-uses-request-not-limit-to-determine-when-to-scale-97643d808997) -to determine when to scale-up or down the number of instances. - -STUNner comes with a full support for horizontal scaling using the the Kubernetes built-in -[HorizontalPodAutoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale) -(HPA). The triggering event can be based on arbitrary metric, say, the [number of active client -connections](#MONITORING.md) per STUNner dataplane pod. Below we use the CPU utilization for -simplicity. - -Scaling STUNner *up* occurs by Kubernetes adding more pods to the STUNner dataplane deployment and -load-balancing client requests across the running pods. This should (theoretically) never interrupt -existing calls, but new calls should be automatically routed by the cloud load balancer to the new -endpoint(s). Automatic scale-up means that STUNner should never become the bottleneck in the -system. Note that in certain cases scaling STUNner up would require adding new Kubernetes nodes to -your cluster: most modern hosted Kubernetes services provide horizontal node autoscaling out of the -box to support this. - -Scaling STUNner *down*, however, is trickier. Intuitively, when a running STUNner dataplane pod is -terminated on scale-down, all affected clients with active TURN allocations on the terminating pod -would be disconnected. This would then require clients to go through an [ICE -restart](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection/restartIce) to -re-connect, which may cause prolonged connection interruption and may not even be supported by all -browsers. - -In order to avoid client disconnects on scale-down, STUNner supports a feature called [graceful -shutdown](https://cloud.google.com/blog/products/containers-kubernetes/kubernetes-best-practices-terminating-with-grace). This -means that `stunnerd` pods would refuse to terminate as long as there are active TURN allocations -on them, and automatically remove themselves only once all allocations are deleted or timed out. It -is important that *terminating* pods will not be counted by the HorizontalPodAutoscaler towards the -average CPU load, and hence would not affect autoscaling decisions. In addition, new TURN -allocation requests would never be routed by Kubernetes to terminating `stunnerd` pods. - -Graceful shutdown enables full support for scaling STUNner down without affecting active client -connections. As usual, however, some caveats apply: -1. Currently the max lifetime for `stunnerd` to remain alive is 1 hour after being deleted: this - means that `stunnerd` will remain active only for 1 hour after it has been deleted/scaled-down - even if active allocations would last longer. You can always set this by adjusting the - `terminationGracePeriod` on your `stunnerd` pods. -2. 
STUNner pods may remain alive well after the last client connection goes away. This occurs when - an TURN/UDP allocation is left open by a client (spontaneous UDP client-side connection closure - cannot be reliably detected by the server). As the default TURN refresh lifetime is [10 - minutes](https://www.rfc-editor.org/rfc/rfc8656#section-3.2-3), it may take 10 minutes until all - allocations time out, letting `stunnerd` to finally terminate. -3. If there are active (or very recent) TURN allocations then the `stunnerd` pod may refuse to be - removed after a `kubectl delete`. Use `kubectl delete pod --grace-period=0 --force stunner-XXX` - to force removal. +It is a good practice to scale your STUNner deployment [horizontally](https://openmetal.io/docs/edu/openstack/horizontal-scaling-vs-vertical-scaling) (that is, by adding or removing `stunnerd` pods) instead of vertically (that is, by increasing the resource limits of your pods) when demand increases. We advise setting the [resource limits and requests](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) of the `stunnerd` pods to the bare minimum (this can be set in the [Dataplane](GATEWAY.md#dataplane) template used for provisioning `stunnerd` pods) and letting Kubernetes automatically scale out the STUNner dataplane by adding more `stunnerd` pods if needed. Note that HPA uses the [requested amount of resources](https://pauldally.medium.com/horizontalpodautoscaler-uses-request-not-limit-to-determine-when-to-scale-97643d808997) to determine when to scale-up or down the number of instances. + +STUNner comes with full support for horizontal scaling using the Kubernetes built-in [HorizontalPodAutoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale) (HPA). The triggering event can be based on an arbitrary metric, say, the [number of active client connections](MONITORING.md) per STUNner dataplane pod. Below we use the CPU utilization for simplicity. + +Scaling STUNner *up* occurs by Kubernetes adding more pods to the STUNner dataplane deployment and load-balancing client requests across the running pods. This should (theoretically) never interrupt existing calls, but new calls should be automatically routed by the cloud load balancer to the new endpoint(s). Automatic scale-up means that STUNner should never become the bottleneck in the system. Note that in certain cases scaling STUNner up would require adding new Kubernetes nodes to your cluster: most modern hosted Kubernetes services provide horizontal node autoscaling out of the box to support this. + +Scaling STUNner *down*, however, is trickier. Intuitively, when a running STUNner dataplane pod is terminated on scale-down, all affected clients with active TURN allocations on the terminating pod would be disconnected. This would then require clients to go through an [ICE restart](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection/restartIce) to re-connect, which may cause prolonged connection interruption and may not even be supported by all browsers. + +In order to avoid client disconnects on scale-down, STUNner supports a feature called [graceful shutdown](https://cloud.google.com/blog/products/containers-kubernetes/kubernetes-best-practices-terminating-with-grace). This means that `stunnerd` pods would refuse to terminate as long as there are active TURN allocations on them, and automatically remove themselves only once all allocations are deleted or timed out. It is important that *terminating* pods will not be counted by the HorizontalPodAutoscaler towards the average CPU load, and hence would not affect autoscaling decisions. In addition, new TURN allocation requests would never be routed by Kubernetes to terminating `stunnerd` pods.
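To see graceful shutdown in action, watch the dataplane pods during a scale-down: pods that still hold live TURN allocations linger in the `Terminating` state instead of being killed right away. A rough sketch, assuming the dataplane pods run in the `stunner` namespace:

```console
# watch dataplane pods while the HPA (or a manual scale-down) removes replicas;
# pods with active allocations remain in Terminating until the allocations expire or time out
kubectl -n stunner get pods -w
```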
+ +Graceful shutdown enables full support for scaling STUNner down without affecting active client connections. As usual, however, some caveats apply: +1. The default is to provision `stunnerd` pods with at most 2 CPU cores and 16 listener threads, both of which can be customized in the [Dataplane](GATEWAY.md#dataplane) template used to provision `stunnerd` pods. +2. Currently the max lifetime for `stunnerd` to remain alive is 1 hour after being deleted: this means that `stunnerd` will remain active only for 1 hour after it has been deleted/scaled-down even if active allocations would last longer. You can adjust the grace period in the `terminationGracePeriod` setting in the [Dataplane](GATEWAY.md#dataplane) template. +3. STUNner pods may remain alive well after the last client connection is gone. This occurs when an allocation is left open by a client (e.g., spontaneous UDP client-side connection closure cannot be reliably detected by the server). As the default TURN refresh lifetime is [10 minutes](https://www.rfc-editor.org/rfc/rfc8656#section-3.2-3), it may take 10 minutes until all allocations time out, letting `stunnerd` finally terminate. In such cases `stunnerd` may refuse to stop after a `kubectl delete`. Use `kubectl delete pod --grace-period=0 --force stunner-XXX` to force removal. ### Example -Below is a simple -[HorizontalPodAutoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/) -config for autoscaling `stunnerd`. The example assumes that the [Kubernetes metric -server](https://github.com/kubernetes-sigs/metrics-server#installation) is available in the -cluster. +Below is a simple [HorizontalPodAutoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/) config for autoscaling `stunnerd`. The example assumes that the [Kubernetes metric server](https://github.com/kubernetes-sigs/metrics-server#installation) is available in the cluster. ```yaml apiVersion: autoscaling/v2 @@ -103,9 +51,7 @@ spec: averageUtilization: 300 ``` -Here, `scaleTargetRef` selects the STUNner Deployment named `stunnerd` as the scaling target and -the deployment will always run at least 1 pod and at most 10 pods. Understanding how Kubernetes -chooses the number of running pods is, however, a bit tricky. +Here, `scaleTargetRef` selects the STUNner Deployment named `stunnerd` as the scaling target and the deployment will always run at least 1 pod and at most 10 pods. Understanding how Kubernetes chooses the number of running pods is, however, a bit tricky. Suppose that the configured resources in the STUNner deployment are the following. @@ -119,12 +65,5 @@ resources: memory: 128Mi ``` -Suppose that, initially, there is only a single `stunnerd` pod in the cluster. As new calls come -in, CPU utilization is increasing. Scale out will be triggered when CPU usage of the `stunnerd` pod -reaches 1500 millicore CPU (three times the requested CPU). If more calls come and the total CPU -usage of the `stunnerd` pods reaches 3000 millicore, which amounts to 1500 millicore on average, -scale out would happen again. When users leave, load will drop and the total CPU utilization will -fall under 3000 millicore. 
At this point Kubernetes will automatically scale-in and remove one of -the `stunnerd` instances. Recall, this would never affect existing connections thanks to graceful -shutdown. +Initially, there is only a single `stunnerd` pod in the cluster. As new calls arrive, CPU utilization is increasing. Scale out will be triggered when CPU usage of the `stunnerd` pod reaches 1500 millicore CPU (three times the requested CPU). If more calls come and the total CPU usage of the `stunnerd` pods reaches 3000 millicore, which amounts to 1500 millicore on average, scale out would happen again. When users leave, load will drop and the total CPU utilization will fall under 3000 millicore. At this point Kubernetes will automatically scale-in and remove one of the `stunnerd` instances. Recall, this would never affect existing connections thanks to graceful shutdown. diff --git a/docs/SECURITY.md b/docs/SECURITY.md index 806d696a..2131a6c7 100644 --- a/docs/SECURITY.md +++ b/docs/SECURITY.md @@ -1,30 +1,22 @@ # Security -Like any conventional gateway service, an improperly configured STUNner service may easily end up -exposing sensitive services to the Internet. The below security guidelines will allow to minimize -the risks associated with a misconfigured STUNner gateway service. +Like any conventional gateway service, an improperly configured STUNner service may easily end up exposing sensitive services to the Internet. The below security guidelines will help minimize the risks associated with a misconfigured STUNner gateway service. ## Threat -Before deploying STUNner, it is worth evaluating the potential [security -risks](https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty) a poorly -configured public STUN/TURN server poses. To demonstrate the risks, below we shall use the -[`turncat`](cmd/turncat.md) utility and `dig` to query the Kubernetes DNS service through a -misconfigured STUNner gateway. +Before deploying STUNner, it is worth evaluating the potential [security risks](https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty) a poorly configured public STUN/TURN server poses. To demonstrate the risks, below we shall use the [`turncat`](cmd/turncat.md) utility and `dig` to query the Kubernetes DNS service through a misconfigured STUNner gateway. -Start with a [fresh STUNner installation](INSTALL.md) into an empty namespace called `stunner` -and apply the below configuration. +Start with a [fresh STUNner installation](INSTALL.md) into an empty namespace called `stunner` and apply the below configuration. ```console cd stunner kubectl apply -f deploy/manifests/stunner-expose-kube-dns.yaml ``` -This will open a STUNner Gateway at port UDP:3478 and add a UDPRoute with the Kubernetes cluster -DNS service as the backend: +This will open a STUNner Gateway called `udp-gateway` at port UDP:3478 and add a UDPRoute with the Kubernetes cluster DNS service as the backend: ```yaml -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: stunner.l7mp.io/v1 kind: UDPRoute metadata: name: stunner-udproute @@ -44,39 +36,32 @@ Learn the virtual IP address (`ClusterIP`) assigned by Kubernetes to the cluster export KUBE_DNS_IP=$(kubectl get svc -n kube-system -l k8s-app=kube-dns -o jsonpath='{.items[0].spec.clusterIP}') ``` -Build `turncat`, the Swiss-army-knife [testing tool](cmd/turncat.md) for STUNner, fire up a -UDP listener on `localhost:5000`, and forward all received packets to the cluster DNS service -through STUNner. 
+Build `turncat`, the Swiss-army-knife [testing tool](cmd/turncat.md) for STUNner, fire up a UDP listener on `localhost:5000`, and forward all received packets to the cluster DNS service through STUNner. ```console -./turncat --log=all:DEBUG udp://127.0.0.1:5000 k8s://stunner/stunnerd-config:udp-listener udp://${KUBE_DNS_IP}:53 +./turncat --log=all:DEBUG udp://127.0.0.1:5000 k8s://stunner/udp-gateway:udp-listener udp://${KUBE_DNS_IP}:53 ``` Now, in another terminal query the Kubernetes DNS service through the `turncat` tunnel. ```console -dig +short @127.0.0.1 -p 5000 stunner.default.svc.cluster.local +dig +short @127.0.0.1 -p 5000 kubernetes.default.svc.cluster.local ``` -You should see the internal Cluster IP address allocated by Kubernetes for the STUNner dataplane -service. Experiment with other FQDNs, like `kubernetes.default.svc.cluster.local`, etc.; the -Kubernetes cluster DNS service will readily return the the corresponding internal service IP -addresses. +You should see the internal Cluster IP address for the Kubernetes API server. -This little experiment demonstrates the threats associated with a poorly configured STUNner -gateway: it may allow external access to *any* UDP service running inside your cluster. The -prerequisites for this: +This little experiment should demonstrate the threats associated with a poorly configured STUNner gateway: it may allow external access to *any* UDP service running inside your cluster. The prerequisites for this: 1. the target service *must* run over UDP (e.g., `kube-dns`), 2. the target service *must* be wrapped with a UDPRoute 3. the attacker *must* know at least one pod address or the ClusterIP for the targeted service. -Should any of these prerequisites miss, STUNner will block access to the target service. +Should any of these prerequisites fail, STUNner will block access to the target service. Now rewrite the backend service in the UDPRoute to an arbitrary non-existent service. ```yaml -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: stunner.l7mp.io/v1 kind: UDPRoute metadata: name: stunner-udproute @@ -89,39 +74,23 @@ spec: - name: dummy ``` -Repeat the above `dig` command to query the Kubernetes DNS service again and observe how the query -times out. This demonstrates that a properly locked down STUNner installation blocks all accesses -outside of the backend services explicitly opened up via a UDPRoute. +Repeat the above `dig` command to query the Kubernetes DNS service again and observe how the query times out. This demonstrates that a properly locked down STUNner installation blocks all access outside of the backend services explicitly opened up via a UDPRoute. ## Locking down STUNner -Unless properly locked down, STUNner may be used maliciously to open a tunnel to any UDP service -running inside a Kubernetes cluster. Accordingly, it is critical to tightly control the pods and -services exposed via STUNner. +Unless properly locked down, STUNner may be used maliciously to open a tunnel to any UDP service running inside a Kubernetes cluster. Accordingly, it is critical to tightly control the pods and services exposed via STUNner. STUNner's basic security model is as follows: -> In a properly configured deployment, STUNner provides the same level of security as a media -server pool exposed to the Internet over public IP addresses, protected by a firewall that admits -only UDP access. 
A malicious attacker, even possessing a valid TURN credential, can reach only the -media servers deployed behind STUNner, but no other services. +> In a properly configured deployment, STUNner provides the same level of security as a media server pool exposed to the Internet over a public IP address, protected by a firewall that admits only UDP access. A malicious attacker, even possessing a valid TURN credential, can reach only the media servers deployed behind STUNner, but no other services. -The below security considerations will greatly reduce this attack surface even further. In any -case, use STUNner at your own risk. +The below security considerations will greatly reduce this attack surface even further. In any case, use STUNner at your own risk. ## Authentication -By default, STUNner uses a single static username/password pair for all clients and the password is -available in plain text at the clients (`static` authentication mode). Anyone with access to the -static STUNner credentials can open a UDP tunnel via STUNner, provided that they know the private -IP address of the target service or pod and provided that a UDPRoute exists that specifies the -target service as a backend. This means that a service is exposed only if STUNner is explicitly -configured so. +By default, STUNner uses a single static username/password pair for all clients and the password is available in plain text at the clients (`static` authentication mode). Anyone with access to the static STUNner credentials can open a UDP tunnel via STUNner, provided that they know the private IP address of the target service or pod and provided that a UDPRoute exists that specifies the target service as a backend. This means that a service is exposed only if STUNner is explicitly configured so. -For more security sensitive workloads, we recommend the `ephemeral` authentication mode, which uses -per-client fixed lifetime username/password pairs. This makes it more difficult for attackers to -steal and reuse STUNner's TURN credentials. See the [authentication guide](AUTH.md) for configuring -STUNner with `ephemeral` authentication. +For production deployments we recommend the `ephemeral` authentication mode, which uses per-client fixed lifetime username/password pairs. This makes it more difficult for attackers to steal and reuse STUNner's TURN credentials. See the [authentication guide](AUTH.md) for configuring STUNner with `ephemeral` authentication. ## Access control @@ -130,7 +99,7 @@ a proper UDPRoute. For instance, the below UDPRoute allows access *only* to the service in the `media-plane` namespace, and nothing else. ```yaml -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: stunner.l7mp.io/v1 kind: UDPRoute metadata: name: stunner-udproute @@ -141,19 +110,10 @@ spec: rules: - backendRefs: - name: media-server - - namespace: media-plane + namespace: media-plane ``` -To avoid potential misuse, STUNner disables open wildcard access to the entire cluster. (Note that -in the [standalone mode](OBSOLETE.md) the user can still explicitly create an open `stunnerd` -cluster, but this is discouraged). - -For hardened deployments, it is possible to add a second level of isolation between STUNner and the -rest of the workload using the Kubernetes NetworkPolicy facility. Creating a NetworkPolicy will -essentially implement a firewall, blocking all access from the source to the target workload except -the services explicitly whitelisted by the user. 
The below example allows access from STUNner to -*any* media server pod labeled as `app=media-server` in the `default` namespace over the UDP port -range `[10000:20000]`, but nothing else. +For hardened deployments, it is possible to add a second level of isolation between STUNner and the rest of the workload using the Kubernetes NetworkPolicy facility. Creating a NetworkPolicy will essentially implement a firewall, blocking all access from the source to the target workload except the services explicitly whitelisted by the user. The below example allows access from STUNner to *any* media server pod labeled as `app=media-server` in the `default` namespace over the UDP port range `[10000:20000]`, but nothing else. ```yaml apiVersion: networking.k8s.io/v1 @@ -182,27 +142,12 @@ spec: endPort: 20000 ``` -Kubernetes network policies can be easily [tested](https://banzaicloud.com/blog/network-policy) -before exposing STUNner publicly; e.g., the [`turncat` utility](cmd/turncat.md) packaged with -STUNner can be used conveniently for this [purpose](examples/simple-tunnel/README.md). +Kubernetes network policies can be easily [tested](https://banzaicloud.com/blog/network-policy) before exposing STUNner publicly; e.g., the [`turncat` utility](cmd/turncat.md) packaged with STUNner can be used conveniently for this [purpose](examples/simple-tunnel/README.md). ## Exposing internal IP addresses -The trick in STUNner is that both the TURN relay transport address and the media server address are -internal pod IP addresses, and pods in Kubernetes are guaranteed to be able to connect -[directly](https://sookocheff.com/post/kubernetes/understanding-kubernetes-networking-model/#kubernetes-networking-model), -without the involvement of a NAT. This makes it possible to host the entire WebRTC infrastructure -over the private internal pod network and still allow external clients to make connections to the -media servers via STUNner. At the same time, this also has the bitter consequence that internal IP -addresses are now exposed to the WebRTC clients in ICE candidates. - -The threat model is that, possessing the correct credentials, an attacker can scan the *private* IP -address of all STUNner pods and all media server pods. This should not pose a major security risk -though: remember, none of these private IP addresses can be reached externally. The attack surface -can be further reduced to the STUNner pods' private IP addresses by using the [symmetric ICE -mode](DEPLOYMENT.md#symmetric-ice-mode). - -Nevertheless, if worried about information exposure then STUNner may not be the best option at the -moment. In later releases, we plan to implement a feature to obscure the transport relay connection -addresses returned by STUNner, which would lock down external scanning attempts. Feel free to open -an issue if you think this limitation is a blocker for you. +The trick in STUNner is that both the TURN relay transport address and the media server address are internal pod IP addresses, and pods in Kubernetes are guaranteed to be able to connect [directly](https://sookocheff.com/post/kubernetes/understanding-kubernetes-networking-model/#kubernetes-networking-model) without the involvement of a NAT. This makes it possible to host the entire WebRTC infrastructure over the private internal pod network and still allow external clients to make connections to the media servers via STUNner. At the same time, this also has the bitter consequence that internal IP addresses are now exposed to the WebRTC clients in ICE candidates. 
+ +The threat model is that, possessing the correct credentials, an attacker can scan the *private* IP address of all STUNner pods and all media server pods. This should pose no major security risk though: remember, none of these private IP addresses can be reached externally. The attack surface can be further reduced to the STUNner pods' private IP addresses by using the [symmetric ICE mode](DEPLOYMENT.md#symmetric-ice-mode). + +Nevertheless, if worried about information exposure then STUNner may not be the best option at the moment. In later releases, we plan to implement a feature to obscure the relay transport addresses returned by STUNner. Please file an issue if you think this limitation is a blocker for your use case. diff --git a/docs/WHY.md b/docs/WHY.md index 348a560a..5a663018 100644 --- a/docs/WHY.md +++ b/docs/WHY.md @@ -5,48 +5,44 @@ STUNner is a *WebRTC media gateway for Kubernetes*. All words matter here: indee encapsulations, it is a *media gateway* so its job is to ingest WebRTC audio/video streams into a virtualized media plane, and it is *opinionated towards Kubernetes*, so everything around STUNner is designed and built to fit into the Kubernetes ecosystem. That being said, STUNner can easily be -used outside of this context (e.g., as a regular STUN/TURN server), but these deployment options -are not the main focus. +used outside of this context (e.g., as a regular STUN/TURN server), but this is not the main focus. ## The problem -The main pain points STUNner is trying to solve are all related to that Kubernetes and WebRTC are +The pain points STUNner is trying to solve are all related to the fact that Kubernetes and WebRTC are currently foes, not friends. Kubernetes has been designed and optimized for the typical HTTP/TCP Web workload, which makes streaming workloads, and especially UDP/RTP based WebRTC media, feel like a foreign citizen. Most importantly, Kubernetes runs the media server pods/containers over a private L3 network over a -private IP address and the network dataplane applies several rounds of Network Address Translation -(NAT) steps to ingest media traffic into this private pod network. Most cloud load-balancers apply -a DNAT step to route packets to a node and then an SNAT step to put the packet to the private pod +private IP address and several rounds of Network Address Translation (NAT) steps are required +to ingest media traffic into this private pod network. Most cloud load-balancers apply a DNAT step +to route packets to a Kubernetes node and then an SNAT step to inject the packet into the private pod network, so that by the time a media packet reaches a pod essentially all header fields in the [IP -5-tuple](https://www.techopedia.com/definition/28190/5-tuple) are modified, except the destination +5-tuple](https://www.techopedia.com/definition/28190/5-tuple) are modified except the destination port. Then, if any pod sends the packet over to another pod via a Kubernetes service load-balancer then the packet will again undergo a DNAT step, and so on. -The *Kubernetes dataplane teems with NATs*. This is not a big deal for the usual HTTP/TCP web -protocols Kubernetes was designed for, since an HTTP/TCP session contains an HTTP header that fully -describes it. Once an HTTP/TCP session is accepted by a server it does not need to re-identify the -client per each received packet, because it has session context. - -This is not the case with the prominent WebRTC media protocol encapsulation though, RTP over -UDP. 
RTP does not have anything remotely similar to an HTTP header. Consequently, the only -"semi-stable" connection identifier WebRTC servers can use to identify a client is by expecting the -client's packets to arrive from a negotiated IP source address and source port. When the IP 5-tuple -changes, for instance because there is a NAT in the datapath, then WebRTC media connections -break. Due to reasons which are mostly historical at this point, *UDP/RTP connections do not -survive not even a single NAT step*, let alone the 2-3 rounds of NATs a packet regularly undergoes -in the Kubernetes dataplane. +The *Kubernetes dataplane teems with NATs*. This is not a big deal for the web protocols Kubernetes +was designed for, since each HTTP/TCP connection involves a session context that can be used by a +server to identify clients. This is not the case with the WebRTC media protocol stack though, since +UDP/RTP connections do not involve anything remotely similar to an HTTP context. Consequently, the +only "semi-stable" connection identifier WebRTC servers can use to identify a client is by +expecting the client's packets to arrive from a negotiated IP source address and source port. When +the IP 5-tuple changes, for instance because there is a NAT in the datapath, then WebRTC media +connections break. Due to reasons which are mostly historical at this point, *UDP/RTP connections +do not survive even a single NAT step*, let alone the 2-3 rounds of NATs a packet regularly +undergoes in the Kubernetes dataplane. ## The state-of-the-art The current stance is that the only way to deploy a WebRTC media server into Kubernetes is to exploit a [well-documented Kubernetes anti-pattern](https://kubernetes.io/docs/concepts/configuration/overview): *running the media -server pods in the host network namespace* (using the `hostNetwork=true` setting in the pod's -container template). This way the media server shares the network namespace of the host (i.e., the -Kubernetes node) it is running on, inheriting the public address (if any) of the host and -(hopefully) sidestepping the private pod network with the involved NATs. +server pods in the host network namespace* of Kubernetes nodes (using the `hostNetwork=true` +setting in the pod's container template). This way the media server shares the network namespace of +the host (i.e., the Kubernetes node) it is running on, inheriting the public address (if any) of +the host and (hopefully) sidestepping the private pod network with the involved NATs. There are *lots* of reasons why this deployment model is less than ideal: @@ -68,7 +64,7 @@ There are *lots* of reasons why this deployment model is less than ideal: - **It is a security nightmare.** Given today's operational reality, exposing a fleet of media servers to the Internet over a public IP address, and opening up all UDP ports for potentially - malicious access, is an adventurous undertaking, to say the least. Wouldn't it be nice to hide + malicious access, is an adventurous undertaking to say the least. Wouldn't it be nice to hide your media servers behind a secure perimeter defense mechanism and lock down *all* uncontrolled access and nefarious business by running it over a private IP? diff --git a/docs/cmd/icetester.md b/docs/cmd/icetester.md new file mode 100644 index 00000000..a43eb208 --- /dev/null +++ b/docs/cmd/icetester.md @@ -0,0 +1,42 @@ +# icetester: Universal UDP echo service using WebRTC/ICE + +`icetester` is a test server that can be used to test WebRTC/ICE connectivity. 
The tester serves a simple
+WebSocket/JSON API that clients can use to create a WebRTC data channel. Whatever is
+received by `icetester` on the data channel will be echoed back to the client over the data channel.
+
+While `icetester` can be used as a standalone tool, the intended use is via `stunnerctl icetest`.
+
+## Installation
+
+Install `icetester` using the standard Go toolchain and add it to `$PATH`.
+
+```console
+go install github.com/l7mp/stunner/cmd/icetester@latest
+```
+
+Building from source is as easy as it usually gets with Go:
+
+```console
+cd stunner
+go build -o icetester cmd/icetester/main.go
+```
+
+The containerized version is available as `docker.io/l7mp/icetester`.
+
+## Usage
+
+Deploy a STUNner gateway and test it via UDP and TCP through `stunnerctl`:
+
+```console
+stunnerctl icetest
+```
+
+## License
+
+Copyright 2021-2024 by its authors. Some rights reserved. See [AUTHORS](../../AUTHORS).
+
+MIT License - see [LICENSE](../../LICENSE) for full text.
+
+## Acknowledgments
+
+Initial code adopted from [pion/stun](https://github.com/pion/stun) and [pion/turn](https://github.com/pion/turn).
diff --git a/docs/cmd/stunnerctl.md b/docs/cmd/stunnerctl.md
index b0231788..0a9e6761 100644
--- a/docs/cmd/stunnerctl.md
+++ b/docs/cmd/stunnerctl.md
@@ -1,17 +1,161 @@
# stunnerctl: Command line toolbox for STUNner

A CLI tool to simplify the interaction with STUNner.
+The prominent use of `stunnerctl` is to load or watch STUNner dataplane configurations from a Kubernetes cluster for debugging and troubleshooting, or just for checking whether everything is configured the way it should be.
+
+## Installation
+
+Install the `stunnerctl` binary using the standard Go toolchain and add it to `$PATH`.
+
+```console
+go install github.com/l7mp/stunner/cmd/stunnerctl@latest
+```
+
+You can also enforce a specific OS, CPU architecture, and STUNner version:
+
+```console
+GOOS=windows GOARCH=amd64 go install github.com/l7mp/stunner/cmd/stunnerctl@v0.17.5
+```
+
+Building from source is as easy as it usually gets with Go:
+
+```console
+cd stunner
+go build -o stunnerctl cmd/stunnerctl/main.go
+```

## Usage

-Dump the running config from a live STUNner deployment in human-readable format.
+Type `stunnerctl` to get a glimpse of the sub-commands and features provided.
+
+### Config
+
+The `config` sub-command is used to load or watch running dataplane configs from the STUNner config discovery service (CDS) running in a remote Kubernetes cluster. Usually the CDS server role is fulfilled by the [STUNner gateway operator](https://github.com/l7mp/stunner-gateway-operator) but you can choose any CDS service you want (see the `--cds-server-*` CLI flags in the help). The main use of this command is to check the active dataplane configuration for troubleshooting connectivity problems.
+
+- Dump a summary of the running config of the STUNner gateway called `udp-gateway` deployed into the `stunner` namespace:
+
+  ```console
+  stunnerctl -n stunner config udp-gateway
+  Gateway: stunner/udp-gateway (loglevel: "all:INFO")
+  Authentication type: static, username/password: user-1/pass-1
+  Listeners:
+    - Name: stunner/udp-gateway/udp-listener
+      Protocol: TURN-UDP
+      Public address:port: 34.118.88.91:9001
+      Routes: [stunner/iperf-server]
+      Endpoints: [10.76.1.3, 10.80.7.104]
+  ```
+
+- The same, but using the alternative Kubernetes config file `~/my-config.conf` to access the cluster. The rest of the usual `kubectl` flags (`--context`, `--token`, etc.) 
are also available to select the cluster to connect to. + + ``` console + stunnerctl --kubeconfig ~/my-config.conf -n stunner config udp-gateway + ``` + +- Dump the running config of all gateways in the `stunner` namespace in JSON format (YAML is also available using `-o yaml`): + + ```console + stunnerctl -n stunner config -o json + {"version":"v1","admin":{"name":"stunner/tcp-gateway",...}} + {"version":"v1","admin":{"name":"stunner/udp-gateway",...}}} + ``` + +- Watch STUNner configs as they are being refreshed by the operator and dump only the name of the gateway whose config changes: + + ```console + stunnerctl config --all-namespaces -o jsonpath='{.admin.name}' -w + stunner/tcp-gateway + stunner/udp-gateway + ... + ``` + +For those who don't have the Go toolchain available to run `go install`, STUNner provides a minimalistic `stunnerctl` replacement called `stunnerctl.sh`. +This script requires nothing else than `bash`, `kubectl`, `curl` and `jq` to work. + +The below will dump the running config of `tcp-gateway` deployed into the `stunner` namespace: + ```console -cmd/stunnerctl/stunnerctl running-config stunner/stunnerd-config -STUN/TURN authentication type: plaintext -STUN/TURN username: user-1 -STUN/TURN password: pass-1 -Listener: udp-listener -Protocol: UDP -Public address: 34.118.36.108 -Public port: 3478 -``` \ No newline at end of file +cd stunner +cmd/stunnerctl/stunnerctl.sh running-config stunner/tcp-gateway +STUN/TURN authentication type: static +STUN/TURN username: user-1 +STUN/TURN password: pass-1 +Listener 1 + Name: stunner/tcp-gateway/tcp-listener + Listener: stunner/tcp-gateway/tcp-listener + Protocol: TURN-TCP + Public address: 35.187.97.94 + Public port: 3478 +``` + +You can also use `kubectl port-forward` to load or watch STUNner configs manually. Open a port-forwarded connection to the STUNner gateway operator: + +``` console +export CDS_SERVER_NAME=$(kubectl get pods -l stunner.l7mp.io/config-discovery-service=enabled --all-namespaces -o jsonpath='{.items[0].metadata.name}') +export CDS_SERVER_NAMESPACE=$(kubectl get pods -l stunner.l7mp.io/config-discovery-service=enabled --all-namespaces -o jsonpath='{.items[0].metadata.namespace}') +kubectl -n $CDS_SERVER_NAMESPACE port-forward pod/${CDS_SERVER_NAME} 63478:13478 & +``` + +If all goes well, you can now connect to the STUNner CDS API served by the gateway operator through the port-forwarded tunnel opened by `kubectl` just using `curl`. The below will load the config of the `udp-gateway` in the `stunner` namespace: + +``` console +curl -s http://127.0.0.1:63478/api/v1/configs/stunner/udp-gateway +``` + +If you happen to have a WebSocket client like the wonderful [`websocat`](https://github.com/vi/websocat) tool installed, you can also watch the configs as they are being rendered by the operator en live. + +``` console +websocat ws://127.0.0.1:63478/api/v1/configs/stunner/udp-gateway?watch=true - +``` + +### Status + +The `status` sub-command reports the status of the dataplane pods for a gateway, especially the runtime state of the `stunnerd` daemon. 
+ +- Find all dataplane pods for the `udp-gateway` in the `stunner` namespace and dump a status summary: + + ``` console + stunnerctl -n stunner status udp-gateway + stunner/udp-gateway-856c9f4dc9-524hc: + stunner/udp-gateway:{logLevel="all:INFO",health-check="http://:8086"} + static-auth:{realm="stunner.l7mp.io",username="",password=""} + listeners:1/clusters:1 + allocs:3/status=READY + stunner/udp-gateway-856c9f4dc9-c7wcq: + stunner/udp-gateway:{logLevel="all:INFO",health-check="http://:8086"} + static-auth:{realm="stunner.l7mp.io",username="",password=""} + listeners:1/clusters:1 + allocs:2/status=READY + ``` + +- Same but report only the runtime status of the `stunnerd` pods in the `stunner` namespace: + + ``` console + stunnerctl -n stunner status -o jsonpath='{.status}' + READY + TERMINATING + ``` + +### Authentication + +The `auth` sub-command can be used to obtain a TURN credential or a full ICE server config for connecting to a specific gateway. The authentication service API is usually served by a separate [STUNner authentication server](https://github.com/l7mp/stunner-auth-service) deployed alongside the gateway operator. The main use of this command is to feed an ICE agent manually with the ICE server config to connect to a specific STUNner gateway. + +- Obtain a full ICE server config for `udp-gateway` deployed into the `stunner` namespace: + + ``` console + stunnerctl -n stunner auth udp-gateway + {"iceServers":[{"credential":"pass-1","urls":["turn:10.104.19.179:3478?transport=udp"],"username":"user-1"}],"iceTransportPolicy":"all"} + ``` + +- Request a plain [TURN credential](https://datatracker.ietf.org/doc/html/draft-uberti-behave-turn-rest-00) using the authentication service deployed into the `stunner-system-prod` namespace: + + ``` console + stunnerctl -n stunner auth udp-gateway --auth-turn-credential --auth-service-namespace=stunner-system-prod + {"password":"pass-1","ttl":86400,"uris":["turn:10.104.19.179:3478?transport=udp"],"username":"user-1"} + ``` + +## License + +Copyright 2021-2023 by its authors. Some rights reserved. See [AUTHORS](../../AUTHORS). + +MIT License - see [LICENSE](../../LICENSE) for full text. diff --git a/docs/cmd/stunnerd.md b/docs/cmd/stunnerd.md index 92396348..5d532836 100644 --- a/docs/cmd/stunnerd.md +++ b/docs/cmd/stunnerd.md @@ -4,14 +4,14 @@ The `stunnerd` daemon implements the STUNner gateway dataplane. The daemon supports two basic modes. For quick tests `stunnerd` can be configured as a TURN server by specifying a TURN network URI on the command line. For more complex scenarios, and especially -for use in a Kubernetes cluster, `stunnerd` can take configuration from a config file. In addition, -`stunnerd` implements a watch-mode, so that it can actively monitor the config file for updates -and, once the config file has changed, automatically reconcile the TURN server to the new -configuration. This mode is intended for use with the [STUNner Kubernetes gateway -operator](https://github.com/l7mp/stunner-gateway-operator): the operator watches the Kubernetes -[Gateway API](https://gateway-api.sigs.k8s.io) resources and renders the active control plane -configuration into a ConfigMap, which is then mapped into the `stunnerd` pod's filesystem so that -the daemon can pick up the latest configuration using the watch mode. +for use in a Kubernetes cluster, `stunnerd` can take configuration from a config origin, which can +either be a config file or from a remote server reached over WebSocket. 
In addition, `stunnerd` +implements a watch-mode, so that it can actively monitor the config origin for updates and +automatically reconcile the TURN server to any new configuration. This mode is intended for use +with the [STUNner Kubernetes gateway operator](https://github.com/l7mp/stunner-gateway-operator): +the operator watches the Kubernetes [Gateway API](https://gateway-api.sigs.k8s.io) resources, +renders the active control plane configuration per each `stunnerd` pod and dynamically updates the +dataplane using STUNner's config discovery service. ## Features @@ -22,8 +22,11 @@ the daemon can pick up the latest configuration using the watch mode. * [RFC 6062](https://tools.ietf.org/html/rfc6062): Traversal Using Relays around NAT (TURN) Extensions for TCP Allocations * TURN transport over UDP, TCP, TLS/TCP and DTLS/UDP. -* Two authentication modes via the long-term STUN/TURN credential mechanism: `plaintext` using a - static username/password pair, and `longterm` with dynamically generated time-scoped credentials. +* TURN/UDP listener CPU scaling. +* Two authentication modes via the long-term STUN/TURN credential mechanism: `static` using a + static username/password pair, and `ephemeral` with dynamically generated time-scoped + credentials. +* Peer port range filtering. ## Getting Started @@ -37,36 +40,25 @@ go build -o stunnerd cmd/stunnerd/main.go ### Usage -The below command will open a `stunnerd` UDP listener at `127.0.0.1:5000`, set `plaintext` -authentication using the username/password pair `user1/passwrd1`, and raises the debug level to the -maximum. +The below command will open a `stunnerd` UDP listener at `127.0.0.1:5000`, set `static` authentication using the username/password pair `user1/passwrd1`, and raise the debug level to the maximum. ```console ./stunnerd --log=all:TRACE turn://user1:passwd1@127.0.0.1:5000 ``` -Alternatively, run `stunnerd` in verbose mode with the config file taken from -`cmd/stunnerd/stunnerd.conf`. Adding the flag `-w` will enable watch mode. +Alternatively, run `stunnerd` in verbose mode with the config file taken from `cmd/stunnerd/stunnerd.conf`. Adding the flag `-w` will enable watch mode. ```console -$ ./stunnerd -v -w -c cmd/stunnerd/stunnerd.conf +./stunnerd -v -w -c cmd/stunnerd/stunnerd.conf ``` -Type `./stunnerd` to see a short description of the command line arguments supported by `stunnerd`. +Type `./stunnerd -h` to get a short description of the supported command line arguments. -In practice, you'll rarely need to run `stunnerd` directly: just fire up the [prebuilt container -image](https://hub.docker.com/repository/docker/l7mp/stunnerd) in Kubernetes and you should be good -to go. +In practice, you'll rarely need to run `stunnerd` directly: just fire up the [prebuilt container image](https://hub.docker.com/repository/docker/l7mp/stunnerd) in Kubernetes and you should be good to go. Or better yet, [install](/docs/INSTALL.md) the STUNner Kubernetes gateway operator that will readily manage the `stunnerd` pods for each Gateway you create. ## Configuration -Using the below configuration, `stunnerd` will open 4 STUNner listeners: two for accepting -unencrypted connections at UDP/3478 and TCP/3478, and two for encrypted connections at TLS/TCP/3479 -and DTLS/UDP/3479. For easier debugging, the port for the transport relay connections opened by -`stunnerd` will be taken from [10000:19999] for the UDP listener, [20000:29999] for the TCP -listener, etc. 
The daemon will use `longterm` authentication, with the shared secret read from the -environment variable `$STUNNER_SHARED_SECRET` during initialization. The relay address is taken -from the `$STUNNER_ADDR` environment variable. +Using the below configuration, `stunnerd` will open 4 STUNner listeners: two for accepting unencrypted connections at UDP/3478 and TCP/3478, and two for encrypted connections at TLS/TCP/3479 and DTLS/UDP/3479. The daemon will use `ephemeral` authentication, with the shared secret taken from the environment variable `$STUNNER_SHARED_SECRET` during initialization. The relay address will be taken from the `$STUNNER_ADDR` environment variable. ``` yaml version: v1alpha1 @@ -76,34 +68,46 @@ admin: realm: "my-realm.example.com" static: auth: - type: longterm + type: ephemeral credentials: secret: $STUNNER_SHARED_SECRET listeners: - name: stunnerd-udp address: "$STUNNER_ADDR" - protocol: udp + protocol: turn-udp port: 3478 - minPort: 10000 - maxPort: 19999 - name: stunnerd-tcp address: "$STUNNER_ADDR" - protocol: tcp + protocol: turn-tcp port: 3478 - minPort: 20000 - maxPort: 29999 - name: stunnerd-tls - protocol: tls + address: "$STUNNER_ADDR" + protocol: turn-tls port: 3479 - minPort: 30000 - maxPort: 39999 cert: "my-cert.cert" key: "my-key.key" - name: stunnerd-dtls - protocol: dtls + address: "$STUNNER_ADDR" + protocol: turn-dtls port: 3479 cert: "my-cert.cert" key: "my-key.key" - minPort: 40000 - maxPort: 49999 -``` \ No newline at end of file +``` + +STUNner can run multiple parallel readloops for TURN/UDP listeners, which allows it to scale to practically any number of CPUs and brings massive performance improvements for UDP workloads. This can be achieved by creating a configurable number of UDP readloop threads over the same TURN listener. The kernel will load-balance allocations across the readloops per the IP 5-tuple and so the same allocation will always stay at the same CPU, which is important for correct TURN operations. + +The feature is exposed via the command line flag `--udp-thread-num=`. The below starts `stunnerd` watching the config file in `/etc/stunnerd/stunnerd.conf` using 32 parallel UDP readloops (the default is 16). + +``` sh +./stunnerd -w -c /etc/stunnerd/stunnerd.conf --udp-thread-num=32 +``` + +## License + +Copyright 2021-2023 by its authors. Some rights reserved. See [AUTHORS](../../AUTHORS). + +MIT License - see [LICENSE](../../LICENSE) for full text. + +## Acknowledgments + +Initial code adopted from [pion/stun](https://github.com/pion/stun) and [pion/turn](https://github.com/pion/turn). diff --git a/docs/cmd/turncat.md b/docs/cmd/turncat.md index 6be4dfcd..8d2fd9ef 100644 --- a/docs/cmd/turncat.md +++ b/docs/cmd/turncat.md @@ -1,48 +1,80 @@ # turncat: Swiss-army-knife testing tool for STUNner -`turncat` is a STUN/TURN client to open a connection through a TURN server to an arbitrary remote -address/port. The main use is to open a local tunnel endpoint to any service running inside a -Kubernetes cluster via STUNner. This is very similar in functionality to `kubectl proxy`, but it -uses STUN/TURN to enter the cluster. +`turncat` is a STUN/TURN client to open a connection through a TURN server to an arbitrary remote address/port. +The main use is to open a local tunnel endpoint to any service running inside a Kubernetes cluster via STUNner. +This is very similar in functionality to `kubectl port-forward`, but it uses STUN/TURN to enter the cluster. +This is much faster than the TCP connection used by `kubectl`. 
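As a quick preview of that workflow (a minimal sketch only: the Gateway `udp-gateway`, the listener `udp-listener` and the backend service `my-service` are placeholder names, and full usage is covered below), such a port-forward-style UDP tunnel through STUNner looks roughly like this:

```console
# Tunnel local UDP port 5000 through the STUNner Gateway "udp-gateway" (TURN listener
# "udp-listener" in the "stunner" namespace) to a ClusterIP service inside the cluster.
# "my-service" and port 5001 are placeholders: substitute your own backend.
./turncat --log=all:INFO udp://127.0.0.1:5000 \
    k8s://stunner/udp-gateway:udp-listener \
    udp://$(kubectl get svc my-service -o jsonpath='{.spec.clusterIP}'):5001
```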
-## Getting Started +## Installation -### Installation +On Linux and macOS, use [this script](/cmd/getstunner/getstunner.sh) to download the latest version of the `turncat` binary: -As simple as it gets: +```console +curl -sL https://raw.githubusercontent.com/l7mp/stunner/main/cmd/getstunner/getstunner.sh | sh - +export PATH=$HOME/.l7mp/bin:$PATH +``` +> [!NOTE] +> The script installs `stunnerctl` too. + +Install the `turncat` binary using the standard Go toolchain and add it to `$PATH`. + +```console +go install github.com/l7mp/stunner/cmd/turncat@latest +``` + +You can also enforce a specific OS, CPU architecture, and STUNner version like below: + +```console +GOOS=windows GOARCH=amd64 go install github.com/l7mp/stunner/cmd/turncat@v0.17.5 +``` + +Building from source is as easy as it usually gets with Go: ```console cd stunner go build -o turncat cmd/turncat/main.go ``` -### Usage +## Usage -Listen to client connections on the UDP listener `127.0.0.1:5000` and tunnel the received packets -through the TURN server located at `192.0.2.1:3478` to the UDP server located at -`192.0.2.2:53`. Use the longterm STUN/TURN credential mechanism to authenticate with the TURN -server and set the user/passwd to `test/test`: +Listen to client connections on the UDP listener `127.0.0.1:5000` and tunnel the received packets through the TURN server located at `192.0.2.1:3478` to the UDP listener located at `192.0.2.2:53`. +Use the [`static` STUN/TURN credential mechanism](/docs/AUTH.md) to authenticate with the TURN server and set the user/passwd to `test/test`: ```console -./turncat --log=all:INFO,turncat:DEBUG udp://127.0.0.1:5000 turn://test:test@192.0.2.1:3478 udp://192.0.2.2:53 +./turncat --log=all:INFO,turncat:DEBUG udp://127.0.0.1:5000 turn://test:test@192.0.2.1:3478 \ + udp://192.0.2.2:53 ``` -TLS/DTLS should also work fine; note that `--insecure` allows `turncat` to accept self-signed TLS -certificates and `--verbose` is equivalent to setting all `turncat` loggers to DEBUG mode (`-l -all:DEBUG`). +TLS/DTLS should also work. +Below `--insecure` allows `turncat` to accept self-signed TLS certificates and `--verbose` is equivalent to setting all loggers to DEBUG mode (`-l all:DEBUG`). ```console -./turncat --verbose --insecure udp://127.0.0.1:5000 turn://test:test@192.0.2.1:3478?transport=tls udp://192.0.2.2:53 +./turncat --verbose --insecure udp://127.0.0.1:5000 \ + turn://test:test@192.0.2.1:3478?transport=tls udp://192.0.2.2:53 ``` -Alternatively, specify the special TURN server URI `k8s://stunner/stunnerd-config:udp-listener` to -let `turncat` parse the running STUNner configuration from the active Kubernetes cluster. The URI -directs `turncat` to read the STUNner config from the ConfigMap named `stunnerd-config` in the -`stunner` namespace, and connect to the STUNner listener named `udp-listener`. The CLI flag `-` -instructs `turncat` to listen on the standard input: anything you type in the terminal will be sent -via STUNner to the peer `udp://10.0.0.1:9001` (after you press Enter). The CLI flag `-v` will -enable verbose logging. +Alternatively, you can specify the special TURN server meta-URI `k8s://stunner/udp-gateway:udp-listener` to let `turncat` parse the running STUNner configuration from the active Kubernetes cluster. +The URI directs `turncat` to read the config of the STUNner Gateway called `udp-gateway` in the `stunner` namespace and connect to the TURN listener named `udp-listener`. 
+The CLI flag `-` instructs `turncat` to listen on the standard input: anything you type in the terminal will be sent via STUNner to the peer `udp://10.0.0.1:9001` (after you press Enter).
+The CLI flag `-v` will enable verbose logging.

```console
-./turncat -v - k8s://stunner/stunnerd-config:udp-listener udp://10.0.0.1:9001
-```
\ No newline at end of file
+./turncat -v - k8s://stunner/udp-gateway:udp-listener udp://10.0.0.1:9001
+```
+
+Note that the standard `kubectl` command line flags are available.
+For instance, the below will use the context `prod-europe` from the kubeconfig file `kube-prod.conf`:
+
+```console
+./turncat --kubeconfig=kube-prod.conf --context prod-europe -v - k8s://... udp://...
+```
+
+## License
+
+Copyright 2021-2023 by its authors. Some rights reserved. See [AUTHORS](../../AUTHORS).
+
+MIT License - see [LICENSE](../../LICENSE) for full text.
+
+## Acknowledgments
+
+Initial code adopted from [pion/stun](https://github.com/pion/stun) and [pion/turn](https://github.com/pion/turn).
diff --git a/docs/examples/TLS.md b/docs/examples/TLS.md
new file mode 100644
index 00000000..34a55025
--- /dev/null
+++ b/docs/examples/TLS.md
@@ -0,0 +1,103 @@
+# TLS
+
+This documentation sums up the TLS and certificate issues you will encounter when deploying the examples.
+
+## The issue
+
+Some client-side applications must work over a secure HTTPS connection, because [getUserMedia](https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#browser_compatibility) is available only in secure contexts. This implies that the client-server signaling connection must be secure too. In the demos, we will aim to obtain a proper CA-signed certificate (self-signed certificates haven't been tested). Obtaining a valid TLS certificate is a challenge. Thus, the majority of the installation guides will be about securing client connections to the client-side apps and the WebRTC mediaservers over TLS. Once HTTPS is correctly working, integrating the mediaservers with STUNner is very simple.
+
+## TLS certificates
+
+Some WebRTC servers will need a valid TLS cert, which means they must run behind an existing DNS domain name backed by a CA signed TLS certificate. This is simple if you have your own domain, but if you don't, we still have a solution for that.
+
+> [!NOTE]
+>
+> By default, the examples and command snippets assume you don't own a domain.
+
+### If you don't have your own domain
+
+[nip.io](https://nip.io) provides a dead simple wildcard DNS for any IP address. We will use this to "own a domain" and obtain a CA signed certificate for the mediaserver. This will allow us to point the domain name `client-.nip.io` to an ingress HTTP gateway in our Kubernetes cluster, which will then use some automation (namely, cert-manager) to obtain a valid CA signed cert.
+
+### If you have your own domain
+
+We use `nip.io` to "own a domain" in some examples. To replace it with your own domain, you must locate the corresponding lines in the specific mediaserver's configuration file and overwrite them.
+
+> [!NOTE]
+>
+> Although they might look similar, every mediaserver has a different configuration. You might need to (re)configure more things in one mediaserver than another.
+
+> [!NOTE]
+>
+> Make sure to set up your Ingress correctly and do not forget to create a new DNS record pointing to your Ingress' IP address! 
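If you go down this route, a quick sanity check is to compare your DNS record with the Ingress IP (a sketch only: `media.example.com` stands in for your own domain, and the `ingress-nginx-controller` service name assumes the nginx Ingress controller installed in the next section):

```console
# Print the external IP assigned to the nginx Ingress controller (installed below)
kubectl get service ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
# The A record for your domain should resolve to the same address
# ("media.example.com" is a placeholder for your own domain name)
dig +short media.example.com
```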
+ +## Installation + +### Ingress + +The first step of secured traffic ingestion is obtaining a valid cert by installing a Kubernetes Ingress: this will be used during the validation of our certificates and to terminate client TLS encrypted contexts. + +Install an Ingress controller into your cluster. We used the official [nginx ingress](https://github.com/kubernetes/ingress-nginx), but other Ingress implementations might work (check their documentation for install steps). + +```console +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +helm install ingress-nginx ingress-nginx/ingress-nginx +``` + +Wait until Kubernetes assigns an external IP to the Ingress. + +```console +until [ -n "$(kubectl get service ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}')" ]; do sleep 1; done +``` + +Store the Ingress IP address Kubernetes assigned to our Ingress; this will be needed later when we configure the validation pipeline for our TLS certs. + +```console +kubectl get service ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}' +export INGRESSIP=$(kubectl get service ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +export INGRESSIP=$(echo $INGRESSIP | sed 's/\./-/g') +``` + +### Cert manager + +We use the official [cert-manager](https://cert-manager.io) to automate TLS certificate management. + +Add the Helm repository, which contains the cert-manager Helm chart, and install the charts: + +```console +helm repo add cert-manager https://charts.jetstack.io +helm repo update +helm install cert-manager jetstack/cert-manager --namespace cert-manager \ + --create-namespace --set global.leaderElection.namespace=cert-manager \ + --set crds.enabled=true --timeout 600s +``` + +At this point we have all the necessary boilerplate set up to automate TLS issuance for the demo. + +## Troubleshooting + +#### Wildcard DNS domain rate limiting + +Note that public wildcard DNS domains might run into [rate limiting](https://letsencrypt.org/docs/rate-limits/) issues. If this occurs you can try [alternative services](https://moss.sh/free-wildcard-dns-services/) instead of `nip.io`. + +#### Certificate issuance + +If you work with certificates you must be aware that signing a certificate request takes some time and it differs for every CA (certificate authority). If you sense there is a problem with the certificate being signed or issued, you can check it directly and see what is going on. + +First, you'll need to find the certificate and its related resources in your cluster. +```console +kubectl get certificate -A +kubectl get certificaterequests.cert-manager.io -A +kubectl get certificatesigningrequests.certificates.k8s.io +``` + +To find more information about them +```console +kubectl describe certificate -A +kubectl describe certificaterequests.cert-manager.io -A +kubectl describe certificatesigningrequests.certificates.k8s.io +``` + +# Help + +STUNner development is coordinated in Discord, feel free to [join](https://discord.gg/DyPgEsbwzc). \ No newline at end of file diff --git a/docs/examples/benchmark/README.md b/docs/examples/benchmark/README.md index 3b110a64..75497e97 100644 --- a/docs/examples/benchmark/README.md +++ b/docs/examples/benchmark/README.md @@ -1,29 +1,25 @@ # Performance Benchmarking -With the help of this guide you are able to take performance measurements in your setup using STUNner. 
Both running STUNner locally (outside of Kubernetes) and running STUNner in Kubernetes can be evaluated. +This guide will help you to take performance measurements in your setup using STUNner. +Both running STUNner locally (outside of Kubernetes) and running STUNner in Kubernetes can be evaluated. +You can then compare the locally measured result with the result obtained from Kubernetes and figure out performance bottlenecks. -Compare the locally measured result to the result measured in Kubernetes and figure out the overhead cost. The extra cost of your cluster's networking may surprise you in terms of extra delay or more packet drops using the same bandwidth. - -Locally there is no installation needed, it should take less than a minute to measure. +Locally there is no installation needed, it should take less than a minute to make a measurements. If you have a Kubernetes cluster up and running, the installation and measurement should take a few minutes max. ## Tools The tools used in the measurement are the following: -* `iperf` Using to create traffic flows between the clients and server -* `turncat` Using to open a connection through STUNner to the iperf server -* `STUNner` Acting as a STUN server towards `turncat` clients - -### Caveats - -When measuring latency with `iperf` you might be fooled because it is [measuring one-way latency](https://stackoverflow.com/questions/63793030/iperf2-latency-is-a-two-way-or-one-way-latency) which requires the clocks to be synchronized. This means you might see corrupted latencies such as negative ones. +* `iperf`: Used for creating traffic flows between the clients and the server. +* `turncat`: Used for opening a connection through STUNner to the iperf server. +* `STUNner`: Our TURN server exposed to `turncat` clients. ## Measurement Setup ### Local setup -All the components are running locally. All of them are using `127.0.0.1` addresses. +All the components are running locally using localhost to simulate the network. ![STUNner benchmark local test architecture](../../img/stunner_benchmark_local.svg) @@ -35,7 +31,7 @@ All the components are running locally. All of them are using `127.0.0.1` addres ## Prerequisites -You must have [`iperfv2`](https://iperf.fr), [`jq`](https://stedolan.github.io/jq/) and most importantly [Go](https://go.dev/doc/install) installed locally to run this tutorial. +You must have [`iperfv2`](https://iperf.fr), [`jq`](https://stedolan.github.io/jq/), and [Go](https://go.dev/doc/install) installed locally to run this tutorial. For Kubernetes benchmarks, you also need a running [STUNner installation](/docs/INSTALL.md). ## Install locally @@ -43,17 +39,6 @@ You are good to go. No installation steps required. ## Install on Kubernetes -Install it in case you would like to benchmark your Kubernetes setup. If you want to benchmark locally skip this step. -Note that the benchmarking script does not support the standalone deployment. -Install the STUNner Gateway operator and STUNner ([more info](https://github.com/l7mp/stunner-helm)): - -```console -helm repo add stunner https://l7mp.io/stunner -helm repo update -helm install stunner-gateway-operator stunner/stunner-gateway-operator -create-namespace --namespace=stunner-system -helm install stunner stunner/stunner -create-namespace --namespace=stunner -``` - Configure STUNner to act as a STUN server towards [`turncat`](../../cmd/turncat.md) clients, and to let `iperf` client's traffic reach the `iperf` server. 
```
@@ -176,10 +161,11 @@ Results
Notice that the average packets/second rate will be slightly lower in case of a hosted Kubernetes
cluster than in case of a local `STUNner` installation.

-## Tips and Tricks
+## Caveats

-* It is advised to repeat the measurment with different packet sizes.
+* It is advised to repeat the measurement with different packet sizes. Recommended packet sizes in bytes are 64, 128, 256, 512, 1024, and 1200. Small packet sizes result in lower effective throughput (when packet drop is < 1%).
+* Measuring [one-way latency](https://stackoverflow.com/questions/63793030/iperf2-latency-is-a-two-way-or-one-way-latency) with `iperf` requires the clocks at the iperf client and server to be synchronized. Without this the results may be corrupted, and you may even see negative latencies.

-Recommended packet sizes in bytes are 64, 128, 256, 512, 1024, and 1200.
+# Help

-**Effect of packet sizes:** With smallish packets (e.g., 64B), the average packets/second rate will be higher than with largish packets (e.g., 1200B). Small packet sizes result lower effective throughput (when packet drop is < 1%). You should definitely change the arguments to test the performance of your setup ideally.
\ No newline at end of file
+STUNner development is coordinated in Discord, feel free to [join](https://discord.gg/DyPgEsbwzc).
\ No newline at end of file
diff --git a/docs/examples/benchmark/benchmark.sh b/docs/examples/benchmark/benchmark.sh
index fa61c67a..cb74aceb 100755
--- a/docs/examples/benchmark/benchmark.sh
+++ b/docs/examples/benchmark/benchmark.sh
@@ -72,16 +72,16 @@ if [[ $platform == "local" ]]; then
    UDP_ECHO_IP="127.0.0.1"
    IPERF_PORT="5000"

-    go run ../../cmd/stunnerd/main.go --log=all:INFO \
+    go run ../../../cmd/stunnerd/main.go --log=all:INFO \
        turn://${STUNNER_USERNAME}:${STUNNER_PASSWORD}@${STUNNER_PUBLIC_ADDR}:${STUNNER_PUBLIC_PORT} &> /dev/null 2>&1 &
    iperf -s -p 5000 -u -e > log.tmp 2>&1 &
    sleep 2
elif [[ $platform == "k8s" ]]; then
-    STUNNER_PUBLIC_ADDR=$(kubectl get cm stunnerd-config -n stunner -o jsonpath='{.data.stunnerd\.conf}' | jq -r .listeners[0].public_address)
-    STUNNER_PUBLIC_PORT=$(kubectl get cm stunnerd-config -n stunner -o jsonpath='{.data.stunnerd\.conf}' | jq -r .listeners[0].public_port)
-    STUNNER_PASSWORD=$(kubectl get cm stunnerd-config -n stunner -o jsonpath='{.data.stunnerd\.conf}' | jq -r .auth.credentials.password)
-    STUNNER_USERNAME=$(kubectl get cm stunnerd-config -n stunner -o jsonpath='{.data.stunnerd\.conf}' | jq -r .auth.credentials.username)
+    # STUNNER_PUBLIC_ADDR=$(kubectl get cm stunnerd-config -n stunner -o jsonpath='{.data.stunnerd\.conf}' | jq -r .listeners[0].public_address)
+    # STUNNER_PUBLIC_PORT=$(kubectl get cm stunnerd-config -n stunner -o jsonpath='{.data.stunnerd\.conf}' | jq -r .listeners[0].public_port)
+    # STUNNER_PASSWORD=$(kubectl get cm stunnerd-config -n stunner -o jsonpath='{.data.stunnerd\.conf}' | jq -r .auth.credentials.password)
+    # STUNNER_USERNAME=$(kubectl get cm stunnerd-config -n stunner -o jsonpath='{.data.stunnerd\.conf}' | jq -r .auth.credentials.username)
    UDP_ECHO_IP=$(kubectl get svc iperf-server -o jsonpath='{.spec.clusterIP}')
    IPERF_PORT="5000"
else
@@ -92,8 +92,8 @@ fi

for i in $(seq "$num_of_processes"); do
    port=$((8999+i))
-    go run ../../cmd/turncat/main.go --log=all:INFO udp://127.0.0.1:$port \
-        turn://"${STUNNER_USERNAME}":"${STUNNER_PASSWORD}"@"${STUNNER_PUBLIC_ADDR}":"${STUNNER_PUBLIC_PORT}" udp://"${UDP_ECHO_IP}":$IPERF_PORT &> /dev/null 2>&1 &
+    go run ../../../cmd/turncat/main.go 
--log=all:INFO udp://127.0.0.1:$port \ + k8s://stunner/udp-gateway:udp-listener udp://"${UDP_ECHO_IP}":$IPERF_PORT >/dev/null 2>&1 & done sleep 2 diff --git a/docs/examples/benchmark/performance-stunner.yaml b/docs/examples/benchmark/performance-stunner.yaml index 7f786748..bd6dc97b 100644 --- a/docs/examples/benchmark/performance-stunner.yaml +++ b/docs/examples/benchmark/performance-stunner.yaml @@ -1,4 +1,4 @@ -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: name: stunner-gatewayclass @@ -12,7 +12,7 @@ spec: description: "STUNner is a WebRTC ingress gateway for Kubernetes" --- -apiVersion: stunner.l7mp.io/v1alpha1 +apiVersion: stunner.l7mp.io/v1 kind: GatewayConfig metadata: name: stunner-gatewayconfig @@ -24,7 +24,7 @@ spec: password: "pass-1" --- -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: gateway.networking.k8s.io/v1 kind: Gateway metadata: name: udp-gateway @@ -36,7 +36,7 @@ spec: port: 9001 protocol: UDP --- -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: stunner.l7mp.io/v1 kind: UDPRoute metadata: name: iperf-server diff --git a/docs/examples/cloudretro/README.md b/docs/examples/cloudretro/README.md index 15130a57..79797f33 100644 --- a/docs/examples/cloudretro/README.md +++ b/docs/examples/cloudretro/README.md @@ -15,11 +15,7 @@ In this demo you will learn how to: ## Prerequisites -The below installation instructions require an operational cluster running a supported version of -Kubernetes (>1.22). Most hosted or private Kubernetes cluster services will work, but make sure -that the cluster comes with a functional load-balancer integration (all major hosted Kubernetes -services should support this). Otherwise, STUNner will not be able to allocate a public IP address -for clients to reach your WebRTC infra. +See prerequisites [here](../../INSTALL.md#prerequisites). You will need a basic familiarity [with the CloudRetro architecture](https://webrtchacks.com/open-source-cloud-gaming-with-webrtc), especially the concept @@ -66,7 +62,7 @@ the exposed service of the Coordinator, which clients will connect to. Running command will result the IP address assigned by the Kubernetes load-balancer: ```console -export EXTERNAL_IP=$(kubectl get service -n cloudretro coordinator-lb-svc -o jsonpath='{.status.loadBalancer.ingress[0].ip})' +export EXTERNAL_IP=$(kubectl get service -n cloudretro coordinator-lb-svc -o jsonpath='{.status.loadBalancer.ingress[0].ip}') ``` If Kubernetes refuses to assign an external IP to your service after a couple of minutes, you will @@ -79,14 +75,9 @@ the CloudRetro servers running on a private pod IP address. That is where STUNne ### STUNner -Use the official [Helm charts](../../INSTALL.md#installation) to install STUNner. +First we install the stable version of STUNner, please follow the instructions in [this section](../../INSTALL.md#installation-1). -```console -helm repo add stunner https://l7mp.io/stunner -helm repo update -helm install stunner-gateway-operator stunner/stunner-gateway-operator --create-namespace --namespace stunner-system -helm install stunner stunner/stunner --namespace stunner -``` +Wait until all the necessary resources are up and running, then you are ready to continue. Next, register STUNner with the Kubernetes Gateway API. @@ -94,7 +85,7 @@ Next, register STUNner with the Kubernetes Gateway API. 
kubectl apply -f stunner-gwcc.yaml ``` -The default configuration uses the `plaintext` STUN/TURN authentication mode with the +The default configuration uses the `static` STUN/TURN authentication mode with the username/password pair `user-1/pass-1`; make sure to [customize](../../AUTH.md) these defaults. Next, we expose the CloudRetro media services over STUNner. The below Gateway specification will @@ -106,7 +97,7 @@ can connect from behind even the most over-zealous enterprise NAT or firewall. ```console kubectl apply -f - < [!NOTE] +> +> If you have your own TLS certificate, put it in a `Secret` [resource](https://kubernetes.io/docs/concepts/configuration/secret/) and deploy it into the `default` namespace under the `nexus-secret-tls` name. + +### STUNner + +Now comes the fun part. The simplest way to run this demo is to clone the [STUNner git repository](https://github.com/l7mp/stunner) and deploy (after some minor modifications) the [manifest](livekit-server.yaml) packaged with STUNner. + +To install the stable version of STUNner, please follow the instructions in [this section](../../INSTALL.md#installation-1). + +Configure STUNner to act as a STUN/TURN server to clients, and route all received media to the Nexus pods. + +```console +git clone https://github.com/l7mp/stunner +cd stunner +kubectl apply -f docs/examples/elixir-webrtc/nexus-call-stunner.yaml +``` + +The relevant parts here are the STUNner [Gateway definition](../../GATEWAY.md#gateway), which exposes the STUNner STUN/TURN server over UDP:3478 to the Internet, and the [UDPRoute definition](../../GATEWAY.md#udproute), which takes care of routing media to the pods running the Nexus Gateway service. + +```yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: udp-gateway + namespace: stunner +spec: + gatewayClassName: stunner-gatewayclass + listeners: + - name: udp-listener + port: 3478 + protocol: UDP +--- +apiVersion: stunner.l7mp.io/v1 +kind: UDPRoute +metadata: + name: nexus + namespace: stunner +spec: + parentRefs: + - name: udp-gateway + rules: + - backendRefs: + - kind: Service + name: nexus + namespace: default +``` + +Once the Gateway resource is installed into Kubernetes, STUNner will create a Kubernetes LoadBalancer for the Gateway to expose the TURN server on UDP:3478 to clients. It can take up to a minute for Kubernetes to allocate a public external IP for the service. + +Wait until Kubernetes assigns an external IP and store the external IP assigned by Kubernetes to +STUNner in an environment variable for later use. + +```console +until [ -n "$(kubectl get svc udp-gateway -n stunner -o jsonpath='{.status.loadBalancer.ingress[0].ip}')" ]; do sleep 1; done +export STUNNERIP=$(kubectl get service udp-gateway -n stunner -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +``` +### Nexus Docker images + +The crucial step of integrating *any* WebRTC media server with STUNner is to ensure that the server instructs the clients to use STUNner as the STUN/TURN server. +Unfortunately, currently the [official Nexus Docker image](ghcr.io/elixir-webrtc/apps/nexus) does not support this configuration in runtime (by default Google's STUN server is hardcoded into it). +Therefore, we have to modify this setting to STUNner's IP and build a new Docker image. 
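Before rebuilding anything, it may help to double-check the public address and TURN credentials that STUNner advertises for this Gateway; a quick way to do that (a sketch using the `stunnerctl` tool shipped with STUNner, assuming the `udp-gateway` created above) is:

```console
# Dump the running dataplane config of the Gateway created above; the public
# address:port and the username/password shown here are what the client-side
# ICE configuration edited below must point to.
stunnerctl -n stunner config udp-gateway
```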
+ +In order to achieve this, first clone the Elixir WebRTC sample app repository: + +```console +git clone https://github.com/elixir-webrtc/apps/ +cd apps/nexus +``` + +You have to modify the ICE config to use STUNner with the given credentials in two files: + +`assets/js/home.js`: +``` +... +# const pcConfig = { iceServers: [{ urls: 'stun:stun.l.google.com:19302' }] }; + +# change to: + + const pcConfig = { iceServers: [{ urls: 'turn::3478?transport=udp', username: 'user-1', credential: 'pass-1'}], iceTransportPolicy: 'relay' }; +``` + +`lib/nexus/peer.ex`: +``` +... +# ice_servers: [%{urls: "stun:stun.l.google.com:19302"}], + +# change to: + + ice_servers: [%{urls: "turn::3478?transport=udp", "username": "user-1", "credential": "pass-1"}], + ice_transport_policy: :relay, +``` + +Now rebuild the Docker image, and push it into your image repository: +``` +export MYREPO=myrepo # use your own Docker repository name! +sudo docker build -t $MYREPO/nexus . +sudo docker push $MYREPO/nexus +``` + +After uploading the image, you also have to modify the Nexus image repo location in the Kubernetes deployment file. + +```console +sed -i "s/l7mp/$MYREPO/g" docs/examples/elixir-webrtc/nexus-server.yaml +``` + +We also need the Ingress external IP address we have stored previously: this will make sure that the TLS certificate created by cert-manager will be bound to the proper `nip.io` domain and IP address. + +```console +sed -i "s/ingressserviceip/$INGRESSIP/g" docs/examples/elixir-webrtc/nexus-server.yaml +``` + +Finally, fire up Nexus. + +```console +kubectl apply -f docs/examples/elixir-webrtc/nexus-server.yaml +``` + +The demo installation bundle includes a few resources to deploy Nexus: + +- Nexus deployment and service, +- a cluster issuer for the TLS certificates, +- an Ingress resource to terminate the secure connections between your browser and the Kubernetes cluster. + +Wait until all pods become operational and jump right into testing! + +## Test + +After installing everything, execute the following command to retrieve the URL of your freshly deployed Nexus demo app: + +```console +echo INGRESSIP.nip.io +``` + +Copy the URL into your browser, and if everything is set up correctly, you should be able to connect to a video room. If you repeat the procedure in a separate browser tab you can enjoy a nice video-conferencing session with yourself, with the twist that all media between the browser tabs is flowing through STUNner and the Nexus server deployed in you Kubernetes cluster. + +# Help + +STUNner development is coordinated in Discord, feel free to [join](https://discord.gg/DyPgEsbwzc). 
\ No newline at end of file diff --git a/docs/examples/elixir-webrtc/nexus-call-stunner.yaml b/docs/examples/elixir-webrtc/nexus-call-stunner.yaml new file mode 100644 index 00000000..ed052177 --- /dev/null +++ b/docs/examples/elixir-webrtc/nexus-call-stunner.yaml @@ -0,0 +1,51 @@ +apiVersion: gateway.networking.k8s.io/v1 +kind: GatewayClass +metadata: + name: stunner-gatewayclass +spec: + controllerName: "stunner.l7mp.io/gateway-operator" + parametersRef: + group: "stunner.l7mp.io" + kind: GatewayConfig + name: stunner-gatewayconfig + namespace: stunner + description: "STUNner is a WebRTC ingress gateway for Kubernetes" + +--- +apiVersion: stunner.l7mp.io/v1 +kind: GatewayConfig +metadata: + name: stunner-gatewayconfig + namespace: stunner +spec: + realm: stunner.l7mp.io + authType: static + userName: "user-1" + password: "pass-1" + +--- +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: udp-gateway + namespace: stunner +spec: + gatewayClassName: stunner-gatewayclass + listeners: + - name: udp-listener + port: 3478 + protocol: TURN-UDP +--- +apiVersion: stunner.l7mp.io/v1 +kind: UDPRoute +metadata: + name: nexus + namespace: stunner +spec: + parentRefs: + - name: udp-gateway + rules: + - backendRefs: + - kind: Service + name: nexus + namespace: default \ No newline at end of file diff --git a/docs/examples/elixir-webrtc/nexus-server.yaml b/docs/examples/elixir-webrtc/nexus-server.yaml new file mode 100644 index 00000000..dd0db130 --- /dev/null +++ b/docs/examples/elixir-webrtc/nexus-server.yaml @@ -0,0 +1,100 @@ +apiVersion: v1 +kind: Service +metadata: + name: nexus + labels: + app.kubernetes.io/name: nexus +spec: + type: ClusterIP + ports: + - port: 4000 + targetPort: 4000 + protocol: TCP + name: http + selector: + app.kubernetes.io/name: nexus +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nexus + labels: + app.kubernetes.io/name: nexus +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: nexus + template: + metadata: + labels: + app.kubernetes.io/name: nexus + spec: + terminationGracePeriodSeconds: 18000 # 5 hours + containers: + - name: nexus + image: "l7mp/l7mp/elixir-webrtc-nexus" + imagePullPolicy: IfNotPresent + env: + - name: SECRET_KEY_BASE + value: f0e01531666a6ccd5b15879a3c00a15f8feaaea86230cf35d96fe0b8e468bc553549e47edb737a4d1530d1c56a3a7a3827443e1bd00aa46adb44dc6aa087ff9f + - name: PHX_SERVER + value: "1" + - name: PHX_HOST + value: ingressserviceip.nip.io + - name: ADMIN_USERNAME + value: admin + - name: ADMIN_PASSWORD + value: admin + - name: ICE_PORT_RANGE + value: 62000-63000 + - name: NEXUS_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + ports: + - name: http + containerPort: 4000 + protocol: TCP +--- +# Ingress for both Nexus +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: nexus + annotations: + cert-manager.io/cluster-issuer: "letsencrypt-prod" + nginx.ingress.kubernetes.io/ssl-redirect: "true" +spec: + ingressClassName: nginx + tls: + - hosts: + - ingressserviceip.nip.io + secretName: nexus-secret-tls + rules: + - host: ingressserviceip.nip.io + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: nexus + port: + number: 4000 +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-prod +spec: + acme: + email: info@l7mp.io + server: https://acme-v02.api.letsencrypt.org/directory + privateKeySecretRef: + name: letsencrypt-secret-prod + solvers: + - http01: + ingress: + class: nginx +--- \ No newline at end of file diff 
--git a/docs/examples/janus/DOCKERFILE-janus-gateway b/docs/examples/janus/DOCKERFILE-janus-gateway new file mode 100644 index 00000000..08b214bc --- /dev/null +++ b/docs/examples/janus/DOCKERFILE-janus-gateway @@ -0,0 +1,123 @@ +FROM debian:bullseye-slim + +RUN apt-get -y update && \ + apt-get install -y \ + libavutil-dev \ + libavformat-dev \ + libavcodec-dev \ + libmicrohttpd-dev \ + libjansson-dev \ + libssl-dev \ + libsofia-sip-ua-dev \ + libglib2.0-dev \ + libopus-dev \ + libogg-dev \ + libcurl4-openssl-dev \ + liblua5.3-dev \ + libconfig-dev \ + libusrsctp-dev \ + libwebsockets-dev \ + libnanomsg-dev \ + librabbitmq-dev \ + pkg-config \ + gengetopt \ + libtool \ + automake \ + build-essential \ + wget \ + git \ + gtk-doc-tools && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + + +RUN cd /tmp && \ + wget https://github.com/cisco/libsrtp/archive/v2.3.0.tar.gz && \ + tar xfv v2.3.0.tar.gz && \ + cd libsrtp-2.3.0 && \ + ./configure --prefix=/usr --enable-openssl && \ + make shared_library && \ + make install + +RUN cd /tmp && \ + git clone https://gitlab.freedesktop.org/libnice/libnice && \ + cd libnice && \ + git checkout 0.1.17 && \ + ./autogen.sh && \ + ./configure --prefix=/usr && \ + make && \ + make install + +COPY . /usr/local/src/janus-gateway + +RUN cd /usr/local/src/janus-gateway && \ + sh autogen.sh && \ + ./configure --enable-post-processing --prefix=/usr/local && \ + make && \ + make install && \ + make configs + +FROM debian:bullseye-slim + +ARG BUILD_DATE="undefined" +ARG GIT_BRANCH="undefined" +ARG GIT_COMMIT="undefined" +ARG VERSION="undefined" + +LABEL build_date=${BUILD_DATE} +LABEL git_branch=${GIT_BRANCH} +LABEL git_commit=${GIT_COMMIT} +LABEL version=${VERSION} + +RUN apt-get -y update && \ + apt-get install -y \ + libmicrohttpd12 \ + libavutil-dev \ + libavformat-dev \ + libavcodec-dev \ + libjansson4 \ + libssl1.1 \ + libsofia-sip-ua0 \ + libglib2.0-0 \ + libopus0 \ + libogg0 \ + libcurl4 \ + liblua5.3-0 \ + libconfig9 \ + libusrsctp1 \ + libwebsockets16 \ + libnanomsg5 \ + librabbitmq4 && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +COPY --from=0 /usr/lib/libsrtp2.so.1 /usr/lib/libsrtp2.so.1 +RUN ln -s /usr/lib/libsrtp2.so.1 /usr/lib/libsrtp2.so + +COPY --from=0 /usr/lib/libnice.la /usr/lib/libnice.la +COPY --from=0 /usr/lib/libnice.so.10.10.0 /usr/lib/libnice.so.10.10.0 +RUN ln -s /usr/lib/libnice.so.10.10.0 /usr/lib/libnice.so.10 +RUN ln -s /usr/lib/libnice.so.10.10.0 /usr/lib/libnice.so + +COPY --from=0 /usr/local/bin/janus /usr/local/bin/janus +COPY --from=0 /usr/local/bin/janus-pp-rec /usr/local/bin/janus-pp-rec +COPY --from=0 /usr/local/bin/janus-cfgconv /usr/local/bin/janus-cfgconv +COPY --from=0 /usr/local/etc/janus /usr/local/etc/janus +COPY --from=0 /usr/local/lib/janus /usr/local/lib/janus +COPY --from=0 /usr/local/share/janus /usr/local/share/janus + +ENV BUILD_DATE=${BUILD_DATE} +ENV GIT_BRANCH=${GIT_BRANCH} +ENV GIT_COMMIT=${GIT_COMMIT} +ENV VERSION=${VERSION} + +EXPOSE 10000-10200/udp +EXPOSE 8188 +EXPOSE 8088 +EXPOSE 8089 +EXPOSE 8889 +EXPOSE 8000 +EXPOSE 7088 +EXPOSE 7089 + +CMD ["/usr/local/bin/janus"] \ No newline at end of file diff --git a/docs/examples/janus/DOCKERFILE-janus-web b/docs/examples/janus/DOCKERFILE-janus-web new file mode 100644 index 00000000..7aa5364f --- /dev/null +++ b/docs/examples/janus/DOCKERFILE-janus-web @@ -0,0 +1,2 @@ +FROM nginx:1.27.1 +COPY . 
/usr/share/nginx/html \ No newline at end of file diff --git a/docs/examples/janus/README.md b/docs/examples/janus/README.md new file mode 100644 index 00000000..8a128704 --- /dev/null +++ b/docs/examples/janus/README.md @@ -0,0 +1,158 @@ +# STUNner demo: Video-conferencing with Janus + +This document guides you through the installation of [Janus](https://janus.conf.meetecho.com/) by [Meetecho](https://www.meetecho.com/en/) into Kubernetes, when it is used together with the STUNner WebRTC media gateway. + +In this demo you will learn to: + +- integrate a typical WebRTC application with STUNner, +- obtain a valid TLS certificate to secure the signaling plane, +- deploy the Janus WebRTC server into Kubernetes, and +- configure STUNner to expose Janus to clients. + +## Prerequisites + +To run this example, you need: +* a [Kubernetes cluster](../../INSTALL.md#prerequisites), +* a [deployed STUNner](../../INSTALL.md#installation-1) (presumably the latest stable version), +* an [Ingress controller](../TLS.md#ingress) to ingest traffic into the cluster, +* a [Cert-manager](../TLS.md#cert-manager) to automate TLS certificate management. + +> [!NOTE] +> +> If you have your own TLS certificate, put it in a `Secret` [resource](https://kubernetes.io/docs/concepts/configuration/secret/) and deploy it into the `default` namespace under the `janus-web-secret-tls` name. + +## Description + +The recommended way (or at least the possible way, [link](https://janus.discourse.group/t/janus-with-kubernetes-demystifying-the-myths/938), [link](https://bugraoz93.medium.com/active-passive-highly-availability-janus-gateway-on-kubernetes-2189256e5525)) to install Janus into Kubernetes is deploying the media servers into the host-network namespace of the Kubernetes nodes (`hostNetwork: true`). This deployment model, however, comes with a set of uncanny [operational limitations and security concerns](../../WHY.md). Using STUNner, however, media servers can be deployed into ordinary Kubernetes pods and run over a private IP network, like any "normal" Kubernetes workload. + +The figure below shows Janus deployed into regular Kubernetes pods behind STUNner without the host-networking hack. Here, Janus is deployed behind STUNner in the [*media-plane deployment model*](../../DEPLOYMENT.md), so that STUNner acts as a "local" STUN/TURN server for Janus, saving the overhead of using public a 3rd party STUN/TURN server for NAT traversal. + +![STUNner Janus integration deployment architecture](../../img/stunner_janus_arch.svg) + +In this tutorial we deploy [Janus Gateway](https://github.com/meetecho/janus-gateway/tree/master) with a set of [preimplemented and packaged server plugins](https://janus.conf.meetecho.com/docs/pluginslist.html) for media exchange, a [Janus Web Demo](https://github.com/meetecho/janus-gateway/tree/master/html), a Kubernetes Ingress gateway to secure signaling connections and handle TLS, and STUNner as a media gateway to expose the Janus server pool to clients. + +### Docker images + +Janus does not come with an official Docker image; thus, we built one using a self-made Dockerfile based on the available documents in the official [Janus repository](https://github.com/meetecho/janus-gateway). Actually, we've made two Dockerfiles. One for the Janus Gateway server and one for the Janus Web Demos. The [Janus Gateway server Dockerfile](./DOCKERFILE-janus-gateway) should be ran in the root directory of the [Janus repository](https://github.com/meetecho/janus-gateway). 
The Janus Web Demos Dockerfile should be used in the `/html` directory of the [same repository](https://github.com/meetecho/janus-gateway/tree/master/html). The images (`l7mp/janus-gateway:v1.2.4` and `l7mp/janus-web:latest`) used in the following demo are hosted on Docker Hub under the L7MP organization. + +### STUNner + +Now comes the fun part. The simplest way to run this demo is to clone the [STUNner git repository](https://github.com/l7mp/stunner) and deploy (after some minor modifications) the [manifest](janus-server.yaml) packaged with STUNner. + +To install the stable version of STUNner, please follow the instructions in [this section](../../INSTALL.md#installation-1). + +Configure STUNner to act as a STUN/TURN server to clients, and route all received media to the Janus Gateway pods. + +```console +git clone https://github.com/l7mp/stunner +cd stunner +kubectl apply -f docs/examples/janus/janus-call-stunner.yaml +``` + +The relevant parts here are the STUNner [Gateway definition](../../GATEWAY.md#gateway), which exposes the STUNner STUN/TURN server over UDP:3478 to the Internet, and the [UDPRoute definition](../../GATEWAY.md#udproute), which takes care of routing media to the pods running the Janus Gateway service. + +```yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: udp-gateway + namespace: stunner +spec: + gatewayClassName: stunner-gatewayclass + listeners: + - name: udp-listener + port: 3478 + protocol: UDP +--- +apiVersion: stunner.l7mp.io/v1 +kind: UDPRoute +metadata: + name: janus + namespace: stunner +spec: + parentRefs: + - name: udp-gateway + rules: + - backendRefs: + - kind: Service + name: janus-gateway + namespace: default +``` + +Once the Gateway resource is installed into Kubernetes, STUNner will create a Kubernetes LoadBalancer for the Gateway to expose the TURN server on UDP:3478 to clients. It can take up to a minute for Kubernetes to allocate a public external IP for the service. + +Wait until Kubernetes assigns an external IP and store the external IP assigned by Kubernetes to +STUNner in an environment variable for later use. + +```console +until [ -n "$(kubectl get svc udp-gateway -n stunner -o jsonpath='{.status.loadBalancer.ingress[0].ip}')" ]; do sleep 1; done +export STUNNERIP=$(kubectl get service udp-gateway -n stunner -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +``` + +### Janus + +The crucial step of integrating *any* WebRTC media server with STUNner is to ensure that the server instructs the clients to use STUNner as the STUN/TURN server. In order to achieve this, first we patch the public IP address of the STUNner STUN/TURN server we have learned above into our Janus Web and Gateway deployment manifests: + +```console +sed -i "s/stunner_ip/$STUNNERIP/g" docs/examples/janus/janus-server.yaml +``` + +Janus Web tells the connected clients where to look for the Janus Gateway server and which ICE servers should be used for ICE negotiation. Assuming that Kubernetes assigns the IP address 1.2.3.4 to STUNner (i.e., `STUNNERIP=1.2.3.4`), the relevant part of the Janus Web config would be something like the below: + +```yaml +... + settings.js: | + var server = "wss://server-$INGRESSIP.nip.io" + var iceServers = [{urls: "turn:1.2.3.4:3478?transport=udp", username: "user-1", credential: "pass-1"}] +``` + +This will make sure that Janus Web tells the clients to use STUNner as the STUN/TURN server. 
If unsure about the STUNner settings to use, you can always use the handy [`stunnerctl` CLI tool](/cmd/stunnerctl/README.md) to dump the running STUNner configuration. + +``` console +stunnerctl -n stunner config udp-gateway +Gateway: stunner/udp-gateway (loglevel: "all:INFO") +Authentication type: static, username/password: user-1/pass-1 +Listeners: + - Name: stunner/udp-gateway/udp-listener + Protocol: TURN-UDP + Public address:port: 34.118.88.91:3478 + Routes: [stunner/janus] + Endpoints: [10.76.1.4, 10.80.4.47] +``` + +Note that Janus itself will not use STUNner as a TURN server (that would amount to a less efficient [symmetric ICE mode](../../DEPLOYMENT.md)); with the above configuration we are just telling Janus Web to instruct its clients to use STUNner to reach the Janus Gateway server. + +We also need the external IP address of the Ingress, stored in the `INGRESSIP` environment variable (see the [Ingress](../TLS.md#ingress) prerequisites on how to obtain it): this will make sure that the TLS certificate created by cert-manager will be bound to the proper `nip.io` domain and IP address. + +```console +sed -i "s/ingressserviceip/$INGRESSIP/g" docs/examples/janus/janus-server.yaml +``` + +Finally, fire up Janus. + +```console +kubectl apply -f docs/examples/janus/janus-server.yaml +``` + +The demo installation bundle includes a lot of resources to deploy Janus: + +- a Janus Gateway server, +- a web server serving the landing page using the [Janus Web Demos](https://github.com/meetecho/janus-gateway/tree/master/html), +- a cluster issuer for the TLS certificates, +- an Ingress resource to terminate the secure connections between your browser and the Kubernetes cluster. + +Wait until all pods become operational and jump right into testing! + +## Test + +After installing everything, execute the following command to retrieve the URL of your freshly deployed Janus demo app: + +```console +echo client-$INGRESSIP.nip.io +``` + +Copy the URL into your browser, and you should be greeted with the Janus About page. From the landing page navigate to the Video Call plugin demo (`/demos/videocall.html`), open the same URL in a second browser tab, register a different user in each tab, and call one user from the other. If everything is set up correctly, the call connects and you can enjoy a nice video-conferencing session with yourself, with the twist that all media between the browser tabs is flowing through STUNner and the Janus Gateway server deployed in your Kubernetes cluster. + +# Help + +STUNner development is coordinated in Discord, feel free to [join](https://discord.gg/DyPgEsbwzc).
\ No newline at end of file diff --git a/docs/examples/janus/janus-call-stunner.yaml b/docs/examples/janus/janus-call-stunner.yaml new file mode 100644 index 00000000..df5d4e09 --- /dev/null +++ b/docs/examples/janus/janus-call-stunner.yaml @@ -0,0 +1,51 @@ +apiVersion: gateway.networking.k8s.io/v1 +kind: GatewayClass +metadata: + name: stunner-gatewayclass +spec: + controllerName: "stunner.l7mp.io/gateway-operator" + parametersRef: + group: "stunner.l7mp.io" + kind: GatewayConfig + name: stunner-gatewayconfig + namespace: stunner + description: "STUNner is a WebRTC ingress gateway for Kubernetes" + +--- +apiVersion: stunner.l7mp.io/v1 +kind: GatewayConfig +metadata: + name: stunner-gatewayconfig + namespace: stunner +spec: + realm: stunner.l7mp.io + authType: static + userName: "user-1" + password: "pass-1" + +--- +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: udp-gateway + namespace: stunner +spec: + gatewayClassName: stunner-gatewayclass + listeners: + - name: udp-listener + port: 3478 + protocol: TURN-UDP +--- +apiVersion: stunner.l7mp.io/v1 +kind: UDPRoute +metadata: + name: janus + namespace: stunner +spec: + parentRefs: + - name: udp-gateway + rules: + - backendRefs: + - kind: Service + name: janus-gateway + namespace: default \ No newline at end of file diff --git a/docs/examples/janus/janus-server.yaml b/docs/examples/janus/janus-server.yaml new file mode 100644 index 00000000..1d6a6993 --- /dev/null +++ b/docs/examples/janus/janus-server.yaml @@ -0,0 +1,235 @@ +# Janus Gateway +apiVersion: v1 +kind: ConfigMap +metadata: + name: janus-gateway +data: + janus.jcfg: | + general: { + configs_folder = "@confdir@" + plugins_folder = "@plugindir@" + transports_folder = "@transportdir@" + events_folder = "@eventdir@" + loggers_folder = "@loggerdir@" + debug_level = 4 + admin_secret = "janusoverlord" + protected_folders = [ + "/bin", + "/boot", + "/dev", + "/etc", + "/initrd", + "/lib", + "/lib32", + "/lib64", + "/proc", + "/sbin", + "/sys", + "/usr", + "/var", + "/opt/janus/bin", + "/opt/janus/etc", + "/opt/janus/include", + "/opt/janus/lib", + "/opt/janus/lib32", + "/opt/janus/lib64", + "/opt/janus/sbin" + } + certificates: { + } + media: { + } + nat: { + nice_debug = false + ice_ignore_list = "vmnet" + plugins: { + + } + transports: { + + } + loggers: { + + } + events: { + } +--- +apiVersion: v1 +kind: Service +metadata: + name: janus-gateway + labels: + app.kubernetes.io/name: janus-gateway + app.kubernetes.io/instance: janus + app.kubernetes.io/version: "v1.2.4" +spec: + type: ClusterIP + ports: + - port: 8088 + targetPort: 8088 + protocol: TCP + name: http + - port: 8188 + targetPort: 8188 + protocol: TCP + name: websocket + selector: + app.kubernetes.io/name: janus-gateway + app.kubernetes.io/instance: janus +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: janus-gateway + labels: + app.kubernetes.io/name: janus-gateway + app.kubernetes.io/instance: janus + app.kubernetes.io/version: "v1.2.4" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: janus-gateway + app.kubernetes.io/instance: janus + template: + metadata: + labels: + app.kubernetes.io/name: janus-gateway + app.kubernetes.io/instance: janus + spec: + terminationGracePeriodSeconds: 18000 # 5 hours + containers: + - name: janus-gateway + image: "l7mp/janus-gateway:v1.2.4" + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 8088 + protocol: TCP + - name: websocket + containerPort: 8188 + protocol: TCP +--- +# Janus Web Demos +apiVersion: v1 
+kind: ConfigMap +metadata: + name: janus-web +data: + settings.js: | + var server = "wss://server-ingressserviceip.nip.io" + var iceServers = [{urls: "turn:stunner_ip:3478?transport=udp", username: "user-1", credential: "pass-1"}] +--- +apiVersion: v1 +kind: Service +metadata: + name: janus-web + labels: + app.kubernetes.io/name: janus-web + app.kubernetes.io/instance: janus-web + app.kubernetes.io/version: "v1.2.4" +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + selector: + app.kubernetes.io/name: janus-web + app.kubernetes.io/instance: janus-web +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: janus-web + labels: + app.kubernetes.io/name: janus-web + app.kubernetes.io/instance: janus-web + app.kubernetes.io/version: "v1.2.4" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: janus-web + app.kubernetes.io/instance: janus-web + template: + metadata: + labels: + app.kubernetes.io/name: janus-web + app.kubernetes.io/instance: janus-web + spec: + containers: + - name: janus-web + image: "l7mp/janus-web:latest" + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 80 + protocol: TCP + volumeMounts: + - name: config + mountPath: /usr/share/nginx/html/demos/settings.js + subPath: settings.js + volumes: + - name: config + configMap: + name: janus-web + items: + - key: settings.js + path: settings.js +--- +# Ingress for both Janus Gateway and Janus Web Demos +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: janus-web-demos + annotations: + cert-manager.io/cluster-issuer: "letsencrypt-prod" + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/cors-allow-origin: "*" + nginx.ingress.kubernetes.io/ssl-redirect: "true" +spec: + ingressClassName: nginx + tls: + - hosts: + - client-ingressserviceip.nip.io + - server-ingressserviceip.nip.io + secretName: janus-web-secret-tls + rules: + - host: server-ingressserviceip.nip.io + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: janus-gateway + port: + number: 8188 + - host: client-ingressserviceip.nip.io + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: janus-web + port: + number: 80 +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-prod +spec: + acme: + email: info@l7mp.io + server: https://acme-v02.api.letsencrypt.org/directory + privateKeySecretRef: + name: letsencrypt-secret-prod + solvers: + - http01: + ingress: + class: nginx +--- \ No newline at end of file diff --git a/docs/examples/jitsi/README.md b/docs/examples/jitsi/README.md index 742fd38f..aa779736 100644 --- a/docs/examples/jitsi/README.md +++ b/docs/examples/jitsi/README.md @@ -11,11 +11,17 @@ In this demo you will learn to: ## Prerequisites -The tutorial assumes a fresh STUNner installation; see the STUNner installation and configuration guide. Create a namespace called stunner if there is none. You need a WebRTC-compatible browser to run this tutorial. Basically any modern browser will do; we usually test our WebRTC applications with Firefox and Chrome. +To run this example, you need: +* a [Kubernetes cluster](../../INSTALL.md#prerequisites), +* a [deployed STUNner](../../INSTALL.md#installation-1) (presumably the latest stable version), +* an [Ingress controller](../TLS.md#ingress) to ingest traffic into the cluster, +* a [Cert-manager](../TLS.md#cert-manager) to automate TLS certificate management. 
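+
+Before moving on, it is worth double-checking that the prerequisites are in place. Assuming the stock `ingress-nginx` and `cert-manager` installs described in the linked guides (adjust the namespace if you installed cert-manager elsewhere), something like the below should show an `nginx` ingress class and running cert-manager pods:
+
+```console
+kubectl get ingressclasses.networking.k8s.io
+kubectl get pods -n cert-manager
+```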
-As a regrettable exception, Minikube is unfortunately not supported for this demo. The reason is that [Let's Encrypt certificate issuance is not available with nip.io](https://medium.com/@EmiiKhaos/there-is-no-possibility-that-you-can-get-lets-encrypt-certificate-with-nip-io-7483663e0c1b); late on you will learn more about why this is crucial above. +> [!NOTE] +> +> If you have your own TLS certificate, put it in a `Secret` [resource](https://kubernetes.io/docs/concepts/configuration/secret/) and deploy it into the `default` namespace under the `jitsi-secret` name. -## Setup +## Description The recommended way to install Jitsi into Kubernetes is deploying the media servers into the host-network namespace of the Kubernetes nodes (`hostNetwork: true`), or using a NodePort service or a dedicated Ingress to ingest WebRTC media traffic into the network. However, these options allow only one JVB instance per Kubernetes node and the host-network deployment model also comes with a set of uncanny [operational limitations and security concerns](../../WHY.md). Using STUNner, however, media servers can be deployed into ordinary Kubernetes pods and run over a private IP network, like any "normal" Kubernetes workload. @@ -25,79 +31,11 @@ The figure below shows Jitsi deployed into regular Kubernetes pods behind STUNne In this tutorial we deploy a video room example using the [Jitsi framework](https://jitsi.github.io/handbook/docs/architecture) for media exchange, a Kubernetes Ingress gateway to secure signaling connections and handle TLS, and STUNner as a media gateway to expose the Jitsi JVB to clients. -## Installation - -Let's start with a disclaimer. The Jitsi client example browser must work over a secure HTTPS connection, because [getUserMedia](https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#browser_compatibility) is available only in secure contexts. This implies that the client-server signaling connection must be secure too. Unfortunately, self-signed TLS certs will not work, so we have to come up with a way to provide our clients with a valid TLS cert. - -In the below example, STUNner will be installed into the identically named namespace, while Jitsi and the Ingress gateway will live in the default namespace. - -### TLS certificates - -As mentioned above, the Jitsi server will need a valid TLS cert, which means it must run behind an existing DNS domain name backed by a CA signed TLS certificate. This is simple if you have your own domain, but if you don't then [nip.io](https://nip.io/) provides a dead simple wildcard DNS for any IP address. We will use this to "own a domain" and obtain a CA signed certificate for Jitsi. This will allow us to point the domain name `client-.nip.io` to an ingress HTTP gateway in our Kubernetes cluster, which will then use some automation (namely, cert-manager) to obtain a valid CA signed cert. - -Note that public wildcard DNS domains might run into [rate limiting](https://letsencrypt.org/docs/rate-limits/) issues. If this occurs you can try [alternative services](https://moss.sh/free-wildcard-dns-services/) instead of nip.io. - -### Ingress - -The first step of obtaining a valid cert is to install a Kubernetes Ingress: this will be used during the validation of our certificates and to terminate client TLS encrypted contexts. - -Install an ingress controller into your cluster. We used the official [nginx ingress](https://github.com/kubernetes/ingress-nginx), but this is not required. 
- -```console -helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx -helm repo update -helm install ingress-nginx ingress-nginx/ingress-nginx -``` - -Wait until Kubernetes assigns an external IP to the Ingress. - -```console -until [ -n "$(kubectl get service ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}')" ]; do sleep 1; done -``` - -Store the Ingress IP address Kubernetes assigned to our Ingress; this will be needed later when we configure the validation pipeline for our TLS certs. - -```console -kubectl get service ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}' -export INGRESSIP=$(kubectl get service ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}') -export INGRESSIP=$(echo $INGRESSIP | sed 's/\./-/g') -``` - -### Cert manger - -We use the official [cert-manager](https://cert-manager.io) to automate TLS certificate management. - -First, install cert-manager's CRDs. - -```console -kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.8.0/cert-manager.crds.yaml -``` - -Then add the Helm repository, which contains the cert-manager Helm chart, and install the charts: - -```console -helm repo add cert-manager https://charts.jetstack.io -helm repo update -helm install my-cert-manager cert-manager/cert-manager \ - --create-namespace \ - --namespace cert-manager \ - --version v1.8.0 -``` - -At this point we have all the necessary boilerplate set up to automate TLS issuance for Jitsi. - ### STUNner -Now comes the fun part. The simplest way to run this demo is to clone the [STUNner git repository](https://github.com/l7mp/stunner) and deploy the [manifest](jitsi-server.yaml) packaged with STUNner. +Now comes the fun part. The simplest way to run this demo is to clone the [STUNner git repository](https://github.com/l7mp/stunner) and deploy (after some minor modifications) the [manifest](jitsi-server.yaml) packaged with STUNner. -Install the STUNner gateway operator and STUNner via [Helm](https://github.com/l7mp/stunner-helm): - -```console -helm repo add stunner https://l7mp.io/stunner -helm repo update -helm install stunner-gateway-operator stunner/stunner-gateway-operator --create-namespace --namespace=stunner-system -helm install stunner stunner/stunner --create-namespace --namespace=stunner -``` +To install the stable version of STUNner, please follow the instructions in [this section](../../INSTALL.md#installation-1). Configure STUNner to act as a STUN/TURN server to clients, and route all received media to the Jitsi server pods. @@ -110,16 +48,16 @@ kubectl apply -f docs/examples/jitsi/jitsi-call-stunner.yaml The relevant parts here are the STUNner [Gateway definition](../../GATEWAY.md), which exposes the STUNner STUN/TURN server over UDP:3478 to the Internet, and the [UDPRoute definition](../../GATEWAY.md), which takes care of routing media to the pods running the Jitsi service. Also, with the GatewayConfig object we set the `authType: longterm` parameter because Prosody can't use Plaintext authentication only long term. 
```yaml -apiVersion: stunner.l7mp.io/v1alpha1 +apiVersion: stunner.l7mp.io/v1 kind: GatewayConfig metadata: name: stunner-gatewayconfig namespace: stunner spec: - authType: longterm + authType: ephemeral sharedSecret: "my-shared-secret" --- -apiVersion: gateway.networking.k8s.io/v1beta1 +apiVersion: gateway.networking.k8s.io/v1 kind: Gateway metadata: name: udp-gateway @@ -129,9 +67,9 @@ spec: listeners: - name: udp-listener port: 3478 - protocol: UDP + protocol: TURN-UDP --- -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: stunner.l7mp.io/v1 kind: UDPRoute metadata: name: jitsi-media-plane @@ -142,6 +80,7 @@ spec: rules: - backendRefs: - name: jitsi-jvb + namespace: default ``` Once the Gateway resource is installed into Kubernetes, STUNner will create a Kubernetes LoadBalancer for the Gateway to expose the TURN server on UDP:3478 to clients. It can take up to a minute for Kubernetes to allocate a public external IP for the service. @@ -201,3 +140,7 @@ echo $INGRESSIP.nip.io ``` Copy the URL into your browser, and now you should be greeted with the Jitsi webpage. In the landing page you should create a room first. After you created a room you can set your username and join the room. On another page you have to open this page again and you should see the previously created room in the list. You only have to connect this room with another user. + +# Help + +STUNner development is coordinated in Discord, feel free to [join](https://discord.gg/DyPgEsbwzc). \ No newline at end of file diff --git a/docs/examples/jitsi/jitsi-call-stunner.yaml b/docs/examples/jitsi/jitsi-call-stunner.yaml index c72534e9..8ef8e935 100644 --- a/docs/examples/jitsi/jitsi-call-stunner.yaml +++ b/docs/examples/jitsi/jitsi-call-stunner.yaml @@ -1,4 +1,4 @@ -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: name: stunner-gatewayclass @@ -11,16 +11,16 @@ spec: namespace: stunner description: "STUNner is a WebRTC ingress gateway for Kubernetes" --- -apiVersion: stunner.l7mp.io/v1alpha1 +apiVersion: stunner.l7mp.io/v1 kind: GatewayConfig metadata: name: stunner-gatewayconfig namespace: stunner spec: - authType: longterm + authType: ephemeral sharedSecret: "my-shared-secret" --- -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: gateway.networking.k8s.io/v1 kind: Gateway metadata: name: udp-gateway @@ -30,9 +30,9 @@ spec: listeners: - name: udp-listener port: 3478 - protocol: UDP + protocol: TURN-UDP --- -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: stunner.l7mp.io/v1 kind: UDPRoute metadata: name: jitsi-media-plane @@ -43,4 +43,4 @@ spec: rules: - backendRefs: - name: jitsi-jvb - namespace: default \ No newline at end of file + namespace: default diff --git a/docs/examples/kurento-magic-mirror/README.md b/docs/examples/kurento-magic-mirror/README.md index 4ef29050..dbffe576 100644 --- a/docs/examples/kurento-magic-mirror/README.md +++ b/docs/examples/kurento-magic-mirror/README.md @@ -79,3 +79,7 @@ health-checks and load-balancing should just work as expected. ```console $ kubectl scale deployment kms --replicas=4 ``` + +# Help + +STUNner development is coordinated in Discord, feel free to [join](https://discord.gg/DyPgEsbwzc). 
\ No newline at end of file diff --git a/docs/examples/kurento-magic-mirror/kurento-magic-mirror-stunner.yaml b/docs/examples/kurento-magic-mirror/kurento-magic-mirror-stunner.yaml index 08d108dd..333f1e7c 100644 --- a/docs/examples/kurento-magic-mirror/kurento-magic-mirror-stunner.yaml +++ b/docs/examples/kurento-magic-mirror/kurento-magic-mirror-stunner.yaml @@ -1,4 +1,4 @@ -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: name: stunner-gatewayclass @@ -12,7 +12,7 @@ spec: description: "STUNner is a WebRTC ingress gateway for Kubernetes" --- -apiVersion: stunner.l7mp.io/v1alpha1 +apiVersion: stunner.l7mp.io/v1 kind: GatewayConfig metadata: name: stunner-gatewayconfig @@ -24,7 +24,7 @@ spec: password: "pass-1" --- -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: gateway.networking.k8s.io/v1 kind: Gateway metadata: name: udp-gateway @@ -37,7 +37,7 @@ spec: protocol: UDP --- -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: stunner.l7mp.io/v1 kind: UDPRoute metadata: name: media-plane diff --git a/docs/examples/kurento-one2one-call/README.md b/docs/examples/kurento-one2one-call/README.md index b90f6c49..bb5dc3a9 100644 --- a/docs/examples/kurento-one2one-call/README.md +++ b/docs/examples/kurento-one2one-call/README.md @@ -2,9 +2,7 @@ ## Media-plane mode: One to one video call with Kurento via STUNner -This tutorial demonstrates the *media-plane deployment model* of STUNner, that is, when WebRTC -clients connect to each other via the Kurento media server deployed into Kubernetes. The media -servers are exposed to clients via a STUNner gateway. +This tutorial demonstrates the *media-plane deployment model* of STUNner, that is, when WebRTC clients connect to each other via the Kurento media server deployed into Kubernetes. The media servers are exposed to clients via a STUNner gateway. In this demo you will learn how to: @@ -14,104 +12,37 @@ In this demo you will learn how to: - configure STUNner to expose Kurento to clients, and - update the STUN/TURN credentials used by STUNner to improve security. - ## Installation ### Prerequisites -The tutorial assumes a fresh STUNner installation; see the [STUNner installation and configuration -guide](../../INSTALL.md). Create a namespace called `stunner` if there is none. You need a -WebRTC-compatible browser to run this tutorial. Basically any modern browser will do; we usually -test our WebRTC applications with Firefox. +The tutorial assumes a fresh STUNner installation; see the [STUNner installation and configuration guide](../../INSTALL.md). Create a namespace called `stunner` if there is none. You need a WebRTC-compatible browser to run this tutorial. Basically any modern browser will do; we usually test our WebRTC applications with Firefox. ### Setup -The tutorial has been adopted from the [Kurento](https://www.kurento.org/) [one-to-one video call -tutorial](https://doc-kurento.readthedocs.io/en/latest/tutorials/node/tutorial-one2one.html), with -minimal -[modifications](https://github.com/l7mp/kurento-tutorial-node/tree/master/kurento-one2one-call) to -deploy it into Kubernetes and integrate it with STUNner. We will deploy a -[Node.js](https://nodejs.org) application server into Kubernetes for creating a browser-based -two-party WebRTC video-calls, the Kurento media server for media exchange and, potentially, -automatic audio/video transcoding, and configure STUNner to expose the Kurento server pool to -clients. 
+The tutorial has been adopted from the [Kurento](https://www.kurento.org/) [one-to-one video call tutorial](https://doc-kurento.readthedocs.io/en/latest/tutorials/node/tutorial-one2one.html), with minimal [modifications](https://github.com/l7mp/kurento-tutorial-node/tree/master/kurento-one2one-call) to deploy it into Kubernetes and integrate it with STUNner. We will deploy a [Node.js](https://nodejs.org) application server into Kubernetes for creating a browser-based two-party WebRTC video-calls, the Kurento media server for media exchange and, potentially, automatic audio/video transcoding, and configure STUNner to expose the Kurento server pool to clients. ![STUNner media-plane deployment architecture](../../img/stunner_kurento_one2one_arch.svg) ### Application server -The application server implements a simple JSON/WebSocket API two browser clients can call to -establish a two-party call. The caller and the callee will connect to each other via STUNner as -the TURN server, using the Kurento media server to mediate audio/video calls. - -As the first step, each client registers a unique username with the application server by sending a -`register` message, which the server acknowledges in a `registerResponse` message. To start a call, -the caller sets up a [WebRTC -PeerConnection](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection), generates an -SDP Offer, and sends it along to the application server in a `call` message. The application server -rings the callee in an `incomingCall` message. If accepting the call, the callee sets up a WebRTC -PeerConnection, generates an SDP Offer and sends it back the application server in an -`incomingCallResponse` message. - -At this point the application server has both party's SDP Offer, so the next step is to set up the -media pipeline in Kurento and process the SDP Offers through the media server. This is done by -opening a WebSocket connection to the URI `ws://kms.default.svc.cluster.local:8888/kurento` -(this is set on the server's command line in the Deployment -[manifest](kurento-one2one-call-server.yaml)): here, -`kms.default.svc.cluster.local` is the DNS name assigned by Kubernetes to the `kms` service -(recall, this is the service associated with the media server Deployment) and Kurento listens on -the TCP port 8888 for control connections. Note that this call gets load-balanced through the -Kubernetes CNI's service load-balancer so it will hit one random media server replica (if there are -more). This ensures that new calls are distributed evenly across media servers. - -The media server responds with an SDP Answer for both the callee and the caller, which the -application server immediately sends back to the appropriate clients: the caller receives the SDP -Answer in a `callResponse` message while the callee receives it in a `startCommunication` -message. Meanwhile, the caller and the callee generate local ICE candidates and send them over to -the application server, which forwards the candidates to Kurento. Kurento also generates a set of -ICE candidates, these will be passed by the application server to the clients. Note that the media -will flow between the clients and Kurento; the application server is there just to mediate the call -setup. Once the ICE process connects, the caller and the callee start to exchange audio/video -frames with Kurento via STUNner, until one of the parties sends a `stop` message to the application -server to terminate the call. 
For more info on the application server protocol, consult the -[Kurento -documentation](https://doc-kurento.readthedocs.io/en/latest/tutorials/node/tutorial-one2one.html). - -In order to start the ICE conversation using STUNner as the STUN/TURN server, the browsers will -need to learn an ICE server configuration from the application server with STUNner's external IP -addresses/ports and the required STUN/TURN credentials. This must happen *before* the -PeerConnection is created in the clients: once the PeerConnection is running we can no longer -change the ICE configuration. - -We solve this problem by (1) generating a new ICE configuration every time a new client registers -with the application server and (2) sending the ICE configuration back to the client in the -`regiterResponse` message. Note that this choice is suboptimal for time-locked STUNner -authentication modes (i.e., the `ephemeral` mode, see below), because clients' STUN/TURN -credentials might expire by the time they decide to connect. It is up to the application server -developer to make sure that clients' ICE server configuration is periodically updated. - -The default STUNner [install](/doc/INSTALL.md) contains a utility called the [STUNner -authentication service](https://github.com/l7mp/stunner-auth-service) that is purposed specifically -to generate ICE configurations for the application server. The service watches the running STUNner -configuration(s) from the Kubernetes API server and makes sure to generate STUN/TURN credentials -and ICE server configuration from the most recent STUNner config. - -The full application server code can be found -[here](https://github.com/l7mp/kurento-tutorial-node/tree/master/kurento-one2one-call); below we -summarize the most important steps needed to call the STUNner authentication service in the -application to generate an ICE config for each client. - -1. Define the address and the port of the STUNner authentication service as environment variables - for the application server pod. This will allow the application server to query the STUNner - authentication server for TURN credentials. By default, the authentication service is available - at the address `stunner-auth.stunner-system.svc.cluster.local` on port TCP 8088 over HTTP, these - defaults can be overridden using the `STUNNER_AUTH_ADDR` and `STUNNER_AUTH_PORT` environment - variables. The ICE configuration returned by the auth service will contain an URI for each - public STUNner Gateway: you can filter on particular Kubernetes namespaces, gateways or gateway - listeners by the `STUNNER_NAMESPACE`, `STUNNER_GATEWAY` and `STUNNER_LISTENER` environment - variables. Since WebRTC media will be ingested into he cluster over the UDP listener called - `udp-listener` on the STUNner Gateway called `udp-gateway` deployed into the `stunner` - namespace, the Kubernetes manifest for the application server will look like the below. +The application server implements a simple JSON/WebSocket API for two browser clients to establish a two-party call. The caller and the callee will connect to each other via STUNner as the TURN server, using the Kurento media server to mediate audio/video calls. + +As the first step, each client registers a unique username with the application server by sending a `register` message, which the server acknowledges in a `registerResponse` message. 
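+
+For illustration, the register exchange boils down to a pair of JSON messages on the signaling WebSocket. The sketch below follows the message format of the Kurento tutorial (the user name is an example, and in this demo the response will also piggy-back the ICE configuration, as shown later):
+
+```js
+// client -> application server
+{ "id": "register", "name": "alice" }
+// application server -> client
+{ "id": "registerResponse", "response": "accepted" }
+```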
To start a call, the caller sets up a [WebRTC PeerConnection](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection), generates an SDP Offer, and sends it along to the application server in a `call` message. The application server rings the callee in an `incomingCall` message. If accepting the call, the callee sets up a WebRTC PeerConnection, generates an SDP Offer and sends it back to the application server in an `incomingCallResponse` message. + +At this point the application server has both parties' SDP Offers, so the next step is to set up the media pipeline in Kurento and process the SDP Offers through the media server. This is done by opening a WebSocket connection to the URI `ws://kms.default.svc.cluster.local:8888/kurento` (this is set on the server's command line in the Deployment [manifest](kurento-one2one-call-server.yaml)): here, `kms.default.svc.cluster.local` is the DNS name assigned by Kubernetes to the `kms` service and Kurento listens on the TCP port 8888 for control connections. Note that this call gets load-balanced through the Kubernetes CNI's service load-balancer so it will hit one random media server replica (if there are more). This ensures that new calls are distributed evenly across media servers. + +The media server responds with an SDP Answer for both the callee and the caller, which the application server immediately sends back to the appropriate clients: the caller receives the SDP Answer in a `callResponse` message while the callee receives it in a `startCommunication` message. Meanwhile, the caller and the callee generate local ICE candidates and send them over to the application server, which forwards the candidates to Kurento. Kurento also generates a set of ICE candidates, which will be passed by the application server to the clients. Note that the media will flow between the clients and Kurento; the application server is there just to mediate the call setup. Once the ICE process connects, the caller and the callee start to exchange audio/video frames with Kurento via STUNner, until one of the parties sends a `stop` message to the application server to terminate the call. For more info on the application server protocol, consult the [Kurento documentation](https://doc-kurento.readthedocs.io/en/latest/tutorials/node/tutorial-one2one.html). + +In order to start the ICE conversation using STUNner as the STUN/TURN server, the browsers will need to learn an ICE server configuration from the application server with STUNner's external IP addresses/ports and STUN/TURN credentials. This must happen *before* the PeerConnection is created in the clients: once the PeerConnection is running we can no longer change the ICE configuration. + +We solve this problem by (1) generating a new ICE configuration every time a new client registers with the application server and (2) sending the ICE configuration back to the client in the `registerResponse` message. Note that this choice is suboptimal for [time-locked STUNner authentication modes](/doc/AUTH.md) (i.e., the `ephemeral` mode, see below), because clients' STUN/TURN credentials might expire by the time they decide to connect. It is up to the application server developer to make sure that clients' ICE server configuration is periodically updated. + +The default STUNner [install](/doc/INSTALL.md) contains a utility called the [STUNner authentication service](https://github.com/l7mp/stunner-auth-service) that is purposed specifically to generate ICE configurations for the application server.
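+
+For reference, the auth service exposes a plain HTTP API; a hedged sketch of such a query and the kind of response it yields is shown below (the endpoint and parameters are illustrative, see the [stunner-auth-service](https://github.com/l7mp/stunner-auth-service) docs for the exact API):
+
+```console
+curl "http://stunner-auth.stunner-system.svc.cluster.local:8088/ice?service=turn&namespace=stunner&gateway=udp-gateway"
+{"iceServers":[{"urls":["turn:34.118.112.176:3478?transport=udp"],"username":"user-1","credential":"pass-1"}],"iceTransportPolicy":"relay"}
+```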
The service watches the running STUNner configuration(s) from the Kubernetes API server and makes sure to generate STUN/TURN credentials and ICE server configuration from the most recent STUNner config. + +The full application server code can be found [here](https://github.com/l7mp/kurento-tutorial-node/tree/master/kurento-one2one-call); below we summarize the most important steps needed to call the STUNner authentication service in the application to generate an ICE config for each client. + +1. Define the address and the port of the STUNner authentication service as environment variables for the application server pod. This will allow the application server to query the STUNner authentication server for TURN credentials. By default, the authentication service is available at the address `stunner-auth.stunner-system.svc.cluster.local` on port TCP 8088 over HTTP; these defaults can be overridden using the `STUNNER_AUTH_ADDR` and `STUNNER_AUTH_PORT` environment variables. The ICE configuration returned by the auth service will contain a URI for each public STUNner Gateway: you can filter on particular Kubernetes namespaces, gateways or gateway listeners by the `STUNNER_NAMESPACE`, `STUNNER_GATEWAY` and `STUNNER_LISTENER` environment variables. Since WebRTC media will be ingested into the cluster over the UDP listener called `udp-listener` on the STUNner Gateway called `udp-gateway` deployed into the `stunner` namespace, the Kubernetes manifest for the application server will look like the below. ```yaml apiVersion: apps/v1 @@ -142,10 +73,7 @@ application to generate an ICE config for each client. [...] ``` -1. Modify the application server code to query the STUNner authentication server every time a a - valid ICE config in needed. In particular, the code will return the ICE configuration before - returning a `registerResponse` to the client, so that the generated ICE configuration can be - piggy-backed on the response message. +1. Modify the application server code to query the STUNner authentication server every time a valid ICE config is needed. In particular, the code will return the ICE configuration before returning a `registerResponse` to the client, so that the generated ICE configuration can be piggy-backed on the response message. ```js function register(id, name, ws, callback) { @@ -183,10 +111,7 @@ application to generate an ICE config for each client. } ``` -1. Next, modify the [client-side JavaScript - code](https://github.com/l7mp/kurento-tutorial-node/blob/master/kurento-one2one-call/static/js/index.js) - to parse the ICE configuration received from the application server from the `registerResponse` - message. +1. Next, modify the [client-side JavaScript code](https://github.com/l7mp/kurento-tutorial-node/blob/master/kurento-one2one-call/static/js/index.js) to parse the ICE configuration received from the application server from the `registerResponse` message. ```js var iceConfiguration; @@ -199,12 +124,7 @@ application to generate an ICE config for each client. } ``` -1. Then, every time the client calls the [PeerConnection - constructor](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection/RTCPeerConnection), - pass in the stored [ICE - configuration](https://developer.mozilla.org/en-US/docs/Web/API/RTCIceServer). Note that - `kurentoUtils.WebRtcPeer.WebRtcPeerSendrecv` is a small wrapper that makes it more convenient to - create PeerConnections with Kurento. +1.
Then, every time the client calls the [PeerConnection constructor](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection/RTCPeerConnection), pass in the stored [ICE configuration](https://developer.mozilla.org/en-US/docs/Web/API/RTCIceServer). Note that `kurentoUtils.WebRtcPeer.WebRtcPeerSendrecv` is a small wrapper that makes it more convenient to create PeerConnections with Kurento. ```js var options = { @@ -215,20 +135,13 @@ application to generate an ICE config for each client. webRtcPeer = kurentoUtils.WebRtcPeer.WebRtcPeerSendrecv(options, ...); ``` -You can build the application server container locally from the tutorial -[repo](https://github.com/l7mp/kurento-tutorial-node/tree/master/kurento-one2one-call), or you can -use the below manifest to fire up the prebuilt container image in a single step. This will deploy -the application server into the `stunner` namespace and exposes it in the Kubernetes LoadBalancer -service called `webrtc-server`. +You can build the application server container locally from the tutorial [repo](https://github.com/l7mp/kurento-tutorial-node/tree/master/kurento-one2one-call), or you can use the below manifest to fire up the prebuilt container image in a single step. This will deploy the application server into the `stunner` namespace and exposes it in the Kubernetes LoadBalancer service called `webrtc-server`. ```console kubectl apply -f docs/examples/kurento-one2one-call/kurento-one2one-call-server.yaml ``` -Note that we disable STUN/TURN in Kurento: STUNner will make sure that your media servers will not -need to use NAT traversal (despite running with a private IP) and everything should still work just -fine. Below is the corresponding snippet from the [manifest](kurento-one2one-call-server.yaml), which sets -Kurento's environment variables accordingly: +Note that we disable STUN/TURN in Kurento: STUNner will make sure that your media servers will not need to use NAT traversal (despite running with a private IP) and everything should still work just fine. Below is the corresponding snippet from the [manifest](kurento-one2one-call-server.yaml), which sets Kurento's environment variables accordingly: ```yaml spec: @@ -249,27 +162,20 @@ spec: [...] ``` -And that's all. We added roughly 10-20 lines of fairly trivial code to the Kurento demo to make it -work with STUNner, with most of the changes needed to return the public STUN/TURN URI and -credentials to clients. If you allocate STUNner to a stable IP and domain name, you don't even need -to modify *anything* in the demo and it will just work. +And that's all. We added roughly 10-20 lines of fairly trivial code to the Kurento demo to make it work with STUNner, with most of the changes needed to return the public STUN/TURN URI and credentials to clients. If you allocate STUNner to a stable IP and domain name, you don't even need to modify *anything* in the demo and it will just work. ### STUNner configuration -Next, we deploy STUNner into the Kubernetes. The manifest below will set up a minimal STUNner -gateway hierarchy to do just that: the setup includes a Gateway listener at UDP:3478 and a -UDPRoute to forward incoming calls into the cluster. +Next, we deploy STUNner into Kubernetes. The manifest below will set up a minimal STUNner gateway hierarchy to do just that: the setup includes a Gateway listener at UDP:3478 and a UDPRoute to forward incoming calls into the cluster. 
```console kubectl apply -f docs/examples/kurento-one2one-call/kurento-one2one-call-stunner.yaml ``` -The most important component in the STUNner configuration is the TURN Gateway: this will expose a -public TURN server on the UDP port 3478 through which clients will connect to the Kurento media -server pods. +The most important component in the STUNner configuration is the TURN Gateway: this will expose a public TURN server on the UDP port 3478 through which clients will connect to the Kurento media server pods. ```yaml -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: gateway.networking.k8s.io/v1 kind: Gateway metadata: name: udp-gateway @@ -279,25 +185,18 @@ spec: listeners: - name: udp-listener port: 3478 - protocol: UDP + protocol: TURN-UDP ``` -In order to realize the media-plane deployment model we set the `kms` service, which wraps the -Kurento media server deployment, as the target in the UDPRoute. Note that the target service lives -in another namespace (the UDPRoute is in `stunner` whereas the `kms` service is in the `default` -namespace), STUNner will still be able to forward connections (this is a small departure from the -[Kubernetes Gateway API](https://gateway-api.sigs.k8s.io) spec, which requires you to install a -TargetRef into the target namespace; currently STUNner ignores this for simplicity). The rest, that -is, cross-connecting the clients' media streams with Kurento's WebRTC endpoints, is just pure TURN -magic. +We set the `kms` service, which wraps the Kurento media server deployment, as the target in the UDPRoute. Note that the target service lives in another namespace (the UDPRoute is in `stunner` whereas the `kms` service is in the `default` namespace), STUNner will still be able to forward connections (this is a small departure from the [Kubernetes Gateway API](https://gateway-api.sigs.k8s.io) spec). The rest, that is, connecting clients' media streams to Kurento's WebRTC endpoints is just pure TURN magic. Below is the corresponding UDPRoute. ```yaml -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: stunner.l7mp.io/v1 kind: UDPRoute metadata: - name: stunner-headless + name: kms-media-plane namespace: stunner spec: parentRefs: @@ -311,172 +210,115 @@ spec: ### Check your configuration Check whether you have all the necessary objects installed into the `stunner` namespace. + ```console -kubectl get gatewayconfigs,gateways,udproutes -n stunner -NAME REALM AUTH AGE -gatewayconfig.stunner.l7mp.io/stunner-gatewayconfig stunner.l7mp.io plaintext 95m +kubectl get gatewayconfigs,gateways,udproutes.stunner.l7mp.io -n stunner +NAME REALM DATAPLANE AGE +gatewayconfig.stunner.l7mp.io/stunner-gatewayconfig stunner.l7mp.io default 84s -NAME CLASS ADDRESS READY AGE -gateway.gateway.networking.k8s.io/udp-gateway stunner-gatewayclass True 95m +NAME CLASS ADDRESS PROGRAMMED AGE +gateway.gateway.networking.k8s.io/udp-gateway stunner-gatewayclass 34.118.112.176 True 84s -NAME AGE -udproute.gateway.networking.k8s.io/kms-media-plane 95m +NAME AGE +udproute.stunner.l7mp.io/kms-media-plane 84s ``` -You can also use the handy `stunnerctl` CLI tool to dump the running STUNner configuration. +You can also use the handy CLI tool called [`stunnerctl`](/cmd/stunnerctl/README.md) to dump the running STUNner configuration. Make sure to issue `make build` first to build `stunnerctl`, along with a set of other handy STUNner utilities, in the `bin/` directory. 
```console -cmd/stunnerctl/stunnerctl running-config stunner/stunnerd-config -STUN/TURN authentication type: plaintext -STUN/TURN username: user-1 -STUN/TURN password: pass-1 -Listener 1 - Name: udp-listener - Listener: udp-listener - Protocol: UDP - Public address: 34.118.18.210 - Public port: 3478 +bin/stunnerctl -n stunner config udp-gateway +Gateway: stunner/udp-gateway (loglevel: "all:INFO") +Authentication type: static, username/password: user-1/pass-1 +Listeners: + - Name: stunner/udp-gateway/udp-listener + Protocol: TURN-UDP + Public address:port: 34.118.112.176:3478 + Routes: [stunner/kms-media-plane] + Endpoints: [10.76.1.4, 10.80.4.47] ``` ### Run the test -At this point, everything should be set up to make a video-call from your browser via -STUNner. Learn the external IP address Kubernetes assigned to the LoadBalancer service of the -application server. +At this point, everything should be set up to make a video-call from your browser via STUNner. Learn the external IP address Kubernetes assigned to the LoadBalancer service of the application server. ``` console -export WEBRTC_SERVER_IP=$(kubectl get service -n stunner webrtc-server -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +export WEBRTC_SERVER_IP=$(kubectl get service webrtc-server -o jsonpath='{.status.loadBalancer.ingress[0].ip}') ``` -Then, open `https://${WEBRTC_SERVER_IP}:8443` in your browser, accept the self-signed TLS certificate, -register a user, repeat this process in an another browser window using a different user name, then -call one user from the other and enjoy a nice video-conference with yourself. +Then, open `https://${WEBRTC_SERVER_IP}:8443` in your browser, accept the self-signed TLS certificate, register a user, repeat this process in another browser window using a different user name, then call one user from the other and enjoy a nice video-conference with yourself. ### What is going on here? -The HTML page served by the application server contains a handy console port, which allows to track -the call setup process. We use the logs from one of the clients to demonstrate call establishment -with STUNner. +The HTML page served by the application server contains a handy console port, which allows you to track the call setup process. We use the logs from one of the clients to demonstrate call establishment with STUNner. -- After registering with the application server, the console should show the content of the - `registerResponse` message. If all goes well, the response should show the ICE configuration - returned by the application server. The configuration should contain a TURN URI for the UDP - Gateway we have created in STUNner. In addition, the authentication credentials and the public - IP addresses and ports should match those in the output of `stunnerctl`. +- After registering with the application server, the console should show the content of the `registerResponse` message. If all goes well, the response should show the ICE configuration returned by the application server. The configuration should contain a TURN URI for the UDP Gateway we have created in STUNner. In addition, the authentication credentials and the public IP addresses and ports should match those in the output of `stunnerctl`.
```js { - "iceServers": [ - { - "url": "turn:34.118.18.210:3478?transport=UDP", - "username": "user-1", - "credential": "pass-1" - } - ], - "iceTransportPolicy": "relay" + "id": "registerResponse", + "response": "accepted", + "iceConfiguration": { + "iceServers": [ + { + "urls": ["turn:34.118.112.176:3478?transport=udp"], + "username": "user-1", + "credential": "pass-1" + } + ], + "iceTransportPolicy": "relay" + } } ``` -- Once bootstrapped with the above ICE server configuration, the browser will ask STUNner to open - a TURN transport relay connection for exchanging the video stream with Kurento and generates a - local ICE candidate for each relay connection it creates. Note that only TURN-relay candidates - are generated: host and server-reflexive candidates would not work with STUNner anyway. (This is - why we set the `iceTransportPolicy` to type `relay` in the ICE server configuration above.) - Locally generated ICE candidates are sent by the browser over to the application server, which - in turn passes them over verbatim to Kurento. +- Once bootstrapped with the above ICE server configuration, the browser will ask STUNner to open a TURN transport relay connection for exchanging the video stream with Kurento and generates a local ICE candidate for each relay connection it creates. Note that only TURN-relay candidates are generated: host and server-reflexive candidates would not work with STUNner anyway. (This is why we set the `iceTransportPolicy` to type `relay` in the ICE server configuration above.) Locally generated ICE candidates are sent by the browser over to the application server, which in turn passes them over verbatim to Kurento. ```console - Sending message: {[...] "candidate:0 1 UDP 91889663 10.116.1.42 51510 typ relay raddr 10.116.1.42 rport 51510" [...]} + Sending message: {[...] "candidate:0 1 UDP 92020735 10.76.0.19 49944 typ relay raddr 10.76.0.19 rport 49944" [...]} ``` - Observe that the ICE candidate contains a private IP address (`10.116.1.42` in this case) as the - TURN relay connection address: this just happens to be the IP address of the STUNner pod that - receives the TURN allocation request from the browser. + Observe that the ICE candidate contains a private IP address (`10.76.0.19` in this case) as the TURN relay connection address: this just happens to be the IP address of the STUNner pod that receives the TURN allocation request from the browser. -- The media server generates ICE candidates as well. Since we disabled STUN/TURN in Kurento, only - host-type ICE candidates are generated by the media server. These will be sent back to the - clients as remote ICE candidates. +- The media server generates ICE candidates as well. Since we disabled STUN/TURN in Kurento, only host-type ICE candidates are generated by the media server. These will be sent back to the clients as remote ICE candidates. ```console - Received message: {[...] "candidate:1 1 UDP 2015363327 10.116.2.44 17325 typ host" [...]} + Received message: {[...] "candidate:1 1 UDP 2015363327 10.76.0.17 13081 typ host" [...]} ``` - Observe that the ICE candidate again contains a private IP: in fact, `10.116.2.44` is the pod IP - address belonging to the Kurento media server instance that received the call setup request from - the application server. - -- Once ICE candidates are exchanged, both clients have a set of local and remote ICE candidates - they can start to probe for connectivity. 
Local candidates were obtained from STUNner, these are - all relay-candidates and contain a pod IP address as the transport relay address, and the remote - candidates were generated by the media server. These are of host-type and likewise contain a pod - IP address. Since in the Kubernetes networking model ["pods can communicate with all other pods - on any other node without NAT"](https://kubernetes.io/docs/concepts/services-networking), all - local-remote ICE candidate pairs will have direct connectivity and ICE connectivity check will - succeed on the first candidate pair! - -After connecting, video starts to flow between the each client and the media server via the -UDP/TURN connection opened by STUNner, and the media server can perform all audio- and -video-processing the tasks a media server is expected to perform. Note that browsers may be behind -any type of NAT: STUNner makes sure that whatever aggressive middlebox exists between itself and a -client, media traffic will still flow seamlessly. + Observe that the ICE candidate again contains a private IP: in fact, `10.76.0.17` is the pod IP address that belongs to the Kurento media server pod that received the call setup request from the application server. + +- Once ICE candidates are exchanged, both clients have a set of local and remote ICE candidates they can start to probe for connectivity. Local candidates were obtained from STUNner, these are all relay-candidates and contain a pod IP address as the transport relay address, and the remote candidates were generated by the media server. These are of host-type and likewise contain a pod IP address. Since in the Kubernetes networking model ["pods can communicate with all other pods on any other node without NAT"](https://kubernetes.io/docs/concepts/services-networking), all local-remote ICE candidate pairs will have direct connectivity and ICE connectivity check will succeed on the first candidate pair! + +After connecting, audio and video start to flow between the each client and the media server via the UDP/TURN connection opened by STUNner, and the media server cross connects the audio- and video streams. Note that browsers may be behind any type of NAT: STUNner makes sure that whatever aggressive middlebox exists between itself and a client, media traffic will still flow seamlessly. ### Troubleshooting -Like in any sufficiently complex application, there are lots of moving parts in a Kubernetes-based -WebRTC service and many things can go wrong. Below is a list of steps to help debugging WebRTC -applications with STUNner. - -* Cannot reach the application server: Make sure that the LoadBalancer IP is reachable and the TCP - port 8443 is available from your client. -* No ICE candidate appears: Most probably this occurs because the browser's ICE configuration does - not match the running STUNner config. Check that the ICE configuration returned by the - application server in the `registerResponse` message matches the output of `stunnerctl - running-config`. Examine the `stunner` pods' logs (`kubectl logs...`): permission-denied messages - typically indicate that STUN/TURN authentication was unsuccessful. -* No video-connection: This is most probably due to a communication issue between your client and - STUNner. Try disabling STUNner's UDP Gateway and force the browser to use TCP. -* Still no connection: follow the excellent [TURN troubleshooting - guide](https://www.giacomovacca.com/2022/05/troubleshooting-turn.html) to track down the - issue. 
Remember: your ultimate friends `tcpdump` and `Wireshark` are always there for you to - help! +Like in any sufficiently complex application, there are lots of moving parts in a Kubernetes-based WebRTC service and many things can go wrong. Below is a list of steps to help debugging WebRTC applications with STUNner. + +* Cannot reach the application server: Make sure that the LoadBalancer IP is reachable and the TCP port 8443 is available from your client. +* No ICE candidate appears: Most probably this occurs because the browser's ICE configuration does not match the running STUNner config. Check that the ICE configuration returned by the application server in the `registerResponse` message matches the output of `stunnerctl running-config`. Examine the `stunner` pods' logs (`kubectl logs...`): permission-denied messages typically indicate that STUN/TURN authentication was unsuccessful. +* No audio/video-connection: This is most probably due to a communication issue between your client and STUNner. Try disabling STUNner's UDP Gateway and force the browser to use TCP. +* Still no connection: follow the excellent [TURN troubleshooting guide](https://www.giacomovacca.com/2022/05/troubleshooting-turn.html) to track down the issue. Remember: your ultimate friends `tcpdump` and `Wireshark` are always there for you to help! ## Update STUN/TURN credentials -As exemplified by `stunnerctl` output, STUNner currently runs with fairly poor security: using -`static` authentication (note that `static` is an alias to the legacy `plaintext` authentication -type you see above), sharing a single username/password pair between all active sessions. +As shown in the `stunnerctl` output, STUNner currently runs with fairly poor security: using `static` authentication, sharing a single username/password pair between all active sessions. ``` console -cmd/stunnerctl/stunnerctl running-config stunner/stunnerd-config -STUN/TURN authentication type: plaintext -STUN/TURN username: user-1 -STUN/TURN password: pass-1 +bin/stunnerctl -n stunner config udp-gateway +Gateway: stunner/udp-gateway (loglevel: "all:INFO") +Authentication type: static, username/password: user-1/pass-1 ... ``` -Since static credentials are just what they are, static, it is easy to extract the STUN/TURN -credentials on the client side for potentially nefarious purposes. Note that attackers should not -be able to make too much harm with these credentials, since the only Kubernetes service they can -reach via STUNner is the Kurento media server pool. This is why we have installed the UDPRoute: -STUNner will allow clients to connect *only* to the backend service(s) of the UDPRoute, and nothing -else. Then, the attackers would need access to the application-server to open WebRTC endpoints on -the media server for their own purposes, but application servers should be secure by default no? - -In other words, *STUNner's default security model is exactly the same as if we put the application -servers and media servers on public-facing physical servers*. - -Still, it would be nice to use per-session passwords. STUNner allows you to do that, by changing -the authentication type to `ephemeral` (the legacy alias is `longterm`, but this is deprecated) -instead of `static`. Even better: STUNner's ephemeral TURN credentials are valid only for a -specified time (one day by default, but you can override this querying the [authentication -service](https://github.com/l7mp/stunner-auth-service)), after which they expire and attackers can -no longer reuse them. 
And to make things even better we don't even have to work too much to switch -STUNner to the `ephemeral` authentication mode: it is enough to update the GatewayConfig and -everything should happen from this point automagically. +Since static credentials are just what they are, static, it is easy to extract the STUN/TURN credentials on the client side for potentially nefarious purposes. Note that attackers should not be able to make too much harm with these credentials, since the only Kubernetes service they can reach via STUNner is the Kurento media server pool. This is why we have installed the UDPRoute: STUNner will allow clients to connect *only* to the backend service(s) of the UDPRoute, and nothing else. Then, attackers would need access to the application-server to open WebRTC endpoints on the media server for their own purposes, but media servers should be secure by default no? + +In other words, *STUNner's default security model is exactly the same as if we put the application servers and media servers on public-facing physical servers*. + +That being said, it would be nice to use per-session passwords. STUNner allows you to do that, by changing the authentication type to `ephemeral` instead of `static`. Even better: STUNner's ephemeral TURN credentials are valid only for a specified time (one day by default, but you can override this via the [authentication service](https://github.com/l7mp/stunner-auth-service)), after which they expire and attackers can no longer reuse them. To make things even better, we don't even have to work too much to switch STUNner to the `ephemeral` authentication mode: it is enough to update the GatewayConfig and everything should happen from this point automagically. ```console kubectl apply -f - <1.22). Most hosted or private Kubernetes cluster services will work, but make sure that the cluster comes with a functional load-balancer integration (all major hosted Kubernetes services should support this). Otherwise, STUNner will not be able to allocate a public IP address for clients to reach your WebRTC infra. As a regrettable exception, Minikube is unfortunately not supported for this demo. The reason is that [Let's Encrypt certificate issuance is not available with nip.io](https://medium.com/@EmiiKhaos/there-is-no-possibility-that-you-can-get-lets-encrypt-certificate-with-nip-io-7483663e0c1b); late on you will learn more about why this is crucial above. +To run this example, you need: +* a [Kubernetes cluster](../../INSTALL.md#prerequisites), +* a [deployed STUNner](../../INSTALL.md#installation-1) (presumably the latest stable version), +* an [Ingress controller](../TLS.md#ingress) to ingest traffic into the cluster, +* a [Cert-manager](../TLS.md#cert-manager) to automate TLS certificate management. -## Setup +> [!NOTE] +> +> If you have your own TLS certificate, put it in a `Secret` [resource](https://kubernetes.io/docs/concepts/configuration/secret/) and deploy it into the `default` namespace under the `livekit-secret-tls` name. + + +## Description The recommended way to install LiveKit into Kubernetes is deploying the media servers into the host-network namespace of the Kubernetes nodes (`hostNetwork: true`). This deployment model, however, comes with a set of uncanny [operational limitations and security concerns](../../WHY.md). Using STUNner, however, media servers can be deployed into ordinary Kubernetes pods and run over a private IP network, like any "normal" Kubernetes workload. 
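A short aside on what the `ephemeral` authentication type discussed above means in practice. Time-limited TURN credentials are customarily derived with the TURN "REST API" scheme: the username encodes an expiry timestamp and the password is a base64-encoded HMAC-SHA1 over that username, keyed with a shared secret, so a leaked credential becomes useless once the timestamp passes. The Go sketch below illustrates this general scheme only; it is not claimed to be STUNner's exact implementation (the authentication service referenced above is authoritative), and the secret and lifetime are placeholders.

```go
package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"fmt"
	"time"
)

// ephemeralCredential returns a time-limited TURN username/password pair in
// the customary TURN REST API style: username = expiry UNIX timestamp,
// password = base64(HMAC-SHA1(secret, username)).
func ephemeralCredential(secret string, lifetime time.Duration) (username, password string) {
	username = fmt.Sprintf("%d", time.Now().Add(lifetime).Unix())
	mac := hmac.New(sha1.New, []byte(secret))
	mac.Write([]byte(username))
	password = base64.StdEncoding.EncodeToString(mac.Sum(nil))
	return username, password
}

func main() {
	// "my-shared-secret" and the one-day lifetime are placeholders.
	u, p := ephemeralCredential("my-shared-secret", 24*time.Hour)
	fmt.Printf("username=%s password=%s\n", u, p)
}
```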
@@ -23,79 +32,10 @@ The figure below shows LiveKit deployed into regular Kubernetes pods behind STUN In this tutorial we deploy a video room example using [LiveKit's React SDK](https://github.com/livekit/livekit-react/tree/master/example), the [LiveKit server](https://github.com/livekit/livekit) for media exchange, a Kubernetes Ingress gateway to secure signaling connections and handle TLS, and STUNner as a media gateway to expose the LiveKit server pool to clients. -## Installation - -Let's start with a disclaimer. The LiveKit client example browser must work over a secure HTTPS connection, because [getUserMedia](https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#browser_compatibility) is available only in secure contexts. This implies that the client-server signaling connection must be secure too. Unfortunately, self-signed TLS certs [will not work](https://docs.livekit.io/deploy/#domain,-ssl-certificates,-and-load-balancer), so we have to come up with a way to provide our clients with a valid TLS cert. This will have the unfortunate consequence that the majority of the below installation guide will be about securing client connections to LiveKit over TLS; as it turns out, once HTTPS is correctly working integrating LiveKit with STUNner is very simple. - -In the below example, STUNner will be installed into the identically named namespace, while LiveKit and the Ingress gateway will live in the default namespace. - -### TLS certificates - -As mentioned above, the LiveKit server will need a valid TLS cert, which means it must run behind an existing DNS domain name backed by a CA signed TLS certificate. This is simple if you have your own domain, but if you don't then [nip.io](https://nip.io) provides a dead simple wildcard DNS for any IP address. We will use this to "own a domain" and obtain a CA signed certificate for LiveKit. This will allow us to point the domain name `client-.nip.io` to an ingress HTTP gateway in our Kubernetes cluster, which will then use some automation (namely, cert-manager) to obtain a valid CA signed cert. - -Note that public wildcard DNS domains might run into [rate limiting](https://letsencrypt.org/docs/rate-limits/) issues. If this occurs you can try [alternative services](https://moss.sh/free-wildcard-dns-services/) instead of `nip.io`. - -### Ingress - -The first step of obtaining a valid cert is to install a Kubernetes Ingress: this will be used during the validation of our certificates and to terminate client TLS encrypted contexts. - -Install an ingress controller into your cluster. We used the official [nginx ingress](https://github.com/kubernetes/ingress-nginx), but this is not required. - -```console -helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx -helm repo update -helm install ingress-nginx ingress-nginx/ingress-nginx -``` - -Wait until Kubernetes assigns an external IP to the Ingress. - -```console -until [ -n "$(kubectl get service ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}')" ]; do sleep 1; done -``` - -Store the Ingress IP address Kubernetes assigned to our Ingress; this will be needed later when we configure the validation pipeline for our TLS certs. 
- -```console -kubectl get service ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}' -export INGRESSIP=$(kubectl get service ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}') -export INGRESSIP=$(echo $INGRESSIP | sed 's/\./-/g') -``` - -### Cert manager - -We use the official [cert-manager](https://cert-manager.io) to automate TLS certificate management. - -First, install cert-manager's CRDs. - -```console -kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.8.0/cert-manager.crds.yaml -``` - -Then add the Helm repository, which contains the cert-manager Helm chart, and install the charts: - -```console -helm repo add cert-manager https://charts.jetstack.io -helm repo update -helm install my-cert-manager cert-manager/cert-manager \ - --create-namespace \ - --namespace cert-manager \ - --version v1.8.0 -``` - -At this point we have all the necessary boilerplate set up to automate TLS issuance for LiveKit. ### STUNner -Now comes the fun part. The simplest way to run this demo is to clone the [STUNner git repository](https://github.com/l7mp/stunner) and deploy the [manifest](livekit-server.yaml) packaged with STUNner. - -Install the STUNner gateway operator and STUNner via [Helm](https://github.com/l7mp/stunner-helm): - -```console -helm repo add stunner https://l7mp.io/stunner -helm repo update -helm install stunner-gateway-operator stunner/stunner-gateway-operator --create-namespace --namespace=stunner-system -helm install stunner stunner/stunner --create-namespace --namespace=stunner -``` +Now comes the fun part. The simplest way to run this demo is to clone the [STUNner git repository](https://github.com/l7mp/stunner) and deploy (after some minor modifications) the [manifest](livekit-server.yaml) packaged with STUNner. Configure STUNner to act as a STUN/TURN server to clients, and route all received media to the LiveKit server pods. @@ -108,7 +48,7 @@ kubectl apply -f docs/examples/livekit/livekit-call-stunner.yaml The relevant parts here are the STUNner [Gateway definition](../../GATEWAY.md), which exposes the STUNner STUN/TURN server over UDP:3478 to the Internet, and the [UDPRoute definition](../../GATEWAY.md), which takes care of routing media to the pods running the LiveKit service. ```yaml -apiVersion: gateway.networking.k8s.io/v1beta1 +apiVersion: gateway.networking.k8s.io/v1 kind: Gateway metadata: name: udp-gateway @@ -120,7 +60,7 @@ spec: port: 3478 protocol: UDP --- -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: stunner.l7mp.io/v1 kind: UDPRoute metadata: name: livekit-media-plane @@ -165,20 +105,18 @@ rtc: port: 3478 ``` -This will make sure that LiveKit is started with STUNner as the STUN/TURN server. If unsure about the STUNner settings to use, you can always use the handy `stunnerctl` CLI tool to dump the running STUNner configuration. +This will make sure that LiveKit is started with STUNner as the STUN/TURN server. If unsure about the STUNner settings to use, you can always use the handy [`stunnerctl` CLI tool](/cmd/stunnerctl/README.md) to dump the running STUNner configuration. 
``` console -cd stunner -cmd/stunnerctl/stunnerctl running-config default/stunnerd-config -STUN/TURN authentication type: plaintext -STUN/TURN username: user-1 -STUN/TURN password: pass-1 -Listener 1 - Name: udp-listener - Listener: udp-listener - Protocol: UDP - Public address: 1.2.3.4 - Public port: 3478 +stunnerctl -n stunner config udp-gateway +Gateway: stunner/udp-gateway (loglevel: "all:INFO") +Authentication type: static, username/password: user-1/pass-1 +Listeners: + - Name: stunner/udp-gateway/udp-listener + Protocol: TURN-UDP + Public address:port: 34.118.88.91:3478 + Routes: [stunner/iperf-server] + Endpoints: [10.76.1.4, 10.80.4.47] ``` Note that LiveKit itself will not use STUNner (that would amount to a less efficient [symmetric ICE mode](../../DEPLOYMENT.md)); with the above configuration we are just telling LiveKit to instruct its clients to use STUNner to reach the LiveKit media servers. @@ -229,4 +167,8 @@ livekit-cli create-token \ --valid-for 24h ``` -Copy the access token into the token field and hit the Connect button. If everything is set up correctly, you should be able to connect to a room. If you repeat the procedure in a separate browser tab you can enjoy a nice video-conferencing session with yourself, with the twist that all media between the browser tabs is flowing through STUNner and the LiveKit-server deployed in you Kubernetes cluster. \ No newline at end of file +Copy the access token into the token field and hit the Connect button. If everything is set up correctly, you should be able to connect to a room. If you repeat the procedure in a separate browser tab you can enjoy a nice video-conferencing session with yourself, with the twist that all media between the browser tabs is flowing through STUNner and the LiveKit-server deployed in you Kubernetes cluster. + +# Help + +STUNner development is coordinated in Discord, feel free to [join](https://discord.gg/DyPgEsbwzc). 
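One practical detail worth calling out from the LiveKit configuration: the bundled `livekit-server.yaml` manifest refers to the TURN server through the literal `stunner_ip` placeholder, which has to be replaced with the Gateway's public address before the manifest is applied, much like the `ingressserviceip` substitution used elsewhere in these examples. If you prefer code over a `sed` one-liner, a minimal Go equivalent could look like the sketch below; the file path and placeholder name come from the manifest, everything else is an assumption.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// Rewrites the stunner_ip placeholder in a manifest to the Gateway's public
// IP, the programmatic equivalent of a sed substitution.
// Usage: go run . docs/examples/livekit/livekit-server.yaml 34.118.88.91
func main() {
	if len(os.Args) != 3 {
		fmt.Fprintln(os.Stderr, "usage: subst <manifest.yaml> <stunner-public-ip>")
		os.Exit(1)
	}
	path, ip := os.Args[1], os.Args[2]

	raw, err := os.ReadFile(path)
	if err != nil {
		panic(err)
	}
	out := strings.ReplaceAll(string(raw), "stunner_ip", ip)
	if err := os.WriteFile(path, []byte(out), 0o644); err != nil {
		panic(err)
	}
	fmt.Printf("rewrote %s with TURN address %s\n", path, ip)
}
```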
\ No newline at end of file diff --git a/docs/examples/livekit/livekit-call-stunner.yaml b/docs/examples/livekit/livekit-call-stunner.yaml index f99aa44f..c168a3c7 100644 --- a/docs/examples/livekit/livekit-call-stunner.yaml +++ b/docs/examples/livekit/livekit-call-stunner.yaml @@ -1,4 +1,4 @@ -apiVersion: gateway.networking.k8s.io/v1beta1 +apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: name: stunner-gatewayclass @@ -12,19 +12,19 @@ spec: description: "STUNner is a WebRTC ingress gateway for Kubernetes" --- -apiVersion: stunner.l7mp.io/v1alpha1 +apiVersion: stunner.l7mp.io/v1 kind: GatewayConfig metadata: name: stunner-gatewayconfig namespace: stunner spec: realm: stunner.l7mp.io - authType: plaintext + authType: static userName: "user-1" password: "pass-1" --- -apiVersion: gateway.networking.k8s.io/v1beta1 +apiVersion: gateway.networking.k8s.io/v1 kind: Gateway metadata: name: udp-gateway @@ -34,9 +34,9 @@ spec: listeners: - name: udp-listener port: 3478 - protocol: UDP + protocol: TURN-UDP --- -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: stunner.l7mp.io/v1 kind: UDPRoute metadata: name: livekit-media-plane @@ -47,4 +47,4 @@ spec: rules: - backendRefs: - name: livekit-server - namespace: default \ No newline at end of file + namespace: default diff --git a/docs/examples/livekit/livekit-server.yaml b/docs/examples/livekit/livekit-server.yaml index cbbd9ba6..08cebbba 100644 --- a/docs/examples/livekit/livekit-server.yaml +++ b/docs/examples/livekit/livekit-server.yaml @@ -10,7 +10,7 @@ spec: serviceName: redis replicas: 1 selector: - matchLabels: + matchLabels: app: redis template: metadata: @@ -85,7 +85,7 @@ data: port_range_start: 50000 tcp_port: 7801 stun_servers: - - stunner_ip:3478 + - stunner_ip:3478 turn_servers: - credential: pass-1 host: stunner_ip @@ -104,7 +104,7 @@ metadata: labels: app.kubernetes.io/name: livekit-server app.kubernetes.io/instance: livekit - app.kubernetes.io/version: "v1.4.2" + app.kubernetes.io/version: "v1.8.0" spec: type: LoadBalancer ports: @@ -127,7 +127,7 @@ metadata: labels: app.kubernetes.io/name: livekit-server app.kubernetes.io/instance: livekit - app.kubernetes.io/version: "v1.4.2" + app.kubernetes.io/version: "v1.8.0" spec: replicas: 1 selector: @@ -147,7 +147,7 @@ spec: terminationGracePeriodSeconds: 18000 # 5 hours containers: - name: livekit-server - image: "livekit/livekit-server:v1.4.2" + image: "livekit/livekit-server:v1.8.0" imagePullPolicy: IfNotPresent args: ["--disable-strict-config"] env: @@ -213,10 +213,10 @@ metadata: name: tls-example-ingress namespace: default annotations: - kubernetes.io/ingress.class: "nginx" + # kubernetes.io/ingress.class: "nginx" cert-manager.io/cluster-issuer: "letsencrypt-prod" spec: - # ingressClassName: nginx + ingressClassName: nginx tls: - hosts: - client-ingressserviceip.nip.io @@ -258,4 +258,4 @@ spec: - http01: ingress: class: nginx ---- \ No newline at end of file +--- diff --git a/docs/examples/mediasoup/README.md b/docs/examples/mediasoup/README.md new file mode 100644 index 00000000..f3e6bcf8 --- /dev/null +++ b/docs/examples/mediasoup/README.md @@ -0,0 +1,163 @@ +# STUNner demo: Video-conferencing with mediasoup + +This document guides you through the installation of [mediasoup](https://mediasoup.org/) into Kubernetes, when it is used together with the STUNner WebRTC media gateway. 
+ +In this demo you will learn to: + +- integrate a typical WebRTC application with STUNner, +- obtain a valid TLS certificate to secure the signaling plane, +- deploy the mediasoup server into Kubernetes, and +- configure STUNner to expose mediasoup to clients. + +## Prerequisites + +To run this example, you need: +* a [Kubernetes cluster](../../INSTALL.md#prerequisites), +* a [deployed STUNner](../../INSTALL.md#installation-1) (presumably the latest stable version), +* an [Ingress controller](../TLS.md#ingress) to ingest traffic into the cluster, +* a [Cert-manager](../TLS.md#cert-manager) to automate TLS certificate management. + +> [!NOTE] +> +> If you have your own TLS certificate, put it in a `Secret` [resource](https://kubernetes.io/docs/concepts/configuration/secret/) and deploy it into the `default` namespace under the `mediasoup-demo-tls` name. + + +## Description + +The recommended way to install mediasoup ([link](https://mediasoup.discourse.group/t/server-in-kubernetes-with-turn/3434),[link](https://www.reddit.com/r/kubernetes/comments/sdkhwn/deploying_mediasoup_webrtc_sfu_in_kubernetes/)) into Kubernetes is deploying the media servers into the host-network namespace of the Kubernetes nodes (`hostNetwork: true`). This deployment model, however, comes with a set of uncanny [operational limitations and security concerns](../../WHY.md). Using STUNner, however, media servers can be deployed into ordinary Kubernetes pods and run over a private IP network, like any "normal" Kubernetes workload. + +The figure below shows mediasoup deployed into regular Kubernetes pods behind STUNner without the host-networking hack. Here, mediasoup is deployed behind STUNner in the [*media-plane deployment model*](../../DEPLOYMENT.md), so that STUNner acts as a "local" STUN/TURN server for mediasoup, saving the overhead of using public a 3rd party STUN/TURN server for NAT traversal. + +![STUNner mediasoup integration deployment architecture](../../img/stunner_mediasoup.svg) + +In this tutorial we deploy a video room example using [mediasoup's demo application](https://github.com/versatica/mediasoup-demo/) with slight modifications (more on these below), the [mediasoup server](https://github.com/versatica/mediasoup/) for media exchange, a Kubernetes Ingress gateway to secure signaling connections and handle TLS, and STUNner as a media gateway to expose the mediasoup server pool to clients. + +### Modifications on the mediasoup demo + +Below are the modification that has been done starting from [mediasoup-demo](https://github.com/versatica/mediasoup-demo/): + +- Added a multistage Dockerfile + + - stage 0: run gulp dist to create the frontend app file (they will be served by nodejs from the backend) + - stage 1: build the image for the mediasoup-server and copy the mediasoup-client file + +- Added a simple script that gathers the internal/private IP of the running pod, this is not foolproof, however, +with an additional environment variable we can load the pod's private IP into the code + +- Added the following in server.js in the function "async function createExpressApp()" to serve the mediasoup-client file + +``` +147: expressApp.use(express.static('public')) +``` + +- Added the parsing of url parameters to configure TURN server and a simple if/else in server/app/lib/RoomClient.js. The mediasoup clients will use the configured TURN server to gather the ICE candidates. 
Example: https://mediasoup-demo.example.com/?enableIceServer=yes&iceServerHost=100.100.100.100&iceServerPort=3478&iceServerProto=udp&iceServerUser=user-1&iceServerPass=pass-1 + +### STUNner + +Now comes the fun part. The simplest way to run this demo is to clone the [STUNner git repository](https://github.com/l7mp/stunner) and deploy (after some minor modifications) the [manifest](mediasoup-server.yaml) packaged with STUNner. + +To install the stable version of STUNner, please follow the instructions in [this section](../../INSTALL.md#installation-1). + +Configure STUNner to act as a STUN/TURN server to clients, and route all received media to the mediasoup server pods. + +```console +git clone https://github.com/l7mp/stunner +cd stunner +kubectl apply -f docs/examples/mediasoup/mediasoup-call-stunner.yaml +``` + +The relevant parts here are the STUNner [Gateway definition](../../GATEWAY.md), which exposes the STUNner STUN/TURN server over UDP:3478 to the Internet, and the [UDPRoute definition](../../GATEWAY.md), which takes care of routing media to the pods running behind the `mediasoup-server` service. + +```yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: udp-gateway + namespace: stunner +spec: + gatewayClassName: stunner-gatewayclass + listeners: + - name: udp-listener + port: 3478 + protocol: TURN-UDP +--- +apiVersion: stunner.l7mp.io/v1 +kind: UDPRoute +metadata: + name: mediasoup-media-plane + namespace: stunner +spec: + parentRefs: + - name: udp-gateway + rules: + - backendRefs: + - group: "" + kind: Service + name: mediasoup-server + namespace: mediasoup +``` + +Once the Gateway resource is installed into Kubernetes, STUNner will create a Kubernetes LoadBalancer for the Gateway to expose the TURN server on UDP:3478 to clients. It can take up to a minute for Kubernetes to allocate a public external IP for the service. + +Wait until Kubernetes assigns an external IP and store the external IP assigned by Kubernetes to +STUNner in an environment variable for later use. + +```console +until [ -n "$(kubectl get svc udp-gateway -n stunner-system -o jsonpath='{.status.loadBalancer.ingress[0].ip}')" ]; do sleep 1; done +export STUNNERIP=$(kubectl get service udp-gateway -n stunner-system -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +``` + +### mediasoup + +The crucial step of integrating *any* WebRTC media server with STUNner is to ensure that the server instructs the clients to use STUNner as the STUN/TURN server. However, there is a slight issue. In this deployment it's not the server that instructs the clients to use STUNner but the user itself. Obviously, it is not the optimal way but for the sake of the demo purpose we keep it that way. In case anyone would want to create a production ready deployment, they would need to add extra capabilities to the mediasoup server: +- first to make sure turn servers can be configured in the server's config.js file +- second to make sure that clients can fetch (or get automatically) the configured turn servers from the mediasoup server + +We need the Ingress external IP address we have stored previously: this will make sure that the TLS certificate created by cert-manager will be bound to the proper `nip.io` domain and IP address. + +```console +sed -i "s/ingressserviceip/$INGRESSIP/g" docs/examples/mediasoup/mediasoup-server.yaml +``` + +Finally, fire up mediasoup. 
+ +```console +kubectl create ns mediasoup +kubectl apply -f docs/examples/mediasoup/mediasoup-server.yaml +``` + +The demo installation bundle includes a lot of resources to deploy mediasoup: + +- a mediasoup-server, +- an application server serving the landing page using [mediasoup-demo](https://github.com/versatica/mediasoup-demo/) +- a cluster issuer for the TLS certificates, +- an Ingress resource to terminate the secure connections between your browser and the Kubernetes cluster. + +Wait until all pods become operational and jump right into testing! + +## Test + +After installing everything, execute the following command to retrieve the URL of your fresh mediasoup demo app: + +```console +echo "https://mediasoup-$INGRESSIP.nip.io:443?enableIceServer=yes&iceServerHost=$STUNNERIP&iceServerPort=3478&iceServerProto=udp&iceServerUser=user-1&iceServerPass=pass-1" +``` + +Opening the output in a browser should get the mediasoup client demo app + +In case you changed something additionally in the STUNner configuration during deployment watch out for the URL parameters: + - `enableIceServer` must be `yes` in order to use STUNner as a TURN server + - `iceServerHost` should point to the public IP that was allocated for the STUNner load balancer service + - `iceServerPort` is the port of your TURN server configured in the Gateway resource + - `iceServerProto` is the expected protocol on the port configured above + - `iceServerUser` is the username used for authentication in STUNner + - `iceServerPass` is the credential used for authentication in STUNner + + +## Help + +STUNner development is coordinated in Discord, feel free to [join](https://discord.gg/DyPgEsbwzc). + +## Acknowledgments + +This demo is adopted from [damhau/mediasoup-demo-docker](https://github.com/damhau/mediasoup-demo-docker). Huge thanks to @damhau for the great demo! 
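Since the query parameters listed above fully determine how the modified demo client reaches STUNner, the test URL can also be assembled programmatically rather than with `echo`. The sketch below mirrors that command; the Ingress host, the STUNner IP and the credentials are placeholders that must match your Ingress and GatewayConfig.

```go
package main

import (
	"fmt"
	"net/url"
)

// buildDemoURL assembles the mediasoup demo URL with the ICE server query
// parameters understood by the modified client (see the list above).
func buildDemoURL(ingressHost, stunnerIP, user, pass string) string {
	q := url.Values{}
	q.Set("enableIceServer", "yes")
	q.Set("iceServerHost", stunnerIP)
	q.Set("iceServerPort", "3478")
	q.Set("iceServerProto", "udp")
	q.Set("iceServerUser", user)
	q.Set("iceServerPass", pass)

	u := url.URL{Scheme: "https", Host: ingressHost, RawQuery: q.Encode()}
	return u.String()
}

func main() {
	// Placeholders: the dashed-IP nip.io host of your Ingress and the external
	// IP of the udp-gateway LoadBalancer service.
	fmt.Println(buildDemoURL("mediasoup-1-2-3-4.nip.io", "5.6.7.8", "user-1", "pass-1"))
}
```

Note that the parameter names must be spelled exactly as the modified client expects them.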
diff --git a/docs/examples/mediasoup/mediasoup-call-stunner.yaml b/docs/examples/mediasoup/mediasoup-call-stunner.yaml new file mode 100644 index 00000000..2e16f70b --- /dev/null +++ b/docs/examples/mediasoup/mediasoup-call-stunner.yaml @@ -0,0 +1,52 @@ +--- +apiVersion: gateway.networking.k8s.io/v1 +kind: GatewayClass +metadata: + name: stunner-gatewayclass +spec: + controllerName: "stunner.l7mp.io/gateway-operator" + parametersRef: + group: "stunner.l7mp.io" + kind: GatewayConfig + name: stunner-gatewayconfig + namespace: stunner-system + description: "STUNner is a WebRTC ingress gateway for Kubernetes" +--- +apiVersion: stunner.l7mp.io/v1 +kind: GatewayConfig +metadata: + name: stunner-gatewayconfig + namespace: stunner-system +spec: + realm: stunner.l7mp.io + authType: static + userName: "user-1" + password: "pass-1" +--- +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: udp-gateway + namespace: stunner-system +spec: + gatewayClassName: stunner-gatewayclass + listeners: + - name: udp-listener + port: 3478 + protocol: TURN-UDP +--- +apiVersion: stunner.l7mp.io/v1 +kind: UDPRoute +metadata: + name: livekit-media-plane + namespace: stunner-system +spec: + parentRefs: + - name: udp-gateway + rules: + - backendRefs: + - group: "" + kind: Service + name: mediasoup-server + namespace: mediasoup +--- diff --git a/docs/examples/mediasoup/mediasoup-server.yaml b/docs/examples/mediasoup/mediasoup-server.yaml new file mode 100644 index 00000000..67117d93 --- /dev/null +++ b/docs/examples/mediasoup/mediasoup-server.yaml @@ -0,0 +1,111 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: mediasoup-server + name: mediasoup-server + namespace: mediasoup +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/name: mediasoup-server + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mediasoup-server + spec: + containers: + - env: + - name: PROTOO_LISTEN_PORT + value: "443" + - name: MEDIASOUP_ANNOUNCED_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + image: l7mp/mediasoup-demo-docker:latest + imagePullPolicy: IfNotPresent + name: mediasoup-server + ports: + - containerPort: 80 + name: http + protocol: TCP + - containerPort: 443 + name: https + protocol: TCP + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 +--- +apiVersion: v1 +kind: Service +metadata: + name: mediasoup-server + namespace: mediasoup +spec: + ports: + - name: https-443 + port: 443 + protocol: TCP + targetPort: 443 + selector: + app.kubernetes.io/name: mediasoup-server + type: ClusterIP +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + cert-manager.io/cluster-issuer: letsencrypt-prod + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + nginx.ingress.kubernetes.io/upstream-hash-by: "$arg_roomId" + name: mediasoup-server + namespace: mediasoup +spec: + rules: + - host: mediasoup-ingressserviceip.nip.io + http: + paths: + - backend: + service: + name: mediasoup-server + port: + number: 443 + path: / + pathType: Prefix + tls: + - hosts: + - mediasoup-ingressserviceip.nip.io + secretName: mediasoup-demo-tls +--- +apiVersion: 
cert-manager.io/v1 +kind: ClusterIssuer +metadata: + generation: 1 + name: letsencrypt-prod +spec: + acme: + email: info@yourdomain.com + privateKeySecretRef: + name: letsencrypt-secret-prod + server: https://acme-v02.api.letsencrypt.org/directory + solvers: + - http01: + ingress: + class: nginx +--- \ No newline at end of file diff --git a/docs/examples/neko/README.md b/docs/examples/neko/README.md index cc81c51d..0777cc4f 100644 --- a/docs/examples/neko/README.md +++ b/docs/examples/neko/README.md @@ -9,32 +9,24 @@ However, integrating Neko into Kubernetes is far from trivial. In this demo you will learn the following steps to: - integrate a typical WebRTC application server to be used with STUNner, -- deploy the Neko into Kubernetes behind STUNner, +- deploy Neko into Kubernetes behind STUNner, ## Installation ### Prerequisites -Consult the [STUNner installation and configuration guide](../../INSTALL.md) to set up STUNner. +To run this example, you need: +* a [Kubernetes cluster](../../INSTALL.md#prerequisites), +* a [deployed STUNner](../../INSTALL.md#installation-1) (presumably the latest stable version), +* optionally, an [Ingress controller](../TLS.md#ingress) to ingest traffic into the cluster. ### Quick installation -The simplest way to deploy the demo is to clone the [STUNner git -repository](https://github.com/l7mp/stunner) and deploy the -[manifest](neko.yaml) packaged with STUNner. - -Install the STUNner gateway operator and STUNner ([more info](https://github.com/l7mp/stunner-helm)): - -```console -helm repo add stunner https://l7mp.io/stunner -helm repo update -helm install stunner-gateway-operator stunner/stunner-gateway-operator --create-namespace --namespace=stunner-system -helm install stunner stunner/stunner -``` +The simplest way to deploy the demo is to clone the [STUNner git repository](https://github.com/l7mp/stunner) and deploy the [manifest](neko.yaml) packaged with STUNner. Configure STUNner to act as a STUN server towards clients, and to let media reach the media server. -``` +```console git clone https://github.com/l7mp/stunner cd stunner/docs/examples/neko kubectl apply -f stunner.yaml @@ -43,24 +35,24 @@ kubectl apply -f stunner.yaml This will expose STUNner on a public IP on UDP port 3478. A Kubernetes `LoadBalancer` assigns an ephemeral public IP address to the service, so first we need to learn the external IP. -``` +```console kubectl get service udp-gateway -n default -o jsonpath='{.status.loadBalancer.ingress[0].ip}' STUNNERIP=$(kubectl get service udp-gateway -n default -o jsonpath='{.status.loadBalancer.ingress[0].ip}') ``` -NOTE: this IP should be accessible from your browser. If that "public IP" is behind a NAT, you can overwrite it with the actual -public IP that routes to the service by hand (e.g. `STUNNERIP=`). +> [!NOTE] +> This IP should be accessible from your browser. If that "public IP" is behind a NAT, you can overwrite it with the actual public IP that routes to the service by hand (e.g. `STUNNERIP=`). We need to give this public IP the Neko configuration in the `NEKO_ICESERVERS` environment variable, inside the `json` content (basically this will tell you browser to use STUNner as a STUN/TURN server). 
You can do that by hand, or by this fancy `sed` command: -``` -sed -i "s/1.1.1.1/$STUNNERIP/g" neko.yaml +```console +sed -i "s/turn:[\.0-9]*:3478/turn:$STUNNERIP:3478/g" neko.yaml ``` -Now apply the Neko manifests: -``` +Now apply the Neko manifests and wait for the `neko` deployment to be available (should take a couple of seconds): +```console kubectl apply -f neko.yaml -kubectl get pods +kubectl wait --for=condition=Available deployment neko --timeout 5m ``` In this setup we use `ingress` to expose the Neko UI. Feel free to customize the `ingress` resource to your setup. @@ -68,6 +60,9 @@ If you don't have an ingress controller, you can use the `neko-tcp` service with Ideally, by opening your ingress controller in your browser, you should see the Neko UI. You can log in with the `admin`:`admin` credentials. The WebRTC stream then should be relayed through STUNner. +> [!NOTE] +> Tested with Chromium/Google Chrome. + ## Help STUNner development is coordinated in Discord, feel free to [join](https://discord.gg/DyPgEsbwzc). diff --git a/docs/examples/neko/neko.yaml b/docs/examples/neko/neko.yaml index 53b69600..b99a0a68 100644 --- a/docs/examples/neko/neko.yaml +++ b/docs/examples/neko/neko.yaml @@ -16,7 +16,7 @@ spec: spec: containers: - name: neko - image: m1k1o/neko:firefox + image: docker.io/m1k1o/neko:firefox env: - name: NEKO_SCREEN value: 1280x720@30 @@ -35,7 +35,7 @@ spec: fieldRef: fieldPath: status.podIP - name: NEKO_ICESERVERS - value: '[{"urls": ["turn:1.1.1.1:3478?transport=udp"], "username": "user-1", "credential": "pass-1","iceTransportPolicy": "relay"}]' + value: '[{"urls": ["turn:1.1.1.1:3478?transport=udp"], "username": "user-1", "credential": "pass-1"}]' ports: - name: http containerPort: 8080 diff --git a/docs/examples/neko/stunner.yaml b/docs/examples/neko/stunner.yaml index 776d6372..5cf09326 100644 --- a/docs/examples/neko/stunner.yaml +++ b/docs/examples/neko/stunner.yaml @@ -1,4 +1,4 @@ -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: name: stunner-gatewayclass @@ -12,19 +12,19 @@ spec: description: "STUNner is a WebRTC ingress gateway for Kubernetes" --- -apiVersion: stunner.l7mp.io/v1alpha1 +apiVersion: stunner.l7mp.io/v1 kind: GatewayConfig metadata: name: stunner-gatewayconfig namespace: default spec: realm: stunner.l7mp.io - authType: plaintext + authType: static userName: "user-1" password: "pass-1" --- -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: gateway.networking.k8s.io/v1 kind: Gateway metadata: name: udp-gateway @@ -34,10 +34,10 @@ spec: listeners: - name: udp-listener port: 3478 - protocol: UDP + protocol: TURN-UDP --- -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: stunner.l7mp.io/v1 kind: UDPRoute metadata: name: neko-plane @@ -48,4 +48,4 @@ spec: rules: - backendRefs: - name: neko-udp - namespace: default \ No newline at end of file + namespace: default diff --git a/docs/examples/simple-tunnel/README.md b/docs/examples/simple-tunnel/README.md index 1f87a266..dbd10150 100644 --- a/docs/examples/simple-tunnel/README.md +++ b/docs/examples/simple-tunnel/README.md @@ -1,9 +1,6 @@ -# STUNner Tutorial +# STUNner Tutorial: STUNner benchmark with iperf -# Open a tunnel via STUNner - -This tutorial shows how to tunnel an external connection via STUNner to a UDP service deployed into -Kubernetes. The tutorial can also be used to quickly check a STUNner installation. 
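Returning to the Neko example for a moment: the `NEKO_ICESERVERS` value that the `sed` command above rewrites is a plain JSON array of ICE server entries, so it can also be generated programmatically. A minimal sketch, assuming the field names shown in `neko.yaml` and treating the IP and credentials as placeholders:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// iceServer mirrors one entry of the NEKO_ICESERVERS JSON array in neko.yaml.
type iceServer struct {
	URLs       []string `json:"urls"`
	Username   string   `json:"username"`
	Credential string   `json:"credential"`
}

func main() {
	// Placeholders: the external IP of the udp-gateway service and the
	// credentials from your GatewayConfig.
	stunnerIP, user, pass := "1.2.3.4", "user-1", "pass-1"

	servers := []iceServer{{
		URLs:       []string{fmt.Sprintf("turn:%s:3478?transport=udp", stunnerIP)},
		Username:   user,
		Credential: pass,
	}}

	out, err := json.Marshal(servers)
	if err != nil {
		panic(err)
	}
	// Paste this value into the NEKO_ICESERVERS environment variable.
	fmt.Println(string(out))
}
```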
+This tutorial shows how to make a simple benchmark via STUNner to evaluate your cloud provider's UDP and TCP stack. In this tutorial you will learn how to: * configure a UDP service in Kubernetes, @@ -15,16 +12,11 @@ In this tutorial you will learn how to: ### Prerequisites -The tutorial assumes a fresh STUNner installation; see the [STUNner installation and configuration -guide](../../INSTALL.md). Create a namespace called `stunner` if there is none. You must have -[`iperfv2`](https://iperf.fr) installed locally to run this tutorial. +The tutorial assumes a fresh STUNner installation; see the [STUNner installation and configuration guide](../../INSTALL.md). Create a namespace called `stunner` if there is none. You must have [`iperfv2`](https://iperf.fr) installed locally to run this tutorial. ### Setup -In this tutorial we perform a quick Kubernetes/STUNner benchmark: we fire up an iperf server inside -the cluster and perform a speed test from the local console. We will use the -[`turncat`](../../cmd/turncat) client utility to tunnel test traffic to the iperf server via STUNner -acting as a STUN/TURN gateway. +In this tutorial we perform a quick Kubernetes/STUNner benchmark: we fire up an iperf server inside the cluster and perform a speed test from the local console. We will use the [`turncat`](../../cmd/turncat.md) client utility to tunnel test traffic to the iperf server via STUNner acting as a STUN/TURN gateway. ![STUNner benchmarks setup](../../img/stunner_benchmark.svg) @@ -39,10 +31,7 @@ cd stunner kubectl apply -f docs/examples/simple-tunnel/iperf-server.yaml ``` -This will start an Deployment that runs the iperf server and wraps it in a Kubernetes service -called `iperf-server` of type ClusterIP. Check this service and make sure that it is not exposed to -the outside world (i.e., `EXTERNAL-IP` is set to `` by Kubernetes); this makes sure that the -only way to reach this service from the local iperf speed-test client is through STUNner. +This will start an Deployment that runs the iperf server and wraps it in a Kubernetes service called `iperf-server` of type ClusterIP. Check this service and make sure that it is not exposed to the outside world (i.e., `EXTERNAL-IP` is set to `` by Kubernetes); this makes sure that the only way to reach this service from the local iperf speed-test client is through STUNner. ```console kubectl get service iperf-server -o wide @@ -52,20 +41,16 @@ iperf-server ClusterIP 10.120.5.36 5001/UDP,5001/TCP 19s ### STUNner configuration -Expose the service via the STUNner. The pre-compiled manifest below will create the required -GatewayClass and GateayConfig resources, fire up a Gateway listener at UDP:3478 and another one on -TCP:3478, and route client connections received on the gateways to the `iperf-server` -service. +Expose the service via the STUNner. The pre-compiled manifest below will create the required GatewayClass and GateayConfig resources, fire up a Gateway listener at UDP:3478 and another one on TCP:3478, and route client connections received on the gateways to the `iperf-server` service. + ```console kubectl apply -f docs/examples/simple-tunnel/iperf-stunner.yaml ``` -For convenience, below is a dump of the Gateway and UDPRoute resources the manifests create. Note -that the UDPRoute specifies the `iperf-server` service as the `backendRef`, which makes sure that -STUNner will forward the client connections received in any of the Gateways to the iperf server. 
+For convenience, below is a dump of the Gateway and UDPRoute resources the manifests create. Note that the UDPRoute specifies the `iperf-server` service as the `backendRef`, which makes sure that STUNner will forward the client connections received in any of the Gateways to the iperf server. ```yaml -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: gateway.networking.k8s.io/v1 kind: Gateway metadata: name: udp-gateway @@ -75,10 +60,10 @@ spec: listeners: - name: udp-listener port: 3478 - protocol: UDP + protocol: TURN-UDP --- -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: gateway.networking.k8s.io/v1 kind: Gateway metadata: name: tcp-gateway @@ -88,10 +73,10 @@ spec: listeners: - name: tcp-listener port: 3478 - protocol: TCP + protocol: TURN-TCP --- -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: stunner.l7mp.io/v1 kind: UDPRoute metadata: name: iperf-server @@ -111,93 +96,83 @@ spec: Check whether you have all the necessary STUNner resources installed namespace. ```console -kubectl get gatewayconfigs,gateways,udproutes -n stunner -NAME REALM AUTH AGE -gatewayconfig.stunner.l7mp.io/stunner-gatewayconfig stunner.l7mp.io plaintext 3m53s +kubectl get gatewayconfigs,gateways,udproutes.stunner.l7mp.io -n stunner +NAME REALM DATAPLANE AGE +gatewayconfig.stunner.l7mp.io/stunner-gatewayconfig stunner.l7mp.io default 139m -NAME CLASS ADDRESS READY AGE -gateway.gateway.networking.k8s.io/tcp-gateway stunner-gatewayclass True 14s -gateway.gateway.networking.k8s.io/udp-gateway stunner-gatewayclass True 14s +NAME CLASS ADDRESS PROGRAMMED AGE +gateway.gateway.networking.k8s.io/tcp-gateway stunner-gatewayclass 35.187.97.94 True 139m +gateway.gateway.networking.k8s.io/udp-gateway stunner-gatewayclass 35.205.10.190 True 139m -NAME AGE -udproute.gateway.networking.k8s.io/iperf-server 14s +NAME AGE +udproute.stunner.l7mp.io/iperf-server 139m ``` -You can also use the handy `stunnerctl` CLI tool to dump the running STUNner configuration. +You can also use the handy [`stunnerctl` CLI tool](/cmd/stunnerctl/README.md) to dump the running STUNner configuration for the UDP gateway. Make sure to issue `make build` first to build `stunnerctl`, along with a set of other handy STUNner utilities, in the `bin/` directory. ``` console -cmd/stunnerctl/stunnerctl running-config stunner/stunnerd-config -STUN/TURN authentication type: plaintext -STUN/TURN username: user-1 -STUN/TURN password: pass-1 -Listener 1 - Name: udp-listener - Listener: udp-listener - Protocol: UDP - Public address: 34.116.220.190 - Public port: 30501 -Listener 2 - Name: tcp-listener - Listener: tcp-listener - Protocol: TCP - Public address: 34.118.93.28 - Public port: 3478 +bin/stunnerctl -n stunner config udp-gateway +Gateway: stunner/udp-gateway (loglevel: "all:INFO") +Authentication type: static, username/password: user-1/pass-1 +Listeners: + - Name: stunner/udp-gateway/udp-listener + Protocol: TURN-UDP + Public address:port: 34.118.88.91:3478 + Routes: [stunner/iperf-server] + Endpoints: [10.76.1.4, 10.80.4.47] ``` -NOTE: It usually takes 30-60 seconds for Kubernetes to assign an external IP address to STUNner -gateways. As long as the external address is in `` status, STUNner exposes the Gateway on -a NodePort: in the above example the UDP Gateway's `udp-listener` is exposed on a node IP -(`34.116.220.190`) and the NodePort 30501. Once Kubernetes finishes the exposition of the Gateway -service, STUNner picks up the new address/port and updates the config accordingly. 
The end -result should be something similar to the below; observe how the `udp-listener` public port has -changed to the requested port 3478 and the public address is updated as well. +Likewise, the below will dump the config for the TCP gateway. ``` console -cmd/stunnerctl/stunnerctl running-config stunner/stunnerd-config -[...] -Listener 1 - Name: udp-listener - Listener: udp-listener - Protocol: UDP - Public address: 34.118.16.31 - Public port: 3478 -[...] +bin/stunnerctl -n stunner config tcp-gateway +Gateway: stunner/tcp-gateway (loglevel: "all:INFO") +Authentication type: static, username/password: user-1/pass-1 +Listeners: + - Name: stunner/tcp-gateway/tcp-listener + Protocol: TURN-TCP + Public address:port: 34.116.180.89:3478 + Routes: [stunner/iperf-server] + Endpoints: [10.76.1.4, 10.80.4.47] ``` +NOTE: It usually takes 30-60 seconds for Kubernetes to assign an external IP address to STUNner gateways. As long as the external address is in `` status, STUNner exposes the Gateway on a NodePort. Once Kubernetes finishes the exposition of the Gateway service, STUNner will pick up the new address/port and update the config accordingly. + If in doubt, you can always query Kubernetes for the service statuses. + ``` console kubectl get -n stunner services -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -stunner ClusterIP 10.120.4.118 3478/UDP 2d -tcp-gateway LoadBalancer 10.120.11.196 34.118.93.28 3478:30959/TCP 14h -udp-gateway LoadBalancer 10.120.3.228 34.118.16.31 3478:30501/UDP 6m42s +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +stunner ClusterIP 10.0.9.70 3478/UDP 15m +tcp-gateway LoadBalancer 10.0.3.91 35.187.97.94 3478:31781/TCP 143m +udp-gateway LoadBalancer 10.0.14.218 35.205.10.190 3478:31048/UDP 143m ``` ### Run the benchmark -We will need to learn the ClusterIP assigned by Kubernetes to the `iperf-server` service: this will -be the peer address to which `turncat` will ask STUNner to relay the iperf test traffic. +We will need to learn the ClusterIP assigned by Kubernetes to the `iperf-server` service: this will be the peer address to which `turncat` will ask STUNner to relay the iperf test traffic. + ``` console export IPERF_ADDR=$(kubectl get svc iperf-server -o jsonpath="{.spec.clusterIP}") ``` -Next, set up `turncat` to listen on `UDP:127.0.0.1:5000` and tunnel connections from this -listener via the STUNner STUN/TURN listener `udp-listener` to the iperf server. Luckily, `turncat` -is clever enough to [parse the running STUNner configuration](../../cmd/turncat) from Kubernetes and set -the STUN/TURN server public address/port and the authentication credentials -accordingly. +Next, set up `turncat` to listen on `UDP:127.0.0.1:5000` and tunnel connections from this listener via the STUNner STUN/TURN listener `udp-listener` to the iperf server. Luckily, `turncat` is clever enough to [parse the running STUNner configuration](../../cmd/turncat.md) from Kubernetes and set the STUN/TURN server public address/port and the authentication credentials accordingly. + ``` console -./turncat --log=all:INFO udp://127.0.0.1:5000 k8s://stunner/stunnerd-config:udp-listener \ +bin/turncat --log=all:INFO udp://127.0.0.1:5000 k8s://stunner/udp-gateway:udp-listener \ udp://$IPERF_ADDR:5001 ``` -Fire up an iperf client from another terminal to start the benchmark. 
+The most important part here is the TURN meta-URI: `k8s://stunner/udp-gateway:udp-listener` instructs `turncat` to look for the Gateway called `udp-gateway` in the `stunner` namespace and create a connection to the TURN listener called `udp-listener` of the Gateway. + +Fire up an iperf client from another terminal that will connect to STUNner via `turncat` and start the benchmark. ```console iperf -c localhost -p 5000 -u -i 1 -l 100 -b 800000 -t 10 ``` If successful, the iperf server logs should contain the benchmark results. + ```console kubectl logs $(kubectl get pods -l app=iperf-server -o jsonpath='{.items[0].metadata.name}') ------------------------------------------------------------ @@ -207,36 +182,38 @@ UDP buffer size: 208 KByte (default) ------------------------------------------------------------ [ 1] local 10.116.2.30%eth0 port 5001 connected with 10.116.1.21 port 56439 (peer 2.1.7) [ ID] Interval Transfer Bandwidth Jitter Lost/Total Latency avg/min/max/stdev PPS inP NetPwr +... [ 1] 0.0000-9.9204 sec 977 KBytes 807 Kbits/sec 1.426 ms 0/10003 (0%) 14.256/10.791/97.428/ 4.993 ms 1008 pps 1.40 KByte 7.07 ``` -The results show that we have managed to squeeze 1000 packets/sec through STUNner without packet -loss, at an average one-way latency of 14.2 ms and average jitter 1.426 ms. Not bad from a -Kubernetes cluster running in some remote datacenter! +The results show that we have managed to send 1000 packets/sec through STUNner to the iperf server without packet loss, at an average one-way latency of 14.2 ms and 1.426 ms jitter. Not bad from a Kubernetes cluster running in some remote datacenter! + +Repeating the test, this time with a STUN/TURN over TCP, casts a somewhat different picture. Notice the new meta-URI: `k8s://stunner/tcp-gateway:tcp-listener` to select the TURN server exposed on TCP for`turncat`. -Repeating the test, this time with a STUN/TURN over TCP, casts a somewhat more negative -picture. Change the STUN/TURN URI in the `turncat` CLI to connect via the `tcp-listener`. ``` console -./turncat --log=all:INFO udp://127.0.0.1:5000 k8s://stunner/stunnerd-config:tcp-listener \ +bin/turncat --log=all:INFO udp://127.0.0.1:5000 k8s://stunner/tcp-gateway:tcp-listener \ udp://$IPERF_ADDR:5001 ``` Run the benchmark again at 10kpps and watch the logs. + ``` console iperf -c localhost -p 5000 -u -l 100 -b 8000000 -o /dev/null -t 10 && \ kubectl logs $(kubectl get pods -l app=iperf-server -o jsonpath='{.items[0].metadata.name}') | tail -n 1 [ 3] 0.0000-9.9365 sec 9.41 MBytes 7.94 Mbits/sec 0.085 ms 1361/100003 (1.4%) 148.261/21.098/454.266/73.704 ms 9927 pps 144 KByte 6.70 ``` -It seems that average latency has jumped to 148 ms, with a max latency of close to 460 ms! That's -why you should try to [avoid TCP at all -cost](https://bloggeek.me/why-you-should-prefer-udp-over-tcp-for-your-webrtc-sessions) in real-time -communications. +It seems that average latency has jumped to 148 ms, with a max latency of close to 460 ms! That's why you should [avoid TCP at all cost](https://bloggeek.me/why-you-should-prefer-udp-over-tcp-for-your-webrtc-sessions) in real-time communications. ### Cleaning up Stop `turncat` and wipe all Kubernetes configuration. + ```console kubectl delete -f docs/examples/simple-tunnel/iperf-server.yaml kubectl delete -f docs/examples/simple-tunnel/iperf-stunner.yaml ``` + +# Help + +STUNner development is coordinated in Discord, feel free to [join](https://discord.gg/DyPgEsbwzc). 
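If `iperf` is not available locally, a few lines of Go can stand in for the client side of this benchmark: the sketch below pushes fixed-size UDP datagrams at a steady rate into the local `turncat` listener, which relays them through STUNner to the iperf server. It assumes `turncat` is listening on `udp://127.0.0.1:5000` as configured above; loss and latency still have to be read from the iperf server logs.

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// turncat is assumed to listen here (see the command above) and to relay
	// everything through STUNner to the iperf server.
	conn, err := net.Dial("udp", "127.0.0.1:5000")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	const (
		payloadSize = 100              // bytes per datagram, like `iperf -l 100`
		rate        = 1000             // packets per second, roughly 800 kbit/s
		duration    = 10 * time.Second // test length, like `iperf -t 10`
	)

	payload := make([]byte, payloadSize)
	ticker := time.NewTicker(time.Second / rate)
	defer ticker.Stop()

	deadline := time.Now().Add(duration)
	sent := 0
	for time.Now().Before(deadline) {
		<-ticker.C
		if _, err := conn.Write(payload); err != nil {
			panic(err)
		}
		sent++
	}
	fmt.Printf("sent %d datagrams of %d bytes in %v\n", sent, payloadSize, duration)
}
```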
diff --git a/docs/examples/simple-tunnel/iperf-server.yaml b/docs/examples/simple-tunnel/iperf-server.yaml index e89b5895..84fb31ab 100644 --- a/docs/examples/simple-tunnel/iperf-server.yaml +++ b/docs/examples/simple-tunnel/iperf-server.yaml @@ -15,7 +15,7 @@ spec: app: iperf-server spec: containers: - - image: l7mp/net-debug:latest + - image: docker.io/l7mp/net-debug:latest name: net-debug command: ["/usr/bin/iperf"] args: ["-s", "-p", "5001", "-u", "-e"] diff --git a/docs/examples/simple-tunnel/iperf-stunner.yaml b/docs/examples/simple-tunnel/iperf-stunner.yaml index f1a295cf..f21e3ff7 100644 --- a/docs/examples/simple-tunnel/iperf-stunner.yaml +++ b/docs/examples/simple-tunnel/iperf-stunner.yaml @@ -1,4 +1,4 @@ -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: gateway.networking.k8s.io/v1 kind: GatewayClass metadata: name: stunner-gatewayclass @@ -12,7 +12,7 @@ spec: description: "STUNner is a WebRTC ingress gateway for Kubernetes" --- -apiVersion: stunner.l7mp.io/v1alpha1 +apiVersion: stunner.l7mp.io/v1 kind: GatewayConfig metadata: name: stunner-gatewayconfig @@ -24,7 +24,7 @@ spec: password: "pass-1" --- -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: gateway.networking.k8s.io/v1 kind: Gateway metadata: name: udp-gateway @@ -34,10 +34,10 @@ spec: listeners: - name: udp-listener port: 3478 - protocol: UDP + protocol: TURN-UDP --- -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: gateway.networking.k8s.io/v1 kind: Gateway metadata: name: tcp-gateway @@ -47,10 +47,10 @@ spec: listeners: - name: tcp-listener port: 3478 - protocol: TCP + protocol: TURN-TCP --- -apiVersion: gateway.networking.k8s.io/v1alpha2 +apiVersion: stunner.l7mp.io/v1 kind: UDPRoute metadata: name: iperf-server diff --git a/docs/examples/udp-echo/README.md b/docs/examples/udp-echo/README.md new file mode 100644 index 00000000..692ef50c --- /dev/null +++ b/docs/examples/udp-echo/README.md @@ -0,0 +1,342 @@ +# STUNner Tutorial: Deploy a UDP echo service behind STUNner + +This tutorial shows how to tunnel an external connection via STUNner to a UDP service deployed into Kubernetes. The tutorial can also be used as an introduction to the main concepts in STUNner and to quickly check a STUNner installation. + +In this tutorial you will learn how to: +* configure a UDP service in Kubernetes, +* configure STUNner to expose the service to clients, +* use [`turncat`](../../cmd/turncat.md) to connect to the UDP service via STUNner, +* benchmark your cloud-setup with [`iperfv2`](https://iperf.fr). + +## Prerequisites + +The tutorial assumes a fresh STUNner installation; see the [STUNner installation and configuration +guide](../../INSTALL.md). + +## Configuration + +The standard way to interact with STUNner is via the standard Kubernetes [Gateway +API](https://gateway-api.sigs.k8s.io). This is much akin to the way you configure *all* Kubernetes +workloads: specify your intents in YAML files and issue a `kubectl apply`, and the [STUNner gateway +operator](https://github.com/l7mp/stunner-gateway-operator) will automatically create the STUNner +dataplane (that is, the `stunnerd` pods that implement the STUN/TURN service) and downloads the new +configuration to the dataplane pods. + +It is generally a good idea to maintain STUNner configuration into a separate Kubernetes +namespace. Below we will use the `stunner` namespace; create it with `kubectl create namespace +stunner` if it does not exist. + +1. 
Given a fresh STUNner install, the first step is to register STUNner with the Kubernetes Gateway + API. This amounts to creating a + [GatewayClass](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1beta1.GatewayClass), + which serves as the [root level configuration](/docs/GATEWAY.md#gatewayclass) for your STUNner + deployment. + + Each GatewayClass must specify a controller that will manage the Gateway objects created under + the class hierarchy. This must be set to `stunner.l7mp.io/gateway-operator` in order for STUNner + to pick up the GatewayClass. In addition, a GatewayClass can refer to further + implementation-specific configuration via a reference called `parametersRef`; in our case, this + will be a GatewayConfig object to be specified next. + + ``` console + kubectl apply -f - <
[diff hunk of an SVG figure omitted: only scattered markup fragments survived extraction; recoverable labels: STUNner Gateway Hierarchy / Gateway API, GatewayClass, GatewayConfig, Gateway (gw-ns/gw), UDPRoute, Service, LB Service, Deployment, ConfigMap, stunnerd.conf, stunnerd, Listener, Cluster, Watch, Render, Control, Execute, STUN/TURN, UDP]
diff --git a/docs/img/stunner_janus_arch.svg b/docs/img/stunner_janus_arch.svg new file mode 100644 index 00000000..a11d2428 --- /dev/null +++ b/docs/img/stunner_janus_arch.svg
@@ -0,0 +1,4 @@
+[SVG markup omitted: Janus architecture figure; recoverable labels: Kubernetes cluster, HTTPS, Ingress, UDP/RTP, Secure WebSocket (wss), STUN/TURN, Janus Web Demo]
\ No newline at end of file diff --git a/docs/img/stunner_mediasoup.svg b/docs/img/stunner_mediasoup.svg new file mode 100644 index 00000000..5949b7bf --- /dev/null +++ b/docs/img/stunner_mediasoup.svg @@ -0,0 +1,4 @@ + + + +
+[SVG markup omitted: mediasoup integration figure; recoverable labels: Kubernetes cluster, HTTPS, Ingress, UDP/RTP, STUN/TURN]
\ No newline at end of file diff --git a/docs/img/stunner_neko.svg b/docs/img/stunner_neko.svg new file mode 100644 index 00000000..961ab1a1 --- /dev/null +++ b/docs/img/stunner_neko.svg @@ -0,0 +1,1169 @@ + +audio/videouser input (keys pressed)HTTPS/WebSocket Kubernetes clusterApplication ServerSTUNner AuthenticationServiceAUTH SERVICEHTTPREST diff --git a/docs/index.md b/docs/index.md new file mode 120000 index 00000000..42061c01 --- /dev/null +++ b/docs/index.md @@ -0,0 +1 @@ +README.md \ No newline at end of file diff --git a/docs/requirements.in b/docs/requirements.in new file mode 100644 index 00000000..e5bcdbf9 --- /dev/null +++ b/docs/requirements.in @@ -0,0 +1,4 @@ +markdown-include +markdown-gfm-admonition +mkdocs +mkdocstrings[python] \ No newline at end of file diff --git a/docs/requirements.txt b/docs/requirements.txt index f6cb652b..32fc8dc3 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,66 +1,83 @@ # -# This file is autogenerated by pip-compile with python 3.10 -# To update, run: +# This file is autogenerated by pip-compile with Python 3.12 +# by the following command: # # pip-compile docs/requirements.in # -click==8.1.3 - # via mkdocs +click==8.1.7 + # via + # mkdocs + # mkdocstrings +colorama==0.4.6 + # via griffe ghp-import==2.1.0 # via mkdocs -griffe==0.22.0 +griffe==1.5.1 # via mkdocstrings-python -importlib-metadata==4.12.0 - # via mkdocs -jinja2==3.1.2 +jinja2==3.1.4 # via # mkdocs # mkdocstrings -markdown==3.3.7 +markdown==3.7 # via + # markdown-gfm-admonition # markdown-include # mkdocs # mkdocs-autorefs # mkdocstrings # pymdown-extensions -markdown-include==0.6.0 +markdown-gfm-admonition==0.1.1 # via -r docs/requirements.in -markupsafe==2.1.1 +markdown-include==0.8.1 + # via -r docs/requirements.in +markupsafe==3.0.2 # via # jinja2 + # mkdocs + # mkdocs-autorefs # mkdocstrings mergedeep==1.3.4 - # via mkdocs -mkdocs==1.3.0 + # via + # mkdocs + # mkdocs-get-deps +mkdocs==1.6.1 # via # -r docs/requirements.in # mkdocs-autorefs # mkdocstrings -mkdocs-autorefs==0.4.1 - # via mkdocstrings -mkdocstrings[python]==0.19.0 +mkdocs-autorefs==1.2.0 + # via + # mkdocstrings + # mkdocstrings-python +mkdocs-get-deps==0.2.0 + # via mkdocs +mkdocstrings[python]==0.27.0 # via # -r docs/requirements.in # mkdocstrings-python -mkdocstrings-python==0.7.1 +mkdocstrings-python==1.12.2 # via mkdocstrings -packaging==21.3 +packaging==24.2 + # via mkdocs +pathspec==0.12.1 # via mkdocs -pymdown-extensions==9.5 +platformdirs==4.3.6 + # via + # mkdocs-get-deps + # mkdocstrings +pymdown-extensions==10.12 # via mkdocstrings -pyparsing==3.0.9 - # via packaging -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 # via ghp-import -pyyaml==6.0 +pyyaml==6.0.2 # via # mkdocs + # mkdocs-get-deps + # pymdown-extensions # pyyaml-env-tag pyyaml-env-tag==0.1 # via mkdocs six==1.16.0 # via python-dateutil -watchdog==2.1.9 +watchdog==6.0.0 # via mkdocs -zipp==3.8.0 - # via importlib-metadata diff --git a/go.mod b/go.mod index ae8bb475..3b089f2e 100644 --- a/go.mod +++ b/go.mod @@ -1,79 +1,126 @@ module github.com/l7mp/stunner -go 1.19 +go 1.22.0 + +toolchain go1.22.4 require ( - github.com/fsnotify/fsnotify v1.6.0 - github.com/pion/dtls/v2 v2.2.6 + github.com/deepmap/oapi-codegen/v2 v2.1.0 + github.com/fsnotify/fsnotify v1.7.0 + github.com/getkin/kin-openapi v0.123.0 + github.com/go-logr/logr v1.4.2 + github.com/go-logr/zapr v1.3.0 + github.com/google/uuid v1.6.0 + github.com/gorilla/mux v1.8.1 + github.com/gorilla/websocket v1.5.1 + 
github.com/oapi-codegen/runtime v1.1.1 + github.com/pion/datachannel v1.5.9 + github.com/pion/dtls/v3 v3.0.3 github.com/pion/logging v0.2.2 - github.com/pion/transport/v2 v2.0.2 - // replace from l7mp/turn - github.com/pion/turn/v2 v2.1.0 - github.com/prometheus/client_golang v1.14.0 + github.com/pion/transport/v3 v3.0.7 + github.com/pion/turn/v4 v4.0.0 + github.com/pion/webrtc/v4 v4.0.1 + github.com/prometheus/client_golang v1.20.4 + github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.1 - k8s.io/api v0.24.3 - k8s.io/apimachinery v0.24.3 - sigs.k8s.io/controller-runtime v0.12.3 - sigs.k8s.io/yaml v1.3.0 -) - -require ( - github.com/heptiolabs/healthcheck v0.0.0-20211123025425-613501dd5deb - github.com/pion/randutil v0.1.0 - github.com/pion/transport v0.14.1 - golang.org/x/sys v0.6.0 + github.com/stretchr/testify v1.9.0 + go.opentelemetry.io/otel v1.31.0 + go.opentelemetry.io/otel/exporters/prometheus v0.53.0 + go.opentelemetry.io/otel/metric v1.31.0 + go.opentelemetry.io/otel/sdk v1.31.0 + go.opentelemetry.io/otel/sdk/metric v1.31.0 + go.uber.org/zap v1.26.0 + golang.org/x/sys v0.26.0 + golang.org/x/time v0.5.0 + gonum.org/v1/gonum v0.15.1 + k8s.io/api v0.31.3 + k8s.io/apimachinery v0.31.3 + k8s.io/cli-runtime v0.29.1 + k8s.io/client-go v0.31.3 + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 + sigs.k8s.io/yaml v1.4.0 ) require ( - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful v2.9.5+incompatible // indirect - github.com/evanphx/json-patch v4.12.0+incompatible // indirect - github.com/go-logr/logr v1.2.0 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.5 // indirect - github.com/go-openapi/swag v0.19.14 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.11.2 // indirect + github.com/evanphx/json-patch v5.9.0+incompatible // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-errors/errors v1.5.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.20.2 // indirect + github.com/go-openapi/jsonreference v0.20.4 // indirect + github.com/go-openapi/swag v0.22.9 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/gofuzz v1.1.0 // indirect - github.com/imdario/mergo v0.3.12 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/btree v1.1.2 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect + github.com/imdario/mergo v0.3.16 // indirect + 
github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/invopop/yaml v0.2.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/mailru/easyjson v0.7.6 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/moby/spdystream v0.4.0 // indirect + github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pion/stun v0.4.0 // indirect - github.com/pion/udp/v2 v2.0.1 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/perimeterx/marshmallow v1.1.5 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/pion/ice/v4 v4.0.2 // indirect + github.com/pion/interceptor v0.1.37 // indirect + github.com/pion/mdns/v2 v2.0.7 // indirect + github.com/pion/randutil v0.1.0 // indirect + github.com/pion/rtcp v1.2.14 // indirect + github.com/pion/rtp v1.8.9 // indirect + github.com/pion/sctp v1.8.33 // indirect + github.com/pion/sdp/v3 v3.0.9 // indirect + github.com/pion/srtp/v3 v3.0.4 // indirect + github.com/pion/stun/v3 v3.0.0 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.42.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect - golang.org/x/crypto v0.7.0 // indirect - golang.org/x/net v0.8.0 // indirect - golang.org/x/oauth2 v0.5.0 // indirect - golang.org/x/term v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect - golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.30.0 // indirect - gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.60.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/wlynxg/anet v0.0.3 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/xlab/treeprint v1.2.0 // indirect + go.opentelemetry.io/otel/trace v1.31.0 // indirect + go.starlark.net v0.0.0-20240123142251-f86470692795 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.28.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/term v0.25.0 // indirect + golang.org/x/text v0.19.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect + google.golang.org/protobuf v1.35.1 // indirect + gopkg.in/evanphx/json-patch.v5 v5.9.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 
// indirect - k8s.io/client-go v0.24.2 // indirect - k8s.io/klog/v2 v2.60.1 // indirect - k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect - k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect - sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/kustomize/api v0.16.0 // indirect + sigs.k8s.io/kustomize/kyaml v0.16.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) // replace github.com/pion/turn/v2 => github.com/l7mp/turn/v2 v2.0.11 diff --git a/go.sum b/go.sum index 156f09b0..f4688bd9 100644 --- a/go.sum +++ b/go.sum @@ -1,723 +1,320 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= 
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= +github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= +github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod 
h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deepmap/oapi-codegen/v2 v2.1.0 h1:I/NMVhJCtuvL9x+S2QzZKpSjGi33oDZwPRdemvOZWyQ= +github.com/deepmap/oapi-codegen/v2 v2.1.0/go.mod h1:R1wL226vc5VmCNJUvMyYr3hJMm5reyv25j952zAVXZ8= +github.com/emicklei/go-restful/v3 v3.11.2 h1:1onLa9DcsMYO9P+CXaL0dStDqQ2EHHXLiz+BtnqkLAU= +github.com/emicklei/go-restful/v3 v3.11.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/getkin/kin-openapi v0.123.0 h1:zIik0mRwFNLyvtXK274Q6ut+dPh6nlxBp0x7mNrPhs8= +github.com/getkin/kin-openapi v0.123.0/go.mod h1:wb1aSZA/iWmorQP9KTAS/phLj/t17B5jT7+fS8ed9NM= +github.com/go-errors/errors v1.5.1 
h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= +github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= +github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= +github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= +github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= +github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= +github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod 
h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= 
+github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/heptiolabs/healthcheck v0.0.0-20211123025425-613501dd5deb h1:tsEKRC3PU9rMw18w/uAptoijhgG4EvlA5kfJPtwrMDk= -github.com/heptiolabs/healthcheck v0.0.0-20211123025425-613501dd5deb/go.mod h1:NtmN9h8vrTveVQRLHcX2HQ5wIPBDCsZ351TGbZWgg38= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/invopop/yaml v0.2.0 h1:7zky/qH+O0DwAyoobXUqvVBwgBFRxKoQ/3FjcVpjTMY= +github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= 
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8= +github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod 
h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/oapi-codegen/runtime v1.1.1 h1:EXLHh0DXIJnWhdRPN2w4MXAzFyE4CskzhNLUmtpMYro= +github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= +github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= +github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pion/dtls/v2 v2.2.6 h1:yXMxKr0Skd+Ub6A8UqXTRLSywskx93ooMRHsQUtd+Z4= -github.com/pion/dtls/v2 v2.2.6/go.mod h1:t8fWJCIquY5rlQZwA2yWxUS1+OCrAdXrhVKXB5oD/wY= +github.com/pion/datachannel v1.5.9 h1:LpIWAOYPyDrXtU+BW7X0Yt/vGtYxtXQ8ql7dFfYUVZA= +github.com/pion/datachannel v1.5.9/go.mod h1:kDUuk4CU4Uxp82NH4LQZbISULkX/HtzKa4P7ldf9izE= +github.com/pion/dtls/v3 v3.0.3 h1:j5ajZbQwff7Z8k3pE3S+rQ4STvKvXUdKsi/07ka+OWM= +github.com/pion/dtls/v3 v3.0.3/go.mod h1:weOTUyIV4z0bQaVzKe8kpaP17+us3yAuiQsEAG1STMU= +github.com/pion/ice/v4 v4.0.2 h1:1JhBRX8iQLi0+TfcavTjPjI6GO41MFn4CeTBX+Y9h5s= +github.com/pion/ice/v4 v4.0.2/go.mod 
h1:DCdqyzgtsDNYN6/3U8044j3U7qsJ9KFJC92VnOWHvXg= +github.com/pion/interceptor v0.1.37 h1:aRA8Zpab/wE7/c0O3fh1PqY0AJI3fCSEM5lRWJVorwI= +github.com/pion/interceptor v0.1.37/go.mod h1:JzxbJ4umVTlZAf+/utHzNesY8tmRkM2lVmkS82TTj8Y= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= +github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM= +github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA= github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= -github.com/pion/stun v0.4.0 h1:vgRrbBE2htWHy7l3Zsxckk7rkjnjOsSM7PHZnBwo8rk= -github.com/pion/stun v0.4.0/go.mod h1:QPsh1/SbXASntw3zkkrIk3ZJVKz4saBY2G7S10P3wCw= -github.com/pion/transport v0.14.1 h1:XSM6olwW+o8J4SCmOBb/BpwZypkHeyM0PGFCxNQBr40= -github.com/pion/transport v0.14.1/go.mod h1:4tGmbk00NeYA3rUa9+n+dzCCoKkcy3YlYb99Jn2fNnI= -github.com/pion/transport/v2 v2.0.0/go.mod h1:HS2MEBJTwD+1ZI2eSXSvHJx/HnzQqRy2/LXxt6eVMHc= -github.com/pion/transport/v2 v2.0.2 h1:St+8o+1PEzPT51O9bv+tH/KYYLMNR5Vwm5Z3Qkjsywg= -github.com/pion/transport/v2 v2.0.2/go.mod h1:vrz6bUbFr/cjdwbnxq8OdDDzHf7JJfGsIRkxfpZoTA0= -github.com/pion/turn/v2 v2.1.0 h1:5wGHSgGhJhP/RpabkUb/T9PdsAjkGLS6toYz5HNzoSI= -github.com/pion/turn/v2 v2.1.0/go.mod h1:yrT5XbXSGX1VFSF31A3c1kCNB5bBZgk/uu5LET162qs= -github.com/pion/udp/v2 v2.0.1 h1:xP0z6WNux1zWEjhC7onRA3EwwSliXqu1ElUZAQhUP54= -github.com/pion/udp/v2 v2.0.1/go.mod h1:B7uvTMP00lzWdyMr/1PVZXtV3wpPIxBRd4Wl6AksXn8= +github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE= +github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= +github.com/pion/rtp v1.8.9 h1:E2HX740TZKaqdcPmf4pw6ZZuG8u5RlMMt+l3dxeu6Wk= +github.com/pion/rtp v1.8.9/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= +github.com/pion/sctp v1.8.33 h1:dSE4wX6uTJBcNm8+YlMg7lw1wqyKHggsP5uKbdj+NZw= +github.com/pion/sctp v1.8.33/go.mod h1:beTnqSzewI53KWoG3nqB282oDMGrhNxBdb+JZnkCwRM= +github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY= +github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M= +github.com/pion/srtp/v3 v3.0.4 h1:2Z6vDVxzrX3UHEgrUyIGM4rRouoC7v+NiF1IHtp9B5M= +github.com/pion/srtp/v3 v3.0.4/go.mod h1:1Jx3FwDoxpRaTh1oRV8A/6G1BnFL+QI82eK4ms8EEJQ= +github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw= +github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU= +github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= +github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= +github.com/pion/turn/v4 v4.0.0 h1:qxplo3Rxa9Yg1xXDxxH8xaqcyGUtbHYw4QSCvmFWvhM= +github.com/pion/turn/v4 v4.0.0/go.mod h1:MuPDkm15nYSklKpN8vWJ9W2M0PlyQZqYt1McGuxG7mA= +github.com/pion/webrtc/v4 v4.0.1 h1:6Unwc6JzoTsjxetcAIoWH81RUM4K5dBc1BbJGcF9WVE= +github.com/pion/webrtc/v4 v4.0.1/go.mod h1:SfNn8CcFxR6OUVjLXVslAQ3a3994JhyE3Hw1jAuqEto= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= +github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= 
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= +github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/wlynxg/anet v0.0.3 h1:PvR53psxFXstc12jelG6f1Lv4MWqE0tI76/hHGjh9rg= +github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= -go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/exporters/prometheus v0.53.0 h1:QXobPHrwiGLM4ufrY3EOmDPJpo2P90UuFau4CDPJA/I= 
+go.opentelemetry.io/otel/exporters/prometheus v0.53.0/go.mod h1:WOAXGr3D00CfzmFxtTV1eR0GpoHuPEu+HJT8UWW2SIU= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= +go.starlark.net v0.0.0-20240123142251-f86470692795 h1:LmbG8Pq7KDGkglKVn8VpZOZj6vb9b8nKEGcg9l03epM= +go.starlark.net v0.0.0-20240123142251-f86470692795/go.mod h1:LcLNIzVOMp4oV+uusnpk+VU+SzXaJakUuBjoCSWH5dM= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= -golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp 
v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 
-golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s= -golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/term v0.5.0/go.mod 
h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= +golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools 
v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools 
v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod 
h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= 
-google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 h1:FVCohIoYO7IJoDDVpV2pdq7SgrMH6wHnuTyrdrxJNoY= -gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0/go.mod h1:OdE7CF6DbADk7lN8LIKRzRJTTZXIjtWgA5THM5lhBAw= +gonum.org/v1/gonum v0.15.1 h1:FNy7N6OUZVUaWG9pTiD+jlhdQ3lMP+/LcTpJ6+a8sQ0= +gonum.org/v1/gonum v0.15.1/go.mod h1:eZTZuRFrzu5pcyjN5wJhcIhnUdNijYxX1T2IcrOGY0o= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/evanphx/json-patch.v5 v5.9.0 h1:hx1VU2SGj4F8r9b8GUwJLdc8DNO8sy79ZGui0G05GLo= +gopkg.in/evanphx/json-patch.v5 v5.9.0/go.mod h1:/kvTRh1TVm5wuM6OkHxqXtE/1nUZZpihg29RtuIyfvk= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= -k8s.io/api v0.24.3 h1:tt55QEmKd6L2k5DP6G/ZzdMQKvG5ro4H4teClqm0sTY= -k8s.io/api v0.24.3/go.mod h1:elGR/XSZrS7z7cSZPzVWaycpJuGIw57j9b95/1PdJNI= -k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k= -k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/apimachinery v0.24.3 h1:hrFiNSA2cBZqllakVYyH/VyEh4B581bQRmqATJSeQTg= -k8s.io/apimachinery v0.24.3/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA= -k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= -k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= 
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.12.3 h1:FCM8xeY/FI8hoAfh/V4XbbYMY20gElh9yh+A98usMio= -sigs.k8s.io/controller-runtime v0.12.3/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8= +k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE= +k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4= +k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/cli-runtime v0.29.1 h1:By3WVOlEWYfyxhGko0f/IuAOLQcbBSMzwSaDren2JUs= +k8s.io/cli-runtime v0.29.1/go.mod h1:vjEY9slFp8j8UoMhV5AlO8uulX9xk6ogfIesHobyBDU= +k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4= +k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kustomize/api v0.16.0 h1:/zAR4FOQDCkgSDmVzV2uiFbuy9bhu3jEzthrHCuvm1g= +sigs.k8s.io/kustomize/api v0.16.0/go.mod h1:MnFZ7IP2YqVyVwMWoRxPtgl/5hpA+eCCrQR/866cm5c= +sigs.k8s.io/kustomize/kyaml v0.16.0 h1:6J33uKSoATlKZH16unr2XOhDI+otoe2sR3M8PDzW3K0= +sigs.k8s.io/kustomize/kyaml v0.16.0/go.mod h1:xOK/7i+vmE14N2FdFyugIshB8eF6ALpy7jI87Q2nRh4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/handlers.go b/handlers.go index 28eb2138..60814cd8 100644 --- a/handlers.go +++ b/handlers.go @@ -7,7 +7,7 @@ import ( "github.com/l7mp/stunner/internal/object" "github.com/l7mp/stunner/internal/util" - "github.com/l7mp/stunner/pkg/apis/v1alpha1" + stnrv1 
"github.com/l7mp/stunner/pkg/apis/v1" a12n "github.com/l7mp/stunner/pkg/authentication" ) @@ -21,36 +21,36 @@ func (s *Stunner) NewAuthHandler() a12n.AuthHandler { auth := s.GetAuth() switch auth.Type { - case v1alpha1.AuthTypePlainText: - auth.Log.Infof("plaintext auth request: username=%q realm=%q srcAddr=%v\n", + case stnrv1.AuthTypeStatic: + auth.Log.Infof("static auth request: username=%q realm=%q srcAddr=%v\n", username, realm, srcAddr) key := a12n.GenerateAuthKey(auth.Username, auth.Realm, auth.Password) if username == auth.Username { - auth.Log.Debug("plaintext auth request: valid username") + auth.Log.Debug("static auth request: valid username") return key, true } - auth.Log.Info("plaintext auth request: failed: invalid username") + auth.Log.Info("static auth request: failed: invalid username") return nil, false - case v1alpha1.AuthTypeLongTerm: - auth.Log.Infof("longterm auth request: username=%q realm=%q srcAddr=%v", + case stnrv1.AuthTypeEphemeral: + auth.Log.Infof("ephemeral auth request: username=%q realm=%q srcAddr=%v", username, realm, srcAddr) if err := a12n.CheckTimeWindowedUsername(username); err != nil { - auth.Log.Infof("longterm auth request: failed: %s", err) + auth.Log.Infof("ephemeral auth request: failed: %s", err) return nil, false } password, err := a12n.GetLongTermCredential(username, auth.Secret) if err != nil { - auth.Log.Infof("longterm auth request: error generating password: %s", + auth.Log.Infof("ephemeral auth request: error generating password: %s", err) return nil, false } - auth.Log.Info("longterm auth request: success") + auth.Log.Info("ephemeral auth request: success") return a12n.GenerateAuthKey(username, auth.Realm, password), true default: @@ -67,14 +67,14 @@ func (s *Stunner) NewPermissionHandler(l *object.Listener) a12n.PermissionHandle return func(src net.Addr, peer net.IP) bool { // need auth for logging - // dynamic: authHandler might have changed behind ur back + // dynamic: authHandler might have changed behind our back auth := s.GetAuth() peerIP := peer.String() - auth.Log.Debugf("permission handler for listener %q: client %q, peer %q", - l.Name, src.String(), peerIP) - clusters := s.clusterManager.Keys() + auth.Log.Debugf("permission handler for listener %q: client %q, peer %q", l.Name, + src.String(), peerIP) + clusters := s.clusterManager.Keys() for _, r := range l.Routes { auth.Log.Tracef("considering route to cluster %q", r) if util.Member(clusters, r) { @@ -114,3 +114,8 @@ func (s *Stunner) NewRealmHandler() object.RealmHandler { return "" } } + +// NewStatusHandler creates a helper function for printing the status of STUNner. 
+func (s *Stunner) NewStatusHandler() object.StatusHandler { + return func() stnrv1.Status { return s.Status() } +} diff --git a/handlers_test.go b/handlers_test.go index 513a76e0..4dd4fe38 100644 --- a/handlers_test.go +++ b/handlers_test.go @@ -10,11 +10,11 @@ import ( "testing" "time" - "github.com/pion/transport/test" - "github.com/pion/turn/v2" + "github.com/pion/transport/v3/test" + "github.com/pion/turn/v4" "github.com/stretchr/testify/assert" - "github.com/l7mp/stunner/pkg/apis/v1alpha1" + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" "github.com/l7mp/stunner/pkg/logger" ) @@ -39,35 +39,35 @@ func longTermCredentials(username string, sharedSecret string) (string, error) { type StunnerTestAuthWithVnet struct { testName string - conf v1alpha1.StunnerConfig + conf stnrv1.StunnerConfig authCred func() (string, string) clientAddr string } var testStunnerAuthWithVnet = []StunnerTestAuthWithVnet{ { - testName: "plaintext", + testName: "static", clientAddr: "1.1.1.1", - conf: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + conf: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "plaintext", + Auth: stnrv1.AuthConfig{ + Type: "static", Credentials: map[string]string{ "username": "user1", "password": "passwd1", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -75,26 +75,26 @@ var testStunnerAuthWithVnet = []StunnerTestAuthWithVnet{ authCred: func() (string, string) { return "user1", "passwd1" }, }, { - testName: "longterm - plain timestamp in username", - conf: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + testName: "ephemeral - plain timestamp in username", + conf: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "longterm", + Auth: stnrv1.AuthConfig{ + Type: "ephemeral", Credentials: map[string]string{ "secret": "my-secret", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -105,26 +105,26 @@ var testStunnerAuthWithVnet = []StunnerTestAuthWithVnet{ }, }, { - testName: "longterm - timestamp:userid in username", - conf: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + testName: "ephemeral - timestamp:userid in username", + conf: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "longterm", + Auth: stnrv1.AuthConfig{ + Type: "ephemeral", Credentials: map[string]string{ "secret": "my-secret", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: 
"allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -137,26 +137,26 @@ var testStunnerAuthWithVnet = []StunnerTestAuthWithVnet{ }, }, { - testName: "longterm - userid:timestamp in username", - conf: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + testName: "ephemeral - userid:timestamp in username", + conf: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "longterm", + Auth: stnrv1.AuthConfig{ + Type: "ephemeral", Credentials: map[string]string{ "secret": "my-secret", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -169,26 +169,26 @@ var testStunnerAuthWithVnet = []StunnerTestAuthWithVnet{ }, }, { - testName: "longterm - userid:timestamp:ramdom-crap in username", - conf: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + testName: "ephemeral - userid:timestamp:ramdom-crap in username", + conf: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "longterm", + Auth: stnrv1.AuthConfig{ + Type: "ephemeral", Credentials: map[string]string{ "secret": "my-secret", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -230,7 +230,7 @@ func TestStunnerAuthServerVNet(t *testing.T) { }) log.Debug("starting stunnerd") - assert.NoError(t, stunner.Reconcile(c), "starting server") + assert.NoError(t, stunner.Reconcile(&c), "starting server") log.Debug("creating a client") lconn, err := v.wan.ListenPacket("udp4", "0.0.0.0:0") diff --git a/internal/icetester/artifacts.go b/internal/icetester/artifacts.go new file mode 100644 index 00000000..be9c9eb2 --- /dev/null +++ b/internal/icetester/artifacts.go @@ -0,0 +1,398 @@ +package icetester + +import ( + "context" + "fmt" + "strings" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/l7mp/stunner/pkg/apis/v1" +) + +const ( + icetesterDataplaneName = "icetester-dataplane" +) + +func newICETesterNamespace(name string) *unstructured.Unstructured { + return &unstructured.Unstructured{ + Object: map[string]any{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": map[string]any{ + "name": name, + }, + }, + } +} + +func newICETesterGatewayClass(namespace string) *unstructured.Unstructured { + return &unstructured.Unstructured{ + Object: map[string]any{ + "apiVersion": "gateway.networking.k8s.io/v1", + "kind": "GatewayClass", + "metadata": map[string]any{ + "name": "icetest-gatewayclass", + }, + "spec": map[string]any{ + "controllerName": "stunner.l7mp.io/gateway-operator", + "parametersRef": map[string]any{ + "group": "stunner.l7mp.io", + "kind": "GatewayConfig", + "name": "icetest-gatewayconfig", + "namespace": namespace, + }, + "description": 
"STUNner is a WebRTC ingress gateway for Kubernetes", + }, + }, + } +} + +func newICETesterGatewayConfig(namespace string) *unstructured.Unstructured { + return &unstructured.Unstructured{ + Object: map[string]any{ + "apiVersion": "stunner.l7mp.io/v1", + "kind": "GatewayConfig", + "metadata": map[string]any{ + "name": "icetest-gatewayconfig", + "namespace": namespace, + }, + "spec": map[string]any{ + "realm": "icetest.l7mp.io", + "authType": "ephemeral", + "sharedSecret": "icetest-secret", + "dataplane": icetesterDataplaneName, + "logLevel": "all:INFO", + }, + }, + } +} + +func newICETesterUDPGateway(namespace string) *unstructured.Unstructured { + return &unstructured.Unstructured{ + Object: map[string]any{ + "apiVersion": "gateway.networking.k8s.io/v1", + "kind": "Gateway", + "metadata": map[string]any{ + "name": "icetest-udp-gateway", + "namespace": namespace, + }, + "spec": map[string]any{ + "gatewayClassName": "icetest-gatewayclass", + "listeners": []any{ + map[string]any{ + "name": "icetest-udp-listener", + "port": int64(3478), + "protocol": "TURN-UDP", + }, + }, + }, + }, + } +} + +func newICETesterTCPGateway(namespace string) *unstructured.Unstructured { + return &unstructured.Unstructured{ + Object: map[string]any{ + "apiVersion": "gateway.networking.k8s.io/v1", + "kind": "Gateway", + "metadata": map[string]any{ + "name": "icetest-tcp-gateway", + "namespace": namespace, + }, + "spec": map[string]any{ + "gatewayClassName": "icetest-gatewayclass", + "listeners": []any{ + map[string]any{ + "name": "icetest-tcp-listener", + "port": int64(3478), + "protocol": "TURN-TCP", + }, + }, + }, + }, + } +} + +func newICETesterUDPRoute(namespace string) *unstructured.Unstructured { + return &unstructured.Unstructured{ + Object: map[string]any{ + "apiVersion": "stunner.l7mp.io/v1", + "kind": "UDPRoute", + "metadata": map[string]any{ + "name": "icetest-route", + "namespace": namespace, + }, + "spec": map[string]any{ + "parentRefs": []any{ + map[string]any{ + "name": "icetest-udp-gateway", + }, + map[string]any{ + "name": "icetest-tcp-gateway", + }, + }, + "rules": []any{ + map[string]any{ + "backendRefs": []any{ + map[string]any{ + "name": "icetest-backend", + }, + map[string]any{ + "name": "icetest-udp-gateway", + }, + map[string]any{ + "name": "icetest-tcp-gateway", + }, + }, + }, + }, + }, + }, + } +} + +func newICETesterBackendPod(namespace, iceTesterImage string) *unstructured.Unstructured { + return &unstructured.Unstructured{ + Object: map[string]any{ + "apiVersion": "/v1", + "kind": "Pod", + "metadata": map[string]any{ + "name": "icetest-backend", + "namespace": namespace, + "labels": map[string]any{ + "app": "icetester", + }, + }, + "spec": map[string]any{ + "containers": []any{ + map[string]any{ + "name": "icetester", + "image": iceTesterImage, + "command": []any{"icetester"}, + "args": []any{"-l", "all:DEBUG"}, + }, + }, + }, + }, + } +} + +func newICETesterBackendService(namespace string) *unstructured.Unstructured { + return &unstructured.Unstructured{ + Object: map[string]any{ + "apiVersion": "v1", + "kind": "Service", + "metadata": map[string]any{ + "name": "icetest-backend", + "namespace": namespace, + "labels": map[string]any{ + "app": "icetester", + }, + }, + "spec": map[string]any{ + "ports": []any{ + map[string]any{ + "name": "whip-port", + "port": int64(v1.DefaultICETesterPort), + "protocol": "TCP", + }, + }, + "selector": map[string]any{ + "app": "icetester", + }, + }, + }, + } +} + +func newICETesterICETesterResources(ns, iceTesterImage string) []*unstructured.Unstructured { 
+ return []*unstructured.Unstructured{ + newICETesterGatewayClass(ns), + newICETesterGatewayConfig(ns), + newICETesterUDPGateway(ns), + newICETesterTCPGateway(ns), + newICETesterUDPRoute(ns), + newICETesterBackendPod(ns, iceTesterImage), + newICETesterBackendService(ns), + } +} + +func (t *iceTester) makeDataplane(ctx context.Context) (*unstructured.Unstructured, error) { + obj := &unstructured.Unstructured{ + Object: map[string]any{ + "apiVersion": "stunner.l7mp.io/v1", + "kind": "Dataplane", + "metadata": map[string]any{ + "name": "default", + }, + }, + } + d, err := t.get(ctx, obj, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("Failed to query default Dataplane: %w", err) + } + + // customize + d.SetName(icetesterDataplaneName) + d.SetResourceVersion("") + for _, s := range []struct { + path []string + value any + }{ + {path: []string{"spec", "terminationGracePeriodSeconds"}, value: int64(0)}, + {path: []string{"spec", "resources", "requests", "cpu"}, value: "250m"}, + {path: []string{"spec", "resources", "requests", "memory"}, value: "256Mi"}, + {path: []string{"spec", "resources", "limits", "cpu"}, value: "250m"}, + {path: []string{"spec", "resources", "limits", "memory"}, value: "256Mi"}, + } { + if err := unstructured.SetNestedField(d.Object, s.value, s.path...); err != nil { + return nil, fmt.Errorf("Failed to set field <%s> to %s: %w", + strings.Join(s.path, "."), s.value, err) + } + } + + if err := t.createOrUpdate(ctx, d, metav1.GetOptions{}, metav1.CreateOptions{}, metav1.UpdateOptions{}); err != nil { + return nil, fmt.Errorf("Failed to create/update tester Dataplane: %w", err) + } + + return d, nil +} + +// k8s client funcs +func (t *iceTester) create(ctx context.Context, obj *unstructured.Unstructured, opts metav1.CreateOptions) error { + gvr, err := getGVR(t.k8sConfig, obj) + if err != nil { + return err + } + + if obj.GetNamespace() != "" { + // For namespaced resources + _, err = t.Resource(gvr).Namespace(obj.GetNamespace()).Create(ctx, obj, opts) + } else { + // For cluster-scoped resources + _, err = t.Resource(gvr).Create(ctx, obj, opts) + } + + return err +} + +func (t *iceTester) get(ctx context.Context, obj *unstructured.Unstructured, opts metav1.GetOptions) (*unstructured.Unstructured, error) { + gvr, err := getGVR(t.k8sConfig, obj) + if err != nil { + return nil, err + } + + var getObj *unstructured.Unstructured + if obj.GetNamespace() != "" { + // For namespaced resources + getObj, err = t.Resource(gvr).Namespace(obj.GetNamespace()).Get(ctx, obj.GetName(), opts) + } else { + // For cluster-scoped resources + getObj, err = t.Resource(gvr).Get(ctx, obj.GetName(), opts) + } + + return getObj, err +} + +func (t *iceTester) getCRD(ctx context.Context, name string, opts metav1.GetOptions) (*unstructured.Unstructured, error) { + gvr := schema.GroupVersionResource{ + Group: "apiextensions.k8s.io", + Version: "v1", + Resource: "customresourcedefinitions", + } + + return t.Resource(gvr).Get(ctx, name, opts) +} + +func (t *iceTester) list(ctx context.Context, obj *unstructured.Unstructured, opts metav1.ListOptions) ([]*unstructured.Unstructured, error) { + gvr, err := getGVR(t.k8sConfig, obj) + if err != nil { + return nil, err + } + + var listObj *unstructured.UnstructuredList + if obj.GetNamespace() != "" { + // For namespaced resources + listObj, err = t.Resource(gvr).Namespace(obj.GetNamespace()).List(ctx, opts) + } else { + // For cluster-scoped resources + listObj, err = t.Resource(gvr).List(ctx, opts) + } + if err != nil { + return nil, err + } + + 
var list = make([]*unstructured.Unstructured, len(listObj.Items)) + for i, o := range listObj.Items { + list[i] = &o + } + + return list, err +} + +func (t *iceTester) delete(ctx context.Context, obj *unstructured.Unstructured, opts metav1.DeleteOptions) error { + gvr, err := getGVR(t.k8sConfig, obj) + if err != nil { + return err + } + + if obj.GetNamespace() != "" { + // For namespaced resources + return t.Resource(gvr).Namespace(obj.GetNamespace()).Delete(ctx, obj.GetName(), opts) + } else { + // For cluster-scoped resources + return t.Resource(gvr). + Delete(ctx, obj.GetName(), opts) + } +} + +func (t *iceTester) update(ctx context.Context, obj *unstructured.Unstructured, opts metav1.UpdateOptions) (*unstructured.Unstructured, error) { + gvr, err := getGVR(t.k8sConfig, obj) + if err != nil { + return nil, err + } + + current, err := t.get(ctx, obj, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + // Copy the ResourceVersion to ensure proper update + obj.SetResourceVersion(current.GetResourceVersion()) + + // Perform the update + if obj.GetNamespace() != "" { + return t.Resource(gvr).Namespace(obj.GetNamespace()).Update(ctx, obj, opts) + } + + return t.Resource(gvr).Update(ctx, obj, opts) +} + +func (t *iceTester) createOrUpdate(ctx context.Context, obj *unstructured.Unstructured, gopts metav1.GetOptions, copts metav1.CreateOptions, uopts metav1.UpdateOptions) error { + if _, err := t.get(ctx, obj, gopts); err != nil { + if apierrors.IsNotFound(err) { + return t.create(ctx, obj, copts) + } else { + _, err := t.update(ctx, obj, uopts) + return err + } + } else { + return err + } +} + +func (t *iceTester) safelyRemove(ctx context.Context, obj *unstructured.Unstructured, gopts metav1.GetOptions, dopts metav1.DeleteOptions) error { + _ = t.delete(ctx, obj, dopts) // nolint:errcheck + return eventually(ctx, func(ctx context.Context) (bool, error) { + if _, err := t.get(ctx, obj, gopts); err != nil && apierrors.IsNotFound(err) { + return true, nil + } else { + return false, err + } + }, 30*time.Second, 250*time.Millisecond) +} diff --git a/internal/icetester/events.go b/internal/icetester/events.go new file mode 100644 index 00000000..14b64a25 --- /dev/null +++ b/internal/icetester/events.go @@ -0,0 +1,75 @@ +package icetester + +import "time" + +// ICE test steps: +// 1. Check install: CDS server and API service available, GW resources and icetest-backend created +// 2. Wait until a GW public IP becomes available: emit one event for all configured TURN protocols +// 3. Run ICE test: emit one event per each configured TURN protocol +// 4. Delete K8s resources: Un-apply static manifests + +type EventType int + +const ( + EventInit EventType = iota + EventInstallationComplete + EventGatewayAvailable + EventICEConfigAvailable + EventAsymmetricICETest + EventSymmetricICETest +) + +func (t EventType) String() string { + switch t { + case EventInit: + return "Initializing" + case EventInstallationComplete: + return "Checking installation" + case EventGatewayAvailable: + return "Checking Gateway" + case EventICEConfigAvailable: + return "Obtaining ICE server configuration" + case EventAsymmetricICETest: + return "Running asymmetric ICE test" + case EventSymmetricICETest: + return "Running symmetric ICE test" + default: + return "N/A" + } +} + +type Event struct { + Type EventType + Error error + Diagnostics string + Args map[string]any + Timestamp time.Time + InProgress bool +} + +const ( + // init + diagK8sConfigUnavailable = "The Kubernetes configuration is unavailable. 
Please check whether kubectl works first." + + diagK8sClientError = "Kubernetes client is dysfunctional or the Kubernetes API server is unreachable. Does kubectl work? Is the Kubernetes context set for the right cluster?" + + diagNamespaceAlreadyExists = "The Kubernetes namespace to be used for the test already exists, most probably due to an earlier unclean exit. The tester refuses to run in such cases in order to avoid interference with existing workloads. If you are sure that the namespace is unused, use '--force-cleanup' to remove it before running the test." + + diagFailedToCreateNamespace = "A namespace for running the test could not be created. This either means that the namespace already exists (e.g., if the icetester could not exit cleanly) or you specified an existing namespace (the tester refuses to run in an existing namespace in order to avoid interfering with the resources existing there), or the current Kubernetes user does not have enough rights to create a namespace. Does 'kubectl create namespace my-namespace' work?" + + // install + diagFailedToQueryOrCreateArtifacts = "Some Kubernetes resources needed for running the tests could not be queried or created. Typically, this occurs because the Gateway API custom resources or STUNner's own custom resources have not been installed, or the current Kubernetes user does not have enough rights to get or create the resource, or some other error occurred." + + diagCDSServerUnavailable = "The STUNner gateway operator is not installed or the installation is incomplete. Is the gateway operator pod running? It is usually called 'stunner-gateway-operator-controller-manager-XXX' in the 'stunner-system' namespace, or in the namespace where you installed STUNner. What is the pod status?" + + diagAuthServiceUnavailable = "The STUNner auth service is not installed or the installation is incomplete. Is the STUNner authentication server pod running? Search for the service called 'stunner-auth' in the 'stunner-system' namespace, or in the namespace where you installed STUNner. What is the status of the pod called 'stunner-auth-XXX' in the same namespace?" + + diagICETesterBackendUnavailable = "The ICE tester backend could not be deployed into the Kubernetes cluster or it is dysfunctional. This is usually a bug in 'stunnerctl'; please file an issue." + + diagCDSServerConnectionFailed = "The STUNner gateway operator is installed but it is dysfunctional. This often occurs because the Gateway API CRDs are missing or are of the wrong version and thus the operator fails to start, or the operator does not have enough RBAC permissions to access the Kubernetes resources it works on." + + diagPublicAddrNotFound = "At least one Gateway could not be exposed on a public IP/port. This is the most typical problem you will see with STUNner: it usually means that the load-balancer integration in your Kubernetes cluster does not work, or, if you are on NodePorts, none of the Kubernetes nodes has a publicly available external IP (look for ExternalIP in your node status)." + + // test + diagICETestFailed = "The ICE test has failed. Check the reported error!" 
+) diff --git a/internal/icetester/floodtest.go b/internal/icetester/floodtest.go new file mode 100644 index 00000000..bbdee640 --- /dev/null +++ b/internal/icetester/floodtest.go @@ -0,0 +1,136 @@ +package icetester + +import ( + "context" + "encoding/binary" + "net" + "sort" + "sync" + "sync/atomic" + "time" + + "gonum.org/v1/gonum/stat" +) + +const ( + MaxPacketCount = 10000 +) + +type Stats struct { + SendRate float64 // packets per second + LossRate float64 // percentage + MeanLatency float64 // milliseconds + MedianLatency float64 // milliseconds + P95Latency float64 // milliseconds + P99Latency float64 // milliseconds + PacketsSent uint32 + PacketsReceived uint32 + Duration time.Duration +} + +type Packet struct { + SeqNum uint32 + Timestamp int64 +} + +func FloodTest(ctx context.Context, conn net.Conn, interval time.Duration, packetSize int) (*Stats, error) { + // Prepare buffer pool + bufferPool := sync.Pool{ + New: func() interface{} { + return make([]byte, packetSize) + }, + } + + // Stats + received := make(map[uint32]int64) + latencies := make([]float64, 0) + + // Atomic counter for sequence numbers + var currentSeq uint32 + + // Start receiver goroutine + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + buffer := bufferPool.Get().([]byte) + defer bufferPool.Put(buffer) //nolint:staticcheck + + for { + _, err := conn.Read(buffer) + if err != nil { + return + } + + seqNum := binary.BigEndian.Uint32(buffer[0:4]) + timestamp := int64(binary.BigEndian.Uint64(buffer[4:12])) + received[seqNum] = timestamp + latency := float64(time.Now().UnixNano()-timestamp) / float64(time.Millisecond) + latencies = append(latencies, latency) + } + }() + + // Start sender goroutine + wg.Add(1) + var startTime, endTime time.Time + go func() { + defer wg.Done() + startTime = time.Now() + defer func() { endTime = time.Now() }() + + for { + buffer := bufferPool.Get().([]byte) + seq := atomic.AddUint32(¤tSeq, 1) - 1 + + binary.BigEndian.PutUint32(buffer[0:4], seq) + binary.BigEndian.PutUint64(buffer[4:12], uint64(time.Now().UnixNano())) + + // Fill rest of buffer with sequence number for verification + for j := 12; j < packetSize; j++ { + buffer[j] = byte(seq) + } + + _, err := conn.Write(buffer) + if err != nil { + return + } + bufferPool.Put(buffer) //nolint:staticcheck + + if interval != 0 { + select { + case <-time.After(interval): + case <-ctx.Done(): + return + } + } + } + }() + + // Wait for test duration plus a grace period for receiving remaining packets + <-ctx.Done() + conn.Close() // this will stop the goroutines + + wg.Wait() + + // Calculate statistics + packetsSent := atomic.LoadUint32(¤tSeq) + duration := endTime.Sub(startTime) + stats := &Stats{ + PacketsSent: packetsSent, + PacketsReceived: uint32(len(received)), + Duration: duration, + SendRate: float64(packetsSent) / duration.Seconds(), + LossRate: (float64(packetsSent) - float64(len(received))) / float64(packetsSent) * 100, + } + + sort.Float64s(latencies) + if len(latencies) > 0 { + // Convert slice to float64 slice if not already + stats.MeanLatency = stat.Mean(latencies, nil) + stats.MedianLatency = stat.Quantile(0.5, stat.Empirical, latencies, nil) + stats.P95Latency = stat.Quantile(0.95, stat.Empirical, latencies, nil) + stats.P99Latency = stat.Quantile(0.99, stat.Empirical, latencies, nil) + } + + return stats, nil +} diff --git a/internal/icetester/icetester.go b/internal/icetester/icetester.go new file mode 100644 index 00000000..10957b45 --- /dev/null +++ b/internal/icetester/icetester.go @@ -0,0 +1,685 @@ 
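For context, a minimal sketch of how the FloodTest helper above could be driven over a plain UDP socket. The address, send interval, and packet size here are illustrative assumptions, and the peer is assumed to echo packets back so that loss and latency can be measured; in the patch itself FloodTest is called from the ICE tester's Start routine further below, over the WHIP-negotiated connection.

package main

import (
	"context"
	"fmt"
	"log"
	"net"
	"time"

	"github.com/l7mp/stunner/internal/icetester"
)

func main() {
	// Run the flood for 20 seconds at roughly 1000 packets/s (1ms send interval),
	// 100-byte packets (the first 12 bytes carry the sequence number and timestamp).
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()

	// Assumed UDP echo peer; FloodTest only reports useful stats if packets come back.
	conn, err := net.Dial("udp", "127.0.0.1:5001")
	if err != nil {
		log.Fatal(err)
	}

	stats, err := icetester.FloodTest(ctx, conn, time.Millisecond, 100)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("sent=%d received=%d loss=%.1f%% median=%.2fms p95=%.2fms\n",
		stats.PacketsSent, stats.PacketsReceived, stats.LossRate,
		stats.MedianLatency, stats.P95Latency)
}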
+package icetester + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net" + "net/http" + "net/url" + "slices" + "time" + + "github.com/pion/ice/v4" + "github.com/pion/logging" + "github.com/pion/webrtc/v4" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + cliopt "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + + v1 "github.com/l7mp/stunner/pkg/apis/v1" + cdsclient "github.com/l7mp/stunner/pkg/config/client" + "github.com/l7mp/stunner/pkg/logger" + "github.com/l7mp/stunner/pkg/whipconn" +) + +const ( + DefaultICETesterImage = "docker.io/l7mp/icetester:latest" + DefaultICETesterTimeout = 5 * time.Minute + DefaultICETesterPacketRate time.Duration = 0 + + floodTestPacketSize = 100 + floodTestTimeout = 20 * time.Second +) + +var ( + crdCheckList = []string{ + // Gateway API + "gatewayclasses.gateway.networking.k8s.io", + "gateways.gateway.networking.k8s.io", + "tcproutes.gateway.networking.k8s.io", + "udproutes.gateway.networking.k8s.io", + // STUNner + "dataplanes.stunner.l7mp.io", + "gatewayconfigs.stunner.l7mp.io", + "staticservices.stunner.l7mp.io", + "udproutes.stunner.l7mp.io", + } +) + +type ICETestType int + +const ( + ICETestAsymmetric ICETestType = iota + ICETestSymmetric +) + +func (t ICETestType) String() string { + switch t { + case ICETestAsymmetric: + return "Asymmetric" + case ICETestSymmetric: + return "Symmetric" + default: + return "N/A" + } +} + +type Config struct { + EventChannel chan Event + + K8sConfigFlags *cliopt.ConfigFlags + CDSConfigFlags *cdsclient.CDSConfigFlags + AuthConfigFlags *cdsclient.AuthConfigFlags + + Namespace string + TURNTransports []v1.ListenerProtocol + ICETesterImage string + ForceCleanup bool + PacketRate int + + Logger logger.LoggerFactory +} + +type iceTester struct { + k8sConfig *rest.Config + *dynamic.DynamicClient + + eventCh chan Event + + k8sConfigFlags *cliopt.ConfigFlags + cdsConfigFlags *cdsclient.CDSConfigFlags + authConfigFlags *cdsclient.AuthConfigFlags + + namespace string + transports []v1.ListenerProtocol + iceTesterImage string + forceCleanup bool + floodTestSendInterval time.Duration + + logger logger.LoggerFactory + log logging.LeveledLogger +} + +func NewICETester(config Config) (*iceTester, error) { + logr := config.Logger + if logr == nil { + logr = logger.NewLoggerFactory("all:INFO") + } + + image := DefaultICETesterImage + if config.ICETesterImage != "" { + image = config.ICETesterImage + } + + var sendInterval time.Duration + if config.PacketRate == 0 { + sendInterval = 0 + } else { + sendInterval = time.Duration(int64(float64(time.Second) / float64(config.PacketRate))) + } + + tester := &iceTester{ + eventCh: config.EventChannel, + + k8sConfigFlags: config.K8sConfigFlags, + cdsConfigFlags: config.CDSConfigFlags, + authConfigFlags: config.AuthConfigFlags, + + namespace: config.Namespace, + transports: config.TURNTransports, + iceTesterImage: image, + forceCleanup: config.ForceCleanup, + floodTestSendInterval: sendInterval, + + logger: logr, + log: logr.NewLogger("icetester"), + } + + return tester, nil +} + +func (t *iceTester) Start(ctx context.Context) error { + log := t.log + + ///////// EventInit in-progress + t.sendEventInit(EventInit, nil) //nolint:errcheck + + log.Infof("Creating a Kubernetes client") + k8sConfig, err := t.k8sConfigFlags.ToRESTConfig() + if err != nil { + return t.sendEventComplete(EventInit, + fmt.Errorf("Error building a Kubernetes config: %w", err), + 
diagK8sConfigUnavailable, + nil, + ) + } + t.k8sConfig = k8sConfig + + cs, err := dynamic.NewForConfig(k8sConfig) + if err != nil { + return t.sendEventComplete(EventInit, + fmt.Errorf("Error creating a Kubernetes client: %w", err), + diagK8sClientError, + nil, + ) + } + t.DynamicClient = cs + + // log.Infof("Checking basic connectivity") + // apiGroupList := &unstructured.Unstructured{} + // apiGroupList.SetGroupVersionKind(schema.GroupVersionKind{Group: "apiextensions.k8s.io", Version: "v1", Kind: "APIGroup"}) + // if _, err := t.get(ctx, apiGroupList, metav1.GetOptions{}); err != nil { + // return t.sendEventComplete(EventInit, + // fmt.Errorf("Failed to query API server: %w", err), + // diagK8sClientError, + // nil, + // ) + // } + + log.Infof("Creating a namespace for testing") + ns := newICETesterNamespace(t.namespace) + if _, err := t.get(ctx, ns, metav1.GetOptions{}); err == nil { + if t.forceCleanup { + if err := t.safelyRemove(ctx, ns, metav1.GetOptions{}, metav1.DeleteOptions{}); err != nil { + return t.sendEventComplete(EventInit, + fmt.Errorf("Failed to clean up tester namespace %s (--force-cleanup enforced): %w", + ns.GetName(), err), + diagFailedToQueryOrCreateArtifacts, + nil, + ) + } + } else { + return t.sendEventComplete(EventInit, + fmt.Errorf("Namespace %s already exists, halting test", ns.GetName()), + diagNamespaceAlreadyExists, + nil, + ) + } + } else if !apierrors.IsNotFound(err) { + return t.sendEventComplete(EventInit, + fmt.Errorf("Error querying testing namespace %s: %w", ns.GetName(), err), + diagFailedToCreateNamespace, + nil, + ) + } + + if err := t.create(ctx, ns, metav1.CreateOptions{}); err != nil { + return t.sendEventComplete(EventInit, + fmt.Errorf("Error creating a namespace for running the tests: %w", err), + diagFailedToCreateNamespace, + nil, + ) + } + defer func() { + // do not use ctx: it might have timed out + if err := t.delete(context.TODO(), ns, metav1.DeleteOptions{}); err != nil { + log.Errorf("Error deleting namespace: %s", err.Error()) + } + }() + + log.Infof("Creating custom Dataplane") + d, err := t.makeDataplane(ctx) + if err != nil { + return t.sendEventComplete(EventInit, + fmt.Errorf("Failed to create custom Dataplane: %w", err), + diagFailedToQueryOrCreateArtifacts, + nil, + ) + } + defer func() { + // do not use ctx: it might have timed out + if err := t.delete(context.TODO(), d, metav1.DeleteOptions{}); err != nil { + log.Errorf("Error deleting custom Dataplane: %s", err.Error()) + } + }() + + ///////// EventInit ready + t.sendEventComplete(EventInit, nil, "", nil) //nolint:errcheck + + ///////// EventInstallationComplete in-progress + t.sendEventInit(EventInstallationComplete, nil) //nolint:errcheck + + log.Infof("Querying CRDs") + for _, crd := range crdCheckList { + if _, err := t.getCRD(ctx, crd, metav1.GetOptions{}); err != nil { + return t.sendEventComplete(EventInstallationComplete, + fmt.Errorf("Failed to query CRD %s: %w", crd, err), + diagFailedToQueryOrCreateArtifacts, + nil, + ) + } + } + + log.Infof("Inserting tester artifacts") + for _, obj := range newICETesterICETesterResources(t.namespace, t.iceTesterImage) { + if err := t.safelyRemove(ctx, obj, metav1.GetOptions{}, metav1.DeleteOptions{}); err != nil { + return t.sendEventComplete(EventInstallationComplete, + fmt.Errorf("Failed to clean up resource %s/%s of kind %s: %w", + obj.GetNamespace(), obj.GetName(), obj.GetKind(), err), + diagFailedToQueryOrCreateArtifacts, + nil, + ) + } + + if err := t.create(ctx, obj, metav1.CreateOptions{}); err != nil { + return 
t.sendEventComplete(EventInstallationComplete, + fmt.Errorf("Failed to create resource %s/%s of kind %s: %w", + obj.GetNamespace(), obj.GetName(), obj.GetKind(), err), + diagFailedToQueryOrCreateArtifacts, + nil, + ) + } + + log.Debugf("Created resource %s/%s of kind %s", obj.GetNamespace(), + obj.GetName(), obj.GetKind()) + } + defer func() { + for _, obj := range newICETesterICETesterResources(t.namespace, t.iceTesterImage) { + // do not use ctx: it might have timed out + if err := t.delete(context.TODO(), obj, metav1.DeleteOptions{}); err != nil { + log.Errorf("Error deleting resource: %s", err.Error()) + } + } + }() + + log.Infof("Checking tester backend") + iceTesterPod := newICETesterBackendPod(t.namespace, t.iceTesterImage) + if err := eventually(ctx, t.podStatusChecker(iceTesterPod, metav1.GetOptions{}), 30*time.Second, 250*time.Millisecond); err != nil { + return t.sendEventComplete(EventInstallationComplete, + fmt.Errorf("ICE tester backend %s/%s not running or ready: %w", + iceTesterPod.GetNamespace(), iceTesterPod.GetName(), err), + diagFailedToQueryOrCreateArtifacts, + nil, + ) + } + + whipEndpoint, err := cdsclient.DiscoverK8sPod(ctx, t.k8sConfigFlags, t.namespace, "app=icetester", v1.DefaultICETesterPort, + t.logger.NewLogger("auth-fwd")) + if err != nil { + return t.sendEventComplete(EventInstallationComplete, + fmt.Errorf("Error searching for ICE tester backend pod: %w", err), + diagFailedToQueryOrCreateArtifacts, + nil, + ) + } + + log.Info("Searching for CDS server") + cdsPod, err := cdsclient.DiscoverK8sCDSServer(ctx, t.k8sConfigFlags, t.cdsConfigFlags, + t.logger.NewLogger("cds-fwd")) + if err != nil { + return t.sendEventComplete(EventInstallationComplete, + fmt.Errorf("Error searching for CDS server: %w", err), + diagCDSServerUnavailable, + nil, + ) + } + + log.Info("Searching for authentication service") + authPod, err := cdsclient.DiscoverK8sAuthServer(ctx, t.k8sConfigFlags, t.authConfigFlags, + t.logger.NewLogger("auth-fwd")) + if err != nil { + return t.sendEventComplete(EventInstallationComplete, + fmt.Errorf("Error searching for auth service: %w", err), + diagAuthServiceUnavailable, + nil, + ) + } + + ///////// EventInstallationComplete ready + t.sendEventComplete(EventInstallationComplete, nil, "", nil) //nolint:errcheck + + ///////// EventGatewayAvailable in-progress + t.sendEventInit(EventGatewayAvailable, nil) //nolint:errcheck + + log.Info("Checking dataplane") + for _, proto := range t.transports { + gw := gwFromProto(proto, t.namespace) + + log.Infof("Checking public address for Gateway %s", gw.GetName()) + cds, err := cdsclient.NewConfigNamespaceNameAPI(cdsPod.Addr, t.namespace, gw.GetName(), + t.logger.NewLogger("cds-client")) + if err != nil { + return t.sendEventComplete(EventGatewayAvailable, + fmt.Errorf("Could not connect to CDS server for obtaining the configuration of Gateway %s: %w", + gw.GetName(), err), + diagCDSServerConnectionFailed, + nil, + ) + } + + if err := eventually(ctx, func(ctx context.Context) (bool, error) { + confs, err := cds.Get(ctx) + if err != nil { + // log.Tracef("Could not get dataplane config: %w", err) + return false, nil + } + + if len(confs) != 1 { + return false, errors.New("Expected exactly one dataplane config") // this should never happen + } + + found := false + for _, c := range confs[0].Clusters { + if len(c.Endpoints) != 0 { + found = true + break + } + } + if !found { + // errors.New("No clusters found") + return false, nil // operator not ready yet: retry + + } + + for _, l := range confs[0].Listeners { + if 
l.PublicAddr == "" || l.PublicPort == 0 { + return false, nil // no public address yet: retry + } + } + + return true, nil + }, 60*time.Second, 250*time.Millisecond); err != nil { + return t.sendEventComplete(EventGatewayAvailable, + fmt.Errorf("Failed to find public address for Gateway %s/%s: %w", t.namespace, gw.GetName(), err), + diagPublicAddrNotFound, + nil, + ) + } + + log.Infof("Checking dataplane pod for Gateway %s/%s", gw.GetNamespace(), gw.GetName()) + // name is unstable, choose by label + opts := metav1.ListOptions{LabelSelector: makeSelector(map[string]string{"app": "my-app", "some-label": "some-value"})} + if err := eventually(ctx, t.podListStatusChecker(opts), 60*time.Second, 250*time.Millisecond); err != nil { + return t.sendEventComplete(EventGatewayAvailable, + fmt.Errorf("Dataplane pod for Gateway %s/%s not running or ready: %w", + iceTesterPod.GetNamespace(), iceTesterPod.GetName(), err), + diagFailedToQueryOrCreateArtifacts, + nil, + ) + } + } + + ///////// EventGatewayAvailable ready + t.sendEventComplete(EventGatewayAvailable, nil, "", nil) //nolint:errcheck + + ///////// EventICEConfigAvailable in-progress + t.sendEventInit(EventICEConfigAvailable, nil) //nolint:errcheck + + iceServers := map[v1.ListenerProtocol]webrtc.Configuration{} + for _, proto := range t.transports { + log.Infof("Testing ICE connection over TURN transport %s", proto.String()) + + gw := gwFromProto(proto, t.namespace) + + log.Infof("Obtaining ICE server config for Gateway %s/%s", t.namespace, gw.GetName()) + if err := eventually(ctx, func(ctx context.Context) (bool, error) { + u := url.URL{ + Scheme: "http", + Host: authPod.Addr, + Path: "/ice", + } + q := u.Query() + q.Set("service", "turn") + q.Set("namespace", t.namespace) + q.Set("gateway", gw.GetName()) + u.RawQuery = q.Encode() + + req, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil { + return false, fmt.Errorf("Error preparing auth service request: %w", err) + } + + res, err := http.DefaultClient.Do(req) + if err != nil { + return false, fmt.Errorf("Failed to query auth service: %w", err) + } + + if res.StatusCode != http.StatusOK { + return false, nil + // return false, fmt.Errorf("Wrong HTTP status querying auth service: %d", res.StatusCode) // this may return false for a while + } + + var iceconf struct { + ICEServers []webrtc.ICEServer + ICETransportPolicy webrtc.ICETransportPolicy + } + if err := json.NewDecoder(res.Body).Decode(&iceconf); err != nil { // Handle errors + return false, fmt.Errorf("Failed to parse the ICE server config obtained from the auth service: %w", err) + } + + iceServers[proto] = webrtc.Configuration{ + ICEServers: iceconf.ICEServers, + ICETransportPolicy: iceconf.ICETransportPolicy, + } + + return true, nil + }, 10*time.Second, 250*time.Millisecond); err != nil { + return t.sendEventComplete(EventInstallationComplete, + fmt.Errorf("Could not obtain ICE server config for Gateway %s/%s: %w", t.namespace, gw.GetName(), err), + diagCDSServerConnectionFailed, + nil, + ) + } + } + + ///////// EventICEServerConfigAvailable ready + t.sendEventComplete(EventICEConfigAvailable, nil, "", nil) //nolint:errcheck + + for _, iceTestType := range []ICETestType{ICETestAsymmetric, ICETestSymmetric} { + var eventType EventType + switch iceTestType { + case ICETestAsymmetric: + eventType = EventAsymmetricICETest + case ICETestSymmetric: + eventType = EventSymmetricICETest + default: + } + for _, proto := range t.transports { + iceConfig := iceServers[proto] + + ///////// EventICETestComplete 
in-progress + t.sendEventInit(eventType, map[string]any{"ICETransport": proto.String()}) //nolint:errcheck + + log.Infof("Performing %s ICE test for ICE transport %s", iceTestType.String(), proto.String()) + + listenerConfig := whipconn.Config{} + // in symmetric tests the listener uses the same ICE servers as the dialer + if iceTestType == ICETestSymmetric { + listenerConfig.ICEServers = t.updateICEServerAddr(iceConfig.ICEServers, proto) + listenerConfig.ICETransportPolicy = webrtc.ICETransportPolicyRelay + } + log.Debugf("Setting ICE tester listener config: %#v", listenerConfig) + if err := eventually(ctx, func(ctx context.Context) (bool, error) { + uri := url.URL{ + Scheme: "http", + Host: whipEndpoint.Addr, + Path: "/config", + } + + b, err := json.Marshal(listenerConfig) + if err != nil { + return false, fmt.Errorf("Error preparing config: %w", err) // should never happen + } + + _, err = http.Post(uri.String(), "application/json", bytes.NewReader(b)) + if err != nil { + return false, fmt.Errorf("Failed to POST config: %w", err) // should never happen + } + + // query back + req, err := http.NewRequest(http.MethodGet, uri.String(), nil) + if err != nil { + return false, fmt.Errorf("Error preparing GET request for querying the config: %w", err) + } + req.Header.Add("Content-Type", "application/json") + + res, err := http.DefaultClient.Do(req) + if err != nil { + return false, fmt.Errorf("Failed to GET config: %w", err) + } + + err = json.NewDecoder(res.Body).Decode(&listenerConfig) + if err != nil { + return false, fmt.Errorf("Failed to decode config: %w", err) + } + + return true, nil + }, 10*time.Second, 250*time.Millisecond); err != nil { + return t.sendEventComplete(eventType, + fmt.Errorf("Could not update config on ICE tester backend: %w", err), + diagICETesterBackendUnavailable, + nil, + ) + } + + dialerConfig := listenerConfig + dialerConfig.ICEServers = iceConfig.ICEServers + dialerConfig.ICETransportPolicy = webrtc.ICETransportPolicyRelay + dialerConfig.WHIPEndpoint = whipconn.WhipEndpoint + log.Debugf("Using ICE tester dialer config: %#v", dialerConfig) + + log.Debug("Dialing") + var clientConn net.Conn + if err := eventually(ctx, func(ctx context.Context) (bool, error) { + conn, err := whipconn.NewDialer(dialerConfig, t.logger).DialContext(ctx, whipEndpoint.Addr) + if err != nil { + return false, nil // not fatal, should be retried + } + clientConn = conn + return true, nil + }, 90*time.Second, 1000*time.Millisecond); err != nil { + return t.sendEventComplete(EventAsymmetricICETest, + fmt.Errorf("Could not send WHIP request to ICE tester backend: %w", err), + diagICETestFailed, + map[string]any{"ICETransport": proto.String()}) + } + + localSelected, remoteSelected, err := t.getSelectedICECandidates(clientConn) + if err != nil { + return t.sendEventComplete(eventType, + fmt.Errorf("Failed to find selected ICE candidate pair: %w", err), + diagICETestFailed, + map[string]any{"ICETransport": proto.String()}) + } + localCandidates, remoteCandidates, err := t.getICECandidates(clientConn, localSelected, remoteSelected) + if err != nil { + return t.sendEventComplete(eventType, + fmt.Errorf("Failed to find ICE candidates: %w", err), + diagICETestFailed, + map[string]any{"ICETransport": proto.String()}) + } + + timeout, stop := context.WithTimeout(ctx, floodTestTimeout) + defer stop() // useless + stats, err := FloodTest(timeout, clientConn, t.floodTestSendInterval, floodTestPacketSize) + if err != nil { + return t.sendEventComplete(eventType, + fmt.Errorf("Flood test failed: %w", 
err), + diagICETestFailed, + map[string]any{"ICETransport": proto.String()}) + } + + // floodtest closes the connection on normal exit, but not on error + clientConn.Close() + + ///////// EventICETestComplete ready + //nolint:errcheck + t.sendEventComplete(eventType, nil, "", map[string]any{ + "ICETransport": proto.String(), + "Stats": stats, + "LocalICECandidates": localCandidates, + "RemoteICECandidates": remoteCandidates, + }) + } + } + + return nil +} + +func (t *iceTester) sendEventInit(typ EventType, args map[string]any) { + t.eventCh <- Event{Type: typ, InProgress: true, Timestamp: time.Now(), Args: args} +} + +func (t *iceTester) sendEventComplete(typ EventType, err error, diag string, args map[string]any) error { + t.eventCh <- Event{Type: typ, Error: err, Timestamp: time.Now(), Diagnostics: diag, Args: args} + return err +} + +func (t *iceTester) getSelectedICECandidates(conn net.Conn) (string, string, error) { + whipconn, ok := conn.(*whipconn.DialerConn) + if !ok { + return "", "", errors.New("failed to cast net.Conn to whipconn") + } + + peerConn := whipconn.GetPeerConnection() + transport := peerConn.SCTP().Transport().ICETransport() + selectedPair, err := transport.GetSelectedCandidatePair() + if err != nil { + return "", "", err + } + + return selectedPair.Local.String(), selectedPair.Remote.String(), nil +} + +// parse from the sdps +type CandidateDesc struct { + Candidate string + Selected bool +} + +func (t *iceTester) getICECandidates(conn net.Conn, localSelected, remoteSelected string) ([]CandidateDesc, []CandidateDesc, error) { + local, remote := []CandidateDesc{}, []CandidateDesc{} + + whipconn, ok := conn.(*whipconn.DialerConn) + if !ok { + return local, remote, errors.New("failed to cast net.Conn to whipconn") + } + + peerConn := whipconn.GetPeerConnection() + + getcands := func(desc *webrtc.SessionDescription, selected string) []CandidateDesc { + ret := []CandidateDesc{} + sdp, err := desc.Unmarshal() + if err != nil { + return ret + } + + for _, m := range sdp.MediaDescriptions { + for _, attr := range m.Attributes { + if attr.IsICECandidate() { + ice, err := ice.UnmarshalCandidate(attr.String()) + if err == nil { + cand := ice.String() + if !ContainsDesc(ret, cand) { // candidates are often dumplicated for some reason + mark := false + if cand == selected { + mark = true + } + ret = append(ret, CandidateDesc{Candidate: cand, Selected: mark}) + } + } + } + } + } + + return ret + } + + local = getcands(peerConn.LocalDescription(), localSelected) + remote = getcands(peerConn.RemoteDescription(), remoteSelected) + + return local, remote, nil +} + +func ContainsDesc(cs []CandidateDesc, c string) bool { + ss := make([]string, len(cs)) + for i, c := range cs { + ss[i] = c.Candidate + } + return slices.Contains(ss, c) +} diff --git a/internal/icetester/utils.go b/internal/icetester/utils.go new file mode 100644 index 00000000..bb271bfe --- /dev/null +++ b/internal/icetester/utils.go @@ -0,0 +1,183 @@ +package icetester + +import ( + "context" + "fmt" + "regexp" + "time" + + "github.com/pion/webrtc/v4" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + memory "k8s.io/client-go/discovery/cached" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" + + v1 "github.com/l7mp/stunner/pkg/apis/v1" +) + +var turnURIAddrRegexp = regexp.MustCompile(`:(\d+.\d+.\d+.\d+):`) + +func getGVR(config 
*rest.Config, obj *unstructured.Unstructured) (schema.GroupVersionResource, error) { + dc, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return schema.GroupVersionResource{}, err + } + + mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(dc)) + + gv, err := schema.ParseGroupVersion(obj.GetAPIVersion()) + if err != nil { + return schema.GroupVersionResource{}, err + } + + gvk := gv.WithKind(obj.GetKind()) + mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return schema.GroupVersionResource{}, err + } + + return mapping.Resource, nil +} + +func gwFromProto(proto v1.ListenerProtocol, ns string) *unstructured.Unstructured { + switch proto { + case v1.ListenerProtocolTURNUDP, v1.ListenerProtocolUDP: + return newICETesterUDPGateway(ns) + case v1.ListenerProtocolTCP, v1.ListenerProtocolTURNTCP: + return newICETesterTCPGateway(ns) + default: + return nil + } +} + +// updateICEServerAddr modifies the cluster-side ICE server config for symmetric ICe tests so that +// the backend will be configured with the gateway as a TURN server +func (t *iceTester) updateICEServerAddr(ss []webrtc.ICEServer, proto v1.ListenerProtocol) []webrtc.ICEServer { + // use service as the TURN server address + gw := gwFromProto(proto, t.namespace) + ret := []webrtc.ICEServer{} + for _, s := range ss { + urls := []string{} + for _, u := range s.URLs { + urls = append(urls, turnURIAddrRegexp.ReplaceAllString(u, + fmt.Sprintf(":%s.%s.svc.cluster.local:", gw.GetName(), gw.GetNamespace()))) + } + ret = append(ret, webrtc.ICEServer{ + URLs: urls, + Username: s.Username, + Credential: s.Credential, + }) + } + return ret +} + +func makeSelector(matcher map[string]string) string { + labelSelector := metav1.LabelSelector{MatchLabels: matcher} + selector, err := metav1.LabelSelectorAsSelector(&labelSelector) + if err != nil { + panic(err) + } + return selector.String() +} + +// checkers +type checkerFunc func(ctx context.Context) (bool, error) + +func eventually(ctx context.Context, condition checkerFunc, timeout, interval time.Duration) error { + timeoutCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + ok, err := condition(ctx) + if err != nil { + return err + } + if ok { + return nil + } + + select { + case <-timeoutCtx.Done(): + return timeoutCtx.Err() + case <-ticker.C: + continue + } + } +} + +func (t *iceTester) podStatusChecker(obj *unstructured.Unstructured, opts metav1.GetOptions) checkerFunc { + return func(ctx context.Context) (bool, error) { + pod, err := t.get(ctx, obj, opts) + if err != nil { + if !apierrors.IsNotFound(err) { + return false, err + } + return false, nil //retry + } + + phase, found, err := unstructured.NestedString(pod.Object, "status", "phase") + if err != nil || !found { + return false, nil // retry + } + + if phase != "Running" { + return false, nil // retry + } + + conditions, found, err := unstructured.NestedSlice(pod.Object, "status", "conditions") + if err != nil || !found { + return false, nil // retry + } + + ready := false + for _, c := range conditions { + condition, ok := c.(map[string]any) + if !ok { + continue + } + if condition["type"] == "Ready" && condition["status"] == "True" { + ready = true + break + } + } + + return ready, nil + } +} + +// check status of all pods in the podlist, best used for querying pods by a label-selector +func (t *iceTester) podListStatusChecker(opts metav1.ListOptions) checkerFunc { + 
return func(ctx context.Context) (bool, error) { + obj := &unstructured.Unstructured{ + Object: map[string]any{ + "apiVersion": "/v1", + "kind": "Pod", + }, + } + + pods, err := t.list(ctx, obj, opts) + if err != nil { + return false, err + } + + for _, p := range pods { + chk := t.podStatusChecker(p, metav1.GetOptions{}) + ok, err := chk(ctx) + if err != nil { + return false, err + } + if !ok { + return false, nil + } + } + + return true, nil + } +} diff --git a/internal/manager/manager.go b/internal/manager/manager.go index 328cc5a7..d4a791e0 100644 --- a/internal/manager/manager.go +++ b/internal/manager/manager.go @@ -8,7 +8,7 @@ import ( "github.com/pion/logging" "github.com/l7mp/stunner/internal/object" - "github.com/l7mp/stunner/pkg/apis/v1alpha1" + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" ) // Manager stores STUNner objects @@ -20,7 +20,7 @@ type Manager interface { // Delete deletes the object from the store, may return ErrReturnRequired Delete(o object.Object) error // PrepareReconciliation prepares the reconciliation of the manager - PrepareReconciliation(confs []v1alpha1.Config, stunenerConf v1alpha1.Config) (*ReconciliationState, error) + PrepareReconciliation(confs []stnrv1.Config, stunenerConf stnrv1.Config) (*ReconciliationState, error) // FinishReconciliation finishes the reconciliation from the specified state FinishReconciliation(state *ReconciliationState) error // Keys returns the names iof all objects in the store in alphabetical order, suitable for iteration diff --git a/internal/manager/reconcile.go b/internal/manager/reconcile.go index 37bd3f27..7c8a30b5 100644 --- a/internal/manager/reconcile.go +++ b/internal/manager/reconcile.go @@ -4,12 +4,12 @@ import ( "fmt" "github.com/l7mp/stunner/internal/object" - "github.com/l7mp/stunner/pkg/apis/v1alpha1" + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" ) type ReconcileJob struct { Object object.Object - NewConfig, OldConfig v1alpha1.Config + NewConfig, OldConfig stnrv1.Config } type ReconciliationState struct { @@ -20,7 +20,7 @@ type ReconciliationState struct { // PrepareReconciliation prepares the reconciliation of the objects handled by the manager and returns a // set of reconciliation jobs to be performed, ErrRestartRequired if the server needs to be // restarted, and an error if the config was not accepted. Configuration must be validated. 
-func (m *managerImpl) PrepareReconciliation(confs []v1alpha1.Config, stunnerConf v1alpha1.Config) (*ReconciliationState, error) { +func (m *managerImpl) PrepareReconciliation(confs []stnrv1.Config, stunnerConf stnrv1.Config) (*ReconciliationState, error) { m.log.Tracef("preparing reconciliation") state := ReconciliationState{ @@ -134,7 +134,7 @@ func (m *managerImpl) FinishReconciliation(state *ReconciliationState) error { return nil } -func findConfByName(confs []v1alpha1.Config, name string) bool { +func findConfByName(confs []stnrv1.Config, name string) bool { for _, c := range confs { if c.ConfigName() == name { return true diff --git a/internal/object/admin.go b/internal/object/admin.go index 103a2eb8..9bb5c7eb 100644 --- a/internal/object/admin.go +++ b/internal/object/admin.go @@ -2,20 +2,18 @@ package object import ( "context" + "encoding/json" "errors" "fmt" "net" "net/http" "net/url" "strconv" - // "time" "github.com/pion/logging" "github.com/prometheus/client_golang/prometheus/promhttp" - health "github.com/heptiolabs/healthcheck" - - "github.com/l7mp/stunner/pkg/apis/v1alpha1" + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" ) const DefaultAdminObjectName = "DefaultAdmin" @@ -26,28 +24,57 @@ type Admin struct { DryRun bool MetricsEndpoint, HealthCheckEndpoint string metricsServer, healthCheckServer *http.Server - health health.Handler + health *http.ServeMux log logging.LeveledLogger } // NewAdmin creates a new Admin object. -func NewAdmin(conf v1alpha1.Config, dryRun bool, rc health.Check, logger logging.LoggerFactory) (Object, error) { - req, ok := conf.(*v1alpha1.AdminConfig) +func NewAdmin(conf stnrv1.Config, dryRun bool, rc ReadinessHandler, status StatusHandler, logger logging.LoggerFactory) (Object, error) { + req, ok := conf.(*stnrv1.AdminConfig) if !ok { - return nil, v1alpha1.ErrInvalidConf + return nil, stnrv1.ErrInvalidConf } admin := Admin{ DryRun: dryRun, - health: health.NewHandler(), + health: http.NewServeMux(), log: logger.NewLogger("stunner-admin"), } admin.log.Tracef("NewAdmin: %s", req.String()) // health checker // liveness probe always succeeds once we got here - admin.health.AddLivenessCheck("server-alive", func() error { return nil }) - admin.health.AddReadinessCheck("server-ready", rc) + admin.health.HandleFunc("/live", func(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.WriteHeader(http.StatusOK) + w.Write([]byte("{}\n")) //nolint:errcheck + }) + // readniness checker calls the checker from the factory + admin.health.HandleFunc("/ready", func(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + if err := rc(); err != nil { + w.WriteHeader(http.StatusServiceUnavailable) + + w.Write([]byte(fmt.Sprintf("{\"status\":%d,\"message\":\"%s\"}\n", //nolint:errcheck + http.StatusServiceUnavailable, err.Error()))) + } else { + w.WriteHeader(http.StatusOK) + w.Write([]byte(fmt.Sprintf("{\"status\":%d,\"message\":\"%s\"}\n", //nolint:errcheck + http.StatusOK, "READY"))) + } + }) + // status handler returns the status + admin.health.HandleFunc("/status", func(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + if js, err := json.Marshal(status()); err != nil { + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte(fmt.Sprintf("{\"status\":%d,\"message\":\"%s\"}\n", //nolint:errcheck + http.StatusInternalServerError, err.Error()))) + } else { + 
w.WriteHeader(http.StatusOK) + w.Write(js) //nolint:errcheck + } + }) if err := admin.Reconcile(req); err != nil && !errors.Is(err, ErrRestartRequired) { return nil, err @@ -58,16 +85,16 @@ func NewAdmin(conf v1alpha1.Config, dryRun bool, rc health.Check, logger logging // Inspect examines whether a configuration change requires a reconciliation (returns true if it // does) or restart (returns ErrRestartRequired). -func (a *Admin) Inspect(old, new, full v1alpha1.Config) (bool, error) { +func (a *Admin) Inspect(old, new, full stnrv1.Config) (bool, error) { return !old.DeepEqual(new), nil } // Reconcile updates the authenticator for a new configuration. Requires a valid reconciliation // request. -func (a *Admin) Reconcile(conf v1alpha1.Config) error { - req, ok := conf.(*v1alpha1.AdminConfig) +func (a *Admin) Reconcile(conf stnrv1.Config) error { + req, ok := conf.(*stnrv1.AdminConfig) if !ok { - return v1alpha1.ErrInvalidConf + return stnrv1.ErrInvalidConf } if err := req.Validate(); err != nil { @@ -96,7 +123,7 @@ func (a *Admin) Reconcile(conf v1alpha1.Config) error { // ObjectName returns the name of the object. func (a *Admin) ObjectName() string { - return v1alpha1.DefaultAdminName + return stnrv1.DefaultAdminName } // ObjectType returns the type of the object. @@ -105,14 +132,14 @@ func (a *Admin) ObjectType() string { } // GetConfig returns the configuration of the running object. -func (a *Admin) GetConfig() v1alpha1.Config { +func (a *Admin) GetConfig() stnrv1.Config { a.log.Tracef("GetConfig") // use a copy when taking the pointer: we don't want anyone downstream messing with our own // copies h := a.HealthCheckEndpoint - return &v1alpha1.AdminConfig{ + return &stnrv1.AdminConfig{ Name: a.Name, LogLevel: a.LogLevel, MetricsEndpoint: a.MetricsEndpoint, @@ -143,7 +170,20 @@ func (a *Admin) Close() error { return nil } -func (a *Admin) reconcileMetrics(req *v1alpha1.AdminConfig) error { +// Status returns the status of the object. +func (a *Admin) Status() stnrv1.Status { + s := stnrv1.AdminStatus{ + Name: a.Name, + LogLevel: a.LogLevel, + MetricsEndpoint: a.MetricsEndpoint, + HealthCheckEndpoint: a.HealthCheckEndpoint, + } + + // add licensing status here + return &s +} + +func (a *Admin) reconcileMetrics(req *stnrv1.AdminConfig) error { a.log.Trace("reconcileMetrics") if a.DryRun { @@ -158,7 +198,7 @@ func (a *Admin) reconcileMetrics(req *v1alpha1.AdminConfig) error { a.log.Tracef("closing metrics server at %s", mEndpoint) if err := a.metricsServer.Shutdown(context.Background()); err != nil { - return fmt.Errorf("error stopping metrics server at %s: %w", + return fmt.Errorf("failed to stop metrics server at %s: %w", mEndpoint, err) } a.metricsServer = nil @@ -205,7 +245,7 @@ end: } // req MUST be validated! 
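As a quick smoke test for the health-check mux registered in NewAdmin above, the three endpoints can be probed with a plain HTTP client; the address below is an assumption (whatever HealthCheckEndpoint resolves to, e.g. after a kubectl port-forward to the stunnerd pod), not something defined by this patch.

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Assumed health-check address; substitute the actual HealthCheckEndpoint.
	base := "http://127.0.0.1:8086"

	for _, path := range []string{"/live", "/ready", "/status"} {
		resp, err := http.Get(base + path)
		if err != nil {
			fmt.Printf("%s: error: %v\n", path, err)
			continue
		}
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		// /live and /ready return a small JSON health payload; /status returns
		// whatever the StatusHandler passed to the Admin factory reports.
		fmt.Printf("%s -> %d %s\n", path, resp.StatusCode, body)
	}
}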
-func (a *Admin) reconcileHealthCheck(req *v1alpha1.AdminConfig) error { +func (a *Admin) reconcileHealthCheck(req *stnrv1.AdminConfig) error { a.log.Trace("reconcileHealthCheck") // if req is validated then either @@ -276,23 +316,24 @@ end: // AdminFactory can create now Admin objects type AdminFactory struct { dry bool - rc health.Check + rc ReadinessHandler + status StatusHandler logger logging.LoggerFactory } // NewAdminFactory creates a new factory for Admin objects -func NewAdminFactory(dryRun bool, rc health.Check, logger logging.LoggerFactory) Factory { - return &AdminFactory{dry: dryRun, rc: rc, logger: logger} +func NewAdminFactory(dryRun bool, rc ReadinessHandler, status StatusHandler, logger logging.LoggerFactory) Factory { + return &AdminFactory{dry: dryRun, rc: rc, status: status, logger: logger} } // New can produce a new Admin object from the given configuration. A nil config will create an // empty admin object (useful for creating throwaway objects for, e.g., calling Inpect) -func (f *AdminFactory) New(conf v1alpha1.Config) (Object, error) { +func (f *AdminFactory) New(conf stnrv1.Config) (Object, error) { if conf == nil { return &Admin{}, nil } - return NewAdmin(conf, f.dry, f.rc, f.logger) + return NewAdmin(conf, f.dry, f.rc, f.status, f.logger) } func getHealthAddr(e string) string { @@ -315,7 +356,7 @@ func getHealthAddr(e string) string { port := u.Port() if port == "" { - port = fmt.Sprintf("%d", v1alpha1.DefaultHealthCheckPort) + port = fmt.Sprintf("%d", stnrv1.DefaultHealthCheckPort) } return addr + ":" + port @@ -341,7 +382,7 @@ func getMetricsAddr(e string) (string, string) { port := u.Port() if port == "" { - port = strconv.Itoa(v1alpha1.DefaultMetricsPort) + port = strconv.Itoa(stnrv1.DefaultMetricsPort) } addr = addr + ":" + port diff --git a/internal/object/auth.go b/internal/object/auth.go index cc770bea..aba8e04d 100644 --- a/internal/object/auth.go +++ b/internal/object/auth.go @@ -6,21 +6,21 @@ import ( "github.com/pion/logging" - "github.com/l7mp/stunner/pkg/apis/v1alpha1" + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" ) // Auth is the STUNner authenticator type Auth struct { - Type v1alpha1.AuthType + Type stnrv1.AuthType Realm, Username, Password, Secret string Log logging.LeveledLogger } // NewAuth creates a new authenticator. -func NewAuth(conf v1alpha1.Config, logger logging.LoggerFactory) (Object, error) { - req, ok := conf.(*v1alpha1.AuthConfig) +func NewAuth(conf stnrv1.Config, logger logging.LoggerFactory) (Object, error) { + req, ok := conf.(*stnrv1.AuthConfig) if !ok { - return nil, v1alpha1.ErrInvalidConf + return nil, stnrv1.ErrInvalidConf } auth := Auth{Log: logger.NewLogger("stunner-auth")} @@ -35,15 +35,15 @@ func NewAuth(conf v1alpha1.Config, logger logging.LoggerFactory) (Object, error) // Inspect examines whether a configuration change requires a reconciliation (returns true if it // does) or restart (returns ErrRestartRequired). -func (auth *Auth) Inspect(old, new, full v1alpha1.Config) (bool, error) { +func (auth *Auth) Inspect(old, new, full stnrv1.Config) (bool, error) { return !old.DeepEqual(new), nil } // Reconcile updates the authenticator for a new configuration. 
-func (auth *Auth) Reconcile(conf v1alpha1.Config) error { - req, ok := conf.(*v1alpha1.AuthConfig) +func (auth *Auth) Reconcile(conf stnrv1.Config) error { + req, ok := conf.(*stnrv1.AuthConfig) if !ok { - return v1alpha1.ErrInvalidConf + return stnrv1.ErrInvalidConf } if err := req.Validate(); err != nil { @@ -51,7 +51,7 @@ func (auth *Auth) Reconcile(conf v1alpha1.Config) error { } // type already validated - atype, _ := v1alpha1.NewAuthType(req.Type) + atype, _ := stnrv1.NewAuthType(req.Type) auth.Log.Debugf("using authentication: %s", atype.String()) @@ -59,10 +59,10 @@ func (auth *Auth) Reconcile(conf v1alpha1.Config) error { auth.Type = atype auth.Realm = req.Realm switch atype { - case v1alpha1.AuthTypePlainText: + case stnrv1.AuthTypeStatic: auth.Username = req.Credentials["username"] auth.Password = req.Credentials["password"] - case v1alpha1.AuthTypeLongTerm: + case stnrv1.AuthTypeEphemeral: auth.Secret = req.Credentials["secret"] } @@ -72,7 +72,7 @@ func (auth *Auth) Reconcile(conf v1alpha1.Config) error { // ObjectName returns the name of the object func (auth *Auth) ObjectName() string { // singleton! - return v1alpha1.DefaultAuthName + return stnrv1.DefaultAuthName } // ObjectType returns the type of the object @@ -81,18 +81,18 @@ func (a *Auth) ObjectType() string { } // GetConfig returns the configuration of the running authenticator -func (auth *Auth) GetConfig() v1alpha1.Config { +func (auth *Auth) GetConfig() stnrv1.Config { auth.Log.Tracef("GetConfig") - r := v1alpha1.AuthConfig{ + r := stnrv1.AuthConfig{ Type: auth.Type.String(), Realm: auth.Realm, Credentials: make(map[string]string), } switch auth.Type { - case v1alpha1.AuthTypePlainText: + case stnrv1.AuthTypeStatic: r.Credentials["username"] = auth.Username r.Credentials["password"] = auth.Password - case v1alpha1.AuthTypeLongTerm: + case stnrv1.AuthTypeEphemeral: r.Credentials["secret"] = auth.Secret } @@ -105,6 +105,11 @@ func (auth *Auth) Close() error { return nil } +// Status returns the status of the object. +func (auth *Auth) Status() stnrv1.Status { + return auth.GetConfig() +} + // AuthFactory can create now Auth objects type AuthFactory struct { logger logging.LoggerFactory @@ -117,7 +122,7 @@ func NewAuthFactory(logger logging.LoggerFactory) Factory { // New can produce a new Auth object from the given configuration. A nil config will create an // empty auth object (useful for creating throwaway objects for, e.g., calling Inpect) -func (f *AuthFactory) New(conf v1alpha1.Config) (Object, error) { +func (f *AuthFactory) New(conf stnrv1.Config) (Object, error) { if conf == nil { return &Auth{}, nil } diff --git a/internal/object/cluster.go b/internal/object/cluster.go index a2dfb3c7..1aec7b03 100644 --- a/internal/object/cluster.go +++ b/internal/object/cluster.go @@ -10,26 +10,27 @@ import ( "github.com/l7mp/stunner/internal/resolver" "github.com/l7mp/stunner/internal/util" - "github.com/l7mp/stunner/pkg/apis/v1alpha1" + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" ) // Listener implements a STUNner cluster type Cluster struct { Name string - Type v1alpha1.ClusterType - Protocol v1alpha1.ClusterProtocol - Endpoints []net.IPNet + Type stnrv1.ClusterType + Protocol stnrv1.ClusterProtocol + Endpoints []*util.Endpoint Domains []string Resolver resolver.DnsResolver // for strict DNS - logger logging.LoggerFactory - log logging.LeveledLogger + + logger logging.LoggerFactory + log logging.LeveledLogger } // NewCluster creates a new cluster. 
-func NewCluster(conf v1alpha1.Config, resolver resolver.DnsResolver, logger logging.LoggerFactory) (Object, error) { - req, ok := conf.(*v1alpha1.ClusterConfig) +func NewCluster(conf stnrv1.Config, resolver resolver.DnsResolver, logger logging.LoggerFactory) (Object, error) { + req, ok := conf.(*stnrv1.ClusterConfig) if !ok { - return nil, v1alpha1.ErrInvalidConf + return nil, stnrv1.ErrInvalidConf } // make sure req.Name is correct @@ -39,14 +40,14 @@ func NewCluster(conf v1alpha1.Config, resolver resolver.DnsResolver, logger logg c := Cluster{ Name: req.Name, - Endpoints: []net.IPNet{}, + Endpoints: []*util.Endpoint{}, Domains: []string{}, Resolver: resolver, logger: logger, log: logger.NewLogger(fmt.Sprintf("stunner-cluster-%s", req.Name)), } - c.log.Tracef("NewCluster: %sv", req.String()) + c.log.Tracef("NewCluster: %s", req.String()) if err := c.Reconcile(req); err != nil && err != ErrRestartRequired { return nil, err @@ -57,15 +58,15 @@ func NewCluster(conf v1alpha1.Config, resolver resolver.DnsResolver, logger logg // Inspect examines whether a configuration change requires a reconciliation (returns true if it // does) or restart (returns ErrRestartRequired). -func (c *Cluster) Inspect(old, new, full v1alpha1.Config) (bool, error) { +func (c *Cluster) Inspect(old, new, full stnrv1.Config) (bool, error) { return !old.DeepEqual(new), nil } // Reconcile updates the authenticator for a new configuration. -func (c *Cluster) Reconcile(conf v1alpha1.Config) error { - req, ok := conf.(*v1alpha1.ClusterConfig) +func (c *Cluster) Reconcile(conf stnrv1.Config) error { + req, ok := conf.(*stnrv1.ClusterConfig) if !ok { - return v1alpha1.ErrInvalidConf + return stnrv1.ErrInvalidConf } if err := req.Validate(); err != nil { @@ -73,45 +74,25 @@ func (c *Cluster) Reconcile(conf v1alpha1.Config) error { } c.log.Tracef("Reconcile: %s", req.String()) - c.Type, _ = v1alpha1.NewClusterType(req.Type) - c.Protocol, _ = v1alpha1.NewClusterProtocol(req.Protocol) + c.Type, _ = stnrv1.NewClusterType(req.Type) + c.Protocol, _ = stnrv1.NewClusterProtocol(req.Protocol) switch c.Type { - case v1alpha1.ClusterTypeStatic: + case stnrv1.ClusterTypeStatic: // remove existing endpoints and start anew c.Endpoints = c.Endpoints[:0] for _, e := range req.Endpoints { // try to parse as a subnet - _, n, err := net.ParseCIDR(e) - if err == nil { - c.Endpoints = append(c.Endpoints, *n) - continue - } - - // try to parse as an IP address - a := net.ParseIP(e) - if a == nil { - c.log.Warnf("cluster %q: invalid endpoint IP: %q, ignoring", c.Name, e) - continue - } - - // add a prefix and reparse - if a.To4() == nil { - e = e + "/128" - } else { - e = e + "/32" - } - - _, n2, err := net.ParseCIDR(e) + ep, err := util.ParseEndpoint(e) if err != nil { - c.log.Warnf("cluster %q: could not convert endpoint %q to CIDR subnet ", + c.log.Warnf("cluster %q: could not parse endpoint %q ", "(ignoring): %s", c.Name, e, err.Error()) - continue } - c.Endpoints = append(c.Endpoints, *n2) + c.Endpoints = append(c.Endpoints, ep) } - case v1alpha1.ClusterTypeStrictDNS: + case stnrv1.ClusterTypeStrictDNS: + // TODO: port-range support for DNS clusters if c.Resolver == nil { return fmt.Errorf("STRICT_DNS cluster %q initialized with no DNS resolver", c.Name) } @@ -145,21 +126,20 @@ func (c *Cluster) ObjectType() string { } // GetConfig returns the configuration of the running cluster. 
-func (c *Cluster) GetConfig() v1alpha1.Config { - conf := v1alpha1.ClusterConfig{ +func (c *Cluster) GetConfig() stnrv1.Config { + conf := stnrv1.ClusterConfig{ Name: c.Name, Protocol: c.Protocol.String(), Type: c.Type.String(), } switch c.Type { - case v1alpha1.ClusterTypeStatic: + case stnrv1.ClusterTypeStatic: conf.Endpoints = make([]string, len(c.Endpoints)) for i, e := range c.Endpoints { - // e.String() adds a /32 at the end of IPs, remove - conf.Endpoints[i] = strings.TrimRight(e.String(), "/32") + conf.Endpoints[i] = e.String() } - case v1alpha1.ClusterTypeStrictDNS: + case stnrv1.ClusterTypeStrictDNS: conf.Endpoints = make([]string, len(c.Domains)) copy(conf.Endpoints, c.Domains) conf.Endpoints = sort.StringSlice(conf.Endpoints) @@ -173,9 +153,9 @@ func (c *Cluster) Close() error { c.log.Trace("closing cluster") switch c.Type { - case v1alpha1.ClusterTypeStatic: + case stnrv1.ClusterTypeStatic: // do nothing - case v1alpha1.ClusterTypeStrictDNS: + case stnrv1.ClusterTypeStrictDNS: for _, d := range c.Domains { c.Resolver.Unregister(d) } @@ -184,24 +164,37 @@ func (c *Cluster) Close() error { return nil } -// Route decides whwther a peer IP appears among the permitted endpoints of a cluster. +// Status returns the status of the object. +func (c *Cluster) Status() stnrv1.Status { + return c.GetConfig() +} + +// Route decides whether a peer IP appears among the permitted endpoints of a cluster. func (c *Cluster) Route(peer net.IP) bool { - c.log.Tracef("Route: cluster %q of type %s, peer IP: %s", c.Name, c.Type.String(), + return c.Match(peer, 0) +} + +// Match decides whether a peer IP and port matches one of the permitted endpoints of a cluster. If +// port is zero then port-matching is disabled. +func (c *Cluster) Match(peer net.IP, port int) bool { + c.log.Tracef("Match: cluster %q of type %s, peer IP: %s", c.Name, c.Type.String(), peer.String()) switch c.Type { - case v1alpha1.ClusterTypeStatic: + case stnrv1.ClusterTypeStatic: // endpoints are IPNets + c.log.Tracef("route: STATIC cluster with %d endpoints", len(c.Endpoints)) + for _, e := range c.Endpoints { c.log.Tracef("considering endpoint %q", e) - if e.Contains(peer) { + if e.Match(peer, port) { return true } } - case v1alpha1.ClusterTypeStrictDNS: + case stnrv1.ClusterTypeStrictDNS: // endpoints are obtained from the DNS - c.log.Tracef("running STRICT_DNS cluster with domains: [%s]", strings.Join(c.Domains, ", ")) + c.log.Tracef("route: STRICT_DNS cluster with domains: [%s]", strings.Join(c.Domains, ", ")) for _, d := range c.Domains { c.log.Tracef("considering domain %q", d) @@ -213,7 +206,6 @@ func (c *Cluster) Route(peer net.IP) bool { for _, n := range hs { c.log.Tracef("considering IP address %q", n) - if n.Equal(peer) { return true } @@ -237,7 +229,7 @@ func NewClusterFactory(resolver resolver.DnsResolver, logger logging.LoggerFacto // New can produce a new Cluster object from the given configuration. 
A nil config will create an // empty cluster object (useful for creating throwaway objects for, e.g., calling Inpect) -func (f *ClusterFactory) New(conf v1alpha1.Config) (Object, error) { +func (f *ClusterFactory) New(conf stnrv1.Config) (Object, error) { if conf == nil { return &Cluster{}, nil } diff --git a/internal/object/listener.go b/internal/object/listener.go index bd22f41d..2fb059a1 100644 --- a/internal/object/listener.go +++ b/internal/object/listener.go @@ -9,19 +9,21 @@ import ( "strings" "github.com/pion/logging" - "github.com/pion/transport/v2" - "github.com/pion/turn/v2" + "github.com/pion/transport/v3" + "github.com/pion/turn/v4" "github.com/l7mp/stunner/internal/util" - "github.com/l7mp/stunner/pkg/apis/v1alpha1" + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" ) // Listener implements a STUNner listener. type Listener struct { Name, Realm string - Proto v1alpha1.ListenerProtocol + Proto stnrv1.ListenerProtocol Addr net.IP Port, MinPort, MaxPort int + PublicAddr string // for GetConfig() + PublicPort int // for GetConfig() rawAddr string // net.IP.String() may rewrite the string representation Cert, Key []byte Conns []any // either a set of turn.ListenerConfigs or turn.PacketConnConfigs @@ -34,10 +36,10 @@ type Listener struct { } // NewListener creates a new listener. Requires a server restart (returns ErrRestartRequired) -func NewListener(conf v1alpha1.Config, net transport.Net, realmHandler RealmHandler, logger logging.LoggerFactory) (Object, error) { - req, ok := conf.(*v1alpha1.ListenerConfig) +func NewListener(conf stnrv1.Config, net transport.Net, realmHandler RealmHandler, logger logging.LoggerFactory) (Object, error) { + req, ok := conf.(*stnrv1.ListenerConfig) if !ok { - return nil, v1alpha1.ErrInvalidConf + return nil, stnrv1.ErrInvalidConf } // make sure req.Name is correct @@ -46,12 +48,14 @@ func NewListener(conf v1alpha1.Config, net transport.Net, realmHandler RealmHand } l := Listener{ - Name: req.Name, - Net: net, - getRealm: realmHandler, - Conns: []any{}, - logger: logger, - log: logger.NewLogger(fmt.Sprintf("stunner-listener-%s", req.Name)), + Name: req.Name, + PublicAddr: req.PublicAddr, + PublicPort: req.PublicPort, + Net: net, + getRealm: realmHandler, + Conns: []any{}, + logger: logger, + log: logger.NewLogger(fmt.Sprintf("stunner-listener-%s", req.Name)), } l.log.Tracef("NewListener: %s", req.String()) @@ -65,20 +69,20 @@ func NewListener(conf v1alpha1.Config, net transport.Net, realmHandler RealmHand // Inspect examines whether a configuration change requires a reconciliation (returns true if it // does) or restart (returns ErrRestartRequired). 
-func (l *Listener) Inspect(old, new, full v1alpha1.Config) (bool, error) { - req, ok := new.(*v1alpha1.ListenerConfig) +func (l *Listener) Inspect(old, new, full stnrv1.Config) (bool, error) { + req, ok := new.(*stnrv1.ListenerConfig) if !ok { - return false, v1alpha1.ErrInvalidConf + return false, stnrv1.ErrInvalidConf } - stunnerConf, ok := full.(*v1alpha1.StunnerConfig) + stunnerConf, ok := full.(*stnrv1.StunnerConfig) if !ok { - return false, v1alpha1.ErrInvalidConf + return false, stnrv1.ErrInvalidConf } changed := !old.DeepEqual(req) - proto, _ := v1alpha1.NewListenerProtocol(req.Protocol) + proto, _ := stnrv1.NewListenerProtocol(req.Protocol) cert, err := base64.StdEncoding.DecodeString(req.Cert) if err != nil { return false, fmt.Errorf("invalid TLS certificate: base64-decode error: %w", err) @@ -88,14 +92,12 @@ func (l *Listener) Inspect(old, new, full v1alpha1.Config) (bool, error) { return false, fmt.Errorf("invalid TLS key: base64-decode error: %w", err) } - // the only chance we don't need a restart if only the Routes change + // the only chance we don't need a restart if only the Routes and/or PublicIP/PublicPort change restart := ErrRestartRequired if l.Name == req.Name && // name unchanged (should always be true) l.Proto == proto && // protocol unchanged l.rawAddr == req.Addr && // address unchanged l.Port == req.Port && // ports unchanged - l.MinPort == req.MinRelayPort && - l.MaxPort == req.MaxRelayPort && bytes.Equal(l.Cert, cert) && // TLS creds unchanged bytes.Equal(l.Key, key) { restart = nil @@ -112,10 +114,10 @@ func (l *Listener) Inspect(old, new, full v1alpha1.Config) (bool, error) { } // Reconcile updates a listener. -func (l *Listener) Reconcile(conf v1alpha1.Config) error { - req, ok := conf.(*v1alpha1.ListenerConfig) +func (l *Listener) Reconcile(conf stnrv1.Config) error { + req, ok := conf.(*stnrv1.ListenerConfig) if !ok { - return v1alpha1.ErrInvalidConf + return stnrv1.ErrInvalidConf } l.log.Tracef("Reconcile: %s", req.String()) @@ -124,7 +126,7 @@ func (l *Listener) Reconcile(conf v1alpha1.Config) error { return err } - proto, _ := v1alpha1.NewListenerProtocol(req.Protocol) + proto, _ := stnrv1.NewListenerProtocol(req.Protocol) ipAddr := net.ParseIP(req.Addr) // special-case "localhost" if ipAddr == nil && req.Addr == "localhost" { @@ -137,8 +139,8 @@ func (l *Listener) Reconcile(conf v1alpha1.Config) error { l.Proto = proto l.Addr = ipAddr l.rawAddr = req.Addr - l.Port, l.MinPort, l.MaxPort = req.Port, req.MinRelayPort, req.MaxRelayPort - if proto == v1alpha1.ListenerProtocolTLS || proto == v1alpha1.ListenerProtocolDTLS { + l.Port = req.Port + if proto == stnrv1.ListenerProtocolTURNTLS || proto == stnrv1.ListenerProtocolTURNDTLS { cert, err := base64.StdEncoding.DecodeString(req.Cert) if err != nil { return fmt.Errorf("invalid TLS certificate: base64-decode error: %w", err) @@ -152,6 +154,9 @@ func (l *Listener) Reconcile(conf v1alpha1.Config) error { } l.Realm = l.getRealm() + l.PublicAddr = req.PublicAddr + l.PublicPort = req.PublicPort + l.Routes = make([]string, len(req.Routes)) copy(l.Routes, req.Routes) @@ -176,17 +181,17 @@ func (l *Listener) ObjectType() string { } // GetConfig returns the configuration of the running listener. -func (l *Listener) GetConfig() v1alpha1.Config { +func (l *Listener) GetConfig() stnrv1.Config { // must be sorted! 
sort.Strings(l.Routes) - c := &v1alpha1.ListenerConfig{ - Name: l.Name, - Protocol: l.Proto.String(), - Addr: l.rawAddr, - Port: l.Port, - MinRelayPort: l.MinPort, - MaxRelayPort: l.MaxPort, + c := &stnrv1.ListenerConfig{ + Name: l.Name, + Protocol: l.Proto.String(), + Addr: l.rawAddr, + Port: l.Port, + PublicAddr: l.PublicAddr, + PublicPort: l.PublicPort, } c.Cert = string(l.Cert) @@ -204,7 +209,7 @@ func (l *Listener) Close() error { for _, c := range l.Conns { switch l.Proto { - case v1alpha1.ListenerProtocolUDP: + case stnrv1.ListenerProtocolTURNUDP: l.log.Tracef("closing %s packet socket at %s", l.Proto.String(), l.Addr) conn, ok := c.(turn.PacketConnConfig) @@ -216,7 +221,7 @@ func (l *Listener) Close() error { if err := conn.PacketConn.Close(); err != nil && !util.IsClosedErr(err) { return err } - case v1alpha1.ListenerProtocolTCP, v1alpha1.ListenerProtocolTLS, v1alpha1.ListenerProtocolDTLS: + case stnrv1.ListenerProtocolTURNTCP, stnrv1.ListenerProtocolTURNTLS, stnrv1.ListenerProtocolTURNDTLS: l.log.Tracef("closing %s listener socket at %s", l.Proto.String(), l.Addr) conn, ok := c.(turn.ListenerConfig) @@ -244,6 +249,11 @@ func (l *Listener) Close() error { return nil } +// Status returns the status of the object. +func (l *Listener) Status() stnrv1.Status { + return l.GetConfig() +} + // /////////// // ListenerFactory can create now Listener objects type ListenerFactory struct { @@ -263,7 +273,7 @@ func NewListenerFactory(net transport.Net, realmHandler RealmHandler, logger log // New can produce a new Listener object from the given configuration. A nil config will create an // empty listener object (useful for creating throwaway objects for, e.g., calling Inpect) -func (f *ListenerFactory) New(conf v1alpha1.Config) (Object, error) { +func (f *ListenerFactory) New(conf stnrv1.Config) (Object, error) { if conf == nil { return &Listener{}, nil } diff --git a/internal/object/object.go b/internal/object/object.go index 5a2a1ec5..c80edab8 100644 --- a/internal/object/object.go +++ b/internal/object/object.go @@ -1,8 +1,6 @@ package object -import ( - "github.com/l7mp/stunner/pkg/apis/v1alpha1" -) +import stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" // Object is the high-level interface for all STUNner objects like listeners, clusters, etc. type Object interface { @@ -11,19 +9,21 @@ type Object interface { // ObjectType returns the type of the object. ObjectType() string // Inspect examines whether a configuration change requires a reconciliation or restart. - Inspect(old, new, full v1alpha1.Config) (bool, error) + Inspect(old, new, full stnrv1.Config) (bool, error) // Reconcile updates the object for a new configuration. - Reconcile(conf v1alpha1.Config) error + Reconcile(conf stnrv1.Config) error // GetConfig returns the configuration of the running authenticator. - GetConfig() v1alpha1.Config + GetConfig() stnrv1.Config // Close closes the object, may return ErrRestartRequired. Close() error + // Status returns the status of the object. + Status() stnrv1.Status } -// Factory can create new objects +// Factory can create new objects. type Factory interface { - // New will spawn a new object from the factory - New(conf v1alpha1.Config) (Object, error) + // New will spawn a new object from the factory. + New(conf stnrv1.Config) (Object, error) } // ReadinessHandler is a callback that allows an object to check the readiness of STUNner. @@ -31,3 +31,6 @@ type ReadinessHandler = func() error // RealmHandler is a callback that allows an object to find out the authentication realm. 
type RealmHandler = func() string + +// StatusHandler is a callback that allows an object to obtain the status of STUNNer. +type StatusHandler = func() stnrv1.Status diff --git a/internal/resolver/mock.go b/internal/resolver/mock.go index d25aa437..2d2960a0 100644 --- a/internal/resolver/mock.go +++ b/internal/resolver/mock.go @@ -63,5 +63,5 @@ func (m *MockResolver) Lookup(domain string) ([]net.IP, error) { } } - return []net.IP{}, fmt.Errorf("Host %q not found: 3(NXDOMAIN)", domain) + return []net.IP{}, fmt.Errorf("host %q not found: 3(NXDOMAIN)", domain) } diff --git a/internal/resolver/mock_test.go b/internal/resolver/mock_test.go index c24f6484..8e3b0ab4 100644 --- a/internal/resolver/mock_test.go +++ b/internal/resolver/mock_test.go @@ -16,7 +16,7 @@ func TestMockResolver(t *testing.T) { loggerFactory := logger.NewLoggerFactory(resolverTestLoglevel) log := loggerFactory.NewLogger("resolver-test") - log.Debug("setting up the mock DNS") + log.Debug("Setting up the mock DNS") mockDns := NewMockResolver(map[string]([]string){ "stunner.l7mp.io": []string{"1.2.3.4"}, "echo-server.l7mp.io": []string{"1.2.3.5"}, diff --git a/internal/resolver/resolver.go b/internal/resolver/resolver.go index 568cbe96..6b1f8328 100644 --- a/internal/resolver/resolver.go +++ b/internal/resolver/resolver.go @@ -85,13 +85,13 @@ func (r *dnsResolverImpl) Register(domain string) error { // the resolver goroutine func startResolver(e *serviceEntry, log logging.LeveledLogger) { - log.Infof("resolver thread starting for domain %q, DNS update interval: %v", + log.Infof("Resolver thread starting for domain %q, DNS update interval: %v", e.domain, dnsUpdateInterval) if err := doResolve(e); err != nil { - log.Debugf("initial resolution failed for domain %q: %s", e.domain, err.Error()) + log.Debugf("Initial resolution failed for domain %q: %s", e.domain, err.Error()) } - log.Tracef("initial resolution ready for domain %q, found %d endpoints", e.domain, + log.Tracef("Initial resolution ready for domain %q, found %d endpoints", e.domain, len(e.hostNames)) ticker := time.NewTicker(dnsUpdateInterval) @@ -100,15 +100,15 @@ func startResolver(e *serviceEntry, log logging.LeveledLogger) { for { select { case <-e.ctx.Done(): - log.Debugf("resolver thread exiting for domain %q", e.domain) + log.Debugf("Resolver thread exiting for domain %q", e.domain) return case <-ticker.C: - log.Tracef("resolving for domain %q", e.domain) + log.Tracef("Resolving for domain %q", e.domain) if err := doResolve(e); err != nil { - log.Debugf("resolution failed for domain %q: %s", + log.Debugf("Resolution failed for domain %q: %s", e.domain, err.Error()) } - log.Tracef("periodic resolution ready for domain %q, found %d endpoints", e.domain, + log.Tracef("Periodic resolution ready for domain %q, found %d endpoints", e.domain, len(e.hostNames)) } } @@ -119,7 +119,7 @@ func doResolve(e *serviceEntry) error { if e.cname == "" { cname, err := e.resolver.LookupCNAME(e.ctx, e.domain) if err != nil { - return fmt.Errorf("Cannot resolve CNAME for domain %q: %s", + return fmt.Errorf("failed to resolve CNAME for domain %q: %s", e.domain, err.Error()) } e.cname = cname @@ -127,7 +127,7 @@ func doResolve(e *serviceEntry) error { hosts, err := e.resolver.LookupHost(e.ctx, e.domain) if err != nil { - return fmt.Errorf("Cannot resolve CNAME for domain %q: %s", + return fmt.Errorf("failed to resolve CNAME for domain %q: %s", e.domain, err.Error()) } @@ -175,7 +175,7 @@ func (r *dnsResolverImpl) Lookup(domain string) ([]net.IP, error) { e, found := r.register[domain] if 
!found { - return []net.IP{}, fmt.Errorf("Unknown domain name: %q", domain) + return []net.IP{}, fmt.Errorf("unknown domain name: %q", domain) } e.lock.RLock() diff --git a/internal/telemetry/metrics.go b/internal/telemetry/metrics.go deleted file mode 100644 index 815917e8..00000000 --- a/internal/telemetry/metrics.go +++ /dev/null @@ -1,161 +0,0 @@ -package telemetry - -import ( - // "github.com/pion/logging" - "github.com/prometheus/client_golang/prometheus" -) - -const ( - stunnerNamespace = "stunner" -) - -var ( - promConnLabels = []string{"name"} - promCounterLabels = []string{"name", "direction"} - promListenerPacketsTotal *prometheus.CounterVec - promListenerBytesTotal *prometheus.CounterVec - promListenerConnsTotal *prometheus.CounterVec - promListenerConnsActive *prometheus.GaugeVec - promClusterPacketsTotal *prometheus.CounterVec - promClusterBytesTotal *prometheus.CounterVec - promClusterConnsTotal *prometheus.CounterVec - promClusterConnsActive *prometheus.GaugeVec -) - -func Init() { - // listener stats - promListenerConnsActive = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: stunnerNamespace, - Subsystem: "listener", - Name: "connections", - Help: "Number of active downstream connections at a listener.", - }, promConnLabels) - promListenerConnsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: stunnerNamespace, - Subsystem: "listener", - Name: "connections_total", - Help: "Number of downstream connections at a listener.", - }, promConnLabels) - promListenerPacketsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: stunnerNamespace, - Subsystem: "listener", - Name: "packets_total", - Help: "Number of datagrams sent or received at a listener.", - }, promCounterLabels) - promListenerBytesTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: stunnerNamespace, - Subsystem: "listener", - Name: "bytes_total", - Help: "Number of bytes sent or received at a listener.", - }, promCounterLabels) - - prometheus.MustRegister(promListenerPacketsTotal) - prometheus.MustRegister(promListenerBytesTotal) - prometheus.MustRegister(promListenerConnsTotal) - prometheus.MustRegister(promListenerConnsActive) - - // cluster stats - promClusterConnsActive = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: stunnerNamespace, - Subsystem: "cluster", - Name: "connections", - Help: "Number of active upstream connections on behalf of a listener", - }, promConnLabels) - promClusterConnsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: stunnerNamespace, - Subsystem: "cluster", - Name: "connections_total", - Help: "Number of upstream connections on behalf of a listener.", - }, promConnLabels) - promClusterPacketsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: stunnerNamespace, - Subsystem: "cluster", - Name: "packets_total", - Help: "Number of datagrams sent to backends or received from backends on behalf of a listener", - }, promCounterLabels) - promClusterBytesTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: stunnerNamespace, - Subsystem: "cluster", - Name: "bytes_total", - Help: "Number of bytes sent to backends or received from backends on behalf of a listener.", - }, promCounterLabels) - - prometheus.MustRegister(promClusterPacketsTotal) - prometheus.MustRegister(promClusterBytesTotal) - prometheus.MustRegister(promClusterConnsTotal) - prometheus.MustRegister(promClusterConnsActive) -} - -func Close() { - _ = prometheus.Unregister(promListenerPacketsTotal) - _ = 
prometheus.Unregister(promListenerBytesTotal) - _ = prometheus.Unregister(promListenerConnsTotal) - _ = prometheus.Unregister(promListenerConnsActive) - _ = prometheus.Unregister(promClusterPacketsTotal) - _ = prometheus.Unregister(promClusterBytesTotal) - _ = prometheus.Unregister(promClusterConnsTotal) - _ = prometheus.Unregister(promClusterConnsActive) -} - -func IncrementPackets(n string, c ConnType, d Direction, count uint64) { - switch c { - case ListenerType: - promListenerPacketsTotal.WithLabelValues(n, d.String()).Add(float64(count)) - case ClusterType: - promClusterPacketsTotal.WithLabelValues(n, d.String()).Add(float64(count)) - } -} - -func IncrementBytes(n string, c ConnType, d Direction, count uint64) { - switch c { - case ListenerType: - promListenerBytesTotal.WithLabelValues(n, d.String()).Add(float64(count)) - case ClusterType: - promClusterBytesTotal.WithLabelValues(n, d.String()).Add(float64(count)) - } -} - -func AddConnection(n string, c ConnType) { - switch c { - case ListenerType: - promListenerConnsActive.WithLabelValues(n).Add(1) - promListenerConnsTotal.WithLabelValues(n).Add(1) - case ClusterType: - promClusterConnsActive.WithLabelValues(n).Add(1) - promClusterConnsTotal.WithLabelValues(n).Add(1) - } -} - -func SubConnection(n string, c ConnType) { - switch c { - case ListenerType: - promListenerConnsActive.WithLabelValues(n).Sub(1) - case ClusterType: - promClusterConnsActive.WithLabelValues(n).Sub(1) - } -} - -// func RegisterMetrics(log logging.LeveledLogger, GetAllocationCount func() float64) { -// AllocActiveGauge = prometheus.NewGaugeFunc( -// prometheus.GaugeOpts{ -// Name: "stunner_allocations_active", -// Help: "Number of active allocations.", -// }, -// GetAllocationCount, -// ) -// if err := prometheus.Register(AllocActiveGauge); err == nil { -// log.Debug("GaugeFunc 'stunner_allocations_active' registered.") -// } else { -// log.Warn("GaugeFunc 'stunner_allocations_active' cannot be registered.") -// } -// } - -// func UnregisterMetrics(log logging.LeveledLogger) { -// if AllocActiveGauge != nil { -// if success := prometheus.Unregister(AllocActiveGauge); success { -// log.Debug("GaugeFunc 'stunner_allocations_active' unregistered.") -// return -// } -// } -// log.Warn("GaugeFunc 'stunner_allocations_active' cannot be unregistered.") -// } diff --git a/internal/telemetry/relay.go b/internal/telemetry/relay.go deleted file mode 100644 index eb7c0a76..00000000 --- a/internal/telemetry/relay.go +++ /dev/null @@ -1,132 +0,0 @@ -package telemetry - -// code adopted from github.com/livekit/pkg/telemetry - -import ( - "errors" - "fmt" - "net" - - "github.com/pion/randutil" - "github.com/pion/transport/v2" - "github.com/pion/transport/v2/stdnet" -) - -var ( - errInvalidName = errors.New("RelayAddressGenerator: Name must be set") - errRelayAddressInvalid = errors.New("RelayAddressGenerator: invalid RelayAddress") - errMinPortNotZero = errors.New("RelayAddressGenerator: MinPort must be not 0") - errMaxPortNotZero = errors.New("RelayAddressGenerator: MaxPort must be not 0") - errListeningAddressInvalid = errors.New("RelayAddressGenerator: invalid ListeningAddress") - errNilConn = errors.New("cannot allocate relay connection") - errMaxRetriesExceeded = errors.New("max retries exceeded when trying to generate new relay connection: MinPort:MaxPort range too small?") - errTodo = errors.New("relay to Net.Conn not implemented") -) - -// RelayAddressGenerator can be used to only allocate connections inside a defined port range. 
A -// static ip address can be set. -type RelayAddressGenerator struct { - // Name is the name of the listener this relay address generator belongs to. Note that - // packets sent to/received from upstream cluster are reported with the name of the - // *listener* that the packet belongs to, and not the cluster. - Name string - - // RelayAddress is the IP returned to the user when the relay is created. - RelayAddress net.IP - - // MinPort the minimum port to allocate. - MinPort uint16 - // MaxPort the maximum (inclusive) port to allocate. - MaxPort uint16 - - // MaxRetries the amount of tries to allocate a random port in the defined range. - MaxRetries int - - // Rand the random source of numbers. - Rand randutil.MathRandomGenerator - - // Address is passed to Listen/ListenPacket when creating the Relay. - Address string - - // Net is a pion/transport VNet, used for testing. - Net transport.Net -} - -// Validate is called on server startup and confirms the RelayAddressGenerator is properly configured -func (r *RelayAddressGenerator) Validate() error { - if r.Name == "" { - return errInvalidName - } - - if r.Net == nil { - r.Net, _ = stdnet.NewNet() - } - - if r.Rand == nil { - r.Rand = randutil.NewMathRandomGenerator() - } - - if r.MaxRetries == 0 { - r.MaxRetries = 10 - } - - switch { - case r.MinPort == 0: - return errMinPortNotZero - case r.MaxPort == 0: - return errMaxPortNotZero - case r.RelayAddress == nil: - return errRelayAddressInvalid - case r.Address == "": - return errListeningAddressInvalid - default: - return nil - } -} - -// AllocatePacketConn generates a new PacketConn to receive traffic on and the IP/Port to populate -// the allocation response with -func (r *RelayAddressGenerator) AllocatePacketConn(network string, requestedPort int) (net.PacketConn, net.Addr, error) { - if requestedPort != 0 { - conn, err := r.Net.ListenPacket(network, fmt.Sprintf("%s:%d", r.Address, requestedPort)) - if err != nil { - return nil, nil, err - } - - conn = NewPacketConn(conn, r.Name, ClusterType) - - relayAddr, ok := conn.LocalAddr().(*net.UDPAddr) - if !ok { - return nil, nil, errNilConn - } - - relayAddr.IP = r.RelayAddress - return conn, relayAddr, nil - } - - for try := 0; try < r.MaxRetries; try++ { - port := r.MinPort + uint16(r.Rand.Intn(int((r.MaxPort+1)-r.MinPort))) - conn, err := r.Net.ListenPacket(network, fmt.Sprintf("%s:%d", r.Address, port)) - if err != nil { - continue - } - - conn = NewPacketConn(conn, r.Name, ClusterType) - - relayAddr, ok := conn.LocalAddr().(*net.UDPAddr) - if !ok { - return nil, nil, errNilConn - } - - relayAddr.IP = r.RelayAddress - return conn, relayAddr, nil - } - - return nil, nil, errMaxRetriesExceeded -} - -// AllocateConn generates a new Conn to receive traffic on and the IP/Port to populate the -// allocation response with -func (g *RelayAddressGenerator) AllocateConn(network string, requestedPort int) (net.Conn, net.Addr, error) { - return nil, nil, errTodo -} diff --git a/internal/telemetry/statsconn.go b/internal/telemetry/statsconn.go index 4b0cad9a..36ac9665 100644 --- a/internal/telemetry/statsconn.go +++ b/internal/telemetry/statsconn.go @@ -9,13 +9,14 @@ import ( // Listener is a net.Listener that knows how to report to Prometheus. type Listener struct { net.Listener - name string - connType ConnType + name string + connType ConnType + telemetry *Telemetry } // NewListener creates a net.Listener that knows its name and type. 
-func NewListener(l net.Listener, n string, t ConnType) *Listener {
- return &Listener{Listener: l, name: n, connType: t}
+func NewListener(l net.Listener, n string, t ConnType, tm *Telemetry) *Listener {
+ return &Listener{Listener: l, name: n, connType: t, telemetry: tm}
}
// Accept accepts a new connection on a Listener.
@@ -25,28 +26,29 @@ func (l *Listener) Accept() (net.Conn, error) {
return nil, err
}
- return NewConn(conn, l.name, l.connType), nil
+ return NewConn(conn, l.name, l.connType, l.telemetry), nil
}
// Conn is a net.Conn that knows how to report to Prometheus.
type Conn struct {
net.Conn
- name string
- connType ConnType
+ name string
+ connType ConnType
+ telemetry *Telemetry
}
// NewConn allocates a stats conn that knows its name and type.
-func NewConn(c net.Conn, n string, t ConnType) *Conn {
- AddConnection(n, t)
- return &Conn{Conn: c, name: n, connType: t}
+func NewConn(c net.Conn, n string, t ConnType, tm *Telemetry) *Conn {
+ tm.AddConnection(n, t)
+ return &Conn{Conn: c, name: n, connType: t, telemetry: tm}
}
// Read reads from the Conn.
func (c *Conn) Read(b []byte) (n int, err error) {
n, err = c.Conn.Read(b)
if n > 0 {
- IncrementBytes(c.name, c.connType, Incoming, uint64(n))
- IncrementPackets(c.name, c.connType, Incoming, 1)
+ c.telemetry.IncrementBytes(c.name, c.connType, Incoming, uint64(n))
+ c.telemetry.IncrementPackets(c.name, c.connType, Incoming, 1)
}
return
}
@@ -55,37 +57,38 @@ func (c *Conn) Read(b []byte) (n int, err error) {
func (c *Conn) Write(b []byte) (n int, err error) {
n, err = c.Conn.Write(b)
if n > 0 {
- IncrementBytes(c.name, c.connType, Outgoing, uint64(n))
- IncrementPackets(c.name, c.connType, Outgoing, 1)
+ c.telemetry.IncrementBytes(c.name, c.connType, Outgoing, uint64(n))
+ c.telemetry.IncrementPackets(c.name, c.connType, Outgoing, 1)
}
return
}
// Close closes the Conn.
func (c *Conn) Close() error {
- SubConnection(c.name, c.connType)
+ c.telemetry.SubConnection(c.name, c.connType)
return c.Conn.Close()
}
// PacketConn is a net.PacketConn that knows how to report to Prometheus.
type PacketConn struct {
net.PacketConn
- name string
- connType ConnType
+ name string
+ connType ConnType
+ telemetry *Telemetry
}
-// NewPacketConn allocates a stats conn that knows its name and type.
-func NewPacketConn(c net.PacketConn, n string, t ConnType) *PacketConn {
- AddConnection(n, t)
- return &PacketConn{PacketConn: c, name: n, connType: t}
+// NewPacketConn decorates a PacketConn with metric reporting.
+func NewPacketConn(c net.PacketConn, n string, t ConnType, tm *Telemetry) *PacketConn {
+ tm.AddConnection(n, t)
+ return &PacketConn{PacketConn: c, name: n, connType: t, telemetry: tm}
}
// ReadFrom reads from the PacketConn.
func (c *PacketConn) ReadFrom(p []byte) (n int, addr net.Addr, err error) { n, addr, err = c.PacketConn.ReadFrom(p) if n > 0 { - IncrementBytes(c.name, c.connType, Incoming, uint64(n)) - IncrementPackets(c.name, c.connType, Incoming, 1) + c.telemetry.IncrementBytes(c.name, c.connType, Incoming, uint64(n)) + c.telemetry.IncrementPackets(c.name, c.connType, Incoming, 1) } return } @@ -94,8 +97,8 @@ func (c *PacketConn) ReadFrom(p []byte) (n int, addr net.Addr, err error) { func (c *PacketConn) WriteTo(p []byte, addr net.Addr) (n int, err error) { n, err = c.PacketConn.WriteTo(p, addr) if n > 0 { - IncrementBytes(c.name, c.connType, Outgoing, uint64(n)) - IncrementPackets(c.name, c.connType, Outgoing, 1) + c.telemetry.IncrementBytes(c.name, c.connType, Outgoing, uint64(n)) + c.telemetry.IncrementPackets(c.name, c.connType, Outgoing, 1) } return } @@ -104,6 +107,6 @@ func (c *PacketConn) WriteTo(p []byte, addr net.Addr) (n int, err error) { // WriteTo writes to the PacketConn. // Close closes the PacketConn. func (c *PacketConn) Close() error { - SubConnection(c.name, c.connType) + c.telemetry.SubConnection(c.name, c.connType) return c.PacketConn.Close() } diff --git a/internal/telemetry/telemetry.go b/internal/telemetry/telemetry.go new file mode 100644 index 00000000..4a186c92 --- /dev/null +++ b/internal/telemetry/telemetry.go @@ -0,0 +1,233 @@ +package telemetry + +import ( + "context" + "fmt" + "time" + + "github.com/pion/logging" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/prometheus" + "go.opentelemetry.io/otel/metric" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +const ( + stunnerInstrumentName = "stunner" + closeTimeout = 250 * time.Millisecond +) + +// Callbacks lets the caller to define various callbacks for reporting active metrics from an +// object that cannot be reached from this subpackage. This interface allows to easily add new +// metric reporters. +type Callbacks struct { + // GetAllocationCount should map to the total allocation counter of the server. 
+ GetAllocationCount func() int64 +} + +type Telemetry struct { + sdkmetric.Reader + + meter metric.Meter + provider *sdkmetric.MeterProvider + ctx context.Context + cancel context.CancelFunc + + // Metrics instruments + ListenerPacketsCounter metric.Int64Counter + ListenerBytesCounter metric.Int64Counter + ListenerConnsCounter metric.Int64Counter + ListenerConnsGauge metric.Int64UpDownCounter + ClusterPacketsCounter metric.Int64Counter + ClusterBytesCounter metric.Int64Counter + AllocationsGauge metric.Int64ObservableGauge + + callbacks Callbacks + + log logging.LeveledLogger +} + +func New(callbacks Callbacks, dryRun bool, log logging.LeveledLogger) (*Telemetry, error) { + var reader sdkmetric.Reader + + resource, err := resource.Merge(resource.Default(), + resource.NewWithAttributes(semconv.SchemaURL, semconv.ServiceNameKey.String("stunner"))) + if err != nil { + return nil, fmt.Errorf("could not create OTEL resource: %w", err) + } + + if dryRun { + // Use manual collection mode + reader = sdkmetric.NewManualReader() + } else { + // Create a new Prometheus exporter (starts background collection) + exporter, err := prometheus.New() + if err != nil { + return nil, err + } + reader = exporter + } + + // Create a new MeterProvider with the Prometheus exporter + provider := sdkmetric.NewMeterProvider( + sdkmetric.WithResource(resource), + sdkmetric.WithReader(reader), + ) + + ctx, cancel := context.WithCancel(context.Background()) + t := &Telemetry{ + Reader: reader, + meter: provider.Meter(stunnerInstrumentName), + provider: provider, + callbacks: callbacks, + ctx: ctx, + cancel: cancel, + log: log, + } + + if err := t.init(); err != nil { + return nil, err + } + return t, nil +} + +// Close cleanly shuts down the meter provider and blocks until the shutdown cycle is finished or a +// timout expires. 
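A rough sketch of how the new Telemetry object is presumably constructed and fed by the server; the listener name, the constant allocation counter and the logger below are stand-ins rather than code taken from this diff:

package telemetry

import "github.com/pion/logging"

// exampleTelemetry is illustrative only: it builds a dry-run Telemetry
// (manual reader, no Prometheus exporter) and reports a few datapoints for a
// made-up listener called "udp-listener".
func exampleTelemetry() error {
    log := logging.NewDefaultLoggerFactory().NewLogger("telemetry-example")

    t, err := New(Callbacks{
        // Stand-in: a real server would return its live allocation count.
        GetAllocationCount: func() int64 { return 0 },
    }, true, log)
    if err != nil {
        return err
    }
    defer t.Close() //nolint:errcheck

    // One incoming datagram of 100 bytes on the listener.
    t.AddConnection("udp-listener", ListenerType)
    t.IncrementBytes("udp-listener", ListenerType, Incoming, 100)
    t.IncrementPackets("udp-listener", ListenerType, Incoming, 1)
    t.SubConnection("udp-listener", ListenerType)
    return nil
}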
+func (t *Telemetry) Close() error { + ctx, cancel := context.WithTimeout(t.ctx, closeTimeout) + defer cancel() + defer t.cancel() + return t.provider.Shutdown(ctx) +} + +func (t *Telemetry) Ctx() context.Context { + return t.ctx +} + +func (t *Telemetry) init() error { + var err error + + // Initialize listener metrics + t.ListenerPacketsCounter, err = t.meter.Int64Counter( + stunnerInstrumentName+"_listener_packets_total", + metric.WithDescription("Number of datagrams sent or received at a listener"), + ) + if err != nil { + return err + } + + t.ListenerBytesCounter, err = t.meter.Int64Counter( + stunnerInstrumentName+"_listener_bytes_total", + metric.WithDescription("Number of bytes sent or received at a listener"), + ) + if err != nil { + return err + } + + t.ListenerConnsCounter, err = t.meter.Int64Counter( + stunnerInstrumentName+"_listener_connections_total", + metric.WithDescription("Number of all downstream connections observed at a listener"), + ) + if err != nil { + return err + } + + t.ListenerConnsGauge, err = t.meter.Int64UpDownCounter( + stunnerInstrumentName+"_listener_connections", + metric.WithDescription("Number of active downstream connections at a listener"), + ) + if err != nil { + return err + } + + // Initialize cluster metrics + t.ClusterPacketsCounter, err = t.meter.Int64Counter( + stunnerInstrumentName+"_cluster_packets_total", + metric.WithDescription("Number of datagrams sent to or received from backends"), + ) + if err != nil { + return err + } + + t.ClusterBytesCounter, err = t.meter.Int64Counter( + stunnerInstrumentName+"_cluster_bytes_total", + metric.WithDescription("Number of bytes sent to or received from backends"), + ) + if err != nil { + return err + } + + t.AllocationsGauge, err = t.meter.Int64ObservableGauge( + stunnerInstrumentName+"_allocations_active", + metric.WithDescription("Number of active allocations"), + ) + if err != nil { + return err + } + + _, err = t.meter.RegisterCallback( + func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(t.AllocationsGauge, t.callbacks.GetAllocationCount()) + return nil + }, + t.AllocationsGauge, + ) + if err != nil { + return err + } + + return nil +} + +func (t *Telemetry) IncrementPackets(n string, c ConnType, d Direction, count uint64) { + attrs := metric.WithAttributes( + attribute.String("name", n), + attribute.String("direction", d.String()), + ) + + switch c { + case ListenerType: + t.ListenerPacketsCounter.Add(t.ctx, int64(count), attrs) + case ClusterType: + t.ClusterPacketsCounter.Add(t.ctx, int64(count), attrs) + } +} + +func (t *Telemetry) IncrementBytes(n string, c ConnType, d Direction, count uint64) { + attrs := metric.WithAttributes( + attribute.String("name", n), + attribute.String("direction", d.String()), + ) + + switch c { + case ListenerType: + t.ListenerBytesCounter.Add(t.ctx, int64(count), attrs) + case ClusterType: + t.ClusterBytesCounter.Add(t.ctx, int64(count), attrs) + } +} + +func (t *Telemetry) AddConnection(n string, c ConnType) { + attrs := metric.WithAttributes(attribute.String("name", n)) + + switch c { + case ListenerType: + t.ListenerConnsGauge.Add(t.ctx, 1, attrs) + t.ListenerConnsCounter.Add(t.ctx, 1, attrs) + case ClusterType: + // Cluster connection metrics are disabled + } +} + +func (t *Telemetry) SubConnection(n string, c ConnType) { + attrs := metric.WithAttributes(attribute.String("name", n)) + + switch c { + case ListenerType: + t.ListenerConnsGauge.Add(t.ctx, -1, attrs) + case ClusterType: + // Cluster connection metrics are disabled + } +} diff 
--git a/internal/telemetry/tester/tester.go b/internal/telemetry/tester/tester.go new file mode 100644 index 00000000..d24b4a9b --- /dev/null +++ b/internal/telemetry/tester/tester.go @@ -0,0 +1,125 @@ +package tester + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/otel/attribute" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// Tester provides testing utilities for OpenTelemetry metrics +type Tester struct { + sdkmetric.Reader + *testing.T +} + +// New creates a new test helper and returns both the helper and a MeterProvider +// that can be used to initialize the system under test +func New(reader sdkmetric.Reader, t *testing.T) *Tester { + return &Tester{Reader: reader, T: t} +} + +// CollectAndCount returns the number of metrics with the given name and attributes +func (h *Tester) CollectAndCount(name string) int { + h.Helper() + + metrics := &metricdata.ResourceMetrics{} + err := h.Collect(context.Background(), metrics) + assert.NoError(h, err, "failed to collect metrics: %v") + + for _, scope := range metrics.ScopeMetrics { + for _, m := range scope.Metrics { + if m.Name != name { + continue + } + + sum, ok := m.Data.(metricdata.Sum[int64]) + assert.True(h, ok, fmt.Sprintf("metric %s is not a Sum", name)) + + return len(sum.DataPoints) + } + } + + return 0 +} + +// CollectAndGetInt returns the value of the metric with given name and attributes. +func (h *Tester) CollectAndGetInt(name string, attrs ...string) int { + h.Helper() + + assert.True(h, len(attrs)%2 == 0, "odd number of attribute key-value pairs") + + metrics := &metricdata.ResourceMetrics{} + err := h.Collect(context.Background(), metrics) + assert.NoError(h, err, "failed to collect metrics: %v") + + for _, scope := range metrics.ScopeMetrics { + for _, m := range scope.Metrics { + if m.Name != name { + continue + } + + sum, ok := m.Data.(metricdata.Sum[int64]) + assert.True(h, ok, fmt.Sprintf("metric %s is not a Sum", name)) + + for _, dp := range sum.DataPoints { + matches := true + for i := 0; i < len(attrs); i += 2 { + if val, ok := dp.Attributes.Value(attribute.Key(attrs[i])); !ok || val.AsString() != attrs[i+1] { + matches = false + break + } + } + if matches { + return int(dp.Value) + } + } + } + } + + return 0 +} + +// CollectAndDump returns the metrics with the given name and attributes as a string. 
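A hedged sketch of how the Tester is presumably driven from a unit test against a dry-run Telemetry instance; the test name, listener name and expected value are made up, while the metric and attribute names follow the instruments defined in telemetry.go above:

package tester

import (
    "testing"

    "github.com/pion/logging"
    "github.com/stretchr/testify/assert"

    "github.com/l7mp/stunner/internal/telemetry"
)

// TestListenerPacketCounter is illustrative: it reports three packets on a
// made-up listener and asserts the corresponding counter value.
func TestListenerPacketCounter(t *testing.T) {
    log := logging.NewDefaultLoggerFactory().NewLogger("tester-example")
    tm, err := telemetry.New(telemetry.Callbacks{
        GetAllocationCount: func() int64 { return 0 },
    }, true, log) // dryRun: use a manual reader
    assert.NoError(t, err)
    defer tm.Close() //nolint:errcheck

    h := New(tm.Reader, t)
    tm.IncrementPackets("udp-listener", telemetry.ListenerType, telemetry.Incoming, 3)

    assert.Equal(t, 3, h.CollectAndGetInt("stunner_listener_packets_total",
        "name", "udp-listener", "direction", telemetry.Incoming.String()))
}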
+func (h *Tester) CollectAndDump(name string, attrs ...string) string { + h.Helper() + + metrics := &metricdata.ResourceMetrics{} + if err := h.Collect(context.Background(), metrics); err != nil { + return "" + } + + ret := []string{} + for _, scope := range metrics.ScopeMetrics { + for _, m := range scope.Metrics { + if m.Name != name { + continue + } + + sum, ok := m.Data.(metricdata.Sum[int64]) + if !ok { + return "" + } + + for _, dp := range sum.DataPoints { + matches := true + for i := 0; i < len(attrs); i += 2 { + if val, ok := dp.Attributes.Value(attribute.Key(attrs[i])); !ok || val.AsString() != attrs[i+1] { + matches = false + break + } + } + if matches { + ret = append(ret, fmt.Sprintf("%v", dp)) + } + } + } + } + + return strings.Join(ret, ",") +} diff --git a/internal/util/conn.go b/internal/util/conn.go deleted file mode 100644 index 1479b630..00000000 --- a/internal/util/conn.go +++ /dev/null @@ -1,90 +0,0 @@ -package util - -import ( - "fmt" - "net" - "os" - "time" - - "github.com/pion/transport/v2" -) - -type FileConnAddr struct { - File *os.File -} - -func (s *FileConnAddr) Network() string { return "file" } -func (s *FileConnAddr) String() string { return s.File.Name() } - -type FileConn struct { - file *os.File -} - -func (f *FileConn) Read(b []byte) (n int, err error) { - return f.file.Read(b) -} - -func (f *FileConn) Write(b []byte) (n int, err error) { - return f.file.Write(b) -} - -func (f *FileConn) Close() error { - return f.file.Close() -} - -func (f *FileConn) LocalAddr() net.Addr { - return &FileConnAddr{File: f.file} -} - -func (f *FileConn) RemoteAddr() net.Addr { - return &FileConnAddr{File: f.file} -} - -func (f *FileConn) SetDeadline(t time.Time) error { - return nil -} - -func (f *FileConn) SetReadDeadline(t time.Time) error { - return nil -} - -func (f *FileConn) SetWriteDeadline(t time.Time) error { - return nil -} - -// NewFileConn returns a wrapper that shows an os.File as a net.Conn. -func NewFileConn(file *os.File) net.Conn { - return &FileConn{file: file} -} - -// PacketConnPool is a factory to create pools of related PacketConns, which may either be a set of -// PacketConns bound to the same local IP using SO_REUSEPORT (on unix, under certain circumstances) -// that can do multithreaded readloops, or a single PacketConn as a fallback for non-unic -// architectures and for testing. -type PacketConnPool interface { - // Make creates a PacketConnPool, caller must make sure to close the sockets. - Make(network, address string) ([]net.PacketConn, error) - // Size returns the number of sockets in the pool. - Size() int -} - -// defaultPacketConPool implements a socketpool that consists of only a single socket, used as a fallback for architectures that do not support SO_REUSEPORT or when socket pooling is disabled. -type defaultPacketConnPool struct { - transport.Net -} - -// Make creates a PacketConnPool, caller must make sure to close the sockets. 
-func (p *defaultPacketConnPool) Make(network, address string) ([]net.PacketConn, error) { - conns := []net.PacketConn{} - - conn, err := p.ListenPacket(network, address) - if err != nil { - return []net.PacketConn{}, fmt.Errorf("failed to create PacketConn at %s "+ - "(REUSEPORT: false): %s", address, err) - } - conns = append(conns, conn) - - return conns, nil -} - -func (p *defaultPacketConnPool) Size() int { return 1 } diff --git a/internal/util/endpoint.go b/internal/util/endpoint.go new file mode 100644 index 00000000..9d5d1961 --- /dev/null +++ b/internal/util/endpoint.go @@ -0,0 +1,106 @@ +package util + +import ( + "fmt" + "net" + "regexp" + "strconv" +) + +var endPointMatcher = regexp.MustCompile("^(.*):<([0-9]+)-([0-9]+)>$") + +// Endpoint is a pair of an IP prefix and a port range. +type Endpoint struct { + prefix net.IPNet + port, endPort int + hasPrefixLen, hasPort bool +} + +// ParseEndpoint parses an endpoint from the canonical format: "[optional slash and prefix length]:= port + } else { + return true + } +} + +func (ep *Endpoint) Network() string { + return ep.prefix.Network() +} + +func (ep *Endpoint) String() string { + ip, portRange := "", "" + if ep.hasPrefixLen { + ip = ep.prefix.String() + } else { + ip = ep.prefix.IP.String() + } + if ep.hasPort { + portRange = fmt.Sprintf(":<%d-%d>", ep.port, ep.endPort) + } + + return ip + portRange +} diff --git a/internal/util/endpoint_test.go b/internal/util/endpoint_test.go new file mode 100644 index 00000000..6ed70274 --- /dev/null +++ b/internal/util/endpoint_test.go @@ -0,0 +1,159 @@ +package util + +import ( + "net" + "testing" + + "github.com/stretchr/testify/assert" +) + +type endpointTest struct { + name, input, output, ipnet string + port, endPort int + success bool +} + +var endpointTester = []endpointTest{ + { + name: "ipv4 - full", + input: "1.2.3.4/16:<1-2>", + output: "1.2.0.0/16:<1-2>", + ipnet: "1.2.0.0/16", + port: 1, + endPort: 2, + success: true, + }, + { + name: "ipv4 - no port", + input: "1.2.3.4/16", + output: "1.2.0.0/16", + ipnet: "1.2.0.0/16", + port: 1, + endPort: 65535, + success: true, + }, + { + name: "ipv4 - no prefix len", + input: "1.2.3.4:<1-2>", + output: "1.2.3.4:<1-2>", + ipnet: "1.2.3.4/32", + port: 1, + endPort: 2, + success: true, + }, + { + name: "ipv4 - no port, no prefix len ", + input: "1.2.3.4", + output: "1.2.3.4", + ipnet: "1.2.3.4/32", + port: 1, + endPort: 65535, + success: true, + }, + { + name: "ipv6 - full", + input: "2001:db8:3333:4444:5555:6666:7777:8888/32:<1-2>", + output: "2001:db8::/32:<1-2>", + ipnet: "2001:db8::/32", + port: 1, + endPort: 2, + success: true, + }, + { + name: "ipv6 - no port", + input: "2001:db8:3333:4444:5555:6666:7777:8888/32", + output: "2001:db8::/32", + ipnet: "2001:db8::/32", + port: 1, + endPort: 65535, + success: true, + }, + { + name: "ipv6 - no prefix len", + input: "2001:db8:3333:4444:5555:6666:7777:8888:<1-2>", + output: "2001:db8:3333:4444:5555:6666:7777:8888:<1-2>", + ipnet: "2001:db8:3333:4444:5555:6666:7777:8888/128", + port: 1, + endPort: 2, + success: true, + }, + { + name: "ipv6 - no port, no prefix len ", + input: "2001:db8:3333:4444:5555:6666:7777:8888", + output: "2001:db8:3333:4444:5555:6666:7777:8888", + ipnet: "2001:db8:3333:4444:5555:6666:7777:8888/128", + port: 1, + endPort: 65535, + success: true, + }, + { + name: "ipv4 - no addr fails ", + input: ":<1-65535>", + success: false, + }, + { + name: "ipv4 - random stuff fails ", + input: "dummy", + success: false, + }, +} + +func TestEndpointParse(t *testing.T) { + for _, c := 
range endpointTester { + t.Run(c.name, func(t *testing.T) { + ep, err := ParseEndpoint(c.input) + if c.success { + assert.NoError(t, err, "parse") + assert.Equal(t, c.ipnet, ep.prefix.String(), "ip equal") + assert.Equal(t, c.port, ep.port, "port equal") + assert.Equal(t, c.endPort, ep.endPort, "endport equal") + assert.Equal(t, c.output, ep.String(), "output") + } else { + assert.Error(t, err, "parse") + } + + }) + } +} + +type matchTest struct { + name, input, ip string + port int + match, route bool +} + +var matchTester = []matchTest{{ + name: "ipv4 - full - both", + input: "1.2.3.4/16:<1-2>", + ip: "1.2.3.5", + port: 1, + match: true, + route: true, +}, { + name: "ipv4 - full - route", + input: "1.2.3.4/16:<1-2>", + ip: "1.2.4.6", + port: 3, + match: false, + route: true, +}, { + name: "ipv4 - full - neither", + input: "1.2.3.4/16:<1-2>", + ip: "1.3.3.4", + port: 1, + match: false, + route: false, +}} + +func TestRouteMatch(t *testing.T) { + for _, c := range matchTester { + t.Run(c.name, func(t *testing.T) { + ep, err := ParseEndpoint(c.input) + assert.NoError(t, err, "endpoint parse") + ip := net.ParseIP(c.ip) + assert.NotNil(t, ip, "ip parse") + assert.True(t, ep.Contains(ip) == c.route, "route") + assert.True(t, ep.Match(ip, c.port) == c.match, "match") + }) + } +} diff --git a/internal/util/file_conn.go b/internal/util/file_conn.go new file mode 100644 index 00000000..d7b8c5c7 --- /dev/null +++ b/internal/util/file_conn.go @@ -0,0 +1,55 @@ +package util + +import ( + "net" + "os" + "time" +) + +type FileConnAddr struct { + File *os.File +} + +func (s *FileConnAddr) Network() string { return "file" } +func (s *FileConnAddr) String() string { return s.File.Name() } + +type FileConn struct { + file *os.File +} + +func (f *FileConn) Read(b []byte) (n int, err error) { + return f.file.Read(b) +} + +func (f *FileConn) Write(b []byte) (n int, err error) { + return f.file.Write(b) +} + +func (f *FileConn) Close() error { + return f.file.Close() +} + +func (f *FileConn) LocalAddr() net.Addr { + return &FileConnAddr{File: f.file} +} + +func (f *FileConn) RemoteAddr() net.Addr { + return &FileConnAddr{File: f.file} +} + +func (f *FileConn) SetDeadline(t time.Time) error { + return nil +} + +func (f *FileConn) SetReadDeadline(t time.Time) error { + return nil +} + +func (f *FileConn) SetWriteDeadline(t time.Time) error { + return nil +} + +// NewFileConn returns a wrapper that shows an os.File as a net.Conn. +func NewFileConn(file *os.File) net.Conn { + return &FileConn{file: file} +} diff --git a/internal/util/socketpool.go b/internal/util/socketpool.go index 08fd05f1..2c63a6d3 100644 --- a/internal/util/socketpool.go +++ b/internal/util/socketpool.go @@ -1,15 +1,45 @@ -//go:build !linux - package util import ( - "github.com/pion/transport/v2" + "fmt" + "net" + + "github.com/l7mp/stunner/internal/telemetry" + "github.com/pion/transport/v3" ) -// NewPacketConnPool creates a new packet connection pool which is fixed to a single connection, -// used if threadNum is zero or if we are running on top of transport.VNet (which does not support -// reuseport), or if we are on non-unix, see the fallback in socketpool.go. 
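The endpoint tests above pin down the behavior of the new Endpoint type; a short sketch of the API they exercise, with an illustrative prefix and port range:

package util

import (
    "fmt"
    "net"
)

// exampleEndpointMatch is illustrative: ParseEndpoint accepts an IP or prefix
// with an optional port range, Contains checks the peer IP only, and Match
// checks the IP and the destination port together.
func exampleEndpointMatch() error {
    ep, err := ParseEndpoint("1.2.3.4/16:<1-2>")
    if err != nil {
        return err
    }

    peer := net.ParseIP("1.2.200.200")
    fmt.Println(ep.Contains(peer))  // true: 1.2.0.0/16 covers the peer
    fmt.Println(ep.Match(peer, 1))  // true: port 1 falls inside <1-2>
    fmt.Println(ep.Match(peer, 80)) // false: port 80 is outside the range
    return nil
}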
-func NewPacketConnPool(vnet transport.Net, threadNum int) PacketConnPool { - // default to a single socket for vnet or if udp multithreading is disabled - return &defaultPacketConnPool{Net: vnet} +// PacketConnPool is a factory to create pools of related PacketConns, which may either be a set of +// PacketConns bound to the same local IP using SO_REUSEPORT (on unix, under certain circumstances) +// that can do multithreaded readloops, or a single PacketConn as a fallback for non-unic +// architectures and for testing. +type PacketConnPool interface { + // Make creates a PacketConnPool, caller must make sure to close the sockets. + Make(network, address string) ([]net.PacketConn, error) + // Size returns the number of sockets in the pool. + Size() int } + +// defaultPacketConPool implements a socketpool that consists of only a single socket, used as a +// fallback for architectures that do not support SO_REUSEPORT or when socket pooling is disabled. +type defaultPacketConnPool struct { + transport.Net + listenerName string + telemetry *telemetry.Telemetry +} + +// Make creates a PacketConnPool, caller must make sure to close the sockets. +func (p *defaultPacketConnPool) Make(network, address string) ([]net.PacketConn, error) { + conns := []net.PacketConn{} + + conn, err := p.ListenPacket(network, address) + if err != nil { + return []net.PacketConn{}, fmt.Errorf("failed to create PacketConn at %s "+ + "(REUSEPORT: false): %s", address, err) + } + + conn = telemetry.NewPacketConn(conn, p.listenerName, telemetry.ListenerType, p.telemetry) + conns = append(conns, conn) + return conns, nil +} + +func (p *defaultPacketConnPool) Size() int { return 1 } diff --git a/internal/util/socketpool_nonunix.go b/internal/util/socketpool_nonunix.go new file mode 100644 index 00000000..23df79fe --- /dev/null +++ b/internal/util/socketpool_nonunix.go @@ -0,0 +1,20 @@ +//go:build !linux + +package util + +import ( + "github.com/l7mp/stunner/internal/telemetry" + "github.com/pion/transport/v3" +) + +// NewPacketConnPool creates a new packet connection pool which is fixed to a single connection, +// used if threadNum is zero or if we are running on top of transport.VNet (which does not support +// reuseport), or if we are on non-unix, see the fallback in socketpool.go. +func NewPacketConnPool(listenerName string, vnet transport.Net, threadNum int, t *telemetry.Telemetry) PacketConnPool { + // default to a single socket for vnet or if udp multithreading is disabled + return &defaultPacketConnPool{ + Net: vnet, + listenerName: listenerName, + telemetry: t, + } +} diff --git a/internal/util/socketpool_unix.go b/internal/util/socketpool_unix.go index c0710b37..46476256 100644 --- a/internal/util/socketpool_unix.go +++ b/internal/util/socketpool_unix.go @@ -10,25 +10,27 @@ import ( "golang.org/x/sys/unix" - "github.com/pion/transport/v2" - "github.com/pion/transport/v2/stdnet" + "github.com/l7mp/stunner/internal/telemetry" + "github.com/pion/transport/v3" + "github.com/pion/transport/v3/stdnet" ) // unixPacketConPool implements socketpools for unix with full support for SO_REUSEPORT type unixPacketConnPool struct { net.ListenConfig - size int + listenerName string + size int + telemetry *telemetry.Telemetry } // NewPacketConnPool creates a new packet connection pool. Pooling is disabled if threadNum is zero // or if we are running on top of transport.VNet (which does not support reuseport), or if we are // on non-unix, see the fallback in socketpool.go. 
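To see where the metrics-wrapped sockets come from, here is a sketch of how a listener would presumably call the new pool API; the listener name, thread count and port are made up:

package util

import (
    "net"

    "github.com/pion/transport/v3/stdnet"

    "github.com/l7mp/stunner/internal/telemetry"
)

// exampleSocketPool is illustrative only: it obtains a metrics-wrapped socket
// pool with the new signature; the caller must close the returned sockets.
func exampleSocketPool(tm *telemetry.Telemetry) ([]net.PacketConn, error) {
    vnet, err := stdnet.NewNet()
    if err != nil {
        return nil, err
    }

    // On Linux this yields 16 SO_REUSEPORT sockets, each wrapped by
    // telemetry.NewPacketConn; elsewhere it falls back to a single socket.
    pool := NewPacketConnPool("udp-listener", vnet, 16, tm)
    return pool.Make("udp", ":3478")
}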
-func NewPacketConnPool(vnet transport.Net, threadNum int) PacketConnPool { +func NewPacketConnPool(listenerName string, vnet transport.Net, threadNum int, t *telemetry.Telemetry) PacketConnPool { // default to a single socket for vnet or if udp multithreading is disabled _, ok := vnet.(*stdnet.Net) if ok && threadNum > 0 { return &unixPacketConnPool{ - size: threadNum, ListenConfig: net.ListenConfig{ Control: func(network, address string, conn syscall.RawConn) error { var operr error @@ -42,9 +44,12 @@ func NewPacketConnPool(vnet transport.Net, threadNum int) PacketConnPool { return operr }, }, + size: threadNum, + listenerName: listenerName, + telemetry: t, } } else { - return &defaultPacketConnPool{Net: vnet} + return &defaultPacketConnPool{listenerName: listenerName, Net: vnet, telemetry: t} } } @@ -59,6 +64,7 @@ func (p *unixPacketConnPool) Make(network, address string) ([]net.PacketConn, er return []net.PacketConn{}, fmt.Errorf("failed to create PacketConn "+ "%d at %s (REUSEPORT: %t): %s", i, address, (p.size > 0), err) } + conn = telemetry.NewPacketConn(conn, p.listenerName, telemetry.ListenerType, p.telemetry) conns = append(conns, conn) } diff --git a/mkdocs.yml b/mkdocs.yml index 1882d45e..9e401a58 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -9,7 +9,6 @@ theme: name: readthedocs highlightjs: true nav: -- 'README.md' - 'Why STUNner': 'WHY.md' - 'Deployment models': 'DEPLOYMENT.md' - 'Concepts': 'CONCEPTS.md' @@ -19,21 +18,26 @@ nav: - 'Authentication': 'AUTH.md' - 'Scaling': 'SCALING.md' - 'Monitoring': 'MONITORING.md' -- 'Standalone mode': 'OBSOLETE.md' - 'Examples': + - 'Configuring TLS for examples': 'examples/TLS.md' + - 'UDP echo': 'examples/udp-echo/README.md' - 'Simple tunnel': 'examples/simple-tunnel/README.md' - 'Direct one to one call': 'examples/direct-one2one-call/README.md' - - 'Kurento one to one call': 'examples/kurento-one2one-call/README.md' - - 'Kurento Magic Mirror': 'examples/kurento-magic-mirror/README.md' - 'LiveKit': 'examples/livekit/README.md' + - 'Janus': 'examples/janus/README.md' + - 'Elixir WebRTC': 'examples/elixir-webrtc/README.md' - 'Jitsi': 'examples/jitsi/README.md' - - 'Cloud Retro': 'examples/cloudretro/README.md' + - 'mediasoup': 'examples/mediasoup/README.md' + - 'CloudRetro': 'examples/cloudretro/README.md' - 'N.eko': 'examples/neko/README.md' + - 'Kurento one to one call': 'examples/kurento-one2one-call/README.md' + - 'Kurento Magic Mirror': 'examples/kurento-magic-mirror/README.md' - 'Benchmark': 'examples/benchmark/README.md' - 'CMD': - 'stunnerctl': 'cmd/stunnerctl.md' - 'stunnerd': 'cmd/stunnerd.md' - 'turncat': 'cmd/turncat.md' + - 'icetester': 'cmd/icetester.md' plugins: - search - mkdocstrings: @@ -46,3 +50,11 @@ markdown_extensions: - markdown_include.include: base_path: . - admonition + - pymdownx.highlight: + anchor_linenums: true + line_spans: __span + pygments_lang_class: true + - pymdownx.inlinehilite + - pymdownx.snippets + - pymdownx.superfences + - gfm_admonition diff --git a/pkg/apis/v1alpha1/admin.go b/pkg/apis/v1/admin.go similarity index 64% rename from pkg/apis/v1alpha1/admin.go rename to pkg/apis/v1/admin.go index 9bfdce7a..9f95ff38 100644 --- a/pkg/apis/v1alpha1/admin.go +++ b/pkg/apis/v1/admin.go @@ -1,4 +1,4 @@ -package v1alpha1 +package v1 import ( "fmt" @@ -9,7 +9,7 @@ import ( // AdminConfig holds the administrative configuration. type AdminConfig struct { - // Name is the name of the server, optional. + // Name of the server. Default is "default-stunnerd". 
Name string `json:"name,omitempty"`
// LogLevel is the desired log verbosity, e.g.: "stunner:TRACE,all:INFO". Default is
// "all:INFO".
@@ -21,15 +21,14 @@ type AdminConfig struct {
// HealthCheckEndpoint is the URI of the form `http://address:port` exposed for external
// HTTP health-checking. A liveness probe responder will be exposed on path `/live` and
// readiness probe on path `/ready`. The scheme (`http://`) is mandatory, and if no port is
- // specified then the default port is 8086. If pointer value nil then the default is to
- // enable health-checking at `http://0.0.0.0:8086`, set to a pointer to an enpty string if
- // you want to disable health-checking.
+ // specified then the default port is 8086. If unset, then the default is to enable
+ // health-checking at `http://0.0.0.0:8086`. Set to a pointer to an empty string to disable
+ // health-checking.
HealthCheckEndpoint *string `json:"healthcheck_endpoint,omitempty"`
}
// Validate checks a configuration and injects defaults.
func (req *AdminConfig) Validate() error {
- //FIXME: no validation for loglevel (we'd need to create a new logger and it's not worth)
if req.LogLevel == "" {
req.LogLevel = DefaultLogLevel
}
@@ -39,7 +38,7 @@ func (req *AdminConfig) Validate() error {
}
if req.MetricsEndpoint != "" {
- //metrics endpoint set: validate. empty string is valid
+ //Metrics endpoint set: validate. The empty string is valid
if _, err := url.Parse(req.MetricsEndpoint); err != nil {
return fmt.Errorf("invalid metric server endpoint URL %s: %s",
req.MetricsEndpoint, err.Error())
@@ -47,11 +46,11 @@ func (req *AdminConfig) Validate() error {
}
if req.HealthCheckEndpoint == nil {
- // no healtchcheck endpoint given: use default URL
+ // No health-check endpoint given: use default URL
e := fmt.Sprintf("http://:%d", DefaultHealthCheckPort)
req.HealthCheckEndpoint = &e
} else {
- //healtcheck endpoint set: validate. empty string is valid
+ // Health-check endpoint set: validate. Empty string is valid
if _, err := url.Parse(*req.HealthCheckEndpoint); err != nil {
return fmt.Errorf("invalid health-check server endpoint URL %s: %s",
*req.HealthCheckEndpoint, err.Error())
@@ -63,7 +62,7 @@ func (req *AdminConfig) Validate() error {
// Name returns the name of the object to be configured.
func (req *AdminConfig) ConfigName() string {
- // singleton!
+ // Singleton!
return DefaultAdminName
}
@@ -75,13 +74,15 @@ func (req *AdminConfig) DeepEqual(other Config) bool {
// DeepCopyInto copies a configuration.
func (req *AdminConfig) DeepCopyInto(dst Config) {
ret := dst.(*AdminConfig)
- // admin conf contians primitive types only so this is safe
*ret = *req
}
// String stringifies the configuration.
func (req *AdminConfig) String() string {
status := []string{}
+ if req.Name != "" {
+ status = append(status, fmt.Sprintf("name=%q", req.Name))
+ }
if req.LogLevel != "" {
status = append(status, fmt.Sprintf("logLevel=%q", req.LogLevel))
}
@@ -93,3 +94,30 @@ func (req *AdminConfig) String() string {
}
return fmt.Sprintf("admin:{%s}", strings.Join(status, ","))
}
+
+// AdminStatus represents the administrative status.
+type AdminStatus struct {
+ Name string `json:"name,omitempty"`
+ LogLevel string `json:"loglevel,omitempty"`
+ MetricsEndpoint string `json:"metrics_endpoint,omitempty"`
+ HealthCheckEndpoint string `json:"healthcheck_endpoint,omitempty"`
+ // licensing status comes here
+}
+
+// String returns a string representation of the administrative status.
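The health-check defaulting described above can be sketched as follows; the function name and values are illustrative, not part of the diff:

package v1

import "fmt"

// exampleAdminDefaults is illustrative: it shows what Validate injects when
// the health-check endpoint is left unset versus explicitly disabled.
func exampleAdminDefaults() {
    // Nil pointer: Validate fills in the default responder on port 8086.
    a := AdminConfig{LogLevel: "all:INFO"}
    _ = a.Validate()
    fmt.Println(*a.HealthCheckEndpoint) // "http://:8086"

    // Pointer to an empty string: health-checking stays disabled.
    empty := ""
    b := AdminConfig{HealthCheckEndpoint: &empty}
    _ = b.Validate()
    fmt.Println(*b.HealthCheckEndpoint) // ""
}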
+func (a *AdminStatus) String() string {
+ status := []string{}
+ if a.LogLevel != "" {
+ status = append(status, fmt.Sprintf("logLevel=%q", a.LogLevel))
+ }
+ if a.MetricsEndpoint != "" {
+ status = append(status, fmt.Sprintf("metrics=%q", a.MetricsEndpoint))
+ }
+ if a.HealthCheckEndpoint != "" {
+ status = append(status, fmt.Sprintf("health-check=%q", a.HealthCheckEndpoint))
+ }
+
+ // add licensing status here
+
+ return fmt.Sprintf("%s:{%s}", a.Name, strings.Join(status, ","))
+}
diff --git a/pkg/apis/v1/auth.go b/pkg/apis/v1/auth.go
new file mode 100644
index 00000000..a46cba87
--- /dev/null
+++ b/pkg/apis/v1/auth.go
@@ -0,0 +1,137 @@
+package v1
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// AuthConfig specifies the STUN/TURN authentication mechanism used by STUNner.
+type AuthConfig struct {
+ // Type of the STUN/TURN authentication mechanism ("static" or "ephemeral"). The deprecated
+ // type name "plaintext" is accepted for "static" and the deprecated type name "longterm"
+ // is accepted for "ephemeral" for compatibility with older versions.
+ Type string `json:"type,omitempty"`
+ // Realm defines the STUN/TURN authentication realm.
+ Realm string `json:"realm,omitempty"`
+ // Credentials specifies the authentication credentials: for "static" at least the keys
+ // "username" and "password" must be set, for "ephemeral" the key "secret" specifying the
+ // shared authentication secret must be set.
+ Credentials map[string]string `json:"credentials"`
+}
+
+// Validate checks a configuration and injects defaults.
+func (req *AuthConfig) Validate() error {
+ if req.Type == "" {
+ req.Type = DefaultAuthType
+ }
+
+ // Normalize
+ atype, err := NewAuthType(req.Type)
+ if err != nil {
+ return err
+ }
+ req.Type = atype.String()
+
+ switch atype {
+ case AuthTypeStatic:
+ _, userFound := req.Credentials["username"]
+ _, passFound := req.Credentials["password"]
+ if !userFound || !passFound {
+ return fmt.Errorf("%s: empty username or password", atype.String())
+ }
+
+ case AuthTypeEphemeral:
+ _, secretFound := req.Credentials["secret"]
+ if !secretFound {
+ return fmt.Errorf("no secret found in %s auth config", atype.String())
+ }
+ default:
+ return fmt.Errorf("invalid authentication type %q", req.Type)
+ }
+
+ if req.Realm == "" {
+ req.Realm = DefaultRealm
+ }
+
+ if req.Credentials == nil {
+ req.Credentials = map[string]string{}
+ }
+
+ return nil
+}
+
+// Name returns the name of the object to be configured.
+func (req *AuthConfig) ConfigName() string {
+ // Singleton!
+ return DefaultAuthName
+}
+
+// DeepEqual compares two configurations.
+func (req *AuthConfig) DeepEqual(other Config) bool {
+ return reflect.DeepEqual(req, other)
+}
+
+// DeepCopyInto copies a configuration.
+func (req *AuthConfig) DeepCopyInto(dst Config) {
+ ret := dst.(*AuthConfig)
+ *ret = *req
+ ret.Credentials = make(map[string]string, len(req.Credentials))
+ for k, v := range req.Credentials {
+ ret.Credentials[k] = v
+ }
+}
+
+// String stringifies the configuration.
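The two credential layouts accepted by Validate above, as a short sketch; the usernames, passwords and the shared secret are placeholders:

package v1

// exampleAuthConfigs is illustrative: it shows the "static" and "ephemeral"
// credential layouts that Validate accepts.
func exampleAuthConfigs() error {
    static := AuthConfig{
        Type:  "static", // the deprecated alias "plaintext" is also accepted
        Realm: "stunner.l7mp.io",
        Credentials: map[string]string{
            "username": "my-user",
            "password": "my-pass",
        },
    }
    if err := static.Validate(); err != nil {
        return err
    }

    ephemeral := AuthConfig{
        Type: "ephemeral", // the deprecated alias "longterm" is also accepted
        Credentials: map[string]string{
            "secret": "my-shared-secret",
        },
    }
    // Validate fills in the default realm here.
    return ephemeral.Validate()
}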
+func (req *AuthConfig) String() string { + status := []string{} + if req.Realm != "" { + status = append(status, fmt.Sprintf("realm=%q", req.Realm)) + } + + if atype, err := NewAuthType(req.Type); err == nil { + switch atype { + case AuthTypeStatic: + u, userFound := req.Credentials["username"] + if userFound { + if u == "" { + u = "" + } else { + u = "" + } + } else { + u = "-" + } + p, passFound := req.Credentials["password"] + if passFound { + if p == "" { + p = "" + } else { + p = "" + } + } else { + p = "-" + } + status = append(status, fmt.Sprintf("username=%q,password=%q", u, p)) + + case AuthTypeEphemeral: + s, secretFound := req.Credentials["secret"] + if secretFound { + if s == "" { + s = "" + } else { + s = "" + } + } else { + s = "-" + } + + status = append(status, fmt.Sprintf("secret=%q", s)) + } + } + + return fmt.Sprintf("%s-auth:{%s}", req.Type, strings.Join(status, ",")) +} + +// AuthStatus represents the authentication status. +type AuthStatus = AuthConfig diff --git a/pkg/apis/v1alpha1/cluster.go b/pkg/apis/v1/cluster.go similarity index 64% rename from pkg/apis/v1alpha1/cluster.go rename to pkg/apis/v1/cluster.go index 315f76b3..1f8746f9 100644 --- a/pkg/apis/v1alpha1/cluster.go +++ b/pkg/apis/v1/cluster.go @@ -1,23 +1,26 @@ -package v1alpha1 +package v1 import ( "fmt" "reflect" "sort" "strings" + + "github.com/l7mp/stunner/internal/util" ) -// ClusterConfig specifies a set of upstream peers STUNner can open transport relay connections -// to. There are two address resolution policies. In STATIC clusters the allowed peer IP addresses -// are explicitly listed in the endpoint list. In STRICT_DNS clusters the endpoints are assumed to -// be proper DNS domain names. STUNner will resolve each domain name in the background and admits a -// new connection only if the peer address matches one of the IP addresses returned by the DNS -// resolver for one of the endpoints. STRICT_DNS clusters are best used with headless Kubernetes -// services. +// ClusterConfig specifies a set of upstream peers to which STUNner can open transport relay +// connections. There are two address resolution policies. In STATIC clusters the allowed peer IP +// addresses are explicitly listed in the endpoint list. In STRICT_DNS clusters the endpoints are +// assumed to be proper DNS domain names: STUNner will resolve each domain name in the background +// and admit a new connection only if the peer address matches one of the IP addresses returned by +// the DNS resolver for one of the endpoints. STRICT_DNS clusters are best used with headless +// Kubernetes services. type ClusterConfig struct { - // Name is the name of the cluster. + // Name of the cluster. Name is mandatory. Name string `json:"name"` - // Type specifies the cluster address resolution policy, either STATIC or STRICT_DNS. + // Type specifies the cluster address resolution policy, either STATIC or + // STRICT_DNS. Default is "STATIC". Type string `json:"type,omitempty"` // Protocol specifies the protocol to be used with the cluster, either UDP (default) or TCP // (not implemented yet). 
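For reference, a STATIC cluster using the new port-range endpoint syntax might be written as below; the cluster name, prefix and port range are illustrative:

package v1

// exampleStaticCluster is illustrative: it admits peers in 10.0.0.0/16 on
// ports 32768-65535 plus one single-address endpoint.
func exampleStaticCluster() error {
    c := ClusterConfig{
        Name: "media-plane",
        Type: "STATIC",
        Endpoints: []string{
            "10.0.0.0/16:<32768-65535>",
            "10.1.2.3",
        },
    }
    // Validate normalizes Type/Protocol and rejects endpoints that do not
    // parse with util.ParseEndpoint.
    return c.Validate()
}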
@@ -32,6 +35,7 @@ func (req *ClusterConfig) Validate() error { return fmt.Errorf("missing name in cluster configuration: %s", req.String()) } + // Normalize if req.Type == "" { req.Type = DefaultClusterType } @@ -39,8 +43,9 @@ func (req *ClusterConfig) Validate() error { if err != nil { return err } - req.Type = t.String() // normalize + req.Type = t.String() + // Normalize if req.Protocol == "" { req.Protocol = DefaultClusterProtocol } @@ -48,9 +53,23 @@ func (req *ClusterConfig) Validate() error { if err != nil { return err } - req.Protocol = p.String() // normalize + req.Protocol = p.String() + + // Do endpoints parse? + if t == ClusterTypeStatic { + for _, ep := range req.Endpoints { + if _, err := util.ParseEndpoint(ep); err != nil { + return err + } + } + } + + if req.Endpoints == nil { + req.Endpoints = []string{} + } sort.Strings(req.Endpoints) + return nil } @@ -94,3 +113,5 @@ func (req *ClusterConfig) String() string { return fmt.Sprintf("%q:{%s}", n, strings.Join(status, ",")) } + +type ClusterStatus = ClusterConfig diff --git a/pkg/apis/v1alpha1/config.go b/pkg/apis/v1/config.go similarity index 70% rename from pkg/apis/v1alpha1/config.go rename to pkg/apis/v1/config.go index a8f5a649..b32986f8 100644 --- a/pkg/apis/v1alpha1/config.go +++ b/pkg/apis/v1/config.go @@ -1,7 +1,7 @@ -// Package v1alpha1 is the v1alpha1 version of the STUNner API. -package v1alpha1 +// Package v1 is the v1 version of the STUNner API. +package v1 -// Config is the main interface for STUNner configuration objects +// Config is the main interface for STUNner configuration objects. type Config interface { // Validate checks a configuration and injects defaults. Validate() error @@ -14,3 +14,9 @@ type Config interface { // String stringifies the configuration. String() string } + +// Status holds the status of a component. +type Status interface { + // String stringifies the status. 
+ String() string +} diff --git a/pkg/apis/v1/default.go b/pkg/apis/v1/default.go new file mode 100644 index 00000000..835911b0 --- /dev/null +++ b/pkg/apis/v1/default.go @@ -0,0 +1,52 @@ +package v1 + +// stunnerd defaults +const ( + ApiVersion string = "v1" + DefaultStunnerName = "default-stunnerd" + DefaultProtocol = "turn-udp" + DefaultClusterProtocol = "udp" + DefaultPort int = 3478 + DefaultLogLevel = "all:INFO" + DefaultRealm = "stunner.l7mp.io" + DefaultAuthType = "static" + DefaultMinRelayPort int = 1 + DefaultMaxRelayPort int = 1<<16 - 1 + DefaultClusterType = "STATIC" + DefaultAdminName = "default-admin-config" + DefaultAuthName = "default-auth-config" +) + +// default ports +const ( + DefaultMetricsPort int = 8080 + DefaultHealthCheckPort int = 8086 + DefaultAuthServicePort int = 8088 + DefaultICETesterPort int = 8089 +) + +// Label/annotation defaults +const ( + DefaultCDSServiceLabelKey = "stunner.l7mp.io/config-discovery-service" + DefaultCDSServiceLabelValue = "enabled" + DefaultAppLabelKey = "app" + DefaultAppLabelValue = "stunner" + DefaultAuthAppLabelValue = "stunner-auth" + DefaultRelatedGatewayKey = "stunner.l7mp.io/related-gateway-name" + DefaultRelatedGatewayNamespace = "stunner.l7mp.io/related-gateway-namespace" + DefaultOwnedByLabelKey = "stunner.l7mp.io/owned-by" + DefaultOwnedByLabelValue = "stunner" +) + +// CDS defaults +const ( + DefaultConfigDiscoveryPort = 13478 + DefaultConfigDiscoveryAddress = ":13478" + DefaultEnvVarName = "STUNNER_NAME" + DefaultEnvVarNamespace = "STUNNER_NAMESPACE" + DefaultEnvVarNodeName = "STUNNER_NODENAME" + DefaultEnvVarConfigOrigin = "STUNNER_CONFIG_ORIGIN" + DefaultCDSServerAddrEnv = "CDS_SERVER_ADDR" + DefaultCDSServerNamespaceEnv = "CDS_SERVER_NAMESPACE" + DefaultCDSServerPortEnv = "CDS_SERVER_PORT" +) diff --git a/pkg/apis/v1/errors.go b/pkg/apis/v1/errors.go new file mode 100644 index 00000000..2b5862d3 --- /dev/null +++ b/pkg/apis/v1/errors.go @@ -0,0 +1,26 @@ +package v1 + +import ( + "errors" + "fmt" + "strings" +) + +var ( + ErrInvalidConf = errors.New("invalid configuration") + ErrNoSuchListener = errors.New("no such listener") + ErrNoSuchCluster = errors.New("no such cluster") + // ErrInvalidRoute = errors.New("invalid route") +) + +type ErrRestarted struct { + Objects []string +} + +func (e ErrRestarted) Error() string { + s := []string{} + for _, o := range e.Objects { + s = append(s, fmt.Sprintf("[%s]", o)) + } + return fmt.Sprintf("restarted: %s", strings.Join(s, ", ")) +} diff --git a/pkg/apis/v1alpha1/listener.go b/pkg/apis/v1/listener.go similarity index 54% rename from pkg/apis/v1alpha1/listener.go rename to pkg/apis/v1/listener.go index f4ec64ee..5d08e3d8 100644 --- a/pkg/apis/v1alpha1/listener.go +++ b/pkg/apis/v1/listener.go @@ -1,4 +1,4 @@ -package v1alpha1 +package v1 import ( "fmt" @@ -9,27 +9,23 @@ import ( // ListenerConfig specifies a server socket on which STUN/TURN connections will be served. type ListenerConfig struct { - // Name is the name of the listener. + // Name of the listener. Name string `json:"name,omitempty"` - // Protocol is the transport protocol used by the listener ("UDP", "TCP", "TLS", - // "DTLS"). The application-layer protocol on top of the transport protocol is always - // STUN/TURN. + // Protocol is the transport protocol ("UDP", "TCP", "TLS", "DTLS") or the complete L4/L7 + // protocol stack ("TURN-UDP", "TURN-TCP", "TURN-TLS", "TURN-DTLS") used by the listener. 
+ // The application-layer protocol on top of the transport protocol is always TURN, so "UDP" + // and "TURN-UDP" are equivalent (and so on for the other protocols). Default is + // "TURN-UDP". Protocol string `json:"protocol,omitempty"` // PublicAddr is the Internet-facing public IP address for the listener (ignored by // STUNner). PublicAddr string `json:"public_address,omitempty"` // PublicPort is the Internet-facing public port for the listener (ignored by STUNner). PublicPort int `json:"public_port,omitempty"` - // Addr is the IP address for the listener. + // Addr is the IP address for the listener. Default is localhost. Addr string `json:"address,omitempty"` - // Port is the port for the listener. + // Port is the port for the listener. Default is the standard TURN port (3478). Port int `json:"port,omitempty"` - // MinRelayPort is the smallest relay port assigned for the relay connections spawned by - // the listener. - MinRelayPort int `json:"min_relay_port,omitempty"` - // MaxRelayPort is the highest relay port assigned for the relay connections spawned by the - // listener. - MaxRelayPort int `json:"max_relay_port,omitempty"` // Cert is the base64-encoded TLS cert. Cert string `json:"cert,omitempty"` // Key is the base64-encoded TLS key. @@ -44,6 +40,7 @@ func (req *ListenerConfig) Validate() error { return fmt.Errorf("missing name in listener configuration: %s", req.String()) } + // Normalize if req.Protocol == "" { req.Protocol = DefaultProtocol } @@ -51,7 +48,7 @@ func (req *ListenerConfig) Validate() error { if err != nil { return err } - req.Protocol = proto.String() // normalize + req.Protocol = proto.String() if req.Addr == "" { req.Addr = "0.0.0.0" @@ -60,19 +57,12 @@ func (req *ListenerConfig) Validate() error { if req.Port == 0 { req.Port = DefaultPort } - if req.MinRelayPort == 0 { - req.MinRelayPort = DefaultMinRelayPort - } - if req.MaxRelayPort == 0 { - req.MaxRelayPort = DefaultMaxRelayPort - } - for _, p := range []int{req.Port, req.MinRelayPort, req.MaxRelayPort} { - if p <= 0 || p > 65535 { - return fmt.Errorf("invalid port: %d", p) - } + if req.Port <= 0 || req.Port > 65535 { + return fmt.Errorf("invalid port: %d", req.Port) } - if proto == ListenerProtocolTLS || proto == ListenerProtocolDTLS { + if proto == ListenerProtocolTURNTLS || proto == ListenerProtocolTURNDTLS || + proto == ListenerProtocolTLS || proto == ListenerProtocolDTLS { if req.Cert == "" { return fmt.Errorf("empty TLS cert for %s listener", proto.String()) } @@ -81,6 +71,10 @@ func (req *ListenerConfig) Validate() error { } } + if req.Routes == nil { + req.Routes = []string{} + } + sort.Strings(req.Routes) return nil } @@ -90,9 +84,8 @@ func (req *ListenerConfig) ConfigName() string { return req.Name } -// DeepEqual compares two configurations. +// DeepEqual compares two configurations. Routes must be sorted in both configs! func (req *ListenerConfig) DeepEqual(other Config) bool { - // routes must be sorted in both configs! 
 	return reflect.DeepEqual(req, other)
 }
@@ -113,26 +106,14 @@ func (req *ListenerConfig) String() string {
 		n = req.Name
 	}
 
-	pr, a, p := "udp", "-", "-"
-	if req.Protocol != "" {
-		pr = req.Protocol
-	}
-	if req.Addr != "" {
-		a = req.Addr
+	addr := "0.0.0.0"
+	if req.Addr != "" && req.Addr != "$STUNNER_ADDR" {
+		addr = req.Addr
 	}
-	if req.Port != 0 {
-		p = fmt.Sprintf("%d", req.Port)
-	}
-	min, max := 0, 65535
-	if req.MinRelayPort != 0 {
-		min = req.MinRelayPort
-	}
-	if req.MaxRelayPort != 0 {
-		max = req.MaxRelayPort
-	}
-	status = append(status, fmt.Sprintf("%s://%s:%s<%d-%d>", pr, a, p, min, max))
-	a, p = "-", "-"
+	status = append(status, fmt.Sprintf("turn://%s:%d", addr, req.Port))
+
+	a, p := "-", "-"
 	if req.PublicAddr != "" {
 		a = req.PublicAddr
 	}
@@ -153,3 +134,50 @@ func (req *ListenerConfig) String() string {
 
 	return fmt.Sprintf("%q:{%s}", n, strings.Join(status, ","))
 }
+
+// GetListenerURI is a helper that can output two types of Listener URIs: one with "://" after the
+// scheme and one with only ":" (as per RFC 7065).
+func (req *ListenerConfig) GetListenerURI(rfc7065 bool) (string, error) {
+	proto, err := NewListenerProtocol(req.Protocol)
+	if err != nil {
+		return "", err
+	}
+
+	service, protocol := "", ""
+	switch proto {
+	case ListenerProtocolTURNUDP:
+		service = "turn"
+		protocol = "udp"
+	case ListenerProtocolTURNTCP:
+		service = "turn"
+		protocol = "tcp"
+	case ListenerProtocolTURNDTLS:
+		service = "turns"
+		protocol = "udp"
+	case ListenerProtocolTURNTLS:
+		service = "turns"
+		protocol = "tcp"
+	}
+
+	addr := req.PublicAddr
+	if addr == "" {
+		// Fallback to server addr
+		addr = req.Addr
+	}
+
+	port := req.PublicPort
+	if port == 0 {
+		// Fallback to server port
+		port = req.Port
+	}
+
+	var uri string
+	if rfc7065 {
+		uri = fmt.Sprintf("%s:%s:%d?transport=%s", service, addr, port, protocol)
+	} else {
+		uri = fmt.Sprintf("%s://%s:%d?transport=%s", service, addr, port, protocol)
+	}
+	return uri, nil
+}
+
+type ListenerStatus = ListenerConfig
diff --git a/pkg/apis/v1/stunner.go b/pkg/apis/v1/stunner.go
new file mode 100644
index 00000000..ebcff12e
--- /dev/null
+++ b/pkg/apis/v1/stunner.go
@@ -0,0 +1,248 @@
+package v1
+
+import (
+	"fmt"
+	// "sort"
+	"strings"
+)
+
+// StunnerConfig specifies the configuration for the STUNner daemon.
+type StunnerConfig struct {
+	// ApiVersion is the version of the STUNner API implemented. Must be set to "v1".
+	ApiVersion string `json:"version"`
+	// Admin holds the administrative configuration.
+	Admin AdminConfig `json:"admin,omitempty"`
+	// Auth defines the STUN/TURN authentication mechanism.
+	Auth AuthConfig `json:"auth"`
+	// Listeners defines the server sockets exposed to clients.
+	Listeners []ListenerConfig `json:"listeners,omitempty"`
+	// Clusters defines the upstream endpoints to which relay transport connections can be made
+	// by clients.
+	Clusters []ClusterConfig `json:"clusters,omitempty"`
+}
+
+// Validate checks if a STUNner configuration is correct and injects defaults.
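The Validate method that follows checks such an assembly end to end. As a quick illustration of how the pieces fit together, here is a hypothetical sketch that is not part of the patch: it builds a minimal StunnerConfig, validates it, and renders the client-facing TURN URI of its listener with GetListenerURI. The gateway name, public IP, credentials and endpoint are illustrative, and the Admin fields (Name, LogLevel) are those used by Summary elsewhere in this diff.

package main

import (
	"fmt"

	stnrv1 "github.com/l7mp/stunner/pkg/apis/v1"
)

func main() {
	conf := stnrv1.StunnerConfig{
		ApiVersion: stnrv1.ApiVersion, // "v1"
		Admin: stnrv1.AdminConfig{
			Name:     "stunner/udp-gateway", // illustrative gateway name
			LogLevel: "all:INFO",
		},
		Auth: stnrv1.AuthConfig{
			Type:  "static",
			Realm: "stunner.l7mp.io",
			Credentials: map[string]string{
				"username": "my-user", // illustrative credentials
				"password": "my-pass",
			},
		},
		Listeners: []stnrv1.ListenerConfig{{
			Name:       "udp-listener",
			Protocol:   "turn-udp",
			Port:       3478,
			PublicAddr: "203.0.113.1", // illustrative public IP
			PublicPort: 3478,
			Routes:     []string{"media-servers"},
		}},
		Clusters: []stnrv1.ClusterConfig{{
			Name:      "media-servers",
			Type:      "STATIC",
			Endpoints: []string{"10.0.0.10"},
		}},
	}

	// Validate normalizes protocol and type names and injects defaults in every sub-object.
	if err := conf.Validate(); err != nil {
		panic(err)
	}

	// RFC 7065 form: "turn:203.0.113.1:3478?transport=udp"; pass false for the "://" form.
	uri, err := conf.Listeners[0].GetListenerURI(true)
	if err != nil {
		panic(err)
	}
	fmt.Println(uri)
}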
+func (req *StunnerConfig) Validate() error { + // ApiVersion + if req.ApiVersion != ApiVersion { + return fmt.Errorf("unsupported API version: %q", req.ApiVersion) + } + + if err := req.Admin.Validate(); err != nil { + return err + } + + if err := req.Auth.Validate(); err != nil { + return err + } + + if req.Listeners == nil { + req.Listeners = []ListenerConfig{} + } else { + for i, l := range req.Listeners { + if err := l.Validate(); err != nil { + return err + } + req.Listeners[i] = l + } + } + + if req.Clusters == nil { + req.Clusters = []ClusterConfig{} + } else { + for i, c := range req.Clusters { + if err := c.Validate(); err != nil { + return err + } + req.Clusters[i] = c + } + } + + return nil +} + +// Name returns the name of the object to be configured. +func (req *StunnerConfig) ConfigName() string { + return req.Admin.Name +} + +// DeepEqual compares two configurations. +func (a *StunnerConfig) DeepEqual(conf Config) bool { + b, ok := conf.(*StunnerConfig) + if !ok { + return false + } + + if a.ApiVersion != b.ApiVersion { + return false + } + + if !a.Admin.DeepEqual(&b.Admin) { + return false + } + + if !a.Auth.DeepEqual(&b.Auth) { + return false + } + + if len(a.Listeners) != len(b.Listeners) { + return false + } + for i := range a.Listeners { + if !a.Listeners[i].DeepEqual(&b.Listeners[i]) { + return false + } + } + + if len(a.Clusters) != len(b.Clusters) { + return false + } + for i := range a.Clusters { + if !a.Clusters[i].DeepEqual(&b.Clusters[i]) { + return false + } + } + + return true +} + +// DeepCopyInto copies a configuration. +func (req *StunnerConfig) DeepCopyInto(dst Config) { + ret := dst.(*StunnerConfig) + ret.ApiVersion = req.ApiVersion + req.Admin.DeepCopyInto(&ret.Admin) + req.Auth.DeepCopyInto(&ret.Auth) + + ret.Listeners = make([]ListenerConfig, len(req.Listeners)) + for i := range req.Listeners { + req.Listeners[i].DeepCopyInto(&ret.Listeners[i]) + } + + ret.Clusters = make([]ClusterConfig, len(req.Clusters)) + for i := range req.Clusters { + req.Clusters[i].DeepCopyInto(&ret.Clusters[i]) + } +} + +// GetListenerConfig finds a Listener by name in a StunnerConfig or returns an error. +func (req *StunnerConfig) GetListenerConfig(name string) (ListenerConfig, error) { + for _, l := range req.Listeners { + if l.Name == name { + return l, nil + } + } + + return ListenerConfig{}, ErrNoSuchListener +} + +// GetClusterConfig finds a Cluster by name in a StunnerConfig or returns an error. +func (req *StunnerConfig) GetClusterConfig(name string) (ClusterConfig, error) { + for _, c := range req.Clusters { + if c.Name == name { + return c, nil + } + } + + return ClusterConfig{}, ErrNoSuchCluster +} + +// String stringifies the configuration. +func (req *StunnerConfig) String() string { + status := []string{} + status = append(status, fmt.Sprintf("version=%q", req.ApiVersion)) + status = append(status, req.Admin.String()) + status = append(status, req.Auth.String()) + + ls := []string{} + for _, l := range req.Listeners { + ls = append(ls, l.String()) + } + status = append(status, fmt.Sprintf("listeners=[%s]", strings.Join(ls, ","))) + + cs := []string{} + for _, c := range req.Clusters { + cs = append(cs, c.String()) + } + status = append(status, fmt.Sprintf("clusters=[%s]", strings.Join(cs, ","))) + + return fmt.Sprintf("{%s}", strings.Join(status, ",")) +} + +// Summary returns a stringified configuration. 
+func (req *StunnerConfig) Summary() string {
+	// isEnabled = func(b bool) string { if b {return "enabled"} else {return "disabled"}}
+	strOrNone := func(s string) string {
+		if s != "" {
+			return s
+		} else {
+			return "<none>"
+		}
+	}
+	intOrNone := func(s int) string {
+		if s != 0 {
+			return fmt.Sprintf("%d", s)
+		} else {
+			return "<none>"
+		}
+	}
+	status := fmt.Sprintf("Gateway: %s (loglevel: %q)\n", req.Admin.Name, req.Admin.LogLevel)
+	if t, err := NewAuthType(req.Auth.Type); err == nil {
+		if t == AuthTypeStatic {
+			status += fmt.Sprintf("Authentication type: static, username/password: %s/%s\n",
+				req.Auth.Credentials["username"], req.Auth.Credentials["password"])
+		} else {
+			status += fmt.Sprintf("Authentication type: ephemeral, shared-secret: %s\n",
+				req.Auth.Credentials["secret"])
+		}
+	}
+
+	status += "Listeners:\n"
+	for _, l := range req.Listeners {
+		status += fmt.Sprintf(" - Name: %s\n", l.Name)
+		status += fmt.Sprintf(" Protocol: %s\n", l.Protocol)
+		status += fmt.Sprintf(" Public address:port: %s:%s\n", strOrNone(l.PublicAddr), intOrNone(l.PublicPort))
+		status += fmt.Sprintf(" Routes: [%s]\n", strings.Join(l.Routes, ", "))
+		ep := []string{}
+		for _, r := range l.Routes {
+			if c, err := req.GetClusterConfig(r); err == nil {
+				ep = append(ep, c.Endpoints...)
+			}
+		}
+		status += fmt.Sprintf(" Endpoints: [%s]\n", strings.Join(ep, ", "))
+	}
+
+	return status
+}
+
+// StunnerStatus represents the status of the STUNner daemon.
+type StunnerStatus struct {
+	ApiVersion      string            `json:"version"`
+	Admin           *AdminStatus      `json:"admin"`
+	Auth            *AuthStatus       `json:"auth"`
+	Listeners       []*ListenerStatus `json:"listeners"`
+	Clusters        []*ClusterStatus  `json:"clusters"`
+	AllocationCount int               `json:"allocationCount"`
+	Status          string            `json:"status"`
+}
+
+// String stringifies the status.
+func (s *StunnerStatus) String() string {
+	ls := []string{}
+	for _, l := range s.Listeners {
+		ls = append(ls, l.String())
+	}
+	cs := []string{}
+	for _, c := range s.Clusters {
+		cs = append(cs, c.String())
+	}
+
+	return fmt.Sprintf("%s/%s/%s/%s/allocs:%d/status=%s",
+		s.Admin.String(), s.Auth.String(), ls, cs, s.AllocationCount, s.Status)
+}
+
+// Summary summarizes the status.
+func (s *StunnerStatus) Summary() string {
+	return fmt.Sprintf("%s\n\t%s\n\tlisteners:%d/clusters:%d\n\tallocs:%d/status=%s",
+		s.Admin.String(), s.Auth.String(), len(s.Listeners), len(s.Clusters),
+		s.AllocationCount, s.Status)
+}
diff --git a/pkg/apis/v1/util.go b/pkg/apis/v1/util.go
new file mode 100644
index 00000000..0cafe3b3
--- /dev/null
+++ b/pkg/apis/v1/util.go
@@ -0,0 +1,198 @@
+package v1
+
+import (
+	"fmt"
+	"strings"
+)
+
+// AuthType specifies the type of the STUN/TURN authentication mechanism used by STUNner.
+type AuthType int
+
+const (
+	AuthTypeStatic AuthType = iota + 1
+	AuthTypeEphemeral
+	AuthTypeUnknown
+)
+
+const (
+	authTypeStaticStr    = "static"
+	authTypeEphemeralStr = "ephemeral"
+	AuthTypePlainText    = AuthTypeStatic
+	AuthTypeLongTerm     = AuthTypeEphemeral
+	authTypePlainTextStr = "plaintext"
+	authTypeLongTermStr  = "longterm"
+)
+
+// NewAuthType parses the authentication mechanism specification.
+func NewAuthType(raw string) (AuthType, error) {
+	switch raw {
+	case authTypeStaticStr, authTypePlainTextStr:
+		return AuthTypeStatic, nil
+	case authTypeEphemeralStr, authTypeLongTermStr:
+		return AuthTypeEphemeral, nil
+	default:
+		return AuthTypeUnknown, fmt.Errorf("unknown authentication type: \"%s\"", raw)
+	}
+}
+
+// String returns a string representation for the authentication mechanism.
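Before moving on to the stringification helpers below, it is worth seeing how the "ephemeral" type parsed above is used in practice: clients authenticate with time-windowed TURN credentials derived from the shared "secret" credential. The following hypothetical sketch, not part of the patch, shows the usual derivation (an expiry-timestamp-prefixed username and a password that is base64(HMAC-SHA1(secret, username))), which is the scheme the GetLongTermCredential helper in pkg/authentication further down in this diff is meant for. The user ID, secret and lifetime are illustrative.

package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"fmt"
	"time"
)

// ephemeralCredential derives a time-windowed TURN username/password pair from a shared
// secret: the username carries an expiry timestamp, the password is the base64-encoded
// HMAC-SHA1 of the username keyed with the shared secret.
func ephemeralCredential(userID, secret string, ttl time.Duration) (string, string) {
	username := fmt.Sprintf("%d:%s", time.Now().Add(ttl).Unix(), userID)
	mac := hmac.New(sha1.New, []byte(secret))
	mac.Write([]byte(username)) // hash writes never fail
	password := base64.StdEncoding.EncodeToString(mac.Sum(nil))
	return username, password
}

func main() {
	// Illustrative values: any application-level user ID plus the shared secret held in the
	// "secret" key of the ephemeral AuthConfig.
	u, p := ephemeralCredential("user-1", "my-shared-secret", time.Hour)
	fmt.Println(u, p)
}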
+func (a AuthType) String() string { + switch a { + case AuthTypeStatic: + return authTypeStaticStr + case AuthTypeEphemeral: + return authTypeEphemeralStr + default: + return "" + } +} + +// ListenerProtocol specifies the network protocol for a listener. +type ListenerProtocol int + +const ( + ListenerProtocolUnknown ListenerProtocol = iota + ListenerProtocolUDP + ListenerProtocolTCP + ListenerProtocolTLS + ListenerProtocolDTLS + ListenerProtocolTURNUDP + ListenerProtocolTURNTCP + ListenerProtocolTURNTLS + ListenerProtocolTURNDTLS +) + +const ( + listenerProtocolUDPStr = "UDP" + listenerProtocolTCPStr = "TCP" + listenerProtocolTLSStr = "TLS" + listenerProtocolDTLSStr = "DTLS" + listenerProtocolTURNUDPStr = "TURN-UDP" + listenerProtocolTURNTCPStr = "TURN-TCP" + listenerProtocolTURNTLSStr = "TURN-TLS" + listenerProtocolTURNDTLSStr = "TURN-DTLS" +) + +// NewListenerProtocol parses the protocol specification. +func NewListenerProtocol(raw string) (ListenerProtocol, error) { + switch strings.ToUpper(raw) { + case listenerProtocolUDPStr: + return ListenerProtocolUDP, nil + case listenerProtocolTCPStr: + return ListenerProtocolTCP, nil + case listenerProtocolTLSStr: + return ListenerProtocolTLS, nil + case listenerProtocolDTLSStr: + return ListenerProtocolDTLS, nil + case listenerProtocolTURNUDPStr: + return ListenerProtocolTURNUDP, nil + case listenerProtocolTURNTCPStr: + return ListenerProtocolTURNTCP, nil + case listenerProtocolTURNTLSStr: + return ListenerProtocolTURNTLS, nil + case listenerProtocolTURNDTLSStr: + return ListenerProtocolTURNDTLS, nil + default: + return ListenerProtocol(ListenerProtocolUnknown), + fmt.Errorf("unknown listener protocol: \"%s\"", raw) + } +} + +// String returns a string representation of a listener protocol. +func (l ListenerProtocol) String() string { + switch l { + case ListenerProtocolUDP: + return listenerProtocolUDPStr + case ListenerProtocolTCP: + return listenerProtocolTCPStr + case ListenerProtocolTLS: + return listenerProtocolTLSStr + case ListenerProtocolDTLS: + return listenerProtocolDTLSStr + case ListenerProtocolTURNUDP: + return listenerProtocolTURNUDPStr + case ListenerProtocolTURNTCP: + return listenerProtocolTURNTCPStr + case ListenerProtocolTURNTLS: + return listenerProtocolTURNTLSStr + case ListenerProtocolTURNDTLS: + return listenerProtocolTURNDTLSStr + default: + return "" + } +} + +// ClusterType specifies the cluster address resolution policy. +type ClusterType int + +const ( + ClusterTypeStatic ClusterType = iota + 1 + ClusterTypeStrictDNS + ClusterTypeUnknown +) + +const ( + clusterTypeStaticStr = "STATIC" + clusterTypeStrictDNSStr = "STRICT_DNS" +) + +func NewClusterType(raw string) (ClusterType, error) { + switch strings.ToUpper(raw) { + case clusterTypeStaticStr: + return ClusterTypeStatic, nil + case clusterTypeStrictDNSStr: + return ClusterTypeStrictDNS, nil + default: + return ClusterType(ClusterTypeUnknown), fmt.Errorf("unknown cluster type: \"%s\"", raw) + } +} + +func (l ClusterType) String() string { + switch l { + case ClusterTypeStatic: + return clusterTypeStaticStr + case ClusterTypeStrictDNS: + return clusterTypeStrictDNSStr + default: + return "" + } +} + +// ClusterProtocol specifies the network protocol for a cluster. +type ClusterProtocol int + +const ( + ClusterProtocolUDP ClusterProtocol = iota + 1 + ClusterProtocolTCP + ClusterProtocolUnknown +) + +const ( + clusterProtocolUDPStr = "UDP" + clusterProtocolTCPStr = "TCP" +) + +// NewClusterProtocol parses the protocol specification. 
+func NewClusterProtocol(raw string) (ClusterProtocol, error) { + switch strings.ToUpper(raw) { + case clusterProtocolUDPStr: + return ClusterProtocolUDP, nil + case clusterProtocolTCPStr: + return ClusterProtocolTCP, nil + default: + return ClusterProtocol(ClusterProtocolUnknown), + fmt.Errorf("unknown cluster protocol: \"%s\"", raw) + } +} + +// String returns a string representation of a cluster protocol. +func (p ClusterProtocol) String() string { + switch p { + case ClusterProtocolUDP: + return clusterProtocolUDPStr + case ClusterProtocolTCP: + return clusterProtocolTCPStr + default: + return "" + } +} diff --git a/pkg/apis/v1alpha1/default.go b/pkg/apis/v1alpha1/default.go index 680c2bc8..17106798 100644 --- a/pkg/apis/v1alpha1/default.go +++ b/pkg/apis/v1alpha1/default.go @@ -2,17 +2,12 @@ package v1alpha1 const ApiVersion string = "v1alpha1" const DefaultStunnerName = "default-stunnerd" -const DefaultProtocol = "udp" +const DefaultProtocol = "turn-udp" const DefaultClusterProtocol = "udp" const DefaultPort int = 3478 const DefaultLogLevel = "all:INFO" const DefaultRealm = "stunner.l7mp.io" const DefaultAuthType = "plaintext" - -// no more default user/pass pairs -// const DefaultUsername = "user1" -// const DefaultPassword = "passwd1" - const DefaultMinRelayPort int = 1 << 15 const DefaultMaxRelayPort int = 1<<16 - 1 const DefaultClusterType = "STATIC" diff --git a/pkg/apis/v1alpha1/errors.go b/pkg/apis/v1alpha1/errors.go index 786651df..5adba2e1 100644 --- a/pkg/apis/v1alpha1/errors.go +++ b/pkg/apis/v1alpha1/errors.go @@ -1,26 +1,11 @@ package v1alpha1 -import ( - "errors" - "fmt" - "strings" -) +import stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" var ( - ErrInvalidConf = errors.New("invalid configuration") - ErrNoSuchListener = errors.New("no such listener") - ErrNoSuchCluster = errors.New("no such cluster") - // ErrInvalidRoute = errors.New("invalid route") + ErrInvalidConf = stnrv1.ErrInvalidConf + ErrNoSuchListener = stnrv1.ErrNoSuchListener + ErrNoSuchCluster = stnrv1.ErrNoSuchCluster ) -type ErrRestarted struct { - Objects []string -} - -func (e ErrRestarted) Error() string { - s := []string{} - for _, o := range e.Objects { - s = append(s, fmt.Sprintf("[%s]", o)) - } - return fmt.Sprintf("restarted: %s", strings.Join(s, ", ")) -} +type ErrRestarted = stnrv1.ErrRestarted diff --git a/pkg/apis/v1alpha1/stunner.go b/pkg/apis/v1alpha1/stunner.go index 0ae9b8f1..17662d04 100644 --- a/pkg/apis/v1alpha1/stunner.go +++ b/pkg/apis/v1alpha1/stunner.go @@ -2,10 +2,30 @@ package v1alpha1 import ( "fmt" - // "sort" + "maps" "strings" + + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" ) +// Config is the main interface for STUNner configuration objects +type Config = stnrv1.Config + +// AdminConfig holds the administrative configuration. +type AdminConfig = stnrv1.AdminConfig + +// ClusterConfig specifies a set of upstream peers STUNner can open transport relay connections +// to. There are two address resolution policies. In STATIC clusters the allowed peer IP addresses +// are explicitly listed in the endpoint list. In STRICT_DNS clusters the endpoints are assumed to +// be proper DNS domain names. STUNner will resolve each domain name in the background and admits a +// new connection only if the peer address matches one of the IP addresses returned by the DNS +// resolver for one of the endpoints. STRICT_DNS clusters are best used with headless Kubernetes +// services. 
+type ClusterConfig = stnrv1.ClusterConfig + +// ListenerConfig specifies a server socket on which STUN/TURN connections will be served. +type ListenerConfig = stnrv1.ListenerConfig + // StunnerConfig specifies the configuration of the the STUnner daemon. type StunnerConfig struct { // ApiVersion is the version of the STUNner API implemented. @@ -170,3 +190,37 @@ func (req *StunnerConfig) GetClusterConfig(name string) (ClusterConfig, error) { return ClusterConfig{}, ErrNoSuchCluster } + +// ConvertToV1 upgrades a v1alpha1 StunnerConfig to a v1. +func ConvertToV1(sv1a1 *StunnerConfig) (*stnrv1.StunnerConfig, error) { + sv1 := stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + } + + (*stnrv1.AdminConfig)(&sv1a1.Admin).DeepCopyInto(&sv1.Admin) + + // auth needs to be converted + at, err := stnrv1.NewAuthType(sv1a1.Auth.Type) + if err != nil { + return nil, err + } + + sv1.Auth = stnrv1.AuthConfig{ + Type: at.String(), + Realm: sv1a1.Auth.Realm, + Credentials: make(map[string]string), + } + maps.Copy(sv1.Auth.Credentials, sv1a1.Auth.Credentials) + + sv1.Listeners = make([]stnrv1.ListenerConfig, len(sv1a1.Listeners)) + for i := range sv1a1.Listeners { + (*stnrv1.ListenerConfig)(&sv1a1.Listeners[i]).DeepCopyInto(&sv1.Listeners[i]) + } + + sv1.Clusters = make([]stnrv1.ClusterConfig, len(sv1a1.Clusters)) + for i := range sv1a1.Clusters { + (*stnrv1.ClusterConfig)(&sv1a1.Clusters[i]).DeepCopyInto(&sv1.Clusters[i]) + } + + return &sv1, nil +} diff --git a/pkg/apis/v1alpha1/util.go b/pkg/apis/v1alpha1/util.go index dc779f0b..db3be683 100644 --- a/pkg/apis/v1alpha1/util.go +++ b/pkg/apis/v1alpha1/util.go @@ -2,16 +2,17 @@ package v1alpha1 import ( "fmt" - "strings" + + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" ) // AuthType species the type of the STUN/TURN authentication mechanism used by STUNner -type AuthType int +type AuthType stnrv1.AuthType const ( - AuthTypePlainText AuthType = iota + 1 - AuthTypeLongTerm - AuthTypeUnknown + AuthTypePlainText AuthType = AuthType(stnrv1.AuthTypeStatic) + AuthTypeLongTerm AuthType = AuthType(stnrv1.AuthTypeLongTerm) + AuthTypeUnknown AuthType = AuthType(stnrv1.AuthTypeUnknown) ) const ( @@ -44,127 +45,10 @@ func (a AuthType) String() string { } // ListenerProtocol specifies the network protocol for a listener -type ListenerProtocol int - -const ( - ListenerProtocolUDP ListenerProtocol = iota + 1 - ListenerProtocolTCP - ListenerProtocolTLS - ListenerProtocolDTLS - ListenerProtocolUnknown -) - -const ( - listenerProtocolUDPStr = "UDP" - listenerProtocolTCPStr = "TCP" - listenerProtocolTLSStr = "TLS" - listenerProtocolDTLSStr = "DTLS" -) - -// NewListenerProtocol parses the protocol specification -func NewListenerProtocol(raw string) (ListenerProtocol, error) { - switch strings.ToUpper(raw) { - case listenerProtocolUDPStr: - return ListenerProtocolUDP, nil - case listenerProtocolTCPStr: - return ListenerProtocolTCP, nil - case listenerProtocolTLSStr: - return ListenerProtocolTLS, nil - case listenerProtocolDTLSStr: - return ListenerProtocolDTLS, nil - default: - return ListenerProtocol(ListenerProtocolUnknown), - fmt.Errorf("unknown listener protocol: \"%s\"", raw) - } -} - -// String returns a string representation of a listener protocol -func (l ListenerProtocol) String() string { - switch l { - case ListenerProtocolUDP: - return listenerProtocolUDPStr - case ListenerProtocolTCP: - return listenerProtocolTCPStr - case ListenerProtocolTLS: - return listenerProtocolTLSStr - case ListenerProtocolDTLS: - return listenerProtocolDTLSStr - default: - return 
"" - } -} +type ListenerProtocol = stnrv1.ListenerProtocol // ClusterType specifies the cluster address resolution policy -type ClusterType int - -const ( - ClusterTypeStatic ClusterType = iota + 1 - ClusterTypeStrictDNS - ClusterTypeUnknown -) - -const ( - clusterTypeStaticStr = "STATIC" - clusterTypeStrictDNSStr = "STRICT_DNS" -) - -func NewClusterType(raw string) (ClusterType, error) { - switch strings.ToUpper(raw) { - case clusterTypeStaticStr: - return ClusterTypeStatic, nil - case clusterTypeStrictDNSStr: - return ClusterTypeStrictDNS, nil - default: - return ClusterType(ClusterTypeUnknown), fmt.Errorf("unknown cluster type: \"%s\"", raw) - } -} - -func (l ClusterType) String() string { - switch l { - case ClusterTypeStatic: - return clusterTypeStaticStr - case ClusterTypeStrictDNS: - return clusterTypeStrictDNSStr - default: - return "" - } -} +type ClusterType = stnrv1.ClusterType // ClusterProtocol specifies the network protocol for a cluster -type ClusterProtocol int - -const ( - ClusterProtocolUDP ClusterProtocol = iota + 1 - ClusterProtocolTCP - ClusterProtocolUnknown -) - -const ( - clusterProtocolUDPStr = "UDP" - clusterProtocolTCPStr = "TCP" -) - -// NewClusterProtocol parses the protocol specification -func NewClusterProtocol(raw string) (ClusterProtocol, error) { - switch strings.ToUpper(raw) { - case clusterProtocolUDPStr: - return ClusterProtocolUDP, nil - case clusterProtocolTCPStr: - return ClusterProtocolTCP, nil - default: - return ClusterProtocol(ClusterProtocolUnknown), - fmt.Errorf("unknown cluster protocol: \"%s\"", raw) - } -} - -// String returns a string representation of a cluster protocol -func (p ClusterProtocol) String() string { - switch p { - case ClusterProtocolUDP: - return clusterProtocolUDPStr - case ClusterProtocolTCP: - return clusterProtocolTCPStr - default: - return "" - } -} +type ClusterProtocol = stnrv1.ClusterProtocol diff --git a/pkg/authentication/auth.go b/pkg/authentication/auth.go index 55768263..c1092a6e 100644 --- a/pkg/authentication/auth.go +++ b/pkg/authentication/auth.go @@ -10,7 +10,7 @@ import ( "strings" "time" - "github.com/pion/turn/v2" + "github.com/pion/turn/v4" ) // UsernameSeparator is the separator character used in time-windowed TURN authentication as @@ -72,7 +72,7 @@ func GetLongTermCredential(username string, sharedSecret string) (string, error) } // GenerateAuthKey is a convenience function to easily generate keys in the format used by -// AuthHandler. Re-exported from `pion/turn/v2` so that our callers will have a single import. +// AuthHandler. Re-exported from `pion/turn` so that our callers will have a single import. 
func GenerateAuthKey(username, realm, password string) []byte { return turn.GenerateAuthKey(username, realm, password) } diff --git a/pkg/buildinfo/build_info.go b/pkg/buildinfo/build_info.go new file mode 100644 index 00000000..4665b91f --- /dev/null +++ b/pkg/buildinfo/build_info.go @@ -0,0 +1,13 @@ +package buildinfo + +import "fmt" + +type BuildInfo struct { + Version string + CommitHash string + BuildDate string +} + +func (i BuildInfo) String() string { + return fmt.Sprintf("version %s (%s) built on %s", i.Version, i.CommitHash, i.BuildDate) +} diff --git a/pkg/config/api/stunner_openapi.yaml b/pkg/config/api/stunner_openapi.yaml new file mode 100644 index 00000000..66195d87 --- /dev/null +++ b/pkg/config/api/stunner_openapi.yaml @@ -0,0 +1,164 @@ +openapi: 3.0.3 +info: + title: REST API for STUNner dataplane config + description: The STUNner dataplane API exposes a reconfigurable multi-protocol multi-listener TURN service for WebRTC media ingestion. + version: 1.0.0 +servers: + - url: "" +tags: + - name: STUNner + description: 'A Kubernetes media gateway for WebRTC. Contact: info@l7mp.io' +paths: + /api/v1/configs: + get: + description: API to list or watch config objects in all namespaces. + operationId: listV1Configs + parameters: + - name: watch + in: query + description: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. + schema: + type: boolean + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/v1.ConfigList' + default: + description: Unexpected error + content: + application/json: + schema: + $ref: '#/components/schemas/v1.Error' + /api/v1/configs/{namespace}: + get: + description: API to list or watch config objects in a specific namespace. + operationId: listV1ConfigsNamespace + parameters: + - name: namespace + in: path + description: Namespace of the gateway for which config is requested. + required: true + schema: + type: string + - name: watch + in: query + description: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. + schema: + type: boolean + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/v1.ConfigList' + default: + description: Unexpected error + content: + application/json: + schema: + $ref: '#/components/schemas/v1.Error' + /api/v1/configs/{namespace}/{name}: + get: + description: API to list or watch config objects for a specific gateway in a specific namespace. + operationId: getV1ConfigNamespaceName + parameters: + - name: namespace + in: path + description: Namespace of the gateway for which config is requested. + required: true + schema: + type: string + - name: name + in: path + description: Name of the gateway for which config is requested. + required: true + schema: + type: string + - name: watch + in: query + description: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. + schema: + type: boolean + - name: node + in: query + description: Name of the node the client runs on. + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/v1.Config' + "404": + description: Not found. + content: + application/json: + schema: + $ref: '#/components/schemas/v1.Error' + "500": + description: Internal server error. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/v1.Error' + default: + description: Unexpected error + content: + application/json: + schema: + $ref: '#/components/schemas/v1.Error' +components: + schemas: + v1.ClientInfo: + description: Client description. + type: object + required: + - id + properties: + id: + description: Client id. + type: string + node: + description: Name of the node client is deployed to. + type: string + v1.ConfigList: + description: ConfigList is a list of Configs. + type: object + required: + - version + - items + properties: + version: + description: version defines the versioned schema of this object. + type: string + items: + description: Items is the list of Config objects in the list. + type: array + items: + $ref: '#/components/schemas/v1.Config' + v1.Config: + description: 'Config provides a STUNner config. Schema is defined in https://github.com/l7mp/stunner/tree/main/pkg/apis/v1' + x-go-type: "stunnerv1.StunnerConfig" + x-go-type-import: + name: stunnerv1 + path: "github.com/l7mp/stunner/pkg/apis/v1" + v1.Error: + description: API error. + type: object + required: + - code + - message + properties: + code: + description: Error code. + type: integer + format: int32 + message: + description: Error message. + type: string diff --git a/pkg/config/cds_test.go b/pkg/config/cds_test.go new file mode 100644 index 00000000..50a178d0 --- /dev/null +++ b/pkg/config/cds_test.go @@ -0,0 +1,1387 @@ +package server + +import ( + "context" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/go-logr/zapr" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" + "github.com/l7mp/stunner/pkg/config/client" + "github.com/l7mp/stunner/pkg/config/server" + "github.com/l7mp/stunner/pkg/logger" +) + +// var testerLogLevel = zapcore.Level(-4) +// var testerLogLevel = zapcore.DebugLevel +var testerLogLevel = zapcore.ErrorLevel + +// const stunnerLogLevel = "all:TRACE" +const stunnerLogLevel = "all:ERROR" + +// run on random port +func getRandCDSAddr() string { + rndPort := rand.Intn(10000) + 20000 + return fmt.Sprintf(":%d", rndPort) +} + +func init() { + // setup a fast pinger so that we get a timely error notification + client.PingPeriod = 500 * time.Millisecond + client.PongWait = 800 * time.Millisecond + client.WriteWait = 200 * time.Millisecond + client.RetryPeriod = 250 * time.Millisecond +} + +func TestServerLoad(t *testing.T) { + zc := zap.NewProductionConfig() + zc.Level = zap.NewAtomicLevelAt(testerLogLevel) + z, err := zc.Build() + assert.NoError(t, err, "logger created") + zlogger := zapr.NewLogger(z) + log := zlogger.WithName("tester") + + logger := logger.NewLoggerFactory(stunnerLogLevel) + testLog := logger.NewLogger("test") + + // suppress deletions + server.SuppressConfigDeletion = true + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testCDSAddr := getRandCDSAddr() + testLog.Debugf("create server on %s", testCDSAddr) + srv := server.New(testCDSAddr, nil, log) + assert.NotNil(t, srv, "server") + err = srv.Start(ctx) + assert.NoError(t, err, "start") + + time.Sleep(20 * time.Millisecond) + + testLog.Debug("create client") + client1, err := client.New(testCDSAddr, "ns1/gw1", logger) + assert.NoError(t, err, "client 1") + client2, err := client.New(testCDSAddr, "ns1/gw2", logger) + assert.NoError(t, err, "client 2") + // nonexistent + client3, err := client.New(testCDSAddr, "ns1/gw3", logger) + 
assert.NoError(t, err, "client 3") + + testLog.Debug("load: error") + c, err := client1.Load() + assert.Error(t, err, "load") + assert.Nil(t, c, "conf") + c, err = client2.Load() + assert.Error(t, err, "load") + assert.Nil(t, c, "conf") + c, err = client3.Load() + assert.Error(t, err, "load") + assert.Nil(t, c, "conf") + + c1 := testConfig("ns1/gw1", "realm1") + c2 := testConfig("ns1/gw2", "realm1") + err = srv.UpdateConfig([]server.Config{c1, c2}) + assert.NoError(t, err, "update") + + cs := srv.GetConfigStore().Snapshot() + assert.Len(t, cs, 2, "snapshot len") + sc1 := srv.GetConfigStore().Get("ns1/gw1") + assert.NotNil(t, sc1, "get 1") + assert.NoError(t, sc1.Validate(), "valid") // loaders validate + assert.True(t, c1.Config.DeepEqual(sc1), "deepeq") + sc2 := srv.GetConfigStore().Get("ns1/gw2") + assert.NotNil(t, sc2, "get 2") + assert.NoError(t, sc2.Validate(), "valid") // loaders validate + assert.True(t, c2.Config.DeepEqual(sc2), "deepeq") + sc3 := srv.GetConfigStore().Get("ns1/gw3") + assert.Nil(t, sc3, "get 3") + + testLog.Debug("load: config ok") + c, err = client1.Load() + assert.NoError(t, err, "load") + assert.True(t, c.DeepEqual(sc1), "deepeq") + c, err = client2.Load() + assert.NoError(t, err, "load") + assert.True(t, c.DeepEqual(sc2), "deepeq") + c, err = client3.Load() + assert.Error(t, err, "load") + assert.Nil(t, c, "conf") + + testLog.Debug("remove 2 configs") + err = srv.UpdateConfig([]server.Config{}) + assert.NoError(t, err, "update") + + cs = srv.GetConfigStore().Snapshot() + assert.Len(t, cs, 0, "snapshot len") + + testLog.Debug("load: no result") + _, err = client1.Load() + assert.Error(t, err, "load") + _, err = client2.Load() + assert.Error(t, err, "load") + _, err = client3.Load() + assert.Error(t, err, "load") + assert.Nil(t, c, "conf") +} + +func TestServerPoll(t *testing.T) { + zc := zap.NewProductionConfig() + zc.Level = zap.NewAtomicLevelAt(testerLogLevel) + z, err := zc.Build() + assert.NoError(t, err, "logger created") + zlogger := zapr.NewLogger(z) + log := zlogger.WithName("tester") + + logger := logger.NewLoggerFactory(stunnerLogLevel) + testLog := logger.NewLogger("test") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testCDSAddr := getRandCDSAddr() + testLog.Debugf("create server on %s", testCDSAddr) + srv := server.New(testCDSAddr, nil, log) + assert.NotNil(t, srv, "server") + err = srv.Start(ctx) + assert.NoError(t, err, "start") + + time.Sleep(20 * time.Millisecond) + + testLog.Debug("create client") + client1, err := client.New(testCDSAddr, "ns1/gw1", logger) + assert.NoError(t, err, "client 1") + client2, err := client.New(testCDSAddr, "ns1/gw2", logger) + assert.NoError(t, err, "client 2") + client3, err := client.New(testCDSAddr, "ns1/gw3", logger) + assert.NoError(t, err, "client 3") + + testLog.Debug("poll: no result") + ch1 := make(chan *stnrv1.StunnerConfig, 8) + defer close(ch1) + ch2 := make(chan *stnrv1.StunnerConfig, 8) + defer close(ch2) + ch3 := make(chan *stnrv1.StunnerConfig, 8) + defer close(ch3) + + go func() { + err = client1.Poll(ctx, ch1, false) + assert.NoError(t, err, "client 1 cancelled") + }() + go func() { + err = client2.Poll(ctx, ch2, false) + assert.NoError(t, err, "client 2 cancelled") + }() + go func() { + err = client3.Poll(ctx, ch2, false) + assert.NoError(t, err, "client 3 cancelled") + }() + + s := watchConfig(ch1, 10*time.Millisecond) + assert.Nil(t, s, "config 1") + s = watchConfig(ch2, 10*time.Millisecond) + assert.Nil(t, s, "config 2") + s = watchConfig(ch3, 
10*time.Millisecond) + assert.Nil(t, s, "config 3") + + testLog.Debug("poll: one result") + c1 := testConfig("ns1/gw1", "realm1") + c2 := testConfig("ns1/gw2", "realm1") + err = srv.UpdateConfig([]server.Config{c1, c2}) + assert.NoError(t, err, "update") + + cs := srv.GetConfigStore().Snapshot() + assert.Len(t, cs, 2, "snapshot len") + sc1 := srv.GetConfigStore().Get("ns1/gw1") + assert.NotNil(t, sc1, "get 1") + assert.NoError(t, sc1.Validate(), "valid") // loaders validate + assert.True(t, c1.Config.DeepEqual(sc1), "deepeq") + sc2 := srv.GetConfigStore().Get("ns1/gw2") + assert.NotNil(t, sc2, "get 2") + assert.NoError(t, sc2.Validate(), "valid") // loaders validate + assert.True(t, c2.Config.DeepEqual(sc2), "deepeq") + sc3 := srv.GetConfigStore().Get("ns1/gw3") + assert.Nil(t, sc3, "get 3") + + // poll should have fed the configs to the channels + s = watchConfig(ch1, 500*time.Millisecond) + assert.NotNil(t, s, "config 1") + assert.True(t, s.DeepEqual(sc1), "deepeq 1") + s = watchConfig(ch2, 500*time.Millisecond) + assert.NotNil(t, s, "config 2") + assert.True(t, s.DeepEqual(sc2), "deepeq 2") + s = watchConfig(ch3, 500*time.Millisecond) + assert.Nil(t, s, "config 3") + + testLog.Debug("remove 2 configs") + err = srv.UpdateConfig([]server.Config{}) + assert.NoError(t, err, "update") + + cs = srv.GetConfigStore().Snapshot() + assert.Len(t, cs, 0, "snapshot len") + + testLog.Debug("poll: zeroconfig") + s = watchConfig(ch1, 10*time.Millisecond) + assert.Nil(t, s, "config") + s = watchConfig(ch2, 10*time.Millisecond) + assert.Nil(t, s, "config") + s = watchConfig(ch3, 10*time.Millisecond) + assert.Nil(t, s, "config") +} + +func TestServerWatch(t *testing.T) { + zc := zap.NewProductionConfig() + zc.Level = zap.NewAtomicLevelAt(testerLogLevel) + z, err := zc.Build() + assert.NoError(t, err, "logger created") + zlogger := zapr.NewLogger(z) + log := zlogger.WithName("tester") + + logger := logger.NewLoggerFactory(stunnerLogLevel) + testLog := logger.NewLogger("test") + + serverCtx, serverCancel := context.WithCancel(context.Background()) + + testCDSAddr := getRandCDSAddr() + testLog.Debugf("create server on %s", testCDSAddr) + srv := server.New(testCDSAddr, nil, log) + assert.NotNil(t, srv, "server") + err = srv.Start(serverCtx) + assert.NoError(t, err, "start") + + testLog.Debug("create client") + client1, err := client.New(testCDSAddr, "ns1/gw1", logger) + assert.NoError(t, err, "client 1") + client2, err := client.New(testCDSAddr, "ns1/gw2", logger) + assert.NoError(t, err, "client 2") + client3, err := client.New(testCDSAddr, "ns1/gw3", logger) + assert.NoError(t, err, "client 3") + + testLog.Debug("watch: no result") + ch1 := make(chan *stnrv1.StunnerConfig, 8) + defer close(ch1) + ch2 := make(chan *stnrv1.StunnerConfig, 8) + defer close(ch2) + ch3 := make(chan *stnrv1.StunnerConfig, 8) + defer close(ch3) + + clientCtx, clientCancel := context.WithCancel(context.Background()) + defer clientCancel() + err = client1.Watch(clientCtx, ch1, false) + assert.NoError(t, err, "client 1 watch") + err = client2.Watch(clientCtx, ch2, false) + assert.NoError(t, err, "client 2 watch") + err = client3.Watch(clientCtx, ch3, false) + assert.NoError(t, err, "client 3 watch") + + s := watchConfig(ch1, 150*time.Millisecond) + assert.Nil(t, s, "config 1") + s = watchConfig(ch2, 150*time.Millisecond) + assert.Nil(t, s, "config 2") + s = watchConfig(ch3, 150*time.Millisecond) + assert.Nil(t, s, "config 3") + + testLog.Debug("poll: one result") + c1 := testConfig("ns1/gw1", "realm1") + c2 := testConfig("ns1/gw2", 
"realm1") + err = srv.UpdateConfig([]server.Config{c1, c2}) + assert.NoError(t, err, "update") + + cs := srv.GetConfigStore().Snapshot() + assert.Len(t, cs, 2, "snapshot len") + sc1 := srv.GetConfigStore().Get("ns1/gw1") + assert.NotNil(t, sc1, "get 1") + assert.NoError(t, sc1.Validate(), "valid") // loaders validate + assert.True(t, c1.Config.DeepEqual(sc1), "deepeq") + sc2 := srv.GetConfigStore().Get("ns1/gw2") + assert.NotNil(t, sc2, "get 2") + assert.NoError(t, sc2.Validate(), "valid") // loaders validate + assert.True(t, c1.Config.DeepEqual(sc1), "deepeq") + sc3 := srv.GetConfigStore().Get("ns1/gw3") + assert.Nil(t, sc3, "get 3") + + // poll should have fed the configs to the channels + s = watchConfig(ch1, 500*time.Millisecond) + assert.NotNil(t, s, "config 1") + assert.True(t, s.DeepEqual(sc1), "deepeq 1") + s = watchConfig(ch2, 500*time.Millisecond) + assert.NotNil(t, s, "config 2") + assert.True(t, s.DeepEqual(sc2), "deepeq 2") + s = watchConfig(ch3, 500*time.Millisecond) + assert.Nil(t, s, "config 3") + + testLog.Debug("update: conf 1 and conf 3") + c1 = testConfig("ns1/gw1", "realm-new") + c3 := testConfig("ns1/gw3", "realm3") + err = srv.UpdateConfig([]server.Config{c1, c2, c3}) + assert.NoError(t, err, "update") + + cs = srv.GetConfigStore().Snapshot() + assert.Len(t, cs, 3, "snapshot len") + sc1 = srv.GetConfigStore().Get("ns1/gw1") + assert.NotNil(t, sc1, "get 1") + assert.NoError(t, sc1.Validate(), "valid") // loaders validate + assert.True(t, c1.Config.DeepEqual(sc1), "deepeq 1") + sc2 = srv.GetConfigStore().Get("ns1/gw2") + assert.NotNil(t, sc2, "get 2") + assert.NoError(t, sc2.Validate(), "valid") // loaders validate + assert.True(t, c2.Config.DeepEqual(sc2), "deepeq 2") + sc3 = srv.GetConfigStore().Get("ns1/gw3") + assert.NotNil(t, sc3, "get 3") + assert.NoError(t, sc3.Validate(), "valid") // loaders validate + assert.True(t, c3.Config.DeepEqual(sc3), "deepeq 3") + + // poll should have fed the configs to the channels + s = watchConfig(ch1, 500*time.Millisecond) + assert.NotNil(t, s, "config 1") + assert.True(t, s.DeepEqual(sc1), "deepeq 1") + s = watchConfig(ch2, 500*time.Millisecond) + assert.Nil(t, s, "config 2") + s = watchConfig(ch3, 500*time.Millisecond) + assert.NotNil(t, s, "config 3") + assert.True(t, s.DeepEqual(sc3), "deepeq 3") + + testLog.Debug("restarting server") + serverCancel() + // let the server shut down and restart + time.Sleep(50 * time.Millisecond) + serverCtx, serverCancel = context.WithCancel(context.Background()) + defer serverCancel() + srv = server.New(testCDSAddr, nil, log) + assert.NotNil(t, srv, "server") + err = srv.Start(serverCtx) + assert.NoError(t, err, "start") + err = srv.UpdateConfig([]server.Config{c1, c2, c3}) + assert.NoError(t, err, "update") + + // obtain the initial configs: this may take a while + s = watchConfig(ch1, 5000*time.Millisecond) + assert.NotNil(t, s, "config 1") + assert.True(t, s.DeepEqual(sc1), "deepeq 1") + s = watchConfig(ch2, 500*time.Millisecond) + assert.NotNil(t, s, "config 2") + assert.True(t, s.DeepEqual(sc2), "deepeq 2") + s = watchConfig(ch3, 500*time.Millisecond) + assert.NotNil(t, s, "config 3") + assert.True(t, s.DeepEqual(sc3), "deepeq 3") + + testLog.Debug("remove 1 config (the 2nd)") + err = srv.UpdateConfig([]server.Config{c1, c3}) + assert.NoError(t, err, "update") + + cs = srv.GetConfigStore().Snapshot() + assert.Len(t, cs, 2, "snapshot len") + sc1 = srv.GetConfigStore().Get("ns1/gw1") + assert.NotNil(t, sc1, "get 1") + assert.NoError(t, sc1.Validate(), "valid") // loaders validate + 
assert.True(t, c1.Config.DeepEqual(sc1), "deepeq 1") + sc2 = srv.GetConfigStore().Get("ns1/gw2") + assert.Nil(t, sc2, "get 2") + sc3 = srv.GetConfigStore().Get("ns1/gw3") + assert.NotNil(t, sc3, "get 3") + assert.NoError(t, sc3.Validate(), "valid") // loaders validate + assert.True(t, c3.Config.DeepEqual(sc3), "deepeq 3") + + s = watchConfig(ch1, 50*time.Millisecond) + assert.Nil(t, s, "config 1") + s = watchConfig(ch2, 50*time.Millisecond) + assert.Nil(t, s, "config 2") + s = watchConfig(ch3, 50*time.Millisecond) + assert.Nil(t, s, "config 3") + + testLog.Debug("remove remaining 2 configs") + err = srv.UpdateConfig([]server.Config{}) + assert.NoError(t, err, "update") + + cs = srv.GetConfigStore().Snapshot() + assert.Len(t, cs, 0, "snapshot len") + + testLog.Debug("poll: no config") + s = watchConfig(ch1, 10*time.Millisecond) + assert.Nil(t, s, "config") + s = watchConfig(ch2, 10*time.Millisecond) + assert.Nil(t, s, "config") + s = watchConfig(ch3, 10*time.Millisecond) + assert.Nil(t, s, "config") +} + +// config already available when watcher joins +func TestServerWatchBootstrap(t *testing.T) { + zc := zap.NewProductionConfig() + zc.Level = zap.NewAtomicLevelAt(testerLogLevel) + z, err := zc.Build() + assert.NoError(t, err, "logger created") + zlogger := zapr.NewLogger(z) + log := zlogger.WithName("tester") + + logger := logger.NewLoggerFactory(stunnerLogLevel) + testLog := logger.NewLogger("test") + + serverCtx, serverCancel := context.WithCancel(context.Background()) + defer serverCancel() + + testCDSAddr := getRandCDSAddr() + testLog.Debugf("create server on %s", testCDSAddr) + srv := server.New(testCDSAddr, nil, log) + assert.NotNil(t, srv, "server") + err = srv.Start(serverCtx) + assert.NoError(t, err, "start") + + testLog.Debug("create client") + client1, err := client.New(testCDSAddr, "ns1/gw1", logger) + assert.NoError(t, err, "client 1") + + testLog.Debug("bootstrap") + c1 := testConfig("ns1/gw1", "realm1") + c2 := testConfig("ns1/gw2", "realm1") + err = srv.UpdateConfig([]server.Config{c1, c2}) + assert.NoError(t, err, "update") + + cs := srv.GetConfigStore().Snapshot() + assert.Len(t, cs, 2, "snapshot len") + sc1 := srv.GetConfigStore().Get("ns1/gw1") + assert.NotNil(t, sc1, "get 1") + assert.NoError(t, sc1.Validate(), "valid") // loaders validate + assert.True(t, c1.Config.DeepEqual(sc1), "deepeq") + sc2 := srv.GetConfigStore().Get("ns1/gw2") + assert.NotNil(t, sc2, "get 2") + assert.NoError(t, sc2.Validate(), "valid") // loaders validate + assert.True(t, c1.Config.DeepEqual(sc1), "deepeq") + sc3 := srv.GetConfigStore().Get("ns1/gw3") + assert.Nil(t, sc3, "get 3") + + testLog.Debug("watch: 1 result") + ch1 := make(chan *stnrv1.StunnerConfig, 8) + defer close(ch1) + + clientCtx, clientCancel := context.WithCancel(context.Background()) + defer clientCancel() + err = client1.Watch(clientCtx, ch1, false) + assert.NoError(t, err, "client 1 watch") + + s := watchConfig(ch1, 1500*time.Millisecond) + assert.NotNil(t, s, "config 1") + assert.True(t, s.DeepEqual(sc1), "deepeq 1") + // only 1 config + s = watchConfig(ch1, 150*time.Millisecond) + assert.Nil(t, s, "config 1") + + testLog.Debug("update: conf 1 and conf 2") + c1 = testConfig("ns1/gw1", "realm-new") + c2 = testConfig("ns1/gw2", "realm3") + err = srv.UpdateConfig([]server.Config{c1, c2}) + assert.NoError(t, err, "update") + + cs = srv.GetConfigStore().Snapshot() + assert.Len(t, cs, 2, "snapshot len") + sc1 = srv.GetConfigStore().Get("ns1/gw1") + assert.NotNil(t, sc1, "get 1") + assert.NoError(t, sc1.Validate(), "valid") // 
loaders validate + assert.True(t, c1.Config.DeepEqual(sc1), "deepeq 1") + sc2 = srv.GetConfigStore().Get("ns1/gw2") + assert.NotNil(t, sc2, "get 2") + assert.NoError(t, sc2.Validate(), "valid") // loaders validate + assert.True(t, c2.Config.DeepEqual(sc2), "deepeq 2") + + s = watchConfig(ch1, 500*time.Millisecond) + assert.NotNil(t, s, "config 1") + assert.True(t, s.DeepEqual(sc1), "deepeq 1") + + testLog.Debug("remove 2 configs") + err = srv.UpdateConfig([]server.Config{}) + assert.NoError(t, err, "update") + + cs = srv.GetConfigStore().Snapshot() + assert.Len(t, cs, 0, "snapshot len") + + testLog.Debug("poll: no config") + s = watchConfig(ch1, 10*time.Millisecond) + assert.Nil(t, s, "config 1") +} + +// test APIs +func TestServerAPI(t *testing.T) { + zc := zap.NewProductionConfig() + zc.Level = zap.NewAtomicLevelAt(testerLogLevel) + z, err := zc.Build() + assert.NoError(t, err, "logger created") + zlogger := zapr.NewLogger(z) + log := zlogger.WithName("tester") + + logger := logger.NewLoggerFactory(stunnerLogLevel) + testLog := logger.NewLogger("test") + + serverCtx, serverCancel := context.WithCancel(context.Background()) + + testCDSAddr := getRandCDSAddr() + testLog.Debugf("create server on %s", testCDSAddr) + srv := server.New(testCDSAddr, nil, log) + assert.NotNil(t, srv, "server") + err = srv.Start(serverCtx) + assert.NoError(t, err, "start") + + testLog.Debug("create client") + client1, err := client.NewAllConfigsAPI(testCDSAddr, logger.NewLogger("all-config-client")) + assert.NoError(t, err, "client 1") + client2, err := client.NewConfigsNamespaceAPI(testCDSAddr, "ns1", logger.NewLogger("ns-config-client-ns1")) + assert.NoError(t, err, "client 2") + client3, err := client.NewConfigsNamespaceAPI(testCDSAddr, "ns2", logger.NewLogger("ns-config-client-ns2")) + assert.NoError(t, err, "client 3") + client4, err := client.NewConfigNamespaceNameAPI(testCDSAddr, "ns1", "gw1", logger.NewLogger("gw-config-client")) + assert.NoError(t, err, "client 4") + + testLog.Debug("watch: no result") + ch1 := make(chan *stnrv1.StunnerConfig, 8) + defer close(ch1) + ch2 := make(chan *stnrv1.StunnerConfig, 8) + defer close(ch2) + ch3 := make(chan *stnrv1.StunnerConfig, 8) + defer close(ch3) + ch4 := make(chan *stnrv1.StunnerConfig, 8) + defer close(ch4) + + clientCtx, clientCancel := context.WithCancel(context.Background()) + defer clientCancel() + err = client1.Watch(clientCtx, ch1, false) + assert.NoError(t, err, "client 1 watch") + err = client2.Watch(clientCtx, ch2, false) + assert.NoError(t, err, "client 2 watch") + err = client3.Watch(clientCtx, ch3, false) + assert.NoError(t, err, "client 3 watch") + err = client4.Watch(clientCtx, ch4, false) + assert.NoError(t, err, "client 4 watch") + + s := watchConfig(ch1, 50*time.Millisecond) + assert.Nil(t, s, "config 1") + s = watchConfig(ch2, 50*time.Millisecond) + assert.Nil(t, s, "config 2") + s = watchConfig(ch3, 50*time.Millisecond) + assert.Nil(t, s, "config 3") + s = watchConfig(ch4, 50*time.Millisecond) + assert.Nil(t, s, "config 4") + + testLog.Debug("--------------------------------") + testLog.Debug("Update1: ns1/gw1 + ns2/gw1 ") + testLog.Debug("--------------------------------") + testLog.Debug("poll: one result") + c1 := testConfig("ns1/gw1", "realm1") + c2 := testConfig("ns2/gw1", "realm1") + err = srv.UpdateConfig([]server.Config{c1, c2}) + assert.NoError(t, err, "update") + + cs := srv.GetConfigStore().Snapshot() + assert.Len(t, cs, 2, "snapshot len") + sc1 := srv.GetConfigStore().Get("ns1/gw1") + assert.NotNil(t, sc1, "get 1") + 
assert.NoError(t, sc1.Validate(), "valid") // loaders validate + assert.True(t, c1.Config.DeepEqual(sc1), "deepeq 1") + sc2 := srv.GetConfigStore().Get("ns2/gw1") + assert.NotNil(t, sc2, "get 2") + assert.NoError(t, sc2.Validate(), "valid") // loaders validate + assert.True(t, c2.Config.DeepEqual(sc2), "deepeq 2") + + testLog.Debug("load") + + // all-configs should result sc1 and sc2 + scs, err := client1.Get(clientCtx) + assert.NoError(t, err, "load 1") + assert.Len(t, scs, 2, "load 1") + co := findConfById(scs, "ns1/gw1") + assert.NotNil(t, co, "c1") + assert.True(t, co.DeepEqual(sc1), "deepeq") + co = findConfById(scs, "ns2/gw1") + assert.NotNil(t, co, "c2") + assert.True(t, co.DeepEqual(sc2), "deepeq") + + // ns1 client should yield 1 config + scs, err = client2.Get(clientCtx) + assert.NoError(t, err, "load 2") + assert.Len(t, scs, 1, "load 2") + assert.True(t, scs[0].DeepEqual(sc1), "deepeq") + + // ns2 client should yield 1 config + scs, err = client3.Get(clientCtx) + assert.NoError(t, err, "load 3") + assert.Len(t, scs, 1, "load 3") + assert.True(t, scs[0].DeepEqual(sc2), "deepeq") + + // ns1/gw1 client should yield 1 config + scs, err = client4.Get(clientCtx) + assert.NoError(t, err, "load 4") + assert.Len(t, scs, 1, "load 4") + assert.True(t, scs[0].DeepEqual(sc1), "deepeq") + + // two configs from client1 watch + s1 := watchConfig(ch1, 50*time.Millisecond) + assert.NotNil(t, s1) + s2 := watchConfig(ch1, 50*time.Millisecond) + assert.NotNil(t, s2) + s3 := watchConfig(ch1, 50*time.Millisecond) + assert.Nil(t, s3) + lst := []*stnrv1.StunnerConfig{s1, s2} + assert.NotNil(t, findConfById(lst, "ns1/gw1")) + assert.True(t, findConfById(lst, "ns1/gw1").DeepEqual(sc1), "deepeq 1") + assert.NotNil(t, findConfById(lst, "ns2/gw1")) + assert.True(t, findConfById(lst, "ns2/gw1").DeepEqual(sc2), "deepeq 1") + + // 1 config from client2 watch + s = watchConfig(ch2, 50*time.Millisecond) + assert.NotNil(t, s) + assert.True(t, s.DeepEqual(sc1)) + s = watchConfig(ch2, 50*time.Millisecond) + assert.Nil(t, s) + + // 1 config from client3 watch + s = watchConfig(ch3, 50*time.Millisecond) + assert.NotNil(t, s, "config 3") + assert.True(t, s.DeepEqual(sc2)) + s = watchConfig(ch3, 50*time.Millisecond) + assert.Nil(t, s) + + // 1 config from client4 watch + s = watchConfig(ch4, 50*time.Millisecond) + assert.NotNil(t, s) + assert.True(t, s.DeepEqual(sc1)) + s = watchConfig(ch4, 50*time.Millisecond) + assert.Nil(t, s) + + testLog.Debug("--------------------------------") + testLog.Debug("Update1: ns1/gw1 + ns1/gw2 ") + testLog.Debug("--------------------------------") + testLog.Debug("update: conf 1 and conf 3") + c1 = testConfig("ns1/gw1", "realm-new") + c3 := testConfig("ns1/gw2", "realm3") + err = srv.UpdateConfig([]server.Config{c1, c2, c3}) + assert.NoError(t, err, "update") + + cs = srv.GetConfigStore().Snapshot() + assert.Len(t, cs, 3, "snapshot len") + sc1 = srv.GetConfigStore().Get("ns1/gw1") + assert.NotNil(t, sc1, "get 1") + assert.NoError(t, sc1.Validate(), "valid") // loaders validate + assert.True(t, c1.Config.DeepEqual(sc1), "deepeq") + sc2 = srv.GetConfigStore().Get("ns2/gw1") + assert.NotNil(t, sc2, "get 2") + assert.NoError(t, sc2.Validate(), "valid") // loaders validate + assert.True(t, c2.Config.DeepEqual(sc2), "deepeq") + sc3 := srv.GetConfigStore().Get("ns1/gw2") + assert.NotNil(t, sc3, "get 3") + assert.NoError(t, sc3.Validate(), "valid") // loaders validate + assert.True(t, c3.Config.DeepEqual(sc3), "deepeq") + + // all-configs should result sc1 and sc2 and sc3 + scs, err = 
client1.Get(clientCtx) + assert.NoError(t, err, "load 1") + assert.Len(t, scs, 3, "load 1") + co = findConfById(scs, "ns1/gw1") + assert.NotNil(t, co, "c1") + assert.True(t, co.DeepEqual(sc1), "deepeq") + co = findConfById(scs, "ns2/gw1") + assert.NotNil(t, co, "c2") + assert.True(t, co.DeepEqual(sc2), "deepeq") + co = findConfById(scs, "ns1/gw2") + assert.NotNil(t, co, "c3") + assert.True(t, co.DeepEqual(sc3), "deepeq") + + // ns1 client should yield 2 configs + scs, err = client2.Get(clientCtx) + assert.NoError(t, err, "load 2") + assert.Len(t, scs, 2, "load 2") + assert.NotNil(t, findConfById(scs, "ns1/gw1")) + assert.True(t, findConfById(scs, "ns1/gw1").DeepEqual(sc1), "deepeq") + assert.NotNil(t, findConfById(scs, "ns1/gw2")) + assert.True(t, findConfById(scs, "ns1/gw2").DeepEqual(sc3), "deepeq") + + // ns2 client should yield 1 config + scs, err = client3.Get(clientCtx) + assert.NoError(t, err, "load 3") + assert.Len(t, scs, 1, "load 3") + assert.True(t, scs[0].DeepEqual(sc2), "deepeq") + + // ns1/gw1 client should yield 1 config + scs, err = client4.Get(clientCtx) + assert.NoError(t, err, "load 4") + assert.Len(t, scs, 1, "load 4") + assert.True(t, scs[0].DeepEqual(sc1), "deepeq") + + // 2 configs from client1 watch + s1 = watchConfig(ch1, 1500*time.Millisecond) + assert.NotNil(t, s1) + s2 = watchConfig(ch1, 150*time.Millisecond) + assert.NotNil(t, s2) + s3 = watchConfig(ch1, 150*time.Millisecond) + assert.Nil(t, s3) + lst = []*stnrv1.StunnerConfig{s1, s2} + assert.NotNil(t, findConfById(lst, "ns1/gw1")) + assert.True(t, findConfById(lst, "ns1/gw1").DeepEqual(sc1), "deepeq") + assert.NotNil(t, findConfById(lst, "ns1/gw2")) + assert.True(t, findConfById(lst, "ns1/gw2").DeepEqual(sc3), "deepeq") + + // 2 configs from client2 watch + s1 = watchConfig(ch2, 1500*time.Millisecond) + assert.NotNil(t, s1) + s2 = watchConfig(ch2, 150*time.Millisecond) + assert.NotNil(t, s2) + s3 = watchConfig(ch2, 50*time.Millisecond) + assert.Nil(t, s3) + lst = []*stnrv1.StunnerConfig{s1, s2} + assert.NotNil(t, findConfById(lst, "ns1/gw1")) + assert.True(t, findConfById(lst, "ns1/gw1").DeepEqual(sc1), "deepeq") + assert.NotNil(t, findConfById(lst, "ns1/gw2")) + assert.True(t, findConfById(lst, "ns1/gw2").DeepEqual(sc3), "deepeq") + + // 0 config from client3 watch + s = watchConfig(ch3, 50*time.Millisecond) + assert.Nil(t, s, "config 3") + + // 1 config from client4 watch + s = watchConfig(ch4, 50*time.Millisecond) + assert.NotNil(t, s) + assert.True(t, s.DeepEqual(sc1), "deepeq") + + testLog.Debug("--------------------------------") + testLog.Debug("Restart + Update1: ns1/gw1 + ns2/gw1 + ns1/gw2") + testLog.Debug("--------------------------------") + testLog.Debug("restarting server") + serverCancel() + // let the server shut down and restart + time.Sleep(50 * time.Millisecond) + serverCtx, serverCancel = context.WithCancel(context.Background()) + defer serverCancel() + srv = server.New(testCDSAddr, nil, log) + assert.NotNil(t, srv, "server") + err = srv.Start(serverCtx) + assert.NoError(t, err, "start") + err = srv.UpdateConfig([]server.Config{c1, c2, c3}) + assert.NoError(t, err, "update") + + cs = srv.GetConfigStore().Snapshot() + assert.Len(t, cs, 3, "snapshot len") + sc1 = srv.GetConfigStore().Get("ns1/gw1") + assert.NotNil(t, sc1, "get 1") + assert.NoError(t, sc1.Validate(), "valid") // loaders validate + assert.True(t, c1.Config.DeepEqual(sc1), "deepeq") + sc2 = srv.GetConfigStore().Get("ns2/gw1") + assert.NotNil(t, sc2, "get 2") + assert.NoError(t, sc2.Validate(), "valid") // loaders validate + 
assert.True(t, c2.Config.DeepEqual(sc2), "deepeq")
+	sc3 = srv.GetConfigStore().Get("ns1/gw2")
+	assert.NotNil(t, sc3, "get 3")
+	assert.NoError(t, sc3.Validate(), "valid") // loaders validate
+	assert.True(t, c3.Config.DeepEqual(sc3), "deepeq")
+
+	// all-configs should result sc1 and sc2 and sc3
+	scs, err = client1.Get(clientCtx)
+	assert.NoError(t, err, "load 1")
+	assert.Len(t, scs, 3, "load 1")
+	co = findConfById(scs, "ns1/gw1")
+	assert.NotNil(t, co, "c1")
+	assert.True(t, co.DeepEqual(sc1), "deepeq")
+	co = findConfById(scs, "ns2/gw1")
+	assert.NotNil(t, co, "c2")
+	assert.True(t, co.DeepEqual(sc2), "deepeq")
+	co = findConfById(scs, "ns1/gw2")
+	assert.NotNil(t, co, "c3")
+	assert.True(t, co.DeepEqual(sc3), "deepeq")
+
+	// ns1 client should yield 2 configs
+	scs, err = client2.Get(clientCtx)
+	assert.NoError(t, err, "load 2")
+	assert.Len(t, scs, 2, "load 2")
+	assert.NotNil(t, findConfById(scs, "ns1/gw1"))
+	assert.True(t, findConfById(scs, "ns1/gw1").DeepEqual(sc1), "deepeq")
+	assert.NotNil(t, findConfById(scs, "ns1/gw2"))
+	assert.True(t, findConfById(scs, "ns1/gw2").DeepEqual(sc3), "deepeq")
+
+	// ns2 client should yield 1 config
+	scs, err = client3.Get(clientCtx)
+	assert.NoError(t, err, "load 3")
+	assert.Len(t, scs, 1, "load 3")
+	assert.True(t, scs[0].DeepEqual(sc2), "deepeq")
+
+	// ns1/gw1 client should yield 1 config
+	scs, err = client4.Get(clientCtx)
+	assert.NoError(t, err, "load 4")
+	assert.Len(t, scs, 1, "load 4")
+	assert.True(t, scs[0].DeepEqual(sc1), "deepeq")
+
+	// 3 configs from client1 watch
+	s1 = watchConfig(ch1, 5000*time.Millisecond)
+	assert.NotNil(t, s1)
+	s2 = watchConfig(ch1, 100*time.Millisecond)
+	assert.NotNil(t, s2)
+	s3 = watchConfig(ch1, 100*time.Millisecond)
+	assert.NotNil(t, s3)
+	s4 := watchConfig(ch1, 100*time.Millisecond)
+	assert.Nil(t, s4)
+	lst = []*stnrv1.StunnerConfig{s1, s2, s3}
+	assert.NotNil(t, findConfById(lst, "ns1/gw1"))
+	assert.True(t, findConfById(lst, "ns1/gw1").DeepEqual(sc1), "deepeq")
+	assert.NotNil(t, findConfById(lst, "ns2/gw1"))
+	assert.True(t, findConfById(lst, "ns2/gw1").DeepEqual(sc2), "deepeq")
+	assert.NotNil(t, findConfById(lst, "ns1/gw2"))
+	assert.True(t, findConfById(lst, "ns1/gw2").DeepEqual(sc3), "deepeq")
+
+	// 2 configs from client2 watch
+	s1 = watchConfig(ch2, 50*time.Millisecond)
+	assert.NotNil(t, s1)
+	s2 = watchConfig(ch2, 50*time.Millisecond)
+	assert.NotNil(t, s2)
+	s3 = watchConfig(ch2, 50*time.Millisecond)
+	assert.Nil(t, s3)
+	lst = []*stnrv1.StunnerConfig{s1, s2}
+	assert.NotNil(t, findConfById(lst, "ns1/gw1"))
+	assert.True(t, findConfById(lst, "ns1/gw1").DeepEqual(sc1), "deepeq")
+	assert.NotNil(t, findConfById(lst, "ns1/gw2"))
+	assert.True(t, findConfById(lst, "ns1/gw2").DeepEqual(sc3), "deepeq")
+
+	// 1 config from client3 watch
+	s = watchConfig(ch3, 50*time.Millisecond)
+	assert.NotNil(t, s, "config 3")
+	assert.True(t, s.DeepEqual(sc2))
+	s = watchConfig(ch3, 50*time.Millisecond)
+	assert.Nil(t, s)
+
+	// 1 config from client4 watch
+	s = watchConfig(ch4, 50*time.Millisecond)
+	assert.NotNil(t, s)
+	assert.True(t, s.DeepEqual(sc1))
+	s = watchConfig(ch4, 50*time.Millisecond)
+	assert.Nil(t, s)
+
+	// switch config deletions on
+	server.SuppressConfigDeletion = false
+
+	testLog.Debug("--------------------------------")
+	testLog.Debug("Update1: ns1/gw1 + ns3/gw1 ")
+	testLog.Debug("--------------------------------")
+	testLog.Debug("update: conf 1, remove conf 3, and add conf 4")
+	c1 = testConfig("ns1/gw1", "realm-newer")
+	c4 := testConfig("ns3/gw1", "realm4")
+	err = srv.UpdateConfig([]server.Config{c1, c2, c4})
+	assert.NoError(t, err, "update")
+
+	cs = srv.GetConfigStore().Snapshot()
+	assert.Len(t, cs, 3, "snapshot len")
+	sc1 = srv.GetConfigStore().Get("ns1/gw1")
+	assert.NotNil(t, sc1, "get 1")
+	assert.NoError(t, sc1.Validate(), "valid") // loaders validate
+	assert.True(t, c1.Config.DeepEqual(sc1), "deepeq")
+	sc2 = srv.GetConfigStore().Get("ns2/gw1")
+	assert.NotNil(t, sc2, "get 2")
+	assert.NoError(t, sc2.Validate(), "valid") // loaders validate
+	assert.True(t, c2.Config.DeepEqual(sc2), "deepeq")
+	sc4 := srv.GetConfigStore().Get("ns3/gw1")
+	assert.NotNil(t, sc4, "get 4")
+	assert.NoError(t, sc4.Validate(), "valid") // loaders validate
+	assert.True(t, c4.Config.DeepEqual(sc4), "deepeq")
+
+	// all-configs should result sc1 and sc2 and sc4
+	scs, err = client1.Get(clientCtx)
+	assert.NoError(t, err, "load 1")
+	assert.Len(t, scs, 3, "load 1")
+	co = findConfById(scs, "ns1/gw1")
+	assert.NotNil(t, co, "c1")
+	assert.True(t, co.DeepEqual(sc1), "deepeq")
+	co = findConfById(scs, "ns2/gw1")
+	assert.NotNil(t, co, "c2")
+	assert.True(t, co.DeepEqual(sc2), "deepeq")
+	co = findConfById(scs, "ns3/gw1")
+	assert.NotNil(t, co, "c4")
+	assert.True(t, co.DeepEqual(sc4), "deepeq")
+
+	// ns1 client should yield 1 config
+	scs, err = client2.Get(clientCtx)
+	assert.NoError(t, err, "load 2")
+	assert.Len(t, scs, 1, "load 2")
+	assert.True(t, scs[0].DeepEqual(sc1), "deepeq")
+
+	// ns2 client should yield 1 config
+	scs, err = client3.Get(clientCtx)
+	assert.NoError(t, err, "load 3")
+	assert.Len(t, scs, 1, "load 3")
+	assert.True(t, scs[0].DeepEqual(sc2), "deepeq")
+
+	// ns1/gw1 client should yield 1 config
+	scs, err = client4.Get(clientCtx)
+	assert.NoError(t, err, "load 4")
+	assert.Len(t, scs, 1, "load 4")
+	assert.True(t, scs[0].DeepEqual(sc1), "deepeq")
+
+	// 3 configs from client1 watch (2 updates + 1 delete)
+	s1 = watchConfig(ch1, 5000*time.Millisecond)
+	assert.NotNil(t, s1)
+	s2 = watchConfig(ch1, 500*time.Millisecond)
+	assert.NotNil(t, s2)
+	s3 = watchConfig(ch1, 500*time.Millisecond)
+	assert.NotNil(t, s3)
+	lst = []*stnrv1.StunnerConfig{s1, s2, s3}
+	assert.NotNil(t, findConfById(lst, "ns1/gw1"))
+	assert.True(t, findConfById(lst, "ns1/gw1").DeepEqual(sc1), "deepeq")
+	assert.NotNil(t, findConfById(lst, "ns3/gw1"))
+	assert.True(t, findConfById(lst, "ns3/gw1").DeepEqual(sc4), "deepeq")
+	assert.NotNil(t, findConfById(lst, "ns1/gw2"))
+	assert.True(t, client.IsConfigDeleted(findConfById(lst, "ns1/gw2")), "deepeq")
+
+	// 2 configs from client2 watch: 1 update + 1 delete (the removed config is never updated)
+	s1 = watchConfig(ch2, 50*time.Millisecond)
+	assert.NotNil(t, s1)
+	s2 = watchConfig(ch2, 50*time.Millisecond)
+	assert.NotNil(t, s2)
+	// we do not know the order
+	assert.True(t, s1.DeepEqual(sc1) || s2.DeepEqual(sc1), "config-deepeq")
+	assert.True(t, client.IsConfigDeleted(s1) || client.IsConfigDeleted(s2), "deleted") // deleted
+	// assert.True(t, s1.DeepEqual(sc1), "deepeq")
+	// assert.True(t, client.IsConfigDeleted(s2), "deepeq") // deleted!
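+
+	// Note on delete semantics: with server.SuppressConfigDeletion=false, a config that
+	// disappears from an update is pushed to watchers as a zero-config for that id, and
+	// client.IsConfigDeleted is the helper to tell the two cases apart. An illustrative
+	// consumer-side sketch (not part of this test):
+	//
+	//	for conf := range ch {
+	//		if client.IsConfigDeleted(conf) {
+	//			// the gateway is gone: tear down its dataplane
+	//			continue
+	//		}
+	//		// otherwise apply the updated config
+	//	}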
+ + // no config from client3 watch + s = watchConfig(ch3, 50*time.Millisecond) + assert.Nil(t, s, "config 3") + + // 1 config from client4 watch + s = watchConfig(ch4, 50*time.Millisecond) + assert.NotNil(t, s) + assert.True(t, s.DeepEqual(sc1), "deepeq") +} + +func TestClientReconnect(t *testing.T) { + zc := zap.NewProductionConfig() + zc.Level = zap.NewAtomicLevelAt(testerLogLevel) + z, err := zc.Build() + assert.NoError(t, err, "logger created") + zlogger := zapr.NewLogger(z) + log := zlogger.WithName("tester") + + logger := logger.NewLoggerFactory(stunnerLogLevel) + testLog := logger.NewLogger("test") + + // suppress deletions + server.SuppressConfigDeletion = true + + serverCtx, serverCancel := context.WithCancel(context.Background()) + defer serverCancel() + + testCDSAddr := getRandCDSAddr() + testLog.Debugf("create server on %s", testCDSAddr) + srv := server.New(testCDSAddr, nil, log) + assert.NotNil(t, srv, "server") + err = srv.Start(serverCtx) + assert.NoError(t, err, "start") + + testLog.Debug("create client") + client1, err := client.New(testCDSAddr, "ns1/gw1", logger) + assert.NoError(t, err, "client 1") + + testLog.Debug("watch: no result") + ch1 := make(chan *stnrv1.StunnerConfig, 8) + defer close(ch1) + + clientCtx, clientCancel := context.WithCancel(context.Background()) + defer clientCancel() + err = client1.Watch(clientCtx, ch1, false) + assert.NoError(t, err, "client 1 watch") + + s := watchConfig(ch1, 150*time.Millisecond) + assert.Nil(t, s, "config 1") + + testLog.Debug("update") + c1 := testConfig("ns1/gw1", "realm1") + err = srv.UpdateConfig([]server.Config{c1}) + assert.NoError(t, err, "update") + + cs := srv.GetConfigStore().Snapshot() + assert.Len(t, cs, 1, "snapshot len") + sc1 := srv.GetConfigStore().Get("ns1/gw1") + assert.NotNil(t, sc1, "get 1") + assert.NoError(t, sc1.Validate(), "valid") // loaders validate + assert.True(t, c1.Config.DeepEqual(sc1), "deepeq") + + // poll should have fed the config to the channels + s = watchConfig(ch1, 500*time.Millisecond) + assert.NotNil(t, s, "config 1") + assert.True(t, s.DeepEqual(sc1), "deepeq 1") + + log.Info("killing the connection of the watcher", "id", "ns1/gw1") + conns := srv.GetConnTrack() + assert.NotNil(t, conns) + snapshot := conns.Snapshot() + assert.Len(t, snapshot, 1) + connId := snapshot[0].Id() + srv.RemoveClient(connId) + + // after 2 pong-waits, client should have reconnected + time.Sleep(client.RetryPeriod) + time.Sleep(client.RetryPeriod) + + // watcher should receive its config + s = watchConfig(ch1, 1500*time.Millisecond) + assert.NotNil(t, s, "config 1") + assert.True(t, s.DeepEqual(sc1), "deepeq 1") +} + +// test server config update mechanism +func TestServerUpdate(t *testing.T) { + zc := zap.NewProductionConfig() + zc.Level = zap.NewAtomicLevelAt(testerLogLevel) + z, err := zc.Build() + assert.NoError(t, err, "logger created") + zlogger := zapr.NewLogger(z) + log := zlogger.WithName("tester") + + logger := logger.NewLoggerFactory(stunnerLogLevel) + testLog := logger.NewLogger("test") + + // suppress deletions + server.SuppressConfigDeletion = true + + serverCtx, serverCancel := context.WithCancel(context.Background()) + defer serverCancel() + + testCDSAddr := getRandCDSAddr() + testLog.Debugf("create server on %s", testCDSAddr) + srv := server.New(testCDSAddr, nil, log) + assert.NotNil(t, srv, "server") + err = srv.Start(serverCtx) + assert.NoError(t, err, "start") + + oldC, err := 
client.ParseConfig([]byte(`{"version":"v1","admin":{"name":"stunner/udp-gateway","logLevel":"all:INFO","health-check":"http://:8086"},"auth":{"realm":"stunner.l7mp.io","type":"static","credentials":{"username":"a","password":"b"}},"listeners":[{"name": "stunner/udp-gateway/udp-listener", "protocol":"turn-udp","address":"0.0.0.0","port":3478,"routes":["stunner/media-plane"]}],"clusters":[]}`)) + assert.NoError(t, oldC.Validate(), "validate") + assert.NoError(t, err, "parse 1") + + testLog.Debug("upsert stunner/udp-gateway") + srv.UpsertConfig("stunner/udp-gateway", oldC) + + cs := srv.GetConfigStore().Snapshot() + assert.Len(t, cs, 1, "snapshot len") + sc1 := srv.GetConfigStore().Get("stunner/udp-gateway") + assert.NotNil(t, sc1, "get") + assert.NoError(t, sc1.Validate(), "valid") // loaders validate + assert.True(t, sc1.DeepEqual(oldC), "deepeq") + + // reapply - no change + testLog.Debug("re-apply stunner/udp-gateway") + srv.UpsertConfig("stunner/udp-gateway", oldC) + time.Sleep(20 * time.Millisecond) // let the server process + + cs = srv.GetConfigStore().Snapshot() + assert.Len(t, cs, 1, "snapshot len") + sc1 = srv.GetConfigStore().Get("stunner/udp-gateway") + assert.NotNil(t, sc1, "get") + assert.NoError(t, sc1.Validate(), "valid") // loaders validate + assert.True(t, sc1.DeepEqual(oldC), "deepeq") + + // add another config + tcpC, err := client.ParseConfig([]byte(`{"version":"v1","admin":{"name":"stunner/tcp-gateway","logLevel":"all:INFO","health-check":"http://:8086"},"auth":{"realm":"stunner.l7mp.io","type":"static","credentials":{"username":"a","password":"b"}},"listeners":[{"name": "stunner/tcp-gateway/tcp-listener", "protocol":"turn-tcp","address":"0.0.0.0","port":3478,"routes":["stunner/media-plane"]}],"clusters":[{"name":"stunner/media-plane", "type":"STATIC","protocol":"UDP","endpoints":["0.0.0.0/0"]}]}`)) + assert.NoError(t, tcpC.Validate(), "validate") + assert.NoError(t, err, "parse") + + testLog.Debug("upsert stunner/tcp-gateway") + srv.UpsertConfig("stunner/tcp-gateway", tcpC) + time.Sleep(20 * time.Millisecond) // let the server process + + cs = srv.GetConfigStore().Snapshot() + assert.Len(t, cs, 2, "snapshot len") + sc1 = srv.GetConfigStore().Get("stunner/udp-gateway") + assert.NotNil(t, sc1, "get") + assert.NoError(t, sc1.Validate(), "valid") // loaders validate + assert.True(t, sc1.DeepEqual(oldC), "deepeq") + sc2 := srv.GetConfigStore().Get("stunner/tcp-gateway") + assert.NotNil(t, sc2, "get") + assert.NoError(t, sc2.Validate(), "valid") // loaders validate + assert.True(t, sc2.DeepEqual(tcpC), "deepeq") + + // add a cluster + newC, err := client.ParseConfig([]byte(`{"version":"v1","admin":{"name":"stunner/udp-gateway","logLevel":"all:INFO","health-check":"http://:8086"},"auth":{"realm":"stunner.l7mp.io","type":"static","credentials":{"username":"a","password":"b"}},"listeners":[{"name": "stunner/udp-gateway/udp-listener", "protocol":"turn-udp","address":"0.0.0.0","port":3478,"routes":["stunner/media-plane"]}],"clusters":[{"name": "stunner/media-plane", "type":"STATIC","protocol":"UDP","endpoints":["0.0.0.0/0"]}]}`)) + assert.NoError(t, err, "parse 1") + assert.NoError(t, newC.Validate(), "validate") + assert.False(t, oldC.DeepEqual(newC), "deepeq") + + // process in a single go + testLog.Debug("modify stunner/udp-gateway using UpdateConfig") + err = srv.UpdateConfig([]server.Config{{Id: "stunner/udp-gateway", Config: newC}, {Id: "stunner/tcp-gateway", Config: tcpC}}) + assert.NoError(t, err, "parse 1") + + time.Sleep(20 * time.Millisecond) // let the server process 
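+
+	// UpdateConfig declares the full desired config set in a single call: entries that
+	// changed (stunner/udp-gateway, which gained a cluster) are updated in place,
+	// unchanged ones (stunner/tcp-gateway) are left alone, and any config missing from
+	// the list is dropped from the store (and, with SuppressConfigDeletion unset, would
+	// also be signaled to watchers as a delete). UpsertConfig, by contrast, only adds or
+	// overwrites a single entry.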
+ + cs = srv.GetConfigStore().Snapshot() + assert.Len(t, cs, 2, "snapshot len") + sc1 = srv.GetConfigStore().Get("stunner/udp-gateway") + assert.NotNil(t, sc1, "get") + assert.NoError(t, sc1.Validate(), "valid") // loaders validate + assert.True(t, sc1.DeepEqual(newC), "deepeq") + sc2 = srv.GetConfigStore().Get("stunner/tcp-gateway") + assert.NotNil(t, sc2, "get") + assert.NoError(t, sc2.Validate(), "valid") // loaders validate + assert.True(t, sc2.DeepEqual(tcpC), "deepeq") +} + +// Test various combinations of server-side "drop-delete" (server.SuppressConfigDeletion=true) and +// client-side "drop-delete" (client.Watch(..., suppressDelete=true)). +func TestDeleteConfigAPI(t *testing.T) { + zc := zap.NewProductionConfig() + zc.Level = zap.NewAtomicLevelAt(testerLogLevel) + z, err := zc.Build() + assert.NoError(t, err, "logger created") + zlogger := zapr.NewLogger(z) + log := zlogger.WithName("tester") + + logger := logger.NewLoggerFactory(stunnerLogLevel) + testLog := logger.NewLogger("test") + + saved := server.SuppressConfigDeletion + server.SuppressConfigDeletion = false + serverCtx, serverCancel := context.WithCancel(context.Background()) + defer serverCancel() + + testCDSAddr := getRandCDSAddr() + testLog.Debugf("create server on %s", testCDSAddr) + srv := server.New(testCDSAddr, nil, log) + assert.NotNil(t, srv, "server") + err = srv.Start(serverCtx) + assert.NoError(t, err, "start") + + testLog.Debug("create client") + c, err := client.New(testCDSAddr, "ns1/gw1", logger) + assert.NoError(t, err, "client") + + ch := make(chan *stnrv1.StunnerConfig, 8) + defer close(ch) + + for _, testCase := range []struct { + name string + serverDropDel, clientDropDel bool + tester func(t *testing.T) + }{ + { + name: "server sends delete - client handles delete", + serverDropDel: false, + clientDropDel: false, + tester: func(t *testing.T) { + conf := watchConfig(ch, 50*time.Millisecond) + assert.NotNil(t, conf, "config") + assert.True(t, client.IsConfigDeleted(conf)) + }, + }, + { + name: "server suppresses delete - client handles delete", + serverDropDel: true, + clientDropDel: false, + tester: func(t *testing.T) { + conf := watchConfig(ch, 50*time.Millisecond) + assert.Nil(t, conf, "config") + }, + }, + { + name: "server sends delete - client suppresses delete", + serverDropDel: false, + clientDropDel: true, + tester: func(t *testing.T) { + conf := watchConfig(ch, 50*time.Millisecond) + assert.Nil(t, conf, "config") + }, + }, + { + name: "server suppresses delete - client suppresses delete", + serverDropDel: true, + clientDropDel: true, + tester: func(t *testing.T) { + conf := watchConfig(ch, 50*time.Millisecond) + assert.Nil(t, conf, "config") + }, + }, + } { + testLog.Debugf("------------------------- %s ----------------------", testCase.name) + + server.SuppressConfigDeletion = testCase.serverDropDel + + clientCtx, clientCancel := context.WithCancel(context.Background()) + err = c.Watch(clientCtx, ch, testCase.clientDropDel) + assert.NoError(t, err, "client watch") + + conf := watchConfig(ch, 25*time.Millisecond) + assert.Nil(t, conf, "noconfig") + + testLog.Trace("Adding config") + testConf := testConfig("ns1/gw1", "realm1") + err = srv.UpdateConfig([]server.Config{testConf}) + assert.NoError(t, err, "update") + + conf = watchConfig(ch, 50*time.Millisecond) + assert.NotNil(t, conf) + assert.Equal(t, *testConf.Config, *conf) + + testLog.Trace("Deleting config") + err = srv.UpdateConfig([]server.Config{}) + assert.NoError(t, err, "update") + testCase.tester(t) + + clientCancel() + } + + 
server.SuppressConfigDeletion = saved +} + +// func TestServerPatcher(t *testing.T) { +// zc := zap.NewProductionConfig() +// zc.Level = zap.NewAtomicLevelAt(testerLogLevel) +// z, err := zc.Build() +// assert.NoError(t, err, "logger created") +// zlogger := zapr.NewLogger(z) +// log := zlogger.WithName("tester") + +// logger := logger.NewLoggerFactory(stunnerLogLevel) +// testLog := logger.NewLogger("test") + +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() + +// testLog.Debug("create server") +// patcher := func(conf *stnrv1.StunnerConfig, node string) (*stnrv1.StunnerConfig, error) { +// testLog.Debugf("patching config: %s", conf.String()) +// if conf == nil { +// return nil, fmt.Errorf("config patcher: nil config received") +// } +// for i := range conf.Listeners { +// conf.Listeners[i].Addr = node +// } +// testLog.Tracef(" patching ready: %s", conf.String()) +// return conf, nil +// } +// srv := server.New(stnrv1.DefaultConfigDiscoveryAddress, patcher, log) +// assert.NotNil(t, srv, "server") +// err = srv.Start(ctx) +// assert.NoError(t, err, "start") + +// time.Sleep(20 * time.Millisecond) + +// c := testConfigListener("ns1/gw1", "realm1", "1.2.3.4") +// err = srv.UpdateConfig([]server.Config{c}) +// assert.NoError(t, err, "update") +// // expected result +// p := testConfigListener("ns1/gw1", "realm1", "10.11.12.13") +// p.Config.Listeners[0].Addr = "10.11.12.13" +// p.Config.Listeners[1].Addr = "10.11.12.13" +// p.Config.Listeners[2].Addr = "10.11.12.13" + +// testLog.Debug("client w/o node IP") +// loader1, err := client.New(testCDSAddr, "ns1/gw1", logger) +// assert.NoError(t, err, "client") +// sc1, err := loader1.Load() +// assert.NoError(t, err, "load") +// assert.True(t, sc1.DeepEqual(c.Config), "deepeq") + +// testLog.Debug("client w/ node IP") +// loader2, err := client.New(testCDSAddr, "ns1/gw1", map[string]string{"node": "10.11.12.13"}, logger) +// assert.NoError(t, err, "client") +// sc2, err := loader2.Load() +// assert.NoError(t, err, "load") +// assert.True(t, sc2.DeepEqual(p.Config), "deepeq") + +// watchCtx, watchCancel := context.WithCancel(context.Background()) +// defer watchCancel() + +// testLog.Debug("watcher1 w/o node IP") +// watcher1, err := client.New(testCDSAddr, "ns1/gw1", nil, logger) +// assert.NoError(t, err, "client") +// ch1 := make(chan *stnrv1.StunnerConfig, 8) +// defer close(ch1) + +// err = watcher1.Watch(watchCtx, ch1, false) +// assert.NoError(t, err, "client watch") + +// s := watchConfig(ch1, 100*time.Millisecond) +// assert.NotNil(t, s, "watch-config") +// assert.True(t, s.DeepEqual(c.Config), "deepeq") + +// testLog.Debug("watcher2 w/ node IP") +// watcher2, err := client.New(testCDSAddr, "ns1/gw1", map[string]string{"node": "10.11.12.13"}, logger) +// assert.NoError(t, err, "client") +// ch2 := make(chan *stnrv1.StunnerConfig, 8) +// defer close(ch2) + +// err = watcher2.Watch(watchCtx, ch2, false) +// assert.NoError(t, err, "client watch") + +// s = watchConfig(ch2, 100*time.Millisecond) +// assert.NotNil(t, s, "watch-config") +// assert.True(t, s.DeepEqual(p.Config), "deepeq") + +// // testing update +// c = testConfigListener("ns1/gw1", "realm1", "8.7.6.5") +// err = srv.UpdateConfig([]server.Config{c}) +// assert.NoError(t, err, "update") + +// testLog.Debug("client w/o node IP") +// sc1, err = loader1.Load() +// assert.NoError(t, err, "load") +// assert.True(t, sc1.DeepEqual(c.Config), "deepeq") + +// testLog.Debug("client w/ node IP") +// sc2, err = loader2.Load() +// assert.NoError(t, err, "load") 
+// assert.True(t, sc2.DeepEqual(p.Config), "deepeq") + +// s = watchConfig(ch1, 100*time.Millisecond) +// assert.NotNil(t, s, "watch-config") +// assert.True(t, s.DeepEqual(c.Config), "deepeq") + +// s = watchConfig(ch2, 100*time.Millisecond) +// assert.NotNil(t, s, "watch-config") +// assert.True(t, s.DeepEqual(p.Config), "deepeq") +// } + +// only differ in id and realm +func testConfig(id, realm string) server.Config { + c := client.ZeroConfig(id) + c.Auth.Realm = realm + _ = c.Validate() // make sure deepeq works + return server.Config{Id: id, Config: c} +} + +// func testConfigListener(id, realm, addr string) server.Config { +// c := client.ZeroConfig(id) +// c.Auth.Realm = realm +// c.Listeners = []stnrv1.ListenerConfig{{ +// Name: "l-1", +// Addr: addr, +// Port: 1, +// }, { +// Name: "l-2", +// Port: 2, +// }, { +// Name: "l-3", +// Addr: "101.102.103.104", +// Port: 3, +// }} +// _ = c.Validate() // make sure deepeq works +// return server.Config{Id: id, Config: c} +// } + +// wait for some configurable time for a watch element +func watchConfig(ch chan *stnrv1.StunnerConfig, d time.Duration) *stnrv1.StunnerConfig { + select { + case c := <-ch: + // fmt.Println("++++++++++++ got config ++++++++++++: ", c.String()) + return c + case <-time.After(d): + // fmt.Println("++++++++++++ timeout ++++++++++++") + return nil + } +} + +func findConfById(cs []*stnrv1.StunnerConfig, id string) *stnrv1.StunnerConfig { + for _, c := range cs { + if c != nil && c.Admin.Name == id { + return c + } + + } + + return nil +} diff --git a/pkg/config/client/api/client.gen.go b/pkg/config/client/api/client.gen.go new file mode 100644 index 00000000..f4b100ca --- /dev/null +++ b/pkg/config/client/api/client.gen.go @@ -0,0 +1,626 @@ +// Package api provides primitives to interact with the openapi HTTP API. +// +// Code generated by github.com/deepmap/oapi-codegen/v2 version v2.1.0 DO NOT EDIT. +package api + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + stunnerv1 "github.com/l7mp/stunner/pkg/apis/v1" + "github.com/oapi-codegen/runtime" +) + +// V1Config Config provides a STUNner config. Schema is defined in https://github.com/l7mp/stunner/tree/main/pkg/apis/v1 +type V1Config = stunnerv1.StunnerConfig + +// V1ConfigList ConfigList is a list of Configs. +type V1ConfigList struct { + // Items Items is the list of Config objects in the list. + Items []V1Config `json:"items"` + + // Version version defines the versioned schema of this object. + Version string `json:"version"` +} + +// V1Error API error. +type V1Error struct { + // Code Error code. + Code int32 `json:"code"` + + // Message Error message. + Message string `json:"message"` +} + +// ListV1ConfigsParams defines parameters for ListV1Configs. +type ListV1ConfigsParams struct { + // Watch Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. + Watch *bool `form:"watch,omitempty" json:"watch,omitempty"` +} + +// ListV1ConfigsNamespaceParams defines parameters for ListV1ConfigsNamespace. +type ListV1ConfigsNamespaceParams struct { + // Watch Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. + Watch *bool `form:"watch,omitempty" json:"watch,omitempty"` +} + +// GetV1ConfigNamespaceNameParams defines parameters for GetV1ConfigNamespaceName. 
+type GetV1ConfigNamespaceNameParams struct { + // Watch Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. + Watch *bool `form:"watch,omitempty" json:"watch,omitempty"` + + // Node Name of the node the client runs on. + Node *string `form:"node,omitempty" json:"node,omitempty"` +} + +// RequestEditorFn is the function signature for the RequestEditor callback function +type RequestEditorFn func(ctx context.Context, req *http.Request) error + +// Doer performs HTTP requests. +// +// The standard http.Client implements this interface. +type HttpRequestDoer interface { + Do(req *http.Request) (*http.Response, error) +} + +// Client which conforms to the OpenAPI3 specification for this service. +type Client struct { + // The endpoint of the server conforming to this interface, with scheme, + // https://api.deepmap.com for example. This can contain a path relative + // to the server, such as https://api.deepmap.com/dev-test, and all the + // paths in the swagger spec will be appended to the server. + Server string + + // Doer for performing requests, typically a *http.Client with any + // customized settings, such as certificate chains. + Client HttpRequestDoer + + // A list of callbacks for modifying requests which are generated before sending over + // the network. + RequestEditors []RequestEditorFn +} + +// ClientOption allows setting custom parameters during construction +type ClientOption func(*Client) error + +// Creates a new Client, with reasonable defaults +func NewClient(server string, opts ...ClientOption) (*Client, error) { + // create a client with sane default values + client := Client{ + Server: server, + } + // mutate client and add all optional params + for _, o := range opts { + if err := o(&client); err != nil { + return nil, err + } + } + // ensure the server URL always has a trailing slash + if !strings.HasSuffix(client.Server, "/") { + client.Server += "/" + } + // create httpClient, if not already present + if client.Client == nil { + client.Client = &http.Client{} + } + return &client, nil +} + +// WithHTTPClient allows overriding the default Doer, which is +// automatically created using http.Client. This is useful for tests. +func WithHTTPClient(doer HttpRequestDoer) ClientOption { + return func(c *Client) error { + c.Client = doer + return nil + } +} + +// WithRequestEditorFn allows setting up a callback function, which will be +// called right before sending the request. This can be used to mutate the request. +func WithRequestEditorFn(fn RequestEditorFn) ClientOption { + return func(c *Client) error { + c.RequestEditors = append(c.RequestEditors, fn) + return nil + } +} + +// The interface specification for the client above. 
+type ClientInterface interface { + // ListV1Configs request + ListV1Configs(ctx context.Context, params *ListV1ConfigsParams, reqEditors ...RequestEditorFn) (*http.Response, error) + + // ListV1ConfigsNamespace request + ListV1ConfigsNamespace(ctx context.Context, namespace string, params *ListV1ConfigsNamespaceParams, reqEditors ...RequestEditorFn) (*http.Response, error) + + // GetV1ConfigNamespaceName request + GetV1ConfigNamespaceName(ctx context.Context, namespace string, name string, params *GetV1ConfigNamespaceNameParams, reqEditors ...RequestEditorFn) (*http.Response, error) +} + +func (c *Client) ListV1Configs(ctx context.Context, params *ListV1ConfigsParams, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewListV1ConfigsRequest(c.Server, params) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) ListV1ConfigsNamespace(ctx context.Context, namespace string, params *ListV1ConfigsNamespaceParams, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewListV1ConfigsNamespaceRequest(c.Server, namespace, params) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) GetV1ConfigNamespaceName(ctx context.Context, namespace string, name string, params *GetV1ConfigNamespaceNameParams, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGetV1ConfigNamespaceNameRequest(c.Server, namespace, name, params) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +// NewListV1ConfigsRequest generates requests for ListV1Configs +func NewListV1ConfigsRequest(server string, params *ListV1ConfigsParams) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/configs") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + if params != nil { + queryValues := queryURL.Query() + + if params.Watch != nil { + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "watch", runtime.ParamLocationQuery, *params.Watch); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + } + + queryURL.RawQuery = queryValues.Encode() + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewListV1ConfigsNamespaceRequest generates requests for ListV1ConfigsNamespace +func NewListV1ConfigsNamespaceRequest(server string, namespace string, params *ListV1ConfigsNamespaceParams) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "namespace", runtime.ParamLocationPath, namespace) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/configs/%s", pathParam0) + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + if params != nil { + queryValues := queryURL.Query() + + if params.Watch != nil { + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "watch", runtime.ParamLocationQuery, *params.Watch); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + } + + queryURL.RawQuery = queryValues.Encode() + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewGetV1ConfigNamespaceNameRequest generates requests for GetV1ConfigNamespaceName +func NewGetV1ConfigNamespaceNameRequest(server string, namespace string, name string, params *GetV1ConfigNamespaceNameParams) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "namespace", runtime.ParamLocationPath, namespace) + if err != nil { + return nil, err + } + + var pathParam1 string + + pathParam1, err = runtime.StyleParamWithLocation("simple", false, "name", runtime.ParamLocationPath, name) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/configs/%s/%s", pathParam0, pathParam1) + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + if params != nil { + queryValues := queryURL.Query() + + if params.Watch != nil { + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "watch", runtime.ParamLocationQuery, *params.Watch); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + } + + if params.Node != nil { + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "node", runtime.ParamLocationQuery, *params.Node); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + } + + queryURL.RawQuery = queryValues.Encode() + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +func (c *Client) applyEditors(ctx context.Context, req *http.Request, additionalEditors []RequestEditorFn) error { + for _, r := range c.RequestEditors { + if err := r(ctx, req); err != nil { + return err + } + } + for _, r := range additionalEditors { + if err := r(ctx, req); err != nil { + return err + } + } + return nil +} + +// ClientWithResponses builds on ClientInterface to offer response payloads +type ClientWithResponses struct { + ClientInterface +} + +// NewClientWithResponses creates a new ClientWithResponses, which wraps +// Client with return type handling +func NewClientWithResponses(server string, opts ...ClientOption) (*ClientWithResponses, error) { + client, err := NewClient(server, opts...) + if err != nil { + return nil, err + } + return &ClientWithResponses{client}, nil +} + +// WithBaseURL overrides the baseURL. +func WithBaseURL(baseURL string) ClientOption { + return func(c *Client) error { + newBaseURL, err := url.Parse(baseURL) + if err != nil { + return err + } + c.Server = newBaseURL.String() + return nil + } +} + +// ClientWithResponsesInterface is the interface specification for the client with responses above. 
+type ClientWithResponsesInterface interface { + // ListV1ConfigsWithResponse request + ListV1ConfigsWithResponse(ctx context.Context, params *ListV1ConfigsParams, reqEditors ...RequestEditorFn) (*ListV1ConfigsResponse, error) + + // ListV1ConfigsNamespaceWithResponse request + ListV1ConfigsNamespaceWithResponse(ctx context.Context, namespace string, params *ListV1ConfigsNamespaceParams, reqEditors ...RequestEditorFn) (*ListV1ConfigsNamespaceResponse, error) + + // GetV1ConfigNamespaceNameWithResponse request + GetV1ConfigNamespaceNameWithResponse(ctx context.Context, namespace string, name string, params *GetV1ConfigNamespaceNameParams, reqEditors ...RequestEditorFn) (*GetV1ConfigNamespaceNameResponse, error) +} + +type ListV1ConfigsResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *V1ConfigList + JSONDefault *V1Error +} + +// Status returns HTTPResponse.Status +func (r ListV1ConfigsResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r ListV1ConfigsResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type ListV1ConfigsNamespaceResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *V1ConfigList + JSONDefault *V1Error +} + +// Status returns HTTPResponse.Status +func (r ListV1ConfigsNamespaceResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r ListV1ConfigsNamespaceResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type GetV1ConfigNamespaceNameResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *V1Config + JSON404 *V1Error + JSON500 *V1Error + JSONDefault *V1Error +} + +// Status returns HTTPResponse.Status +func (r GetV1ConfigNamespaceNameResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GetV1ConfigNamespaceNameResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +// ListV1ConfigsWithResponse request returning *ListV1ConfigsResponse +func (c *ClientWithResponses) ListV1ConfigsWithResponse(ctx context.Context, params *ListV1ConfigsParams, reqEditors ...RequestEditorFn) (*ListV1ConfigsResponse, error) { + rsp, err := c.ListV1Configs(ctx, params, reqEditors...) + if err != nil { + return nil, err + } + return ParseListV1ConfigsResponse(rsp) +} + +// ListV1ConfigsNamespaceWithResponse request returning *ListV1ConfigsNamespaceResponse +func (c *ClientWithResponses) ListV1ConfigsNamespaceWithResponse(ctx context.Context, namespace string, params *ListV1ConfigsNamespaceParams, reqEditors ...RequestEditorFn) (*ListV1ConfigsNamespaceResponse, error) { + rsp, err := c.ListV1ConfigsNamespace(ctx, namespace, params, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParseListV1ConfigsNamespaceResponse(rsp) +} + +// GetV1ConfigNamespaceNameWithResponse request returning *GetV1ConfigNamespaceNameResponse +func (c *ClientWithResponses) GetV1ConfigNamespaceNameWithResponse(ctx context.Context, namespace string, name string, params *GetV1ConfigNamespaceNameParams, reqEditors ...RequestEditorFn) (*GetV1ConfigNamespaceNameResponse, error) { + rsp, err := c.GetV1ConfigNamespaceName(ctx, namespace, name, params, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetV1ConfigNamespaceNameResponse(rsp) +} + +// ParseListV1ConfigsResponse parses an HTTP response from a ListV1ConfigsWithResponse call +func ParseListV1ConfigsResponse(rsp *http.Response) (*ListV1ConfigsResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &ListV1ConfigsResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest V1ConfigList + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && true: + var dest V1Error + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSONDefault = &dest + + } + + return response, nil +} + +// ParseListV1ConfigsNamespaceResponse parses an HTTP response from a ListV1ConfigsNamespaceWithResponse call +func ParseListV1ConfigsNamespaceResponse(rsp *http.Response) (*ListV1ConfigsNamespaceResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &ListV1ConfigsNamespaceResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest V1ConfigList + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && true: + var dest V1Error + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSONDefault = &dest + + } + + return response, nil +} + +// ParseGetV1ConfigNamespaceNameResponse parses an HTTP response from a GetV1ConfigNamespaceNameWithResponse call +func ParseGetV1ConfigNamespaceNameResponse(rsp *http.Response) (*GetV1ConfigNamespaceNameResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &GetV1ConfigNamespaceNameResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest V1Config + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 404: + var dest V1Error + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON404 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest V1Error + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), 
"json") && true: + var dest V1Error + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSONDefault = &dest + + } + + return response, nil +} diff --git a/pkg/config/client/cds_api.go b/pkg/config/client/cds_api.go new file mode 100644 index 00000000..e017abe4 --- /dev/null +++ b/pkg/config/client/cds_api.go @@ -0,0 +1,401 @@ +//go:generate go run github.com/deepmap/oapi-codegen/v2/cmd/oapi-codegen --config=cfg.yaml ../api/stunner_openapi.yaml +package client + +import ( + "context" + "fmt" + "net/http" + "strings" + "sync" + "time" + + "github.com/gorilla/websocket" + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" + "github.com/l7mp/stunner/pkg/config/client/api" + "github.com/l7mp/stunner/pkg/config/util" + "github.com/pion/logging" +) + +const ( + ConfigNamespaceNameAPIEndpoint = "/api/v1/configs/%s/%s" + ConfigsNamespaceAPIEndpoint = "/api/v1/configs/%s" + AllConfigsAPIEndpoint = "/api/v1/configs" +) + +type ConfigList struct { + Version string `json:"version"` + Items []*stnrv1.StunnerConfig `json:"items"` +} + +type ClientOption = api.ClientOption +type HttpRequestDoer = api.HttpRequestDoer + +type CdsApi interface { + // Endpoint returns the address of the server plus the WebSocket API endpoint. + Endpoint() (string, string) + // Get loads the config(s) from the API endpoint. + Get(ctx context.Context) ([]*stnrv1.StunnerConfig, error) + // Watch watches config(s) from the API endpoint of a CDS server. If the server is not + // available watch will retry, and if the connection goes away it will create a new one. If + // set, the suppressDelete instructs the API to ignore config delete updates from the + // server. + Watch(ctx context.Context, ch chan<- *stnrv1.StunnerConfig, suppressDelete bool) error + // Poll creates a one-shot config watcher without the retry mechanincs of Watch. + Poll(ctx context.Context, ch chan<- *stnrv1.StunnerConfig, suppressDelete bool) error + logging.LeveledLogger +} + +func WithHTTPClient(doer HttpRequestDoer) ClientOption { return api.WithHTTPClient(doer) } + +// AllConfigsAPI is the API for listing all configs in a namespace. +type AllConfigsAPI struct { + addr, httpURI, wsURI string + client *api.ClientWithResponses + logging.LeveledLogger +} + +func NewAllConfigsAPI(addr string, logger logging.LeveledLogger, opts ...ClientOption) (CdsApi, error) { + httpuri, err := getURI(addr) + if err != nil { + return nil, err + } + + wsuri, err := wsURI(addr, AllConfigsAPIEndpoint) + if err != nil { + return nil, err + } + + client, err := api.NewClientWithResponses(httpuri.String(), opts...) 
+ if err != nil { + return nil, err + } + + return &AllConfigsAPI{ + addr: addr, + httpURI: httpuri.String(), + wsURI: wsuri, + client: client, + LeveledLogger: logger, + }, nil +} + +func (a *AllConfigsAPI) Endpoint() (string, string) { + return a.addr, a.wsURI +} + +func (a *AllConfigsAPI) Get(ctx context.Context) ([]*stnrv1.StunnerConfig, error) { + a.Debugf("GET: loading all configs from CDS server %s", a.addr) + + r, err := a.client.ListV1ConfigsWithResponse(ctx, nil) + if err != nil { + return []*stnrv1.StunnerConfig{}, err + } + + if r.HTTPResponse.StatusCode != http.StatusOK { + body := strings.TrimSpace(string(r.Body)) + return []*stnrv1.StunnerConfig{}, fmt.Errorf("HTTP error (status: %s): %s", + r.HTTPResponse.Status, body) + } + + return decodeConfigList(r.Body) +} + +func (a *AllConfigsAPI) Watch(ctx context.Context, ch chan<- *stnrv1.StunnerConfig, suppressDelete bool) error { + a.Debugf("WATCH: watching all configs from CDS server %s", a.wsURI) + return watch(ctx, a, ch, suppressDelete) +} + +func (a *AllConfigsAPI) Poll(ctx context.Context, ch chan<- *stnrv1.StunnerConfig, suppressDelete bool) error { + a.Debugf("POLL: polling all configs from CDS server %s", a.wsURI) + return poll(ctx, a, ch, suppressDelete) +} + +// ConfigsNamespaceAPI is the API for listing all configs in a namespace. +type ConfigsNamespaceAPI struct { + addr, namespace, httpURI, wsURI string + client *api.ClientWithResponses + logging.LeveledLogger +} + +func NewConfigsNamespaceAPI(addr, namespace string, logger logging.LeveledLogger, opts ...ClientOption) (CdsApi, error) { + httpuri, err := getURI(addr) + if err != nil { + return nil, err + } + + wsuri, err := wsURI(addr, fmt.Sprintf(ConfigsNamespaceAPIEndpoint, namespace)) + if err != nil { + return nil, err + } + + client, err := api.NewClientWithResponses(httpuri.String(), opts...) 
+ if err != nil { + return nil, err + } + + return &ConfigsNamespaceAPI{ + addr: addr, + namespace: namespace, + httpURI: httpuri.String(), + wsURI: wsuri, + client: client, + LeveledLogger: logger, + }, nil +} + +func (a *ConfigsNamespaceAPI) Endpoint() (string, string) { + return a.addr, a.wsURI +} + +func (a *ConfigsNamespaceAPI) Get(ctx context.Context) ([]*stnrv1.StunnerConfig, error) { + a.Debugf("GET: loading all configs in namespace %s from CDS server %s", + a.namespace, a.addr) + + r, err := a.client.ListV1ConfigsNamespaceWithResponse(ctx, a.namespace, nil) + if err != nil { + return []*stnrv1.StunnerConfig{}, err + } + + if r.HTTPResponse.StatusCode != http.StatusOK { + body := strings.TrimSpace(string(r.Body)) + return []*stnrv1.StunnerConfig{}, fmt.Errorf("HTTP error (status: %s): %s", + r.HTTPResponse.Status, body) + } + + return decodeConfigList(r.Body) +} + +func (a *ConfigsNamespaceAPI) Watch(ctx context.Context, ch chan<- *stnrv1.StunnerConfig, suppressDelete bool) error { + a.Debugf("WATCH: watching all configs in namespace %s from CDS server %s", + a.namespace, a.wsURI) + return watch(ctx, a, ch, suppressDelete) +} + +func (a *ConfigsNamespaceAPI) Poll(ctx context.Context, ch chan<- *stnrv1.StunnerConfig, suppressDelete bool) error { + a.Debugf("POLL: polling all configs in namespace %s from CDS server %s", + a.namespace, a.wsURI) + return poll(ctx, a, ch, suppressDelete) +} + +type ConfigNamespaceNameAPI struct { + addr, namespace, name, httpURI, wsURI string + client *api.ClientWithResponses + logging.LeveledLogger +} + +func NewConfigNamespaceNameAPI(addr, namespace, name string, logger logging.LeveledLogger, opts ...ClientOption) (CdsApi, error) { + httpuri, err := getURI(addr) + if err != nil { + return nil, err + } + + wsuri, err := wsURI(addr, fmt.Sprintf(ConfigNamespaceNameAPIEndpoint, namespace, name)) + if err != nil { + return nil, err + } + + client, err := api.NewClientWithResponses(httpuri.String(), opts...) 
+ if err != nil { + return nil, err + } + + return &ConfigNamespaceNameAPI{ + addr: addr, + namespace: namespace, + name: name, + httpURI: httpuri.String(), + wsURI: wsuri, + client: client, + LeveledLogger: logger, + }, nil +} + +func (a *ConfigNamespaceNameAPI) Endpoint() (string, string) { + return a.addr, a.wsURI +} + +func (a *ConfigNamespaceNameAPI) Get(ctx context.Context) ([]*stnrv1.StunnerConfig, error) { + a.Debugf("GET: loading config for gateway %s/%s from CDS server %s", + a.namespace, a.name, a.addr) + + var params *api.GetV1ConfigNamespaceNameParams + r, err := a.client.GetV1ConfigNamespaceNameWithResponse(ctx, a.namespace, a.name, params) + if err != nil { + return []*stnrv1.StunnerConfig{}, err + } + + if r.HTTPResponse.StatusCode != http.StatusOK { + body := strings.TrimSpace(string(r.Body)) + return []*stnrv1.StunnerConfig{}, fmt.Errorf("HTTP error (status: %s): %s", + r.HTTPResponse.Status, body) + } + + return decodeConfig(r.Body) +} + +func (a *ConfigNamespaceNameAPI) Watch(ctx context.Context, ch chan<- *stnrv1.StunnerConfig, suppressDelete bool) error { + a.Debugf("WATCH: watching config for gateway %s/%s from CDS server %s", + a.namespace, a.name, a.wsURI) + return watch(ctx, a, ch, suppressDelete) +} + +func (a *ConfigNamespaceNameAPI) Poll(ctx context.Context, ch chan<- *stnrv1.StunnerConfig, suppressDelete bool) error { + a.Debugf("POLL: polling config for gateway %s/%s from CDS server %s", + a.namespace, a.name, a.wsURI) + return poll(ctx, a, ch, suppressDelete) +} + +func watch(ctx context.Context, a CdsApi, ch chan<- *stnrv1.StunnerConfig, suppressDelete bool) error { + go func() { + for { + if err := poll(ctx, a, ch, suppressDelete); err != nil { + _, wsuri := a.Endpoint() + a.Errorf("failed to init CDS watcher (url: %s): %s", wsuri, err.Error()) + } else { + // context got cancelled + return + } + + // wait between each attempt + time.Sleep(RetryPeriod) + } + }() + + return nil +} + +// //////////// +// API workers +// //////////// +func poll(ctx context.Context, a CdsApi, ch chan<- *stnrv1.StunnerConfig, suppressDelete bool) error { + _, url := a.Endpoint() + a.Tracef("poll: trying to open connection to CDS server at %s", url) + + wc, _, err := websocket.DefaultDialer.DialContext(ctx, url, makeHeader(url)) + if err != nil { + return err + } + // wrap with a locker to prevent concurrent writes + conn := util.NewConn(wc) + defer conn.Close() // this will close the poller goroutine + + a.Infof("connection successfully opened to config discovery server at %s", url) + + pingTicker := time.NewTicker(PingPeriod) + closePinger := make(chan any) + defer close(closePinger) + + // wait until all threads are closed and we can remove the error channel + errCh := make(chan error, 1) + var wg sync.WaitGroup + wg.Add(2) + + go func() { + defer pingTicker.Stop() + defer wg.Done() + + for { + select { + case <-pingTicker.C: + // p.log.Tracef("++++ PING ++++ for CDS server %q at client %q", location, p.id) + conn.SetWriteDeadline(time.Now().Add(WriteWait)) //nolint:errcheck + if err := conn.WriteMessage(websocket.PingMessage, []byte("keepalive")); err != nil { + errCh <- fmt.Errorf("could not ping CDS server at %q: %w", + conn.RemoteAddr(), err) + return + } + case <-closePinger: + a.Tracef("closing ping handler to config discovery server at %q", url) + return + } + } + }() + + // poller + go func() { + defer wg.Done() + + // the next pong must arrive within the PongWait period + conn.SetReadDeadline(time.Now().Add(PongWait)) //nolint:errcheck + // reinit the deadline when 
we get a pong + conn.SetPongHandler(func(string) error { + // a.Tracef("Got PONG from server %q", url) + conn.SetReadDeadline(time.Now().Add(PongWait)) //nolint:errcheck + return nil + }) + + for { + // ping-pong deadline misses will end up being caught here as a read beyond + // the deadline + msgType, msg, err := conn.ReadMessage() + if err != nil { + errCh <- err + return + } + + if msgType != websocket.TextMessage { + errCh <- fmt.Errorf("unexpected message type (code: %d) from client %q", + msgType, conn.RemoteAddr().String()) + return + } + + if len(msg) == 0 { + a.Warn("ignoring zero-length config") + continue + } + + c, err := ParseConfig(msg) + if err != nil { + // assume it is a YAML/JSON syntax error: report and ignore + a.Warnf("could not parse config: %s", err.Error()) + continue + } + + if err := c.Validate(); err != nil { + a.Warnf("invalid config: %s", err.Error()) + continue + } + + if suppressDelete && IsConfigDeleted(c) { + a.Infof("Ignoring delete configuration update from %q", url) + continue + } + + a.Debugf("new config received from %q: %q", url, c.String()) + + ch <- c + } + }() + + // wait fo cancel + for { + defer func() { + a.Infof("closing connection to server %s", conn.RemoteAddr().String()) + conn.WriteMessage(websocket.CloseMessage, []byte{}) //nolint:errcheck + conn.Close() + closePinger <- struct{}{} + wg.Wait() + close(errCh) + }() + + select { + case <-ctx.Done(): + // cancel: normal return + return nil + case err := <-errCh: + // error: return it + return err + } + } +} + +// creates an origin header +func makeHeader(uri string) http.Header { + header := http.Header{} + url, _ := getURI(uri) //nolint:errcheck + origin := *url + origin.Scheme = "http" + origin.Path = "" + header.Set("origin", origin.String()) + return header +} diff --git a/pkg/config/client/cds_client.go b/pkg/config/client/cds_client.go new file mode 100644 index 00000000..c11e9f75 --- /dev/null +++ b/pkg/config/client/cds_client.go @@ -0,0 +1,57 @@ +//go:generate go run github.com/deepmap/oapi-codegen/v2/cmd/oapi-codegen --config=cfg.yaml ../api/stunner_openapi.yaml +package client + +import ( + "context" + "fmt" + "strings" + + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" + "github.com/pion/logging" +) + +// CDSClient is a client for the config discovery service that knows how to poll configs for a +// specific gateway. Use the CDSAPI to access the general CDS client set. +type CDSClient struct { + CdsApi + addr, id string +} + +// NewCDSClient creates a config discovery service client that can be used to load or watch STUNner +// configurations from a CDS remote server. +func NewCDSClient(addr, id string, logger logging.LeveledLogger) (Client, error) { + ps := strings.Split(id, "/") + if len(ps) != 2 { + return nil, fmt.Errorf("invalid id: %q", id) + } + + client, err := NewConfigNamespaceNameAPI(addr, ps[0], ps[1], logger) + if err != nil { + return nil, err + } + + return &CDSClient{CdsApi: client, addr: addr, id: id}, nil +} + +// String outputs the status of the client. +func (p *CDSClient) String() string { + return fmt.Sprintf("config discovery client %q: using server %s", p.id, p.addr) +} + +// Load grabs a new configuration from the config doscovery server. 
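+// Load performs a one-shot GET against the namespace/name API of the server: it expects
+// exactly one config in the response, validates it, and returns it; use Watch for a
+// long-lived subscription. Illustrative use, assuming a CDS server at 127.0.0.1:13478
+// and a logging.LeveledLogger named log (both are examples):
+//
+//	c, _ := NewCDSClient("127.0.0.1:13478", "stunner/udp-gateway", log)
+//	conf, err := c.Load()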
+func (p *CDSClient) Load() (*stnrv1.StunnerConfig, error) {
+    configs, err := p.CdsApi.Get(context.Background())
+    if err != nil {
+        return nil, err
+    }
+    if len(configs) != 1 {
+        return nil, fmt.Errorf("expected exactly one config, got %d", len(configs))
+    }
+
+    c := configs[0]
+    if err := c.Validate(); err != nil {
+        return nil, fmt.Errorf("invalid config: %w", err)
+    }
+
+    return c, nil
+}
diff --git a/pkg/config/client/cfg.yaml b/pkg/config/client/cfg.yaml
new file mode 100644
index 00000000..1a22cccd
--- /dev/null
+++ b/pkg/config/client/cfg.yaml
@@ -0,0 +1,5 @@
+package: api
+generate:
+  client: true
+  models: true
+output: api/client.gen.go
diff --git a/pkg/config/client/client.go b/pkg/config/client/client.go
new file mode 100644
index 00000000..9becd437
--- /dev/null
+++ b/pkg/config/client/client.go
@@ -0,0 +1,69 @@
+package client
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "strings"
+    "time"
+
+    stnrv1 "github.com/l7mp/stunner/pkg/apis/v1"
+    "github.com/pion/logging"
+)
+
+var errFileTruncated = errors.New("zero-length config file")
+
+var (
+    // Send pings to the CDS server with this period. Must be less than PongWait.
+    PingPeriod = 5 * time.Second
+
+    // Time allowed to read the next pong message from the CDS server.
+    PongWait = 8 * time.Second
+
+    // Time allowed to write a message to the CDS server.
+    WriteWait = 2 * time.Second
+
+    // Period for retrying failed CDS connections.
+    RetryPeriod = 1 * time.Second
+)
+
+// Client represents a generic config client. Currently supported config providers: http, ws, or
+// file. Configurations obtained through the client are not validated; make sure to validate them
+// on the receiver side.
+type Client interface {
+    // Load grabs a new configuration from the config client.
+    Load() (*stnrv1.StunnerConfig, error)
+    // Watch listens to new configs from a config origin (config file or CDS server) and
+    // returns them on the given channel. The context cancels the watcher. If the origin is not
+    // available, Watch will retry. If set, the suppressDelete flag instructs the client to
+    // ignore delete configs (essentially zero-configs) from the origin.
+    Watch(ctx context.Context, ch chan<- *stnrv1.StunnerConfig, suppressDelete bool) error
+    // Poll creates a one-shot config watcher without the retry mechanics of Watch.
+    Poll(ctx context.Context, ch chan<- *stnrv1.StunnerConfig, suppressDelete bool) error
+    fmt.Stringer
+}
+
+// New creates a generic config client. Origin is either a network address in the form
+// "<address>:<port>" or a proper HTTP/WS URI, in which case a CDS client is returned, or a proper
+// file URL "file://<path>" in which case a config file watcher is returned.
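+// A minimal usage sketch (both origin strings and the gateway id are illustrative):
+//
+//    logger := logging.NewDefaultLoggerFactory()
+//    cds, _ := New("ws://stunner-config-discovery-service:13478", "stunner/udp-gateway", logger)
+//    fileCl, _ := New("file:///etc/stunnerd/stunnerd.conf", "stunner/udp-gateway", logger)
+//    ch := make(chan *stnrv1.StunnerConfig, 1)
+//    _ = cds.Watch(context.Background(), ch, false) // emits configs on ch, retrying on errors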
+func New(origin string, id string, logger logging.LoggerFactory) (Client, error) { + u, err := getURI(origin) + if err != nil { + return nil, fmt.Errorf("failed to parse config origin URI %q: %w", origin, err) + } + + switch strings.ToLower(u.Scheme) { + case "http", "ws", "https", "wss": + client, err := NewCDSClient(u.String(), id, logger.NewLogger("cds-client")) + if err != nil { + return nil, err + } + return client, nil + default: + client, err := NewConfigFileClient(origin, id, logger.NewLogger("config-file-client")) + if err != nil { + return nil, err + } + return client, nil + } +} diff --git a/pkg/config/client/client_test.go b/pkg/config/client/client_test.go new file mode 100644 index 00000000..ec1f0f27 --- /dev/null +++ b/pkg/config/client/client_test.go @@ -0,0 +1,51 @@ +package client + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseURI(t *testing.T) { + // file + u, err := getURI("file:///tmp/a") + assert.NoError(t, err, "file URI parse") + assert.Equal(t, "file", u.Scheme, "file URI scheme") + assert.Equal(t, "", u.Host, "file URI host") + assert.Equal(t, "/tmp/a", u.Path, "file URI path") + + // default is http + u, err = getURI("/tmp/a") + assert.NoError(t, err, "file URI parse") + assert.Equal(t, "http", u.Scheme, "file URI scheme") + assert.Equal(t, "", u.Host, "file URI host") + assert.Equal(t, "/tmp/a", u.Path, "file URI path") + + // addr + u, err = getURI("1.2.3.4:12345") + assert.NoError(t, err, "file URI parse") + assert.Equal(t, "http", u.Scheme, "file URI scheme") + assert.Equal(t, "1.2.3.4:12345", u.Host, "file URI host") + assert.Equal(t, "", u.Path, "file URI path") + + // addr+path + u, err = getURI("1.2.3.4:12345/a/b") + assert.NoError(t, err, "file URI parse") + assert.Equal(t, "http", u.Scheme, "file URI scheme") + assert.Equal(t, "1.2.3.4:12345", u.Host, "file URI host") + assert.Equal(t, "/a/b", u.Path, "file URI path") + + // scheme+addr+path + u, err = getURI("http://1.2.3.4:12345/a/b") + assert.NoError(t, err, "file URI parse") + assert.Equal(t, "http", u.Scheme, "file URI scheme") + assert.Equal(t, "1.2.3.4:12345", u.Host, "file URI host") + assert.Equal(t, "/a/b", u.Path, "file URI path") + + // ws+addr+path + u, err = getURI("ws://1.2.3.4:12345/a/b") + assert.NoError(t, err, "file URI parse") + assert.Equal(t, "ws", u.Scheme, "file URI scheme") + assert.Equal(t, "1.2.3.4:12345", u.Host, "file URI host") + assert.Equal(t, "/a/b", u.Path, "file URI path") +} diff --git a/pkg/config/client/config.go b/pkg/config/client/config.go new file mode 100644 index 00000000..f25916c2 --- /dev/null +++ b/pkg/config/client/config.go @@ -0,0 +1,140 @@ +package client + +import ( + "encoding/json" + "fmt" + "maps" + "os" + "regexp" + "strconv" + + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" + stnrv1a1 "github.com/l7mp/stunner/pkg/apis/v1alpha1" + "sigs.k8s.io/yaml" +) + +type ConfigSkeleton struct { + ApiVersion string `json:"version"` +} + +// ZeroConfig builds a zero configuration useful for bootstrapping STUNner. The minimal config +// defaults to static authentication with a dummy username and password and opens no listeners or +// clusters. 
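+// For instance (illustrative only), a freshly started dataplane that has not yet
+// received a real configuration could bootstrap itself from:
+//
+//    conf := ZeroConfig("stunner/udp-gateway") // static auth, no listeners, no clusters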
+func ZeroConfig(id string) *stnrv1.StunnerConfig {
+    return &stnrv1.StunnerConfig{
+        ApiVersion: stnrv1.ApiVersion,
+        Admin:      stnrv1.AdminConfig{Name: id},
+        Auth: stnrv1.AuthConfig{
+            Type:  "static",
+            Realm: stnrv1.DefaultRealm,
+            Credentials: map[string]string{
+                "username": "dummy-username",
+                "password": "dummy-password",
+            },
+        },
+        Listeners: []stnrv1.ListenerConfig{},
+        Clusters:  []stnrv1.ClusterConfig{},
+    }
+}
+
+// ParseConfig parses a raw buffer holding a configuration, substituting environment variables for
+// placeholders in the configuration. Returns the new configuration or an error if parsing fails.
+func ParseConfig(c []byte) (*stnrv1.StunnerConfig, error) {
+    // substitute environment variables
+    // default port: STUNNER_PUBLIC_PORT -> STUNNER_PORT
+    re := regexp.MustCompile(`^[0-9]+$`)
+    port, ok := os.LookupEnv("STUNNER_PORT")
+    if !ok || port == "" || !re.Match([]byte(port)) {
+        publicPort := stnrv1.DefaultPort
+        publicPortStr, ok := os.LookupEnv("STUNNER_PUBLIC_PORT")
+        if ok {
+            if p, err := strconv.Atoi(publicPortStr); err == nil {
+                publicPort = p
+            }
+        }
+        os.Setenv("STUNNER_PORT", fmt.Sprintf("%d", publicPort))
+    }
+
+    // make sure credentials are not affected by environment substitution
+
+    // parse before env substitution is applied
+    confRaw, err := parseRaw(c)
+    if err != nil {
+        return nil, err
+    }
+
+    // save credentials
+    credRaw := make(map[string]string)
+    maps.Copy(credRaw, confRaw.Auth.Credentials)
+
+    // apply env substitution and parse again
+    e := os.ExpandEnv(string(c))
+    confExp, err := parseRaw([]byte(e))
+    if err != nil {
+        return nil, err
+    }
+
+    // restore credentials
+    maps.Copy(confExp.Auth.Credentials, credRaw)
+
+    return confExp, nil
+}
+
+func parseRaw(c []byte) (*stnrv1.StunnerConfig, error) {
+    // try to parse only the config version first
+    k := ConfigSkeleton{}
+    if err := yaml.Unmarshal([]byte(c), &k); err != nil {
+        if errJ := json.Unmarshal([]byte(c), &k); errJ != nil {
+            return nil, fmt.Errorf("could not parse config file API version: "+
+                "YAML parse error: %s, JSON parse error: %s\n",
+                err.Error(), errJ.Error())
+        }
+    }
+
+    s := stnrv1.StunnerConfig{}
+
+    switch k.ApiVersion {
+    case stnrv1.ApiVersion:
+        if err := yaml.Unmarshal([]byte(c), &s); err != nil {
+            if errJ := json.Unmarshal([]byte(c), &s); errJ != nil {
+                return nil, fmt.Errorf("could not parse config file: "+
+                    "YAML parse error: %s, JSON parse error: %s\n",
+                    err.Error(), errJ.Error())
+            }
+        }
+    case stnrv1a1.ApiVersion:
+        a := stnrv1a1.StunnerConfig{}
+        if err := yaml.Unmarshal([]byte(c), &a); err != nil {
+            if errJ := json.Unmarshal([]byte(c), &a); errJ != nil {
+                return nil, fmt.Errorf("could not parse config file: "+
+                    "YAML parse error: %s, JSON parse error: %s\n",
+                    err.Error(), errJ.Error())
+            }
+        }
+
+        sv1, err := stnrv1a1.ConvertToV1(&a)
+        if err != nil {
+            return nil, fmt.Errorf("could not convert config to API V1: %s", err)
+        }
+
+        sv1.DeepCopyInto(&s)
+    }
+
+    return &s, nil
+}
+
+// IsConfigDeleted is a helper that allows the caller to decide whether a config is being deleted.
+// When a config is being removed (say, because the corresponding Gateway is deleted), the CDS
+// server sends a validated zero-config for the client. This function is a quick helper to decide
+// whether the config received is such a zero-config.
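+// An illustrative watch-loop sketch for the receiver side (the channel wiring is an
+// assumption of the example):
+//
+//    for conf := range ch {
+//        if IsConfigDeleted(conf) {
+//            // the Gateway was removed: keep serving with the last good config
+//            continue
+//        }
+//        // apply conf ...
+//    }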
+func IsConfigDeleted(conf *stnrv1.StunnerConfig) bool { + if conf == nil { + return false + } + zeroConf := ZeroConfig(conf.Admin.Name) + // zeroconfs have to be explcitly validated before deepEq (the cds client validates) + if err := zeroConf.Validate(); err != nil { + return false + } + return conf.DeepEqual(zeroConf) +} diff --git a/pkg/config/client/file_client.go b/pkg/config/client/file_client.go new file mode 100644 index 00000000..adb0e8d8 --- /dev/null +++ b/pkg/config/client/file_client.go @@ -0,0 +1,228 @@ +package client + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + "time" + + "github.com/fsnotify/fsnotify" + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" + "github.com/pion/logging" +) + +// ConfigFileClient is the implementation of the Client interface for config files. +type ConfigFileClient struct { + // configFile specifies the config file name to watch. + configFile string + // id is the name of the stunnerd instance. + id string + // log is a leveled logger used to report progress. Either Logger or Log must be specified. + log logging.LeveledLogger +} + +// NewConfigFileClient creates a client that load or watch STUNner configurations from a local +// file. +func NewConfigFileClient(origin, id string, logger logging.LeveledLogger) (Client, error) { + origin = strings.TrimPrefix(origin, "file://") // returns original if there is no "file://" prefix + + return &ConfigFileClient{ + configFile: origin, + id: id, + log: logger, + }, nil + +} + +// String outputs the status of the client. +func (w *ConfigFileClient) String() string { + return fmt.Sprintf("config client using file %q", w.configFile) +} + +// Load grabs a new configuration from a config file. +func (w *ConfigFileClient) Load() (*stnrv1.StunnerConfig, error) { + b, err := os.ReadFile(w.configFile) + if err != nil { + return nil, fmt.Errorf("failed to read config file %q: %s", w.configFile, err.Error()) + } + + if len(b) == 0 { + return nil, errFileTruncated + } + + c, err := ParseConfig(b) + if err != nil { + return nil, fmt.Errorf("failed to parse config: %w", err) + } + + if err := c.Validate(); err != nil { + return nil, fmt.Errorf("invalid config: %w", err) + } + + return c, nil +} + +// Watch watches a configuration file for changes. If no file exists at the given path, it will +// periodically retry until the file appears. +func (w *ConfigFileClient) Watch(ctx context.Context, ch chan<- *stnrv1.StunnerConfig, suppressDelete bool) error { + if w.configFile == "" { + return errors.New("uninitialized config file path") + } + + go func() { + for { + // try to watch + if err := w.Poll(ctx, ch, suppressDelete); err != nil { + w.log.Warnf("Error loading config file %q: %s", + w.configFile, err.Error()) + } else { + return + } + + if !w.tryWatchConfig(ctx) { + return + } + } + }() + + return nil +} + +// Poll watches the config file and emits new configs on the specified channel. Returns an error if +// further action is needed (tryWatchConfig is to be started) or nil on normal exit. 
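+// Callers normally drive this through Watch; an illustrative sketch (the file path,
+// id, and channel wiring are assumptions):
+//
+//    fc, _ := NewConfigFileClient("file:///etc/stunnerd/stunnerd.conf", "stunner/udp-gateway", log)
+//    ch := make(chan *stnrv1.StunnerConfig, 1)
+//    _ = fc.Watch(ctx, ch, true) // keeps retrying until the file appears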
+func (w *ConfigFileClient) Poll(ctx context.Context, ch chan<- *stnrv1.StunnerConfig, suppressDelete bool) error { + w.log.Tracef("configWatcher") + + // create a new watcher + watcher, err := fsnotify.NewWatcher() + if err != nil { + return err + } + defer watcher.Close() + + config := w.configFile + if err := watcher.Add(config); err != nil { + return err + } + + // emit an initial config + c, err := w.Load() + if err != nil { + return err + } + + w.log.Debugf("Initial config file successfully loaded from %q: %s", config, c.String()) + + ch <- c + + // save deepcopy so that we can filter repeated events + prev := stnrv1.StunnerConfig{} + c.DeepCopyInto(&prev) + + for { + select { + case <-ctx.Done(): + return nil + + case e, ok := <-watcher.Events: + if !ok { + return errors.New("config watcher received invalid event") + } + + w.log.Debugf("Received watch event: %s", e.String()) + + if e.Has(fsnotify.Remove) { + if err := watcher.Remove(config); err != nil { + w.log.Debugf("Could not remove config file %q watcher: %s", + config, err.Error()) + } + + return fmt.Errorf("config file deleted %q, disabling watcher", e.Op.String()) + } + + if !e.Has(fsnotify.Write) { + w.log.Debugf("Unhandled notify op on config file %q (ignoring): %s", + e.Name, e.Op.String()) + continue + } + + w.log.Debugf("Loading configuration file: %s", config) + c, err := w.Load() + if err != nil { + if errors.Is(err, errFileTruncated) { + w.log.Debugf("Ignoring: %s", err.Error()) + continue + } + return err + } + + // suppress repeated events + if c.DeepEqual(&prev) { + w.log.Debug("Ignoring recurrent notify event for the same config file") + continue + } + + if suppressDelete && IsConfigDeleted(c) { + w.log.Info("Ignoring deleted configuration") + continue + } + + w.log.Debugf("Config file successfully loaded from %q: %s", config, c.String()) + + ch <- c + + // save deepcopy so that we can filter repeated events + c.DeepCopyInto(&prev) + + case err, ok := <-watcher.Errors: + if !ok { + return errors.New("config watcher error handler received invalid error") + } + + if err := watcher.Remove(config); err != nil { + w.log.Debugf("Could not remove config file %q watcher: %s", + config, err.Error()) + } + + return fmt.Errorf("config watcher error, deactivating: %w", err) + } + } +} + +// tryWatchConfig runs a timer to look for the config file at the given path and returns it +// immediately once found. Returns true if further action is needed (configWatcher has to be +// started) or false on normal exit. 
+func (w *ConfigFileClient) tryWatchConfig(ctx context.Context) bool {
+    w.log.Tracef("tryWatchConfig")
+    config := w.configFile
+
+    ticker := time.NewTicker(RetryPeriod)
+    defer ticker.Stop()
+
+    for {
+        select {
+        case <-ctx.Done():
+            return false
+
+        case <-ticker.C:
+            w.log.Debugf("Trying to read config file %q from periodic timer",
+                config)
+
+            // check if config file exists and it is readable
+            if _, err := os.Stat(config); errors.Is(err, os.ErrNotExist) {
+                w.log.Debugf("Config file %q does not exist", config)
+
+                // report status in every 10th second
+                if time.Now().Second()%10 == 0 {
+                    w.log.Warnf("Waiting for config file %q", config)
+                }
+
+                continue
+            }
+
+            return true
+        }
+    }
+}
diff --git a/pkg/config/client/k8s_client.go b/pkg/config/client/k8s_client.go
new file mode 100644
index 00000000..989ea527
--- /dev/null
+++ b/pkg/config/client/k8s_client.go
@@ -0,0 +1,445 @@
+package client
+
+import (
+    "bytes"
+    "context"
+    "errors"
+    "fmt"
+    "net/http"
+    "os"
+    "strconv"
+    "sync"
+
+    "github.com/pion/logging"
+    "github.com/spf13/pflag"
+    corev1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/labels"
+    "k8s.io/apimachinery/pkg/selection"
+    cliopt "k8s.io/cli-runtime/pkg/genericclioptions"
+    "k8s.io/client-go/kubernetes"
+    "k8s.io/client-go/rest"
+    "k8s.io/client-go/tools/portforward"
+    "k8s.io/client-go/transport/spdy"
+
+    stnrv1 "github.com/l7mp/stunner/pkg/apis/v1"
+)
+
+// CDSConfigFlags composes a set of flags for CDS server discovery.
+type CDSConfigFlags struct {
+    // Addr is an explicit IP address for the CDS server.
+    Addr string
+    // Namespace is the namespace of the CDS server pod.
+    Namespace string
+    // Port is the port of the CDS server pod.
+    Port int
+}
+
+// NewCDSConfigFlags returns CDS service discovery flags with default values set.
+func NewCDSConfigFlags() *CDSConfigFlags {
+    port := stnrv1.DefaultConfigDiscoveryPort
+    if os.Getenv(stnrv1.DefaultCDSServerPortEnv) != "" {
+        p, err := strconv.Atoi(os.Getenv(stnrv1.DefaultCDSServerPortEnv))
+        if err == nil {
+            port = p
+        }
+    }
+    return &CDSConfigFlags{
+        Addr:      os.Getenv(stnrv1.DefaultCDSServerAddrEnv),
+        Port:      port,
+        Namespace: os.Getenv(stnrv1.DefaultCDSServerNamespaceEnv),
+    }
+}
+
+// AddFlags binds CDS server discovery configuration flags to a given flagset.
+func (f *CDSConfigFlags) AddFlags(flags *pflag.FlagSet) {
+    flags.StringVar(&f.Addr, "cds-server-address", f.Addr,
+        "Config discovery service address (overrides cds-namespace/name and disables service discovery)")
+    flags.StringVar(&f.Namespace, "cds-server-namespace", f.Namespace,
+        "Config discovery service namespace (disables service discovery)")
+    flags.IntVar(&f.Port, "cds-server-port", f.Port, "Config discovery service port")
+}
+
+// PodConfigFlags composes a set of flags for pod discovery.
+type PodConfigFlags struct {
+    // Addr is an explicit IP address for the pod.
+    Addr string
+    // Name is the name of the pod.
+    Name string
+    // Port is the port to use.
+    Port int
+}
+
+// NewPodConfigFlags returns Stunnerd service discovery flags with default values set.
+func NewPodConfigFlags() *PodConfigFlags {
+    return &PodConfigFlags{
+        Port: stnrv1.DefaultHealthCheckPort,
+    }
+}
+
+// AddFlags binds pod discovery configuration flags to a given flagset.
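+// A hypothetical CLI wiring sketch (the flagset name is an assumption):
+//
+//    fs := pflag.NewFlagSet("stunnerctl", pflag.ExitOnError)
+//    cdsFlags := NewCDSConfigFlags()
+//    cdsFlags.AddFlags(fs)
+//    podFlags := NewPodConfigFlags()
+//    podFlags.AddFlags(fs)
+//    _ = fs.Parse(os.Args[1:])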
+func (f *PodConfigFlags) AddFlags(flags *pflag.FlagSet) { + flags.StringVar(&f.Addr, "pod-address", f.Addr, + "Address of the stunnerd instance to connect to (overrides K8s pod discovery)") + flags.StringVar(&f.Name, "pod-name", f.Name, + "Name of the specific stunnerd pod to connect to (valid only if both -n and gateway name are specified)") + flags.IntVar(&f.Port, "pod-port", f.Port, "Port of the stunnerd pod to connect to") +} + +// AuthConfigFlags composes a set of flags for authentication service discovery. +type AuthConfigFlags struct { + // Addr is an explicit IP address for the server. + Addr string + // Namespace is the namespace of the server pod. + Namespace string + // Port is the port of the server pod. + Port int + // Enforce turn credential. + TurnAuth bool +} + +// NewAuthConfigFlags returns auth service discovery flags with default values set. +func NewAuthConfigFlags() *AuthConfigFlags { + return &AuthConfigFlags{ + Port: stnrv1.DefaultAuthServicePort, + } +} + +// AddFlags binds pod discovery configuration flags to a given flagset. +func (f *AuthConfigFlags) AddFlags(flags *pflag.FlagSet) { + flags.StringVar(&f.Addr, "auth-server-address", f.Addr, + "Auth service address (disables service discovery)") + flags.StringVar(&f.Namespace, "auth-service-namespace", f.Namespace, + "Auth service namespace (disables service discovery)") + flags.IntVar(&f.Port, "auth-service-port", f.Port, "Auth service port") + flags.BoolVar(&f.TurnAuth, "auth-turn-credential", f.TurnAuth, "Request TURN credentials (default: request ICE server config)") +} + +// PodConnector is a helper for discovering and connecting to pods in a Kubernetes cluster. +type PodConnector struct { + cs *kubernetes.Clientset + config *rest.Config + k8sFlags *cliopt.ConfigFlags + log logging.LeveledLogger +} + +// PodInfo allows to return a full pod descriptor to callers. +type PodInfo struct { + // Name is the name of the pod. Only valid + Name string + // Namespace is the Kubernetes namespace of the pod. + Namespace string + // Addr is the Kubernetes namespace of the pod. + Addr string + // Proxy is a boolean telling whether the connection is proxied over a port-forwarder. + Proxy bool +} + +func (p *PodInfo) String() string { + ret := "" + if p.Proxy { + ret += fmt.Sprintf("pod %s/%s at %s", p.Namespace, p.Name, p.Addr) + } else { + ret += p.Addr + } + return ret +} + +// NewK8sDiscoverer returns a new Kubernetes CDS discovery client. +func NewK8sDiscoverer(k8sFlags *cliopt.ConfigFlags, log logging.LeveledLogger) (*PodConnector, error) { + d := &PodConnector{ + k8sFlags: k8sFlags, + log: log, + } + + d.log.Debug("Obtaining kubeconfig") + config, err := d.k8sFlags.ToRESTConfig() + if err != nil { + return nil, fmt.Errorf("error building Kubernetes config: %w", err) + } + d.config = config + + d.log.Debug("Creating a Kubernetes client") + cs, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("error creating http client: %w", err) + } + d.cs = cs + + return d, nil +} + +// DiscoverK8sCDSServer discovers a CDS Server located in a Kubernetes cluster and returns an +// address that a CDS client can be opened to for reaching that CDS server. If necessary, opens a +// port-forward connection to the remote cluster. 
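+// An illustrative discovery-then-connect sketch (the Kubernetes flags, logger, and
+// gateway id are assumptions of the example):
+//
+//    pod, err := DiscoverK8sCDSServer(ctx, k8sFlags, cdsFlags, log)
+//    if err != nil { /* handle error */ }
+//    cds, err := NewCDSClient(pod.Addr, "stunner/udp-gateway", log)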
+func DiscoverK8sCDSServer(ctx context.Context, k8sFlags *cliopt.ConfigFlags, cdsFlags *CDSConfigFlags, log logging.LeveledLogger) (PodInfo, error) { + // if CDS server address is specified, return it + if cdsFlags.Addr != "" { + return PodInfo{ + Addr: fmt.Sprintf("%s:%d", cdsFlags.Addr, cdsFlags.Port), + Proxy: false, + }, nil + } + + ns := "" + nsLog := "" + if cdsFlags.Namespace != "" { + ns = cdsFlags.Namespace + nsLog = ns + } + + d, err := NewK8sDiscoverer(k8sFlags, log) + if err != nil { + return PodInfo{}, fmt.Errorf("failed to init CDS discovery client: %w", err) + } + + label := fmt.Sprintf("%s=%s", stnrv1.DefaultCDSServiceLabelKey, stnrv1.DefaultCDSServiceLabelValue) + d.log.Debugf("Querying CDS server pods in namespace %q using label-selector %q", nsLog, label) + + pods, err := d.cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{ + LabelSelector: label, + }) + if err != nil { + return PodInfo{}, fmt.Errorf("failed to query Kubernetes API server: %w", err) + } + + if len(pods.Items) == 0 { + return PodInfo{}, fmt.Errorf("no CDS server found") + } + + if len(pods.Items) > 1 { + return PodInfo{}, fmt.Errorf("too many CDS servers") + } + + return d.PortFwd(ctx, &pods.Items[0], cdsFlags.Port) +} + +// DiscoverK8sStunnerdPods discovers the stunnerd pods in a Kubernetes cluster, opens a +// port-forwarded connection to each, and returns a local address that can be used to connect to +// each pod. If gateway is empty, return all stunnerd pods in a namespace. If no namespace is given +// (using the -n CLI flag), query all stunnerd pods in the cluster. +func DiscoverK8sStunnerdPods(ctx context.Context, k8sFlags *cliopt.ConfigFlags, podFlags *PodConfigFlags, gwNs, gw string, log logging.LeveledLogger) ([]PodInfo, error) { + var ps []PodInfo + + // direct connection + if podFlags.Addr != "" { + return []PodInfo{{ + Addr: fmt.Sprintf("%s:%d", podFlags.Addr, podFlags.Port), + Proxy: false, + }}, nil + } + + d, err := NewK8sDiscoverer(k8sFlags, log) + if err != nil { + return ps, fmt.Errorf("failed to init CDS discovery client: %w", err) + } + + selector := labels.NewSelector() + appLabel, err := labels.NewRequirement(stnrv1.DefaultAppLabelKey, + selection.Equals, []string{stnrv1.DefaultAppLabelValue}) + if err != nil { + return ps, fmt.Errorf("failed to create app label selector: %w", err) + } + selector = selector.Add(*appLabel) + + if gwNs != "" { + nsLabel, err := labels.NewRequirement(stnrv1.DefaultRelatedGatewayNamespace, + selection.Equals, []string{gwNs}) + if err != nil { + return ps, fmt.Errorf("failed to create namespace label selector: %w", err) + } + selector = selector.Add(*nsLabel) + + if gw != "" { + gwLabel, err := labels.NewRequirement(stnrv1.DefaultRelatedGatewayKey, + selection.Equals, []string{gw}) + if err != nil { + return ps, fmt.Errorf("failed to create namespace label selector: %w", err) + } + selector = selector.Add(*gwLabel) + } + } + + d.log.Debugf("Calling GET on api/pods using namespace %q and label selector %q", + gwNs, selector.String()) + pods, err := d.cs.CoreV1().Pods(gwNs).List(context.TODO(), metav1.ListOptions{ + LabelSelector: selector.String(), + }) + if err != nil { + return ps, fmt.Errorf("failed to query Kubernetes API server: %w", err) + } + + // filter by pod name + if gwNs != "" && gw != "" && podFlags.Name != "" { + found := false + for i, p := range pods.Items { + if p.GetName() == podFlags.Name { + // keep only the i-th pod + d.log.Debugf("Enforcing pod %s/%s for gateway %s/%s", *k8sFlags.Namespace, gwNs, gw) + pods.Items = 
pods.Items[i : i+1] + found = true + break + } + } + if !found { + return ps, fmt.Errorf("pod %q not found for gateway %s/%s", + podFlags.Name, gwNs, gw) + } + } + + // open port-forwarders in parallel + var wg sync.WaitGroup + var lock sync.Mutex + ps = make([]PodInfo, len(pods.Items)) + wg.Add(len(pods.Items)) + for i := range pods.Items { + go func(j int) { + defer wg.Done() + pod := pods.Items[j] + + p, err := d.PortFwd(ctx, &pod, podFlags.Port) + if err != nil { + d.log.Errorf("Failed to create port-forwarder to stunnerd pod %s/%s: %s", + pod.GetNamespace(), pod.GetName(), err.Error()) + return + } + + lock.Lock() + defer lock.Unlock() + ps[j] = p + }(i) + } + + wg.Wait() + + d.log.Debugf("Successfully opened %d port-forward connections", len(pods.Items)) + + return ps, nil +} + +// DiscoverK8sAuthServer discovers the cluster authentication service. +func DiscoverK8sAuthServer(ctx context.Context, k8sFlags *cliopt.ConfigFlags, authFlags *AuthConfigFlags, log logging.LeveledLogger) (PodInfo, error) { + if authFlags.Addr != "" { + return PodInfo{ + Addr: fmt.Sprintf("%s:%d", authFlags.Addr, authFlags.Port), + Proxy: false, + }, nil + } + + ns := "" + nsLog := "" + if authFlags.Namespace != "" { + ns = authFlags.Namespace + nsLog = ns + } + + d, err := NewK8sDiscoverer(k8sFlags, log) + if err != nil { + return PodInfo{}, fmt.Errorf("failed to init CDS discovery client: %w", err) + } + + label := fmt.Sprintf("%s=%s", stnrv1.DefaultAppLabelKey, stnrv1.DefaultAuthAppLabelValue) + d.log.Debugf("Querying auth service pods in namespace %q using label-selector %q", nsLog, label) + + pods, err := d.cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{ + LabelSelector: label, + }) + if err != nil { + return PodInfo{}, fmt.Errorf("failed to query Kubernetes API server: %w", err) + } + + if len(pods.Items) == 0 { + return PodInfo{}, fmt.Errorf("no authentication found") + } + + if len(pods.Items) > 1 { + d.log.Infof("Mulitple (%d) authentication service instances found, using the first one", len(pods.Items)) + } + + return d.PortFwd(ctx, &pods.Items[0], authFlags.Port) +} + +// DiscoverK8sPod discovers an arbitrary pod. +func DiscoverK8sPod(ctx context.Context, k8sFlags *cliopt.ConfigFlags, namespace, labelSelector string, port int, log logging.LeveledLogger) (PodInfo, error) { + d, err := NewK8sDiscoverer(k8sFlags, log) + if err != nil { + return PodInfo{}, fmt.Errorf("Failed to K8s discovery client: %w", err) + } + + pods, err := d.cs.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) + if err != nil { + return PodInfo{}, fmt.Errorf("Failed to query Kubernetes API server: %w", err) + } + + if len(pods.Items) == 0 { + return PodInfo{}, errors.New("No pod found") + } + + if len(pods.Items) > 1 { + d.log.Infof("Mulitple (%d) pods found, using the first one", len(pods.Items)) + } + + return d.PortFwd(ctx, &pods.Items[0], port) +} + +func (d *PodConnector) PortFwd(ctx context.Context, pod *corev1.Pod, port int) (PodInfo, error) { + p := PodInfo{ + Name: pod.GetName(), + Namespace: pod.GetNamespace(), + Proxy: true, + } + d.log.Debugf("Found pod: %s/%s", p.Namespace, p.Name) + req := d.cs.RESTClient(). + Post(). + Prefix("api/v1"). + Resource("pods"). + Namespace(p.Namespace). + Name(p.Name). 
+ SubResource("portforward") + + d.log.Debugf("Creating a SPDY stream to API server using URL %q", req.URL().String()) + transport, upgrader, err := spdy.RoundTripperFor(d.config) + if err != nil { + return PodInfo{}, fmt.Errorf("failed to get transport/upgrader from restconfig: %w", err) + } + + dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, req.URL()) + + d.log.Debugf("Creating a port-forwarder to pod") + remoteAddr := fmt.Sprintf("0:%d", port) + stopChan, readyChan := make(chan struct{}, 1), make(chan struct{}, 1) + out, errOut := new(bytes.Buffer), new(bytes.Buffer) + fw, err := portforward.New(dialer, []string{remoteAddr}, stopChan, readyChan, out, errOut) + if err != nil { + return PodInfo{}, fmt.Errorf("failed to create port-forwarder: %w", err) + } + + go func() { + if err := fw.ForwardPorts(); err != nil { + d.log.Errorf("failed to set up port-forwarder: %s", err.Error()) + os.Exit(1) + } + }() + + d.log.Debug("Waiting for port-forwarder...") + <-readyChan + + localPort, err := fw.GetPorts() + if err != nil { + return PodInfo{}, fmt.Errorf("error obtaining local forwarder port: %w", err) + } + + if len(localPort) != 1 { + return PodInfo{}, fmt.Errorf("error setting up port-forwarder: required port pairs (1) "+ + "does not match the length of port forwarder port pairs (%d)", len(localPort)) + } + + go func() { + <-ctx.Done() + close(stopChan) + }() + + p.Addr = fmt.Sprintf("127.0.0.1:%d", localPort[0].Local) + d.log.Debugf("Port-forwarder connected to %s", p.String()) + return p, nil +} diff --git a/pkg/config/client/util.go b/pkg/config/client/util.go new file mode 100644 index 00000000..7766e151 --- /dev/null +++ b/pkg/config/client/util.go @@ -0,0 +1,60 @@ +package client + +import ( + "encoding/json" + "net/url" + "strings" + + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" +) + +func decodeConfig(r []byte) ([]*stnrv1.StunnerConfig, error) { + c := stnrv1.StunnerConfig{} + if err := json.Unmarshal(r, &c); err != nil { + return nil, err + } + + // copy + + return []*stnrv1.StunnerConfig{&c}, nil +} + +func decodeConfigList(r []byte) ([]*stnrv1.StunnerConfig, error) { + l := ConfigList{} + if err := json.Unmarshal(r, &l); err != nil { + return nil, err + } + return l.Items, nil +} + +// getURI tries to parse an address or an URL or a file name into an URL. +func getURI(addr string) (*url.URL, error) { + // default URL scheme is "http" + if !strings.HasPrefix(addr, "http://") && !strings.HasPrefix(addr, "https://") && + !strings.HasPrefix(addr, "ws://") && !strings.HasPrefix(addr, "wss://") && + !strings.HasPrefix(addr, "file://") { + addr = "http://" + addr + } + + url, err := url.Parse(addr) + if err != nil { + return nil, err + } + return url, nil +} + +// wsURI returns a websocket url from a HTTP URI. +func wsURI(addr, endpoint string) (string, error) { + uri, err := getURI(addr) + if err != nil { + return "", err + } + + uri.Scheme = "ws" + uri.Path = endpoint + v := url.Values{} + v.Set("watch", "true") + uri.RawQuery = v.Encode() + + return uri.String(), nil +} diff --git a/pkg/config/server/api/server.gen.go b/pkg/config/server/api/server.gen.go new file mode 100644 index 00000000..16a047ac --- /dev/null +++ b/pkg/config/server/api/server.gen.go @@ -0,0 +1,653 @@ +// Package api provides primitives to interact with the openapi HTTP API. +// +// Code generated by github.com/deepmap/oapi-codegen/v2 version v2.1.0 DO NOT EDIT. 
+package api + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "net/url" + "path" + "strings" + + "github.com/getkin/kin-openapi/openapi3" + "github.com/gorilla/mux" + stunnerv1 "github.com/l7mp/stunner/pkg/apis/v1" + "github.com/oapi-codegen/runtime" + strictnethttp "github.com/oapi-codegen/runtime/strictmiddleware/nethttp" +) + +// V1Config Config provides a STUNner config. Schema is defined in https://github.com/l7mp/stunner/tree/main/pkg/apis/v1 +type V1Config = stunnerv1.StunnerConfig + +// V1ConfigList ConfigList is a list of Configs. +type V1ConfigList struct { + // Items Items is the list of Config objects in the list. + Items []V1Config `json:"items"` + + // Version version defines the versioned schema of this object. + Version string `json:"version"` +} + +// V1Error API error. +type V1Error struct { + // Code Error code. + Code int32 `json:"code"` + + // Message Error message. + Message string `json:"message"` +} + +// ListV1ConfigsParams defines parameters for ListV1Configs. +type ListV1ConfigsParams struct { + // Watch Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. + Watch *bool `form:"watch,omitempty" json:"watch,omitempty"` +} + +// ListV1ConfigsNamespaceParams defines parameters for ListV1ConfigsNamespace. +type ListV1ConfigsNamespaceParams struct { + // Watch Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. + Watch *bool `form:"watch,omitempty" json:"watch,omitempty"` +} + +// GetV1ConfigNamespaceNameParams defines parameters for GetV1ConfigNamespaceName. +type GetV1ConfigNamespaceNameParams struct { + // Watch Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. + Watch *bool `form:"watch,omitempty" json:"watch,omitempty"` + + // Node Name of the node the client runs on. + Node *string `form:"node,omitempty" json:"node,omitempty"` +} + +// ServerInterface represents all server handlers. +type ServerInterface interface { + + // (GET /api/v1/configs) + ListV1Configs(w http.ResponseWriter, r *http.Request, params ListV1ConfigsParams) + + // (GET /api/v1/configs/{namespace}) + ListV1ConfigsNamespace(w http.ResponseWriter, r *http.Request, namespace string, params ListV1ConfigsNamespaceParams) + + // (GET /api/v1/configs/{namespace}/{name}) + GetV1ConfigNamespaceName(w http.ResponseWriter, r *http.Request, namespace string, name string, params GetV1ConfigNamespaceNameParams) +} + +// ServerInterfaceWrapper converts contexts to parameters. 
+type ServerInterfaceWrapper struct { + Handler ServerInterface + HandlerMiddlewares []MiddlewareFunc + ErrorHandlerFunc func(w http.ResponseWriter, r *http.Request, err error) +} + +type MiddlewareFunc func(http.Handler) http.Handler + +// ListV1Configs operation middleware +func (siw *ServerInterfaceWrapper) ListV1Configs(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var err error + + // Parameter object where we will unmarshal all parameters from the context + var params ListV1ConfigsParams + + // ------------- Optional query parameter "watch" ------------- + + err = runtime.BindQueryParameter("form", true, false, "watch", r.URL.Query(), ¶ms.Watch) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "watch", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.ListV1Configs(w, r, params) + })) + + for i := len(siw.HandlerMiddlewares) - 1; i >= 0; i-- { + handler = siw.HandlerMiddlewares[i](handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +// ListV1ConfigsNamespace operation middleware +func (siw *ServerInterfaceWrapper) ListV1ConfigsNamespace(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var err error + + // ------------- Path parameter "namespace" ------------- + var namespace string + + err = runtime.BindStyledParameterWithOptions("simple", "namespace", mux.Vars(r)["namespace"], &namespace, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "namespace", Err: err}) + return + } + + // Parameter object where we will unmarshal all parameters from the context + var params ListV1ConfigsNamespaceParams + + // ------------- Optional query parameter "watch" ------------- + + err = runtime.BindQueryParameter("form", true, false, "watch", r.URL.Query(), ¶ms.Watch) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "watch", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.ListV1ConfigsNamespace(w, r, namespace, params) + })) + + for i := len(siw.HandlerMiddlewares) - 1; i >= 0; i-- { + handler = siw.HandlerMiddlewares[i](handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +// GetV1ConfigNamespaceName operation middleware +func (siw *ServerInterfaceWrapper) GetV1ConfigNamespaceName(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var err error + + // ------------- Path parameter "namespace" ------------- + var namespace string + + err = runtime.BindStyledParameterWithOptions("simple", "namespace", mux.Vars(r)["namespace"], &namespace, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "namespace", Err: err}) + return + } + + // ------------- Path parameter "name" ------------- + var name string + + err = runtime.BindStyledParameterWithOptions("simple", "name", mux.Vars(r)["name"], &name, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "name", Err: err}) + return + } + + // Parameter object where we will unmarshal all parameters from the context + var params GetV1ConfigNamespaceNameParams + + // ------------- Optional query parameter "watch" ------------- + + err = runtime.BindQueryParameter("form", 
true, false, "watch", r.URL.Query(), ¶ms.Watch) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "watch", Err: err}) + return + } + + // ------------- Optional query parameter "node" ------------- + + err = runtime.BindQueryParameter("form", true, false, "node", r.URL.Query(), ¶ms.Node) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "node", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.GetV1ConfigNamespaceName(w, r, namespace, name, params) + })) + + for i := len(siw.HandlerMiddlewares) - 1; i >= 0; i-- { + handler = siw.HandlerMiddlewares[i](handler) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) +} + +type UnescapedCookieParamError struct { + ParamName string + Err error +} + +func (e *UnescapedCookieParamError) Error() string { + return fmt.Sprintf("error unescaping cookie parameter '%s'", e.ParamName) +} + +func (e *UnescapedCookieParamError) Unwrap() error { + return e.Err +} + +type UnmarshalingParamError struct { + ParamName string + Err error +} + +func (e *UnmarshalingParamError) Error() string { + return fmt.Sprintf("Error unmarshaling parameter %s as JSON: %s", e.ParamName, e.Err.Error()) +} + +func (e *UnmarshalingParamError) Unwrap() error { + return e.Err +} + +type RequiredParamError struct { + ParamName string +} + +func (e *RequiredParamError) Error() string { + return fmt.Sprintf("Query argument %s is required, but not found", e.ParamName) +} + +type RequiredHeaderError struct { + ParamName string + Err error +} + +func (e *RequiredHeaderError) Error() string { + return fmt.Sprintf("Header parameter %s is required, but not found", e.ParamName) +} + +func (e *RequiredHeaderError) Unwrap() error { + return e.Err +} + +type InvalidParamFormatError struct { + ParamName string + Err error +} + +func (e *InvalidParamFormatError) Error() string { + return fmt.Sprintf("Invalid format for parameter %s: %s", e.ParamName, e.Err.Error()) +} + +func (e *InvalidParamFormatError) Unwrap() error { + return e.Err +} + +type TooManyValuesForParamError struct { + ParamName string + Count int +} + +func (e *TooManyValuesForParamError) Error() string { + return fmt.Sprintf("Expected one value for %s, got %d", e.ParamName, e.Count) +} + +// Handler creates http.Handler with routing matching OpenAPI spec. +func Handler(si ServerInterface) http.Handler { + return HandlerWithOptions(si, GorillaServerOptions{}) +} + +type GorillaServerOptions struct { + BaseURL string + BaseRouter *mux.Router + Middlewares []MiddlewareFunc + ErrorHandlerFunc func(w http.ResponseWriter, r *http.Request, err error) +} + +// HandlerFromMux creates http.Handler with routing matching OpenAPI spec based on the provided mux. 
+func HandlerFromMux(si ServerInterface, r *mux.Router) http.Handler { + return HandlerWithOptions(si, GorillaServerOptions{ + BaseRouter: r, + }) +} + +func HandlerFromMuxWithBaseURL(si ServerInterface, r *mux.Router, baseURL string) http.Handler { + return HandlerWithOptions(si, GorillaServerOptions{ + BaseURL: baseURL, + BaseRouter: r, + }) +} + +// HandlerWithOptions creates http.Handler with additional options +func HandlerWithOptions(si ServerInterface, options GorillaServerOptions) http.Handler { + r := options.BaseRouter + + if r == nil { + r = mux.NewRouter() + } + if options.ErrorHandlerFunc == nil { + options.ErrorHandlerFunc = func(w http.ResponseWriter, r *http.Request, err error) { + http.Error(w, err.Error(), http.StatusBadRequest) + } + } + wrapper := ServerInterfaceWrapper{ + Handler: si, + HandlerMiddlewares: options.Middlewares, + ErrorHandlerFunc: options.ErrorHandlerFunc, + } + + r.HandleFunc(options.BaseURL+"/api/v1/configs", wrapper.ListV1Configs).Methods("GET") + + r.HandleFunc(options.BaseURL+"/api/v1/configs/{namespace}", wrapper.ListV1ConfigsNamespace).Methods("GET") + + r.HandleFunc(options.BaseURL+"/api/v1/configs/{namespace}/{name}", wrapper.GetV1ConfigNamespaceName).Methods("GET") + + return r +} + +type ListV1ConfigsRequestObject struct { + Params ListV1ConfigsParams +} + +type ListV1ConfigsResponseObject interface { + VisitListV1ConfigsResponse(w http.ResponseWriter) error +} + +type ListV1Configs200JSONResponse V1ConfigList + +func (response ListV1Configs200JSONResponse) VisitListV1ConfigsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type ListV1ConfigsdefaultJSONResponse struct { + Body V1Error + StatusCode int +} + +func (response ListV1ConfigsdefaultJSONResponse) VisitListV1ConfigsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(response.StatusCode) + + return json.NewEncoder(w).Encode(response.Body) +} + +type ListV1ConfigsNamespaceRequestObject struct { + Namespace string `json:"namespace"` + Params ListV1ConfigsNamespaceParams +} + +type ListV1ConfigsNamespaceResponseObject interface { + VisitListV1ConfigsNamespaceResponse(w http.ResponseWriter) error +} + +type ListV1ConfigsNamespace200JSONResponse V1ConfigList + +func (response ListV1ConfigsNamespace200JSONResponse) VisitListV1ConfigsNamespaceResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type ListV1ConfigsNamespacedefaultJSONResponse struct { + Body V1Error + StatusCode int +} + +func (response ListV1ConfigsNamespacedefaultJSONResponse) VisitListV1ConfigsNamespaceResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(response.StatusCode) + + return json.NewEncoder(w).Encode(response.Body) +} + +type GetV1ConfigNamespaceNameRequestObject struct { + Namespace string `json:"namespace"` + Name string `json:"name"` + Params GetV1ConfigNamespaceNameParams +} + +type GetV1ConfigNamespaceNameResponseObject interface { + VisitGetV1ConfigNamespaceNameResponse(w http.ResponseWriter) error +} + +type GetV1ConfigNamespaceName200JSONResponse V1Config + +func (response GetV1ConfigNamespaceName200JSONResponse) VisitGetV1ConfigNamespaceNameResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return 
json.NewEncoder(w).Encode(response) +} + +type GetV1ConfigNamespaceName404JSONResponse V1Error + +func (response GetV1ConfigNamespaceName404JSONResponse) VisitGetV1ConfigNamespaceNameResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type GetV1ConfigNamespaceName500JSONResponse V1Error + +func (response GetV1ConfigNamespaceName500JSONResponse) VisitGetV1ConfigNamespaceNameResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetV1ConfigNamespaceNamedefaultJSONResponse struct { + Body V1Error + StatusCode int +} + +func (response GetV1ConfigNamespaceNamedefaultJSONResponse) VisitGetV1ConfigNamespaceNameResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(response.StatusCode) + + return json.NewEncoder(w).Encode(response.Body) +} + +// StrictServerInterface represents all server handlers. +type StrictServerInterface interface { + + // (GET /api/v1/configs) + ListV1Configs(ctx context.Context, request ListV1ConfigsRequestObject) (ListV1ConfigsResponseObject, error) + + // (GET /api/v1/configs/{namespace}) + ListV1ConfigsNamespace(ctx context.Context, request ListV1ConfigsNamespaceRequestObject) (ListV1ConfigsNamespaceResponseObject, error) + + // (GET /api/v1/configs/{namespace}/{name}) + GetV1ConfigNamespaceName(ctx context.Context, request GetV1ConfigNamespaceNameRequestObject) (GetV1ConfigNamespaceNameResponseObject, error) +} + +type StrictHandlerFunc = strictnethttp.StrictHTTPHandlerFunc +type StrictMiddlewareFunc = strictnethttp.StrictHTTPMiddlewareFunc + +type StrictHTTPServerOptions struct { + RequestErrorHandlerFunc func(w http.ResponseWriter, r *http.Request, err error) + ResponseErrorHandlerFunc func(w http.ResponseWriter, r *http.Request, err error) +} + +func NewStrictHandler(ssi StrictServerInterface, middlewares []StrictMiddlewareFunc) ServerInterface { + return &strictHandler{ssi: ssi, middlewares: middlewares, options: StrictHTTPServerOptions{ + RequestErrorHandlerFunc: func(w http.ResponseWriter, r *http.Request, err error) { + http.Error(w, err.Error(), http.StatusBadRequest) + }, + ResponseErrorHandlerFunc: func(w http.ResponseWriter, r *http.Request, err error) { + http.Error(w, err.Error(), http.StatusInternalServerError) + }, + }} +} + +func NewStrictHandlerWithOptions(ssi StrictServerInterface, middlewares []StrictMiddlewareFunc, options StrictHTTPServerOptions) ServerInterface { + return &strictHandler{ssi: ssi, middlewares: middlewares, options: options} +} + +type strictHandler struct { + ssi StrictServerInterface + middlewares []StrictMiddlewareFunc + options StrictHTTPServerOptions +} + +// ListV1Configs operation middleware +func (sh *strictHandler) ListV1Configs(w http.ResponseWriter, r *http.Request, params ListV1ConfigsParams) { + var request ListV1ConfigsRequestObject + + request.Params = params + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.ListV1Configs(ctx, request.(ListV1ConfigsRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "ListV1Configs") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := 
response.(ListV1ConfigsResponseObject); ok { + if err := validResponse.VisitListV1ConfigsResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// ListV1ConfigsNamespace operation middleware +func (sh *strictHandler) ListV1ConfigsNamespace(w http.ResponseWriter, r *http.Request, namespace string, params ListV1ConfigsNamespaceParams) { + var request ListV1ConfigsNamespaceRequestObject + + request.Namespace = namespace + request.Params = params + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.ListV1ConfigsNamespace(ctx, request.(ListV1ConfigsNamespaceRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "ListV1ConfigsNamespace") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(ListV1ConfigsNamespaceResponseObject); ok { + if err := validResponse.VisitListV1ConfigsNamespaceResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// GetV1ConfigNamespaceName operation middleware +func (sh *strictHandler) GetV1ConfigNamespaceName(w http.ResponseWriter, r *http.Request, namespace string, name string, params GetV1ConfigNamespaceNameParams) { + var request GetV1ConfigNamespaceNameRequestObject + + request.Namespace = namespace + request.Name = name + request.Params = params + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.GetV1ConfigNamespaceName(ctx, request.(GetV1ConfigNamespaceNameRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetV1ConfigNamespaceName") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(GetV1ConfigNamespaceNameResponseObject); ok { + if err := validResponse.VisitGetV1ConfigNamespaceNameResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// Base64 encoded, gzipped, json marshaled Swagger object +var swaggerSpec = []string{ + + "H4sIAAAAAAAC/+xXTW/cNhD9KwTboyztJikK6NQgCAojhVv4ozkEPnClkcRUIpnhaG3D0H8vhuRKG6+3", + "DhAj6NeN4se8x5n3Zrn3srKDswYMeVneS191MKgw3K7zN9Y0uuWPGnyF2pG2RpYyzguHdqtr8EKJi8ur", + "MwMoqrCSi4sQR2gvami0gVpoIzoi58uiaDV14yav7FD0Pw6u8DQaA1gQAhSD0qZwf7SFctoX27XM5O1J", + "a0/ozoEsZdq7XecXcZQ47u060YOzSEzbqOGzQzKTTlEnS3mMwz7yNGVLFn7Rno5lgtf4rkr0PLKNiPM+", + "Z0C0DpA0hKxqgsEfxjnlaQ5BHTwIIuzmI1TkOYO7VY47R/oeoZGl/K5YSlmkOhZLEadMphQqRHXH31tA", + "H+AfskkLqXaRVJqDWsTYTJA67RM9ZjSXCLVpQ/YQPo0aoZblhxltx/x6PhAjyJjtt4gWDym9/u1UAC8d", + "prSyNRweCHEEr/GJxuKgSJZSG3r5YuGqDUELyNgDeK/ao5HS8tP3DHSWcIfX5APaNPYQ6bKD2Um1IuV6", + "ZUCEq98664PTEKLHRlSbHsQw9qRPHFqyle3TJ0sEOMjl1fmZ8IBbXYFoLIr3sDm/fCMGqLUS2rTgGTpc", + "SlPPLM/fXlwGSN5+SKba2W0Wj1znq3zFGbQOjHJalvJlvspfJrOFGrGniu26iMfDVAv0eJnJJgeguFFU", + "dQlz3weq7wV72ztVQXAZC0JxkNNalpIN+fs6mTDwQDUAAXpZfniI+T6A8HWrTnFKmAFLPm7bQC0QvB2x", + 
"4gIY/qIRgxkHobgmnhDUwI5QdZ2J0dWKIEt7B7sFYSzpRleBYeCrGfnTCHgns12bCreVWWrCnJyknI21", + "PSgjp+mateadNT5q/8VqFS1gCEzIp3KuT0DFRx/dvQT8ol4RWl2Q6eeJ+vWdDHONGnt6Ttjo+UcQrwzc", + "OqgI6mh+3jNlD9VU3M9amL5aWcI7qLhWi8Ce0NfZbt9TQps3xt4JolUEN+ouaO+m0wsh7QW3FPAE9ayW", + "8Ms1i8XsoS7th3CERwS0tKr/xf8vFn8cf50HWAp7JthJ9Iut8TPMzpj1zoN/ojmY03PR+e/Y9C/TaGwN", + "YVD1GgwJHI0X8QXyGJyJj6njmfomPeF4P3i1evVNesGZJdHY0dQ5o/7wvJc8inpqCNCoPjwhAXcP8L9D", + "H5wyGUnFXjJiL0spWQ6k2sf6y2vxbtwAGiDw6f27b+n4MM75PxepikrBL/Sf+K9hru2ix/QgltP19GcA", + "AAD//2H3j/07DwAA", +} + +// GetSwagger returns the content of the embedded swagger specification file +// or error if failed to decode +func decodeSpec() ([]byte, error) { + zipped, err := base64.StdEncoding.DecodeString(strings.Join(swaggerSpec, "")) + if err != nil { + return nil, fmt.Errorf("error base64 decoding spec: %w", err) + } + zr, err := gzip.NewReader(bytes.NewReader(zipped)) + if err != nil { + return nil, fmt.Errorf("error decompressing spec: %w", err) + } + var buf bytes.Buffer + _, err = buf.ReadFrom(zr) + if err != nil { + return nil, fmt.Errorf("error decompressing spec: %w", err) + } + + return buf.Bytes(), nil +} + +var rawSpec = decodeSpecCached() + +// a naive cached of a decoded swagger spec +func decodeSpecCached() func() ([]byte, error) { + data, err := decodeSpec() + return func() ([]byte, error) { + return data, err + } +} + +// Constructs a synthetic filesystem for resolving external references when loading openapi specifications. +func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) { + res := make(map[string]func() ([]byte, error)) + if len(pathToFile) > 0 { + res[pathToFile] = rawSpec + } + + return res +} + +// GetSwagger returns the Swagger specification corresponding to the generated code +// in this file. The external references of Swagger specification are resolved. +// The logic of resolving external references is tightly connected to "import-mapping" feature. +// Externally referenced files must be embedded in the corresponding golang packages. +// Urls can be supported but this task was out of the scope. 
+func GetSwagger() (swagger *openapi3.T, err error) { + resolvePath := PathToRawSpec("") + + loader := openapi3.NewLoader() + loader.IsExternalRefsAllowed = true + loader.ReadFromURIFunc = func(loader *openapi3.Loader, url *url.URL) ([]byte, error) { + pathToFile := url.String() + pathToFile = path.Clean(pathToFile) + getSpec, ok := resolvePath[pathToFile] + if !ok { + err1 := fmt.Errorf("path not found: %s", pathToFile) + return nil, err1 + } + return getSpec() + } + var specData []byte + specData, err = rawSpec() + if err != nil { + return + } + swagger, err = loader.LoadFromData(specData) + if err != nil { + return + } + return +} diff --git a/pkg/config/server/cfg.yaml b/pkg/config/server/cfg.yaml new file mode 100644 index 00000000..793c0f74 --- /dev/null +++ b/pkg/config/server/cfg.yaml @@ -0,0 +1,9 @@ +package: api +generate: + gorilla-server: true + models: true + embedded-spec: true + strict-server: true +output: api/server.gen.go +compatibility: + apply-gorilla-middleware-first-to-last: true diff --git a/pkg/config/server/config.go b/pkg/config/server/config.go new file mode 100644 index 00000000..159fcd61 --- /dev/null +++ b/pkg/config/server/config.go @@ -0,0 +1,131 @@ +package server + +import ( + "fmt" + "sync" + + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" + "github.com/l7mp/stunner/pkg/config/client" + "github.com/l7mp/stunner/pkg/config/client/api" +) + +type ConfigList = api.V1ConfigList + +type Config struct { + Id string + Config *stnrv1.StunnerConfig +} + +func (e Config) String() string { + return fmt.Sprintf("id=%s: %s", e.Id, e.Config.String()) +} + +// UpsertConfig upserts a single config in the server. +func (s *Server) UpsertConfig(id string, c *stnrv1.StunnerConfig) { + cpy := &stnrv1.StunnerConfig{} + c.DeepCopyInto(cpy) + s.configs.Upsert(id, cpy) + s.configCh <- Config{Id: id, Config: cpy} +} + +// DeleteConfig removes a config from clients by sending a zero-config. Clients may decide to +// ignore the delete operation by (1) using client.IsConfigDeleted() to identify whether a config +// is being deleted and (2) selectively ignoring config delete updates based on the result. This is +// needed, e.g., in stunnerd, in order to avoid that a client being removed and entering the +// graceful shutdown cycle receive a zeroconfig and abruprly kill all listeners with all active +// connections allocated to them. +func (s *Server) DeleteConfig(id string) { + s.configs.Delete(id) + if SuppressConfigDeletion { + s.log.Info("Suppressing config update for deleted config", "config-id", id) + return + } + + s.deleteCh <- Config{Id: id, Config: client.ZeroConfig(id)} +} + +// UpdateConfig receives a set of ids and newConfigs that represent the state-of-the-world at a +// particular instance of time and generates an update per each change. 
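+// An illustrative reconciliation sketch (the ids and configs are placeholders):
+//
+//    _ = s.UpdateConfig([]Config{
+//        {Id: "stunner/udp-gateway", Config: confA}, // upserted only if it changed
+//        {Id: "stunner/tcp-gateway", Config: confB}, // added if previously unknown
+//    })
+//    // any previously stored id missing from the slice is deleted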
+func (s *Server) UpdateConfig(newConfigs []Config) error { + s.log.V(4).Info("Processing config updates", "num-configs", len(newConfigs)) + oldConfigs := s.configs.Snapshot() + + for _, oldC := range oldConfigs { + found := false + for _, newC := range newConfigs { + if oldC.Id == newC.Id { + if !oldC.Config.DeepEqual(newC.Config) { + s.log.V(2).Info("Updating config", "config-id", newC.Id, "config", + newC.Config.String()) + s.UpsertConfig(newC.Id, newC.Config) + } else { + s.log.V(2).Info("Config unchanged", "config-id", newC.Id, + "old-config", oldC.Config.String(), + "new-config", newC.Config.String()) + } + found = true + break + } + } + + if !found { + s.log.V(2).Info("Removing config", "config-id", oldC.Id) + s.DeleteConfig(oldC.Id) + } + } + + for _, newC := range newConfigs { + found := false + for _, oldC := range oldConfigs { + if oldC.Id == newC.Id { + found = true + break + } + } + + if !found { + s.log.V(2).Info("Adding config", "config-id", newC.Id, "config", newC.Config) + s.UpsertConfig(newC.Id, newC.Config) + } + } + + return nil +} + +type ConfigStore struct { + configs map[string]*stnrv1.StunnerConfig + lock sync.RWMutex +} + +func NewConfigStore() *ConfigStore { + return &ConfigStore{ + configs: make(map[string]*stnrv1.StunnerConfig), + } +} + +func (t *ConfigStore) Get(id string) *stnrv1.StunnerConfig { + t.lock.RLock() + defer t.lock.RUnlock() + return t.configs[id] +} + +func (t *ConfigStore) Snapshot() []Config { + t.lock.RLock() + defer t.lock.RUnlock() + ret := []Config{} + for id, c := range t.configs { + ret = append(ret, Config{Id: id, Config: c}) + } + return ret +} + +func (t *ConfigStore) Upsert(id string, c *stnrv1.StunnerConfig) { + t.lock.Lock() + defer t.lock.Unlock() + t.configs[id] = c +} +func (t *ConfigStore) Delete(id string) { + t.lock.Lock() + defer t.lock.Unlock() + delete(t.configs, id) +} diff --git a/pkg/config/server/conn.go b/pkg/config/server/conn.go new file mode 100644 index 00000000..2d143b29 --- /dev/null +++ b/pkg/config/server/conn.go @@ -0,0 +1,89 @@ +package server + +import ( + "context" + "fmt" + "sync" + + "github.com/gorilla/websocket" + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" + "github.com/l7mp/stunner/pkg/config/util" +) + +type ClientConfigPatcher func(conf *stnrv1.StunnerConfig) (*stnrv1.StunnerConfig, error) + +// Conn represents a client WebSocket connection. +type Conn struct { + *util.Conn + Filter ConfigFilter + patch ClientConfigPatcher + cancel context.CancelFunc +} + +// NewConn wraps a WebSocket connection. +func NewConn(conn *websocket.Conn, filter ConfigFilter, patch ClientConfigPatcher, cancel context.CancelFunc) *Conn { + return &Conn{ + Conn: util.NewConn(conn), + Filter: filter, + patch: patch, + cancel: cancel, + } +} + +// Id returns the IP 5-tuple for a client connection. +func (c *Conn) Id() string { + return fmt.Sprintf("%s:%s", c.RemoteAddr().Network(), c.RemoteAddr().String()) +} + +// ConnTrack represents the server's connection tracking table. +type ConnTrack struct { + conns []*Conn + lock sync.RWMutex +} + +// NewConnTrack creates a new connection tracking table. +func NewConnTrack() *ConnTrack { + return &ConnTrack{ + conns: []*Conn{}, + } +} + +// Get returns a client connection by the IP 5-tuple. +func (t *ConnTrack) Get(cid string) *Conn { + t.lock.RLock() + defer t.lock.RUnlock() + for _, c := range t.conns { + if c.Id() == cid { + return c + } + } + return nil +} + +// Upsert insert a new client connection. 
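+// An illustrative tracking-table flow (the conn value is a placeholder):
+//
+//    t := NewConnTrack()
+//    t.Upsert(conn)           // register a new client connection
+//    _ = t.Get(conn.Id())     // look up by the connection id
+//    t.Delete(conn)           // remove when the connection closes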
+func (t *ConnTrack) Upsert(c *Conn) { + t.lock.Lock() + defer t.lock.Unlock() + t.conns = append(t.conns, c) +} + +// Delete removes a client connection. +func (t *ConnTrack) Delete(conn *Conn) { + id := conn.Id() + t.lock.Lock() + defer t.lock.Unlock() + for i, c := range t.conns { + if c.Id() == id { + t.conns = append(t.conns[:i], t.conns[i+1:]...) + } + } +} + +// Snapshot creates a snapshot of the connection tracking table. +func (t *ConnTrack) Snapshot() []*Conn { + t.lock.RLock() + defer t.lock.RUnlock() + ret := make([]*Conn, len(t.conns)) + copy(ret, t.conns) + return ret +} diff --git a/pkg/config/server/handler.go b/pkg/config/server/handler.go new file mode 100644 index 00000000..f97e81c0 --- /dev/null +++ b/pkg/config/server/handler.go @@ -0,0 +1,95 @@ +package server + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/l7mp/stunner/pkg/config/server/api" + + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" +) + +// make sure the server satisfies the generate OpenAPI server interface +var _ api.StrictServerInterface = (*Server)(nil) + +// ConfigFilter is a callback to filter config updates for a client. +type ConfigFilter func(confId string) bool + +// ConfigPatcher is a callback to patch config updates for a client. +type ConfigPatcher func(conf *stnrv1.StunnerConfig, node string) (*stnrv1.StunnerConfig, error) + +// (GET /api/v1/configs) +func (s *Server) ListV1Configs(ctx context.Context, request api.ListV1ConfigsRequestObject) (api.ListV1ConfigsResponseObject, error) { + s.log.V(1).Info("Handling ListV1Configs API call") + + configs := s.configs.Snapshot() + response := ConfigList{Version: "v1", Items: []stnrv1.StunnerConfig{}} + for _, c := range configs { + cpy := stnrv1.StunnerConfig{} + c.Config.DeepCopyInto(&cpy) + response.Items = append(response.Items, cpy) + } + + s.log.V(3).Info("ListV1Configs API handler: ready", "configlist-len", len(configs)) + + return api.ListV1Configs200JSONResponse(response), nil +} + +// (GET /api/v1/configs/{namespace}) +func (s *Server) ListV1ConfigsNamespace(ctx context.Context, request api.ListV1ConfigsNamespaceRequestObject) (api.ListV1ConfigsNamespaceResponseObject, error) { + s.log.V(1).Info("Handling ListV1ConfigsNamespace API call", "namespace", request.Namespace) + + configs := s.configs.Snapshot() + response := ConfigList{Version: "v1", Items: []stnrv1.StunnerConfig{}} + for _, c := range configs { + ps := strings.Split(c.Id, "/") + if len(ps) == 2 && ps[0] == request.Namespace { + cpy := stnrv1.StunnerConfig{} + c.Config.DeepCopyInto(&cpy) + response.Items = append(response.Items, cpy) + } + } + + s.log.V(3).Info("ListV1ConfigsNamespace API handler: ready", "configlist-len", len(configs)) + + return api.ListV1ConfigsNamespace200JSONResponse(response), nil +} + +// (GET /api/v1/configs/{namespace}/{name}) +func (s *Server) GetV1ConfigNamespaceName(ctx context.Context, request api.GetV1ConfigNamespaceNameRequestObject) (api.GetV1ConfigNamespaceNameResponseObject, error) { + namespace, name := request.Namespace, request.Name + s.log.V(1).Info("Handling GetV1ConfigNamespaceName API call", "namespace", namespace, + "name", name) + + id := fmt.Sprintf("%s/%s", namespace, name) + c := s.configs.Get(id) + if c == nil { + s.log.V(1).Info("GetV1ConfigNamespaceName: Config not found", "config-id", id) + return api.GetV1ConfigNamespaceName404JSONResponse{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Config not found for ID %q", id), + }, nil + } + + ret := &stnrv1.StunnerConfig{} + c.DeepCopyInto(ret) + + if 
s.patch != nil && request.Params.Node != nil { + conf, err := s.patch(ret, *request.Params.Node) + if err != nil { + s.log.Error(err, "GetV1ConfigNamespaceName: patch config failed") + return api.GetV1ConfigNamespaceName500JSONResponse{ + Code: http.StatusInternalServerError, + Message: fmt.Sprintf("Config patch failed: %s", err.Error()), + }, nil + } + ret = conf + } + + s.log.V(3).Info("GetV1ConfigNamespaceName API handler: ready", + "config", ret.String()) + + return api.GetV1ConfigNamespaceName200JSONResponse(*ret), nil +} diff --git a/pkg/config/server/middleware.go b/pkg/config/server/middleware.go new file mode 100644 index 00000000..4d727c7e --- /dev/null +++ b/pkg/config/server/middleware.go @@ -0,0 +1,94 @@ +package server + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/gorilla/websocket" + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" + "github.com/l7mp/stunner/pkg/config/server/api" +) + +func (s *Server) WSUpgradeMiddleware(next api.StrictHandlerFunc, operationID string) api.StrictHandlerFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + var filter ConfigFilter + var patcher ClientConfigPatcher + watch := false + + switch operationID { + case "GetV1ConfigNamespaceName": + param, ok := request.(api.GetV1ConfigNamespaceNameRequestObject) + if !ok { + return nil, fmt.Errorf("unexpected parameters in API operation %q", + operationID) + } + + filter = func(confID string) bool { + id := fmt.Sprintf("%s/%s", param.Namespace, param.Name) + return confID == id + } + + watch = param.Params.Watch != nil && *param.Params.Watch + + if s.patch != nil && param.Params.Node != nil { + patcher = func(conf *stnrv1.StunnerConfig) (*stnrv1.StunnerConfig, error) { + return s.patch(conf, *param.Params.Node) + } + } + + case "ListV1ConfigsNamespace": + param, ok := request.(api.ListV1ConfigsNamespaceRequestObject) + if !ok { + return nil, fmt.Errorf("unexpected parameters in API operation %q", + operationID) + } + + filter = func(confID string) bool { + ps := strings.Split(confID, "/") + return len(ps) == 2 && ps[0] == param.Namespace + } + + watch = param.Params.Watch != nil && *param.Params.Watch + + case "ListV1Configs": + param, ok := request.(api.ListV1ConfigsRequestObject) + if !ok { + return nil, fmt.Errorf("unexpected parameters in API operation %q", + operationID) + } + + filter = func(confID string) bool { + return true + } + + watch = param.Params.Watch != nil && *param.Params.Watch + + default: + return nil, fmt.Errorf("invalid API operation %q", + operationID) + } + + if !watch { + return next(ctx, w, r, request) + } + + s.log.V(4).Info("WS upgrade middleware: upgrading connection", "client", r.RemoteAddr) + + // upgrade to webSocket + upgrader := websocket.Upgrader{ + ReadBufferSize: 1024, + WriteBufferSize: 1024, + } + + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + return nil, err + } + + s.handleConn(ctx, conn, operationID, filter, patcher) + + return nil, nil + } +} diff --git a/pkg/config/server/server.go b/pkg/config/server/server.go new file mode 100644 index 00000000..f986bdfe --- /dev/null +++ b/pkg/config/server/server.go @@ -0,0 +1,229 @@ +//go:generate go run github.com/deepmap/oapi-codegen/v2/cmd/oapi-codegen --config=cfg.yaml ../api/stunner_openapi.yaml +package server + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "net/http" + + "github.com/go-logr/logr" + "github.com/gorilla/mux" + "github.com/gorilla/websocket" 
+ "github.com/l7mp/stunner/pkg/config/server/api" + + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" +) + +var ( + // SuppressConfigDeletion allows the server to suppress config deletions all together. Used + // mostly for testing. + SuppressConfigDeletion = false +) + +// Server is a generic config discovery server implementation. +type Server struct { + *http.Server + router *mux.Router + addr string + conns *ConnTrack + configs *ConfigStore + configCh chan Config + deleteCh chan Config + patch ConfigPatcher + log logr.Logger +} + +// New creates a new config discovery server instance for the specified address. +func New(addr string, patch ConfigPatcher, logger logr.Logger) *Server { + if addr == "" { + addr = stnrv1.DefaultConfigDiscoveryAddress + } + + return &Server{ + router: mux.NewRouter(), + conns: NewConnTrack(), + configs: NewConfigStore(), + configCh: make(chan Config, 8), + deleteCh: make(chan Config, 8), + addr: addr, + patch: patch, + log: logger, + } +} + +// Start let the config discovery server listen to new client connections. +func (s *Server) Start(ctx context.Context) error { + handler := api.NewStrictHandler(s, []api.StrictMiddlewareFunc{s.WSUpgradeMiddleware}) + api.HandlerFromMux(handler, s.router) + s.Server = &http.Server{Addr: s.addr, Handler: s.router} + l, err := net.Listen("tcp", s.addr) + if err != nil { + return fmt.Errorf("CDS server failed to listen: %w", err) + } + + go func() { + s.log.Info("Starting CDS server", "address", s.addr, "patch", s.patch != nil) + + err := s.Serve(l) + if err != nil { + if errors.Is(err, net.ErrClosed) || errors.Is(err, http.ErrServerClosed) { + s.log.Info("Closing config discovery server") + } else { + s.log.Error(err, "Error closing config discovery server", "error", err.Error()) + } + return + } + }() + + go func() { + defer close(s.configCh) + defer close(s.deleteCh) + defer s.Close() + + for { + select { + case c := <-s.configCh: + s.log.V(2).Info("Sending config update event", "config-id", c.Id) + s.broadcastConfig(c) + case c := <-s.deleteCh: + s.log.V(2).Info("Sending config delete event", "config-id", c.Id) + s.broadcastConfig(c) + case <-ctx.Done(): + return + } + } + }() + + return nil +} + +// Close closes the server and drops all active connections. +func (s *Server) Close() { + // first close the underlying HTTP server so that we do not get any new connnections + s.Server.Close() + // then kill all active connections + for _, conn := range s.conns.Snapshot() { + s.closeConn(conn) + } +} + +// GetConfigChannel returns the channel that can be used to add configs to the server's config +// store. Use Update to specify more configs at once. +func (s *Server) GetConfigChannel() chan Config { + return s.configCh +} + +// GetConfigStore returns the dataplane configs stores in the server. +func (s *Server) GetConfigStore() *ConfigStore { + return s.configs +} + +// GetConnTrack returns the client connection tracking table of the server. +func (s *Server) GetConnTrack() *ConnTrack { + return s.conns +} + +// RemoveClient forcefully closes a client connection. This is used mainly for testing. 
+func (s *Server) RemoveClient(id string) { + if conn := s.conns.Get(id); conn != nil { + s.log.V(1).Info("Forcefully removing client connection", "config-id", id, + "client", conn.RemoteAddr().String()) + s.closeConn(conn) + } +} + +func (s *Server) handleConn(reqCtx context.Context, wsConn *websocket.Conn, operationID string, filter ConfigFilter, patch ClientConfigPatcher) { + // since wsConn is hijacked, reqCtx is unreliable in that it may not be canceled when the + // connection is closed, so we create our own connection context that we can cancel + // explicitly + ctx, cancel := context.WithCancel(reqCtx) + conn := NewConn(wsConn, filter, patch, cancel) + s.conns.Upsert(conn) + + // a dummy reader that drops everything it receives: this must be there for the + // WebSocket server to call our pong-handler: conn.Close() will kill this goroutine + go func() { + for { + // drop anything we receive + _, _, err := conn.ReadMessage() + if err != nil { + s.closeConn(conn) + return + } + } + }() + + conn.SetPingHandler(func(string) error { + return conn.WriteMessage(websocket.PongMessage, []byte("keepalive")) + }) + + s.log.V(1).Info("New config stream connection", "api", operationID, "client", conn.Id()) + + // send initial config(list) + for _, conf := range s.configs.Snapshot() { + if filter(conf.Id) { + s.sendConfig(conn, conf.Config) + } + } + + // wait until client closes the connection or the server is cancelled (which will kill all + // the running connections) + <-ctx.Done() + + s.log.V(1).Info("Client connection closed", "api", operationID, "client", conn.Id()) + + conn.Close() +} + +// iterate through all connections and send response if needed +func (s *Server) broadcastConfig(e Config) { + for _, conn := range s.conns.Snapshot() { + if conn.Filter(e.Id) { + s.sendConfig(conn, e.Config) + } + } +} + +func (s *Server) sendConfig(conn *Conn, e *stnrv1.StunnerConfig) { + c := &stnrv1.StunnerConfig{} + e.DeepCopyInto(c) + + if conn.patch != nil { + newC, err := conn.patch(c) + if err != nil { + s.log.Error(err, "Cannot patch config", "event", e.String()) + return + } + c = newC + } + + json, err := json.Marshal(c) + if err != nil { + s.log.Error(err, "Cannot JSON serialize config", "event", e.String()) + return + } + + s.log.V(2).Info("Sending configuration to client", "client", conn.Id()) + + if err := conn.WriteMessage(websocket.TextMessage, json); err != nil { + s.log.Error(err, "Error sending config update", "client", conn.Id()) + s.closeConn(conn) + } +} + +func (s *Server) closeConn(conn *Conn) { + s.log.V(1).Info("Closing client connection", "client", conn.Id()) + + conn.WriteMessage(websocket.CloseMessage, []byte{}) //nolint:errcheck + + if conn.cancel != nil { + conn.cancel() + conn.cancel = nil // make sure we can cancel multiple times + } + + s.conns.Delete(conn) + conn.Close() +} diff --git a/pkg/config/util/conn.go b/pkg/config/util/conn.go new file mode 100644 index 00000000..f6171573 --- /dev/null +++ b/pkg/config/util/conn.go @@ -0,0 +1,34 @@ +package util + +import ( + "sync" + + "github.com/gorilla/websocket" +) + +// Conn represents a client WebSocket connection. An added lock guards the underlying connection +// from concurrent write to websocket connection errors. +type Conn struct { + *websocket.Conn + readLock, writeLock sync.Mutex // for writemessage + +} + +// NewConn wraps a WebSocket connection. +func NewConn(conn *websocket.Conn) *Conn { + return &Conn{Conn: conn} +} + +// WriteMessage writes a message to the client connection with proper locking. 
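+//
+// A minimal sketch of concurrent use (ws is an assumed, already established
+// *websocket.Conn):
+//
+//	c := NewConn(ws)
+//	go func() { _ = c.WriteMessage(websocket.TextMessage, []byte("hello")) }()
+//	go func() { _ = c.WriteMessage(websocket.TextMessage, []byte("world")) }()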
+func (c *Conn) WriteMessage(messageType int, data []byte) error { + c.writeLock.Lock() + defer c.writeLock.Unlock() + return c.Conn.WriteMessage(messageType, data) +} + +// ReadMessage reads a message from the client connection with proper locking. +func (c *Conn) ReadMessage() (int, []byte, error) { + c.readLock.Lock() + defer c.readLock.Unlock() + return c.Conn.ReadMessage() +} diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 575f6fe1..515ef937 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -1,34 +1,63 @@ package logger import ( + "bufio" + "bytes" "fmt" "io" "log" "os" "strings" + "sync" "github.com/pion/logging" + "golang.org/x/time/rate" ) -const defaultFlags = log.Lmicroseconds | log.Lshortfile | log.Lmsgprefix +const ( + defaultFlags = log.Lmicroseconds | log.Lshortfile | log.Lmsgprefix + DefaultRateLimit = rate.Limit(.25) + DefaultBurstSize = 1 +) + +var logLevels = map[string]logging.LogLevel{ + "DISABLE": logging.LogLevelDisabled, + "ERROR": logging.LogLevelError, + "WARN": logging.LogLevelWarn, + "INFO": logging.LogLevelInfo, + "DEBUG": logging.LogLevelDebug, + "TRACE": logging.LogLevelTrace, +} -// LoggerFactory defines levels by scopes and creates new LeveledLogger. -type LoggerFactory struct { +// LoggerFactory is the basic pion LoggerFactory interface extended with functions for setting and querying the loglevel per scope. +type LoggerFactory interface { + logging.LoggerFactory + // SetLevel sets the global loglevel. + SetLevel(levelSpec string) + // GetLevel gets the loglevel for the given scope. + GetLevel(scope string) string + // SetWriter decorates a logger factory with a writer. + SetWriter(w io.Writer) +} + +// LeveledLoggerFactory defines levels by scopes and creates new LeveledLoggers that can dynamically change their own loglevels. +type LeveledLoggerFactory struct { Writer io.Writer DefaultLogLevel logging.LogLevel ScopeLevels map[string]logging.LogLevel - Loggers map[string]*logging.DefaultLeveledLogger + Loggers map[string]*RateLimitedLogger + lock sync.RWMutex } // NewLoggerFactory sets up a scoped logger for STUNner. -func NewLoggerFactory(levelSpec string) *LoggerFactory { - logger := LoggerFactory{} +func NewLoggerFactory(levelSpec string) LoggerFactory { + logger := LeveledLoggerFactory{} logger.DefaultLogLevel = logging.LogLevelError logger.ScopeLevels = make(map[string]logging.LogLevel) - logger.Writer = os.Stdout + logger.Loggers = make(map[string]*RateLimitedLogger) - logger.ScopeLevels = make(map[string]logging.LogLevel) - logger.Loggers = make(map[string]*logging.DefaultLeveledLogger) + // Set writer + logger.SetWriter(os.Stdout) // resets all child loggers logger.SetLevel(levelSpec) @@ -36,43 +65,22 @@ func NewLoggerFactory(levelSpec string) *LoggerFactory { return &logger } -// NewLogger either returns the existing LeveledLoogger (if it exists) for the given scope or creates a new one. -func (f *LoggerFactory) NewLogger(scope string) logging.LeveledLogger { - logger, found := f.Loggers[scope] - if found { - return logger - } - - // create a new one - logLevel := f.DefaultLogLevel - scopeLevel, found := f.ScopeLevels[scope] - - if found { - logLevel = scopeLevel - } - - l := logging.NewDefaultLeveledLoggerForScope(scope, logLevel, f.Writer). - WithTraceLogger(log.New(f.Writer, fmt.Sprintf("%s TRACE: ", scope), defaultFlags)). - WithDebugLogger(log.New(f.Writer, fmt.Sprintf("%s DEBUG: ", scope), defaultFlags)). - WithInfoLogger(log.New(f.Writer, fmt.Sprintf("%s INFO: ", scope), defaultFlags)). 
- WithWarnLogger(log.New(f.Writer, fmt.Sprintf("%s WARNING: ", scope), defaultFlags)). - WithErrorLogger(log.New(f.Writer, fmt.Sprintf("%s ERROR: ", scope), defaultFlags)) - - f.Loggers[scope] = l +// NewLogger either returns the existing LeveledLogger (if it exists) for the given scope or creates a new one. +func (f *LeveledLoggerFactory) NewLogger(scope string) logging.LeveledLogger { + logger := f.newLogger(scope, DefaultRateLimit, DefaultBurstSize) + logger.DisableRateLimiter() + return logger +} - return l +// SetWriter sets the writer underlying the logger. +func (f *LeveledLoggerFactory) SetWriter(w io.Writer) { + f.Writer = w } -// Setlevel sets the loglevel. -func (f *LoggerFactory) SetLevel(levelSpec string) { - logLevels := map[string]logging.LogLevel{ - "DISABLE": logging.LogLevelDisabled, - "ERROR": logging.LogLevelError, - "WARN": logging.LogLevelWarn, - "INFO": logging.LogLevelInfo, - "DEBUG": logging.LogLevelDebug, - "TRACE": logging.LogLevelTrace, - } +// SetLevel sets the loglevel. +func (f *LeveledLoggerFactory) SetLevel(levelSpec string) { + f.lock.Lock() + defer f.lock.Unlock() levels := strings.Split(levelSpec, ",") for _, s := range levels { @@ -83,17 +91,18 @@ func (f *LoggerFactory) SetLevel(levelSpec string) { scope := scopedLevel[0] level := scopedLevel[1] - // set log-level - l, found := logLevels[strings.ToUpper(level)] - if !found { + l, ok := logLevels[strings.ToUpper(level)] + if !ok { continue } if strings.ToLower(scope) == "all" { + for c := range f.Loggers { + f.ScopeLevels[c] = l + } f.DefaultLogLevel = l continue } - f.ScopeLevels[scope] = l } @@ -104,5 +113,205 @@ func (f *LoggerFactory) SetLevel(levelSpec string) { } logger.SetLevel(l) + + // disable rate-limiting at DEBUG and TRACE level + if l == logging.LogLevelDebug || l == logging.LogLevelTrace { + logger.DisableRateLimiter() + } + } +} + +// GetLevel gets the loglevel for the given scope. +func (f *LeveledLoggerFactory) GetLevel(scope string) string { + f.lock.RLock() + defer f.lock.RUnlock() + + logLevel := f.DefaultLogLevel + scopeLevel, found := f.ScopeLevels[scope] + if found { + logLevel = scopeLevel + } + + return logLevel.String() +} + +// RateLimitedLoggerFactory is a logger factory that can emit rate-limited loggers. Note that all +// loglevels are rate-limited via single token bucket. Rate-limiting only applies at high loglevels +// (ERROR, WARN and INFO), a logger set to alower loglevel (DEBUG and TRACE) is never rate-limited +// to ease debugging. +type RateLimitedLoggerFactory struct { + *LeveledLoggerFactory + Limit rate.Limit + Burst int +} + +// WithRateLimiter decorates a logger factory with a rate-limiter. All loggers emitted by the +// factory will be automatically rate-limited. +func NewRateLimitedLoggerFactory(logger LoggerFactory, limit rate.Limit, burst int) *RateLimitedLoggerFactory { + leveledF, ok := logger.(*LeveledLoggerFactory) + if !ok { + return nil // this will blow up + } + return &RateLimitedLoggerFactory{ + LeveledLoggerFactory: leveledF, + Limit: limit, + Burst: burst, + } +} + +// NewLogger either returns the existing LeveledLogger (if it exists) for the given scope or creates a new one. 
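+//
+// A minimal sketch of building a rate-limited logger (the level spec and scope
+// name are arbitrary examples):
+//
+//	base := NewLoggerFactory("all:INFO")
+//	f := NewRateLimitedLoggerFactory(base, DefaultRateLimit, DefaultBurstSize)
+//	log := f.NewLogger("my-scope") // rate-limited, since the scope logs at INFO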
+func (f *RateLimitedLoggerFactory) NewLogger(scope string) logging.LeveledLogger { + logger := f.LeveledLoggerFactory.newLogger(scope, f.Limit, f.Burst) + + // disable rate-limiting logging at lower loglevels + l := f.DefaultLogLevel + + scopeLevel, found := f.ScopeLevels[scope] + if found { + l = scopeLevel + } + + // disable rate-limiting at DEBUG and TRACE level + if l == logging.LogLevelDebug || l == logging.LogLevelTrace { + logger.DisableRateLimiter() + } else { + logger.EnableRateLimiter() + } + + return logger +} + +// RateLimitedLogger is a rate-limiter logger for a specific scope. +type RateLimitedLogger struct { + *logging.DefaultLeveledLogger + *RateLimitedWriter +} + +// NewRateLimitedLoggerForScope returns a LeveledLogger configured with a default rate limiter. +func NewRateLimitedLoggerForScope(scope string, level logging.LogLevel, writer io.Writer, limit rate.Limit, burst int) *RateLimitedLogger { + // NewLogger will set the limit and burst + w := NewRateLimitedWriter(writer, limit, burst, true) + return &RateLimitedLogger{ + DefaultLeveledLogger: logging.NewDefaultLeveledLoggerForScope(scope, level, writer), + RateLimitedWriter: w, + } +} + +// newLogger knows how to emit rate-limited loggers. +func (f *LeveledLoggerFactory) newLogger(scope string, limit rate.Limit, burst int) *RateLimitedLogger { + f.lock.Lock() + defer f.lock.Unlock() + + logger, found := f.Loggers[scope] + if found { + return logger + } + + logLevel := f.DefaultLogLevel + scopeLevel, found := f.ScopeLevels[scope] + if found { + logLevel = scopeLevel + } + + l := NewRateLimitedLoggerForScope(scope, logLevel, f.Writer, limit, burst) + + l.DefaultLeveledLogger. + WithTraceLogger(log.New(l.RateLimitedWriter, fmt.Sprintf("%s TRACE: ", scope), defaultFlags)). + WithDebugLogger(log.New(l.RateLimitedWriter, fmt.Sprintf("%s DEBUG: ", scope), defaultFlags)). + WithInfoLogger(log.New(l.RateLimitedWriter, fmt.Sprintf("%s INFO: ", scope), defaultFlags)). + WithWarnLogger(log.New(l.RateLimitedWriter, fmt.Sprintf("%s WARNING: ", scope), defaultFlags)). + WithErrorLogger(log.New(l.RateLimitedWriter, fmt.Sprintf("%s ERROR: ", scope), defaultFlags)) + + f.Loggers[scope] = l + + return l +} + +// RateLimitedWriter is a writer limited by a token bucket. +type RateLimitedWriter struct { + io.Writer + *RateLimiter + Counter int + AddSuppressed bool +} + +// NewRateLimitedWriter creates a writer rate-limited by a token bucket to at most limit events per +// second with the given burst size. If addSuppressed is true then the number of events suppressed +// between logged events is appended to the output. +func NewRateLimitedWriter(writer io.Writer, limit rate.Limit, burst int, addSuppressed bool) *RateLimitedWriter { + return &RateLimitedWriter{ + Writer: writer, + RateLimiter: NewRateLimiter(limit, burst), + Counter: 0, // no need to lock: we are being called under a lock from DefaultLeveledLogger + AddSuppressed: addSuppressed, } } + +// Write fulfills io.Writer. +func (w *RateLimitedWriter) Write(p []byte) (int, error) { + if !w.RateLimiter.Allow() { + w.Counter++ + return 0, nil + } + + if w.AddSuppressed && w.Counter > 0 { + suffix := fmt.Sprintf(" (suppressed %d log events)\n", w.Counter) + p = append(bytes.TrimRight(p, "\r\n"), suffix...) + } + n, err := w.Writer.Write(p) + w.Counter = 0 + + return n, err +} + +// RateLimiter is a token bucket that can be disabled. 
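+//
+// A minimal sketch (the limit and burst values are arbitrary examples):
+//
+//	rl := NewRateLimiter(rate.Limit(0.5), 1)
+//	rl.EnableRateLimiter()
+//	if rl.Allow() {
+//		// the event fits into the token bucket: emit it
+//	}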
+type RateLimiter struct { + *rate.Limiter + Enabled bool +} + +func NewRateLimiter(r rate.Limit, b int) *RateLimiter { + return &RateLimiter{ + Limiter: rate.NewLimiter(r, b), + Enabled: false, + } +} + +func (l *RateLimiter) EnableRateLimiter() { + l.Enabled = true +} + +func (l *RateLimiter) DisableRateLimiter() { + l.Enabled = false +} + +func (l *RateLimiter) Allow() bool { + if !l.Enabled { + return true + } + return l.Limiter.Allow() +} + +// AutoFlushWriter wraps a bufio.Writer and ensures that Flush is called after every Write +// operation. +type AutoFlushWriter struct { + *bufio.Writer +} + +// NewAutoFlushWriter creates a new AutoFlushWriter. +func NewAutoFlushWriter(w io.Writer) *AutoFlushWriter { + return &AutoFlushWriter{ + Writer: bufio.NewWriter(w), + } +} + +// Write writes the data and immediately flushes the buffer +func (w *AutoFlushWriter) Write(p []byte) (n int, err error) { + n, err = w.Writer.Write(p) + if err != nil { + return n, err + } + + err = w.Flush() + return n, err +} diff --git a/pkg/logger/logger_test.go b/pkg/logger/logger_test.go new file mode 100644 index 00000000..253b0b0a --- /dev/null +++ b/pkg/logger/logger_test.go @@ -0,0 +1,600 @@ +package logger + +import ( + "bytes" + "fmt" + "testing" + "time" + + "github.com/pion/transport/v3/test" + "github.com/stretchr/testify/assert" + "golang.org/x/time/rate" +) + +const testScope = "dummy-scope" + +var logBuffer = &bytes.Buffer{} + +type loggerTestCase struct { + name, defaultLogLevel, scopeLogLevel string + prep func(lf LoggerFactory) + tester func(t *testing.T, lf LoggerFactory) +} + +var loggerTests = []loggerTestCase{ + { + name: "default-loglevel", + defaultLogLevel: "", // default is ERROR + scopeLogLevel: "", + tester: func(t *testing.T, lf LoggerFactory) { + level := lf.GetLevel("all") + assert.Equal(t, "Error", level, "default scope: level") + + log := lf.NewLogger(testScope) + + level = lf.GetLevel(testScope) + assert.Equal(t, "Error", level, "dummy scope: level") + + log.Error("dummy") + assert.Containsf(t, logreadr(), "dummy", "ERROR for level %s", level) + + log.Warn("dummy") + assert.Zerof(t, loglenr(), "WARN for level %s", level) + + log.Info("dummy") + assert.Zerof(t, loglenr(), "INFO for level %s", level) + + log.Debug("dummy") + assert.Zerof(t, loglenr(), "DEBUG for level %s", level) + + log.Trace("dummy") + assert.Zerof(t, loglenr(), "TRACE for level %s", level) + }, + }, + { + name: "default-loglevel-disable-scope", + defaultLogLevel: "", // default is ERROR + scopeLogLevel: "DISABLE", + tester: func(t *testing.T, lf LoggerFactory) { + level := lf.GetLevel("all") + assert.Equal(t, "Error", level, "default scope: level") + + level = lf.GetLevel(testScope) + assert.Equal(t, "Disabled", level, "dummy scope: level") + + log := lf.NewLogger(testScope) + + log.Error("dummy") + assert.Zerof(t, loglenr(), "ERROR for level %s", level) + + log.Warn("dummy") + assert.Zerof(t, loglenr(), "WARN for level %s", level) + + log.Info("dummy") + assert.Zerof(t, loglenr(), "INFO for level %s", level) + + log.Debug("dummy") + assert.Zerof(t, loglenr(), "DEBUG for level %s", level) + + log.Trace("dummy") + assert.Zerof(t, loglenr(), "TRACE for level %s", level) + }, + }, + { + name: "default-loglevel-error-scope", + defaultLogLevel: "", // default is ERROR + scopeLogLevel: "ERROR", + tester: func(t *testing.T, lf LoggerFactory) { + level := lf.GetLevel("all") + assert.Equal(t, "Error", level, "default scope: level") + + level = lf.GetLevel(testScope) + assert.Equal(t, "Error", level, "dummy scope: 
level") + + log := lf.NewLogger(testScope) + + log.Error("dummy") + assert.Containsf(t, logreadr(), "dummy", "ERROR for level %s", level) + + log.Warn("dummy") + assert.Zerof(t, loglenr(), "WARN for level %s", level) + + log.Info("dummy") + assert.Zerof(t, loglenr(), "INFO for level %s", level) + + log.Debug("dummy") + assert.Zerof(t, loglenr(), "DEBUG for level %s", level) + + log.Trace("dummy") + assert.Zerof(t, loglenr(), "TRACE for level %s", level) + }, + }, + { + name: "default-loglevel-warn-scope", + defaultLogLevel: "", // default is ERROR + scopeLogLevel: "WARN", + tester: func(t *testing.T, lf LoggerFactory) { + level := lf.GetLevel("all") + assert.Equal(t, "Error", level, "default scope: level") + + level = lf.GetLevel(testScope) + assert.Equal(t, "Warn", level, "dummy scope: level") + + log := lf.NewLogger(testScope) + + log.Error("dummy") + assert.Containsf(t, logreadr(), "dummy", "ERROR for level %s", level) + + log.Warn("dummy") + assert.Containsf(t, logreadr(), "dummy", "WARN for level %s", level) + + log.Info("dummy") + assert.Zerof(t, loglenr(), "INFO for level %s", level) + + log.Debug("dummy") + assert.Zerof(t, loglenr(), "DEBUG for level %s", level) + + log.Trace("dummy") + assert.Zerof(t, loglenr(), "TRACE for level %s", level) + }, + }, + { + name: "default-loglevel-info-scope", + defaultLogLevel: "", // default is ERROR + scopeLogLevel: "INFO", + tester: func(t *testing.T, lf LoggerFactory) { + level := lf.GetLevel("all") + assert.Equal(t, "Error", level, "default scope: level") + + level = lf.GetLevel(testScope) + assert.Equal(t, "Info", level, "dummy scope: level") + + log := lf.NewLogger(testScope) + + log.Error("dummy") + assert.Containsf(t, logreadr(), "dummy", "ERROR for level %s", level) + + log.Warn("dummy") + assert.Containsf(t, logreadr(), "dummy", "WARN for level %s", level) + + log.Info("dummy") + assert.Containsf(t, logreadr(), "dummy", "INFO for level %s", level) + + log.Debug("dummy") + assert.Zerof(t, loglenr(), "DEBUG for level %s", level) + + log.Trace("dummy") + assert.Zerof(t, loglenr(), "TRACE for level %s", level) + }, + }, + { + name: "default-loglevel-debug-scope", + defaultLogLevel: "", // default is ERROR + scopeLogLevel: "DEBUG", + tester: func(t *testing.T, lf LoggerFactory) { + level := lf.GetLevel("all") + assert.Equal(t, "Error", level, "default scope: level") + + level = lf.GetLevel(testScope) + assert.Equal(t, "Debug", level, "dummy scope: level") + + log := lf.NewLogger(testScope) + + log.Error("dummy") + assert.Containsf(t, logreadr(), "dummy", "ERROR for level %s", level) + + log.Warn("dummy") + assert.Containsf(t, logreadr(), "dummy", "WARN for level %s", level) + + log.Info("dummy") + assert.Containsf(t, logreadr(), "dummy", "INFO for level %s", level) + + log.Debug("dummy") + assert.Containsf(t, logreadr(), "dummy", "DEBUG for level %s", level) + + log.Trace("dummy") + assert.Zerof(t, loglenr(), "TRACE for level %s", level) + }, + }, + { + name: "default-loglevel-trace-scope", + defaultLogLevel: "", // default is ERROR + scopeLogLevel: "TRACE", + tester: func(t *testing.T, lf LoggerFactory) { + level := lf.GetLevel("all") + assert.Equal(t, "Error", level, "default scope: level") + + level = lf.GetLevel(testScope) + assert.Equal(t, "Trace", level, "dummy scope: level") + + log := lf.NewLogger(testScope) + + log.Error("dummy") + assert.Containsf(t, logreadr(), "dummy", "ERROR for level %s", level) + + log.Warn("dummy") + assert.Containsf(t, logreadr(), "dummy", "WARN for level %s", level) + + log.Info("dummy") + 
assert.Containsf(t, logreadr(), "dummy", "INFO for level %s", level) + + log.Debug("dummy") + assert.Containsf(t, logreadr(), "dummy", "DEBUG for level %s", level) + + log.Trace("dummy") + assert.Containsf(t, logreadr(), "dummy", "TRACE for level %s", level) + }, + }, + { + name: "override-loglevel-trace-scope", + defaultLogLevel: "all:TRACE", + scopeLogLevel: "ERROR", + tester: func(t *testing.T, lf LoggerFactory) { + level := lf.GetLevel("all") + assert.Equal(t, "Trace", level, "default scope: level") + + level = lf.GetLevel(testScope) + assert.Equal(t, "Error", level, "dummy scope: level") + + log := lf.NewLogger(testScope) + + log.Error("dummy") + assert.Containsf(t, logreadr(), "dummy", "ERROR for level %s", level) + + log.Warn("dummy") + assert.Zerof(t, loglenr(), "WARN for level %s", level) + + log.Info("dummy") + assert.Zerof(t, loglenr(), "INFO for level %s", level) + + log.Debug("dummy") + assert.Zerof(t, loglenr(), "DEBUG for level %s", level) + + log.Trace("dummy") + assert.Zerof(t, loglenr(), "TRACE for level %s", level) + }, + }, + { + name: "complex-loglevel-1", + defaultLogLevel: "all:TRACE", + scopeLogLevel: "TRACE", + prep: func(lf LoggerFactory) { + lf.SetLevel("all:TRACE,dummy-scope:ERROR") + }, + tester: func(t *testing.T, lf LoggerFactory) { + level := lf.GetLevel("all") + assert.Equal(t, "Trace", level, "default scope: level") + + level = lf.GetLevel(testScope) + assert.Equal(t, "Error", level, "dummy scope: level") + + log := lf.NewLogger(testScope) + + log.Error("dummy") + assert.Containsf(t, logreadr(), "dummy", "ERROR for level %s", level) + + log.Warn("dummy") + assert.Zerof(t, loglenr(), "WARN for level %s", level) + + log.Info("dummy") + assert.Zerof(t, loglenr(), "INFO for level %s", level) + + log.Debug("dummy") + assert.Zerof(t, loglenr(), "DEBUG for level %s", level) + + log.Trace("dummy") + assert.Zerof(t, loglenr(), "TRACE for level %s", level) + }, + }, + { + name: "complex-loglevel-2", + defaultLogLevel: "all:TRACE", + scopeLogLevel: "TRACE", + prep: func(lf LoggerFactory) { + lf.SetLevel("dummy-scope:DEBUG,nonExistentScope:TRACE,dummy-scope:ERROR,all:error,some-other-scope:TRACE") + }, + tester: func(t *testing.T, lf LoggerFactory) { + level := lf.GetLevel("all") + assert.Equal(t, "Error", level, "default scope: level") + + level = lf.GetLevel(testScope) + assert.Equal(t, "Error", level, "dummy scope: level") + + level = lf.GetLevel("some-other-scope") + assert.Equal(t, "Trace", level, "other scope: level") + + log := lf.NewLogger(testScope) + + log.Error("dummy") + assert.Containsf(t, logreadr(), "dummy", "ERROR for level %s", level) + + log.Warn("dummy") + assert.Zerof(t, loglenr(), "WARN for level %s", level) + + log.Info("dummy") + assert.Zerof(t, loglenr(), "INFO for level %s", level) + + log.Debug("dummy") + assert.Zerof(t, loglenr(), "DEBUG for level %s", level) + + log.Trace("dummy") + assert.Zerof(t, loglenr(), "TRACE for level %s", level) + }, + }, + { + name: "set-loglevel-for-newly-created-logger", + defaultLogLevel: "all:TRACE", + scopeLogLevel: "TRACE", + prep: func(lf LoggerFactory) { + lf.SetLevel("all:error,new-scope:DEBUG") + }, + tester: func(t *testing.T, lf LoggerFactory) { + level := lf.GetLevel("all") + assert.Equal(t, "Error", level, "default scope: level") + + level = lf.GetLevel(testScope) + assert.Equal(t, "Error", level, "dummy scope: level") + + level = lf.GetLevel("new-scope") + assert.Equal(t, "Debug", level, "new scope: level") + + log := lf.NewLogger("new-scope") + + level = lf.GetLevel("new-scope") + assert.Equal(t, 
"Debug", level, "new scope: level") + + log.Error("dummy") + assert.Containsf(t, logreadr(), "dummy", "ERROR for level %s", level) + + log.Warn("dummy") + assert.Containsf(t, logreadr(), "dummy", "WARN for level %s", level) + + log.Info("dummy") + assert.Containsf(t, logreadr(), "dummy", "INFO for level %s", level) + + log.Debug("dummy") + assert.Containsf(t, logreadr(), "dummy", "DEBUG for level %s", level) + + log.Trace("dummy") + assert.Zerof(t, loglenr(), "TRACE for level %s", level) + }, + }, +} + +func TestLogger(t *testing.T) { + lim := test.TimeOut(time.Second * 60) + defer lim.Stop() + + report := test.CheckRoutines(t) + defer report() + + for _, c := range loggerTests { + t.Run(c.name, func(t *testing.T) { + // t.Logf("-------------- Running test: %s -------------", c.name) + + // create + loggerFactory := NewLoggerFactory(c.defaultLogLevel).(*LeveledLoggerFactory) + loggerFactory.Writer = logBuffer + logreset() + + // create logger + _ = loggerFactory.NewLogger(testScope) + loggerFactory.SetLevel(fmt.Sprintf("%s:%s", testScope, c.scopeLogLevel)) + + // prepare + if c.prep != nil { + c.prep(loggerFactory) + } + + // test + c.tester(t, loggerFactory) + }) + } +} + +// rate-limiter tests + +type rateLimiterLoggerTestCase struct { + name, level string + limit rate.Limit + burst int + prep func(lf LoggerFactory) + tester func(t *testing.T, lf LoggerFactory) +} + +var rateLimitedLoggerTests = []rateLimiterLoggerTestCase{ + { + name: "rate-limited-logger-default", + limit: 1.0, + burst: 1, + tester: func(t *testing.T, lf LoggerFactory) { + level := lf.GetLevel(testScope) + assert.Equal(t, "Error", level, "other scope: level") + + log := lf.NewLogger(testScope) + + // only first call should succeed + log.Error("dummy") + assert.Contains(t, logreadr(), "dummy") + log.Error("dummy") + assert.Zerof(t, loglenr(), "suppressed") + log.Error("dummy") + assert.Zerof(t, loglenr(), "suppressed") + }, + }, + { + name: "rate-limited-logger-burst-2", + level: "all:INFO", + limit: 100.0, + burst: 2, + tester: func(t *testing.T, lf LoggerFactory) { + level := lf.GetLevel(testScope) + assert.Equal(t, "Info", level, "scope: level") + + log := lf.NewLogger(testScope) + + // first call should succeed + log.Error("dummy") + assert.Contains(t, logreadr(), "dummy") + log.Error("dummy") + assert.Contains(t, logreadr(), "dummy") + log.Info("dummy") + assert.Zerof(t, loglenr(), "suppressed") + log.Info("dummy") + assert.Zerof(t, loglenr(), "suppressed") + + // wait until we get another token + time.Sleep(25 * time.Millisecond) + + log.Error("dummy") + assert.Contains(t, logreadr(), "dummy") + log.Error("dummy") + assert.Contains(t, logreadr(), "dummy") + log.Error("dummy") + assert.Zerof(t, loglenr(), "suppressed") + log.Error("dummy") + assert.Zerof(t, loglenr(), "suppressed") + }, + }, + { + name: "rate-limited-logger-burst-4", + level: "all:INFO", + limit: 100.0, + burst: 4, + tester: func(t *testing.T, lf LoggerFactory) { + level := lf.GetLevel(testScope) + assert.Equal(t, "Info", level, "scope: level") + + log := lf.NewLogger(testScope) + + // only first 4 calls should succeed + log.Info("dummy") + assert.Contains(t, logreadr(), "dummy") + log.Info("dummy") + assert.Contains(t, logreadr(), "dummy") + log.Info("dummy") + assert.Contains(t, logreadr(), "dummy") + log.Info("dummy") + assert.Contains(t, logreadr(), "dummy") + log.Info("dummy") + assert.Zerof(t, loglenr(), "suppressed") + log.Info("dummy") + assert.Zerof(t, loglenr(), "suppressed") + + // wait until we get another token + time.Sleep(15 * 
time.Millisecond) + + log.Info("dummy") + assert.Contains(t, logread(), "dummy") + assert.Contains(t, logreadr(), "suppressed 2 log") + // consumed all tokens: these should be suppressed + log.Info("dummy") + assert.Zerof(t, loglenr(), "suppressed") + log.Info("dummy") + assert.Zerof(t, loglenr(), "suppressed") + log.Info("dummy") + assert.Zerof(t, loglenr(), "suppressed") + log.Info("dummy") + assert.Zerof(t, loglenr(), "suppressed") + }, + }, + { + name: "rate-limited-logger-global-rate-limit", + level: "all:INFO", + limit: 100.0, + burst: 1, + tester: func(t *testing.T, lf LoggerFactory) { + level := lf.GetLevel(testScope) + assert.Equal(t, "Info", level, "scope: level") + + log := lf.NewLogger(testScope) + + // only first call should succeed + log.Error("dummy") + assert.Contains(t, logreadr(), "dummy") + log.Error("dummy") + assert.Zerof(t, loglenr(), "suppressed") + log.Warn("dummy") + assert.Zerof(t, loglenr(), "suppressed") + log.Info("dummy") + assert.Zerof(t, loglenr(), "suppressed") + log.Debug("dummy") + assert.Zerof(t, loglenr(), "suppressed") + log.Trace("dummy") + assert.Zerof(t, loglenr(), "suppressed") + + // wait until we get another token + time.Sleep(15 * time.Millisecond) + + log.Error("dummy") + assert.Contains(t, logreadr(), "dummy") + log.Error("dummy") + assert.Zerof(t, loglenr(), "suppressed") + log.Warn("dummy") + assert.Zerof(t, loglenr(), "suppressed") + log.Info("dummy") + assert.Zerof(t, loglenr(), "suppressed") + log.Debug("dummy") + assert.Zerof(t, loglenr(), "suppressed") + log.Trace("dummy") + assert.Zerof(t, loglenr(), "suppressed") + }, + }, +} + +func TestRateLimitedLogger(t *testing.T) { + lim := test.TimeOut(time.Second * 60) + defer lim.Stop() + + report := test.CheckRoutines(t) + defer report() + + for _, c := range rateLimitedLoggerTests { + t.Run(c.name, func(t *testing.T) { + // t.Logf("-------------- Running test: %s -------------", c.name) + + // create + loggerFactory := NewRateLimitedLoggerFactory(NewLoggerFactory(c.level), c.limit, c.burst) + loggerFactory.Writer = logBuffer + logreset() + + // prepare + if c.prep != nil { + c.prep(loggerFactory) + } + + // t.Logf("%#v", loggerFactory) + // t.Logf("%#v", loggerFactory.ScopeLevels) + // t.Logf("%#v", logger) + + // test + c.tester(t, loggerFactory) + }) + } +} + +//nolint:golint,unused +func loglen() int { + return logBuffer.Len() +} + +func loglenr() int { + l := logBuffer.Len() + logBuffer.Reset() + return l +} + +//nolint:golint,unused +func logreset() { + logBuffer.Reset() +} + +func logread() string { + return logBuffer.String() +} + +func logreadr() string { + ret := logBuffer.String() + logBuffer.Reset() + return ret +} diff --git a/pkg/whipconn/config.go b/pkg/whipconn/config.go new file mode 100644 index 00000000..d750dc63 --- /dev/null +++ b/pkg/whipconn/config.go @@ -0,0 +1,38 @@ +package whipconn + +import ( + "fmt" + "hash/fnv" + "net/url" + + "github.com/pion/webrtc/v4" +) + +const ( + messageSize = 2048 + WhipEndpoint = "/whip" +) + +type Config struct { + ICEServers []webrtc.ICEServer + ICETransportPolicy webrtc.ICETransportPolicy + BearerToken, WHIPEndpoint string +} + +func makeURL(addr, endpoint string) *url.URL { + return &url.URL{ + Scheme: "http", + Host: addr, + Path: endpoint, + } +} + +func resourceHash(s string) string { + h := fnv.New32a() + h.Write([]byte(s)) + return fmt.Sprintf("/%d", h.Sum32()) +} + +func makeResourceURL(endpoint, id string) string { + return endpoint + id +} diff --git a/pkg/whipconn/dialer.go b/pkg/whipconn/dialer.go new file mode 100644 
index 00000000..76bea810 --- /dev/null +++ b/pkg/whipconn/dialer.go @@ -0,0 +1,253 @@ +// A simple WHIP client and server that implement a stream connection abstraction on top of a WebRTC data channel published via WHIP. +// Client adopted from https://github.com/ggarber/whip-go/ +package whipconn + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "time" + + "github.com/pion/datachannel" + "github.com/pion/logging" + "github.com/pion/webrtc/v4" +) + +var _ net.Conn = &DialerConn{} + +type Dialer struct { + config Config + api *webrtc.API + logger logging.LoggerFactory + log, connLog logging.LeveledLogger +} + +func NewDialer(config Config, logger logging.LoggerFactory) *Dialer { + e := webrtc.SettingEngine{LoggerFactory: logger} + e.DetachDataChannels() + + if config.WHIPEndpoint == "" { + config.WHIPEndpoint = WhipEndpoint + } + + return &Dialer{ + config: config, + api: webrtc.NewAPI(webrtc.WithSettingEngine(e), webrtc.WithMediaEngine(&webrtc.MediaEngine{})), + logger: logger, + log: logger.NewLogger("whip-dialer"), + connLog: logger.NewLogger("whip-conn"), + } +} + +func (d *Dialer) WithSettingEngine(e webrtc.SettingEngine) *Dialer { + e.DetachDataChannels() // make sure this is set + d.api = webrtc.NewAPI(webrtc.WithSettingEngine(e), webrtc.WithMediaEngine(&webrtc.MediaEngine{})) + return d +} + +func (d *Dialer) DialContext(ctx context.Context, addr string) (net.Conn, error) { + stopped := false + defer func() { stopped = true }() + peerConn, err := d.api.NewPeerConnection(webrtc.Configuration{ + ICEServers: d.config.ICEServers, + ICETransportPolicy: d.config.ICETransportPolicy, + }) + if err != nil { + return nil, fmt.Errorf("failed to create PeerConnection: %w", err) + } + + d.log.Trace("Creating DataChannel") + dataChannel, err := peerConn.CreateDataChannel("whipconn", nil) + if err != nil { + return nil, fmt.Errorf("failed to create DataChannel: %w", err) + } + + conn := &DialerConn{ + dialer: d, + addr: addr, + peerConn: peerConn, + log: d.connLog, + } + + connCh := make(chan any, 1) + defer close(connCh) + errCh := make(chan error) + defer close(errCh) + + // Register channel opening handling + dataChannel.OnOpen(func() { + conn.log.Debugf("Creating new connection in data channel %s-%d", + dataChannel.Label(), dataChannel.ID()) + + raw, err := dataChannel.Detach() + if err != nil { + errCh <- fmt.Errorf("failed to detach DataChannel: %w", err) + } + conn.dataConn = raw + + connCh <- struct{}{} + }) + + // If PeerConnection is closed, close the client + peerConn.OnConnectionStateChange(func(p webrtc.PeerConnectionState) { + conn.log.Infof("Connection state has changed: %s", p) + if p == webrtc.PeerConnectionStateFailed || p == webrtc.PeerConnectionStateClosed { + if stopped { + conn.Close() //nolint + } else { + errCh <- fmt.Errorf("ICE connection terminated with state: %s", p.String()) + } + } + }) + + offer, err := peerConn.CreateOffer(nil) + if err != nil { + conn.Close() //nolint + return nil, fmt.Errorf("failed to create offer: %w", err) + } + + err = peerConn.SetLocalDescription(offer) + if err != nil { + conn.Close() //nolint + return nil, fmt.Errorf("failed to set local SDP (Offer): %w", err) + } + + // Block until ICE Gathering is complete, disabling trickle ICE + // we do this because we only can exchange one signaling message + // in a production application you should exchange ICE Candidates via OnICECandidate + gatherComplete := webrtc.GatheringCompletePromise(peerConn) + <-gatherComplete + + d.log.Debugf("ICE gathering 
complete: %s", peerConn.LocalDescription().SDP) + + sdp := []byte(peerConn.LocalDescription().SDP) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, + makeURL(addr, d.config.WHIPEndpoint).String(), bytes.NewBuffer(sdp)) + if err != nil { + conn.Close() //nolint + return nil, fmt.Errorf("unexpected error building HTTP request: %w", err) + } + + req.Header.Add("Content-Type", "application/sdp") + if d.config.BearerToken != "" { + req.Header.Add("Authorization", "Bearer "+d.config.BearerToken) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + conn.Close() //nolint + return nil, fmt.Errorf("failed to POST WHIP request: %w", err) + } + + d.log.Tracef("Received POST response with status code: %d", resp.StatusCode) + + if resp.StatusCode != 201 { + conn.Close() //nolint + return nil, fmt.Errorf("POST request returned invalid status: %d", resp.StatusCode) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + conn.Close() //nolint + return nil, fmt.Errorf("failed to read HTTP response body: %w", err) + } + defer resp.Body.Close() + + resourceId := resp.Header.Get("Location") + if resourceId == "" { + conn.Close() //nolint + return nil, errors.New("empty resource id in POST response") + } + conn.resourceId = resourceId + + answer := webrtc.SessionDescription{} + answer.Type = webrtc.SDPTypeAnswer + answer.SDP = string(body) + + err = peerConn.SetRemoteDescription(answer) + if err != nil { + conn.Close() //nolint + return nil, fmt.Errorf("failed to set remote SDP (Answer): %w", err) + } + + // Waiting for the connection or errors surfaced from the callbacks + select { + case <-connCh: + d.log.Infof("Creating new connection %s", conn.String()) + return conn, nil + case err := <-errCh: + conn.Close() + return nil, err + case <-ctx.Done(): + conn.Close() + return nil, ctx.Err() + } +} + +type DialerConn struct { + dialer *Dialer + addr string + peerConn *webrtc.PeerConnection + dataConn datachannel.ReadWriteCloser + resourceId string + closed bool + log logging.LeveledLogger +} + +func (c *DialerConn) Close() error { + if c.closed { + return nil + } + c.closed = true + + c.log.Trace("Closing WHIP client connection") + + uri := makeURL(c.addr, makeResourceURL(c.dialer.config.WHIPEndpoint, c.resourceId)) + req, err := http.NewRequest("DELETE", uri.String(), nil) + if err != nil { + return fmt.Errorf("unexpected error building http request: %w", err) + } + if c.dialer.config.BearerToken != "" { + req.Header.Add("Authorization", "Bearer "+c.dialer.config.BearerToken) + } + + if _, err = http.DefaultClient.Do(req); err != nil { + return fmt.Errorf("failed WHIP DELETE request: %w", err) + } + + // Close the peerconnection + if err := c.peerConn.Close(); err != nil { + return fmt.Errorf("failed to close PeerConnection: %w", err) + } + + return nil +} + +func (c *DialerConn) Read(b []byte) (int, error) { + return c.dataConn.Read(b) +} + +func (c *DialerConn) Write(b []byte) (int, error) { + return c.dataConn.Write(b) +} + +// TODO: implement +func (c *DialerConn) LocalAddr() net.Addr { return nil } +func (c *DialerConn) RemoteAddr() net.Addr { return nil } +func (c *DialerConn) SetDeadline(t time.Time) error { return nil } +func (c *DialerConn) SetReadDeadline(t time.Time) error { return nil } +func (c *DialerConn) SetWriteDeadline(t time.Time) error { return nil } + +// String returns a unique identifier for the connection based on the underlying signaling connection. 
+func (c *DialerConn) String() string { + return c.resourceId +} + +// GetPeerConnection returns the PeerConnection underlying the data channnel. Useful for checking ICE candidate status. +func (c *DialerConn) GetPeerConnection() *webrtc.PeerConnection { + return c.peerConn +} diff --git a/pkg/whipconn/listener.go b/pkg/whipconn/listener.go new file mode 100644 index 00000000..398253cf --- /dev/null +++ b/pkg/whipconn/listener.go @@ -0,0 +1,390 @@ +// A simple WHIP client and server that implement a stream connection abstraction on top of a WebRTC data channel published via WHIP. +// Adopted from https://github.com/pion/webrtc/tree/master/examples/whip-whep +package whipconn + +import ( + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "sync" + "time" + + "github.com/pion/datachannel" + "github.com/pion/logging" + "github.com/pion/webrtc/v4" +) + +var _ net.Listener = &Listener{} +var _ net.Conn = &ListenerConn{} + +type Listener struct { + api *webrtc.API + config Config + addr string + server *http.Server + errCh chan error + connCh chan *ListenerConn + conns map[string]*ListenerConn + lock sync.Mutex + logger logging.LoggerFactory + log, connLog logging.LeveledLogger + closed bool +} + +func NewListener(addr string, config Config, logger logging.LoggerFactory) (*Listener, error) { + e := webrtc.SettingEngine{} + e.DetachDataChannels() + + if config.WHIPEndpoint == "" { + config.WHIPEndpoint = WhipEndpoint + } + + l := &Listener{ + addr: addr, + config: config, + api: webrtc.NewAPI(webrtc.WithSettingEngine(e), webrtc.WithMediaEngine(&webrtc.MediaEngine{})), + errCh: make(chan error, 5), + connCh: make(chan *ListenerConn, 128), + conns: map[string]*ListenerConn{}, + logger: logger, + log: logger.NewLogger("whip-listener"), + connLog: logger.NewLogger("whip-conn"), + } + + mux := http.NewServeMux() + mux.HandleFunc("GET /config", l.configGetHandler) + mux.HandleFunc("POST /config", l.configPostHandler) + + deletePatternWithoutSlash := fmt.Sprintf("DELETE %s/{resourceId}", config.WHIPEndpoint) + mux.HandleFunc(deletePatternWithoutSlash, l.whipDeleteHandler) + deletePatternWithSlash := fmt.Sprintf("DELETE %s/{resourceId}/{$}", config.WHIPEndpoint) + mux.HandleFunc(deletePatternWithSlash, l.whipDeleteHandler) + + requestPatternWithoutSlash := fmt.Sprintf("POST %s", config.WHIPEndpoint) + mux.HandleFunc(requestPatternWithoutSlash, l.whipRequestHandler) + requestPatternWithSlash := fmt.Sprintf("POST %s/{$}", config.WHIPEndpoint) + mux.HandleFunc(requestPatternWithSlash, l.whipRequestHandler) + + c, err := net.Listen("tcp", addr) + if err != nil { + return nil, fmt.Errorf("failed to open WHIP server socket on %s: %w", addr, err) + } + l.server = &http.Server{Addr: addr, Handler: mux} + go func() { + defer close(l.errCh) + defer close(l.connCh) + + if err := l.server.Serve(c); err != nil { + l.errCh <- err + } + }() + + return l, nil +} + +func (l *Listener) Accept() (net.Conn, error) { + l.log.Trace("Accept: waiting for new connection") + + select { + case err := <-l.errCh: + l.log.Tracef("Accept error: %s", err.Error()) + return nil, err + case conn := <-l.connCh: + l.log.Info("Accept: New connection") + + l.lock.Lock() + l.conns[conn.String()] = conn + l.lock.Unlock() + + return conn, nil + } +} + +func (l *Listener) Close() error { + if l.closed { + return nil + } + l.closed = true + + l.log.Tracef("Closing WHIP server listener at address %s", l.addr) + + // Send an error to stop any Accept() calls running + l.errCh <- net.ErrClosed + + return l.server.Close() +} + +func (l 
*Listener) configGetHandler(w http.ResponseWriter, r *http.Request) { + l.log.Infof("New Config GET request from client %s", r.RemoteAddr) + + if r.Header.Get("Content-Type") != "application/json" { + err := fmt.Errorf("Expected Content-Type:application/json, got %q", r.Header.Get("Content-Type")) + l.log.Error(err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + if err := json.NewEncoder(w).Encode(l.config); err != nil { + l.log.Errorf("Failed to encode config %#v for client %s: %s", + l.config, r.RemoteAddr, err.Error()) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } +} + +// Note: is is unsafe to update the whip endpoint without restarting the listener +func (l *Listener) configPostHandler(w http.ResponseWriter, r *http.Request) { + l.log.Infof("New Config POST request from client %s", r.RemoteAddr) + + if r.Header.Get("Content-Type") != "application/json" { + err := fmt.Errorf("Expected Content-Type:application/json, got %q", r.Header.Get("Content-Type")) + l.log.Error(err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + var config Config + if err := json.NewDecoder(r.Body).Decode(&config); err != nil { + l.log.Errorf("Failed to decode config request %#v from client %s: %s", + config, r.RemoteAddr, err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + l.config.ICEServers = config.ICEServers + if config.ICETransportPolicy != l.config.ICETransportPolicy { + l.config.ICETransportPolicy = config.ICETransportPolicy + } + if config.BearerToken != "" { + l.config.BearerToken = config.BearerToken + } + if config.WHIPEndpoint != "" { + l.log.Debugf("Ignoring WHIP endpoint in received config: %s", config.WHIPEndpoint) + } + + l.log.Infof("Using new config: %#v", l.config) +} + +func (l *Listener) whipRequestHandler(w http.ResponseWriter, r *http.Request) { + l.log.Infof("New WHIP POST request from client %s", r.RemoteAddr) + + // Check bearer token + if l.config.BearerToken != "" { + if token := r.Header.Get("Authorization"); token != "Bearer "+l.config.BearerToken { + err := fmt.Errorf("Unauthorized WHIP request from client %s", r.RemoteAddr) + l.errCh <- err + http.Error(w, err.Error(), http.StatusUnauthorized) + return + } + } + + if ctype := r.Header.Get("Content-Type"); ctype != "application/sdp" { + err := fmt.Errorf("invalid WHIP request from client %s, expected Content-Type=application/sdp", + r.RemoteAddr) + l.errCh <- err + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + // Read the offer from HTTP Request + offer, err := io.ReadAll(r.Body) + defer r.Body.Close() + if err != nil { + l.errCh <- fmt.Errorf("failed to read WHIP request body: %w", err) + return + } + + conn := &ListenerConn{ + listener: l, + log: l.connLog, + } + + conn.log.Tracef("Creating PeerConnection for client %s", r.RemoteAddr) + peerConn, err := l.api.NewPeerConnection(webrtc.Configuration{ + ICEServers: l.config.ICEServers, + ICETransportPolicy: l.config.ICETransportPolicy, + }) + if err != nil { + l.errCh <- fmt.Errorf("failed to create a PeerConnection: %w", err) + return + } + conn.PeerConn = peerConn + + peerConn.OnConnectionStateChange(func(p webrtc.PeerConnectionState) { + conn.log.Debugf("PeerConnection state for client %s has changed: %s", r.RemoteAddr, p.String()) + if p == webrtc.PeerConnectionStateFailed || p == webrtc.PeerConnectionStateClosed { + conn.Close() // nolint:errcheck + return + } + }) + + peerConn.OnDataChannel(func(dataChannel *webrtc.DataChannel) { + 
conn.log.Tracef("New data channel %s-%d", dataChannel.Label(), dataChannel.ID()) + + dataChannel.OnOpen(func() { + conn.log.Tracef("Data channel %s-%d open for client %s", dataChannel.Label(), + dataChannel.ID(), r.RemoteAddr) + conn.dataChan = dataChannel + + raw, dErr := dataChannel.Detach() + if dErr != nil { + l.errCh <- fmt.Errorf("failed to detach DataChannel: %w", err) + return + } + conn.DataConn = raw + conn.started = true + + l.log.Infof("Creating new connection for client %s", r.RemoteAddr) + l.connCh <- conn + }) + }) + + conn.log.Tracef("Set remote SDP (Offer) for client %s", r.RemoteAddr) + if err := peerConn.SetRemoteDescription(webrtc.SessionDescription{ + Type: webrtc.SDPTypeOffer, + SDP: string(offer), + }); err != nil { + l.errCh <- fmt.Errorf("failed to set remote SDP (Offer): %w", err) + return + } + + // Create channel that is blocked until ICE Gathering is complete + gatherComplete := webrtc.GatheringCompletePromise(peerConn) + + // Create answer + answer, err := peerConn.CreateAnswer(nil) + if err != nil { + l.errCh <- fmt.Errorf("failed to create SDP (Answer): %w", err) + return + } else if err = peerConn.SetLocalDescription(answer); err != nil { + l.errCh <- fmt.Errorf("failed to set local SDP (Answer): %w", err) + return + } + + // Block until ICE Gathering is complete, disabling trickle ICE + // we do this because we only can exchange one signaling message + // in a production application you should exchange ICE Candidates via OnICECandidate + <-gatherComplete + + sdp := peerConn.LocalDescription().SDP + l.log.Debugf("ICE gathering complete: %s", sdp) + + // WHIP expects a Location header: the hash of our local SDP + resourceId := resourceHash(sdp) + conn.ResourceUrl = makeResourceURL(l.config.WHIPEndpoint, resourceId) + w.Header().Add("Location", resourceId) + + // WHIP+WHEP expects a HTTP Status Code of 201 + w.WriteHeader(http.StatusCreated) + + // Write Answer with Candidates as HTTP Response + fmt.Fprint(w, peerConn.LocalDescription().SDP) //nolint: errcheck +} + +func (l *Listener) whipDeleteHandler(w http.ResponseWriter, r *http.Request) { + l.log.Infof("New WHIP DELETE request from client %s for resource id %q", + r.RemoteAddr, r.PathValue("resourceId")) + + resourceId := r.PathValue("resourceId") + if resourceId == "" { + http.Error(w, "Empty resource id", http.StatusBadRequest) + return + } + + l.lock.Lock() + conn, ok := l.conns[resourceId] + l.lock.Unlock() + + if !ok { + http.Error(w, "Unknown resource id", http.StatusNotFound) + return + } + + l.log.Infof("Deleting connection with resource id %q", resourceId) + + if err := conn.Close(); err != nil { + http.Error(w, fmt.Sprintf("Failed to close connection: %s", err.Error()), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, fmt.Sprintf("Resource %s deleted", resourceId)) //nolint +} + +func (_ *Listener) Addr() net.Addr { + return nil +} + +func (l *Listener) GetConns() []*ListenerConn { + l.lock.Lock() + defer l.lock.Unlock() + ret := []*ListenerConn{} + for _, c := range l.conns { + ret = append(ret, c) + } + return ret +} + +type ListenerConn struct { + ResourceUrl string + listener *Listener + PeerConn *webrtc.PeerConnection + dataChan *webrtc.DataChannel + DataConn datachannel.ReadWriteCloser + started, closed bool + log logging.LeveledLogger +} + +func (c *ListenerConn) Close() error { + c.log.Tracef("Closing WHIP listener connection %s", c.String()) + + if c.closed { + return nil + } + c.closed = true + + // Close the datachannel + var err error + 
if c.dataChan != nil && c.dataChan.ReadyState() == webrtc.DataChannelStateOpen { + if err = c.DataConn.Close(); err != nil { + c.log.Debugf("Error closing DataChannel: %s", err.Error()) + } + } + + // Close the peer connection + err = c.PeerConn.Close() + if err != nil { + c.log.Debugf("Error closing PeerConnection: %s", err.Error()) + } + + if c.started { + c.listener.lock.Lock() + delete(c.listener.conns, c.String()) + c.listener.lock.Unlock() + } + + // Return the last error + return err +} + +func (c *ListenerConn) Read(b []byte) (int, error) { + return c.DataConn.Read(b) +} + +func (c *ListenerConn) Write(b []byte) (int, error) { + return c.DataConn.Write(b) +} + +// TODO: implement +func (c *ListenerConn) LocalAddr() net.Addr { return nil } +func (c *ListenerConn) RemoteAddr() net.Addr { return nil } +func (c *ListenerConn) SetDeadline(t time.Time) error { return nil } +func (c *ListenerConn) SetReadDeadline(t time.Time) error { return nil } +func (c *ListenerConn) SetWriteDeadline(t time.Time) error { return nil } + +// String returns a unique identifier for the connection based on the underlying signaling connection. +func (c *ListenerConn) String() string { + return c.ResourceUrl +} diff --git a/pkg/whipconn/whipconn_test.go b/pkg/whipconn/whipconn_test.go new file mode 100644 index 00000000..2befaf4b --- /dev/null +++ b/pkg/whipconn/whipconn_test.go @@ -0,0 +1,374 @@ +package whipconn + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net" + "net/http" + "sync" + "testing" + "time" + + "github.com/pion/logging" + "github.com/pion/webrtc/v4" + "github.com/stretchr/testify/assert" + + slogger "github.com/l7mp/stunner/pkg/logger" +) + +var ( + testerLogLevel = "all:ERROR" + // testerLogLevel = "all:TRACE" + // testerLogLevel = "all:INFO" + addr = "localhost:12345" + timeout = 5 * time.Second + interval = 50 * time.Millisecond + defaultConfig = Config{BearerToken: "whiptoken"} + logger logging.LoggerFactory = slogger.NewLoggerFactory(testerLogLevel) + log logging.LeveledLogger = logger.NewLogger("test") +) + +func echoTest(t *testing.T, conn net.Conn, content string) { + t.Helper() + + n, err := conn.Write([]byte(content)) + assert.NoError(t, err) + assert.Equal(t, len(content), n) + + buf := make([]byte, 2048) + n, err = conn.Read(buf) + assert.NoError(t, err) + assert.Equal(t, content, string(buf[:n])) +} + +var testerTestCases = []struct { + name string + config *Config + tester func(t *testing.T, ctx context.Context, l *Listener) +}{ + { + name: "Basic connectivity", + tester: func(t *testing.T, ctx context.Context, l *Listener) { + log.Debug("Creating dialer") + d := NewDialer(defaultConfig, logger) + assert.NotNil(t, d) + + log.Debug("Dialing") + clientConn, err := d.DialContext(ctx, addr) + assert.NoError(t, err) + + log.Debug("Echo test round 1") + echoTest(t, clientConn, "test1") + log.Debug("Echo test round 2") + echoTest(t, clientConn, "test2") + + assert.NoError(t, clientConn.Close(), "client conn close") + }, + }, { + name: "Invalid bearer token refused", + tester: func(t *testing.T, ctx context.Context, l *Listener) { + log.Debug("Creating dialer") + d := NewDialer(Config{BearerToken: "dummy-token"}, logger) + assert.NotNil(t, d) + + log.Debug("Dialing") + _, err := d.DialContext(ctx, addr) + assert.Error(t, err) + }, + }, { + name: "Empty bearer token accepted", + config: &Config{BearerToken: ""}, + tester: func(t *testing.T, ctx context.Context, l *Listener) { + log.Debug("Creating dialer") + config := defaultConfig + config.BearerToken = "" + 
d := NewDialer(config, logger) + assert.NotNil(t, d) + + log.Debug("Dialing") + clientConn, err := d.DialContext(ctx, addr) + assert.NoError(t, err) + + log.Debug("Echo test round 1") + echoTest(t, clientConn, "test1") + log.Debug("Echo test round 2") + echoTest(t, clientConn, "test2") + + assert.NoError(t, clientConn.Close(), "client conn close") + }, + }, { + name: "Closing dialer does not close client connection", + tester: func(t *testing.T, serverCtx context.Context, l *Listener) { + // a new context for the dialer + dialerCtx, dialerCancel := context.WithCancel(context.Background()) + + log.Debug("Creating dialer") + d := NewDialer(defaultConfig, logger) + assert.NotNil(t, d) + + log.Debug("Dialing") + clientConn, err := d.DialContext(dialerCtx, addr) + assert.NoError(t, err) + + log.Debug("Echo test round 1") + echoTest(t, clientConn, "test1") + + log.Debug("Closing dialer") + dialerCancel() + + log.Debug("Echo test round 2") + echoTest(t, clientConn, "test2") + }, + }, { + name: "Client side close closes server", + tester: func(t *testing.T, serverCtx context.Context, l *Listener) { + log.Debug("Creating dialer") + d := NewDialer(defaultConfig, logger) + assert.NotNil(t, d) + + log.Debug("Dialing") + clientConn, err := d.DialContext(serverCtx, addr) + assert.NoError(t, err) + + assert.Eventually(t, func() bool { return len(l.conns) == 1 }, timeout, interval) + + log.Debug("Closing client connection") + assert.NoError(t, clientConn.Close()) + + // should close the server conn too + assert.Eventually(t, func() bool { return len(l.conns) == 0 }, timeout, interval) + }, + }, { + name: "Server side close closes client", + tester: func(t *testing.T, serverCtx context.Context, l *Listener) { + clientCtx, clientCancel := context.WithCancel(context.Background()) + defer clientCancel() + + log.Debug("Creating dialer") + d := NewDialer(defaultConfig, logger) + assert.NotNil(t, d) + + log.Debug("Dialing") + clientConn, err := d.DialContext(clientCtx, addr) + assert.NoError(t, err) + + assert.Eventually(t, func() bool { return len(l.conns) == 1 }, timeout, interval) + + log.Debug("Closing server connections") + for _, lConn := range l.GetConns() { + assert.NoError(t, lConn.Close()) + } + + assert.Eventually(t, func() bool { return len(l.conns) == 0 }, timeout, interval) + + // should close the client conn too + assert.Eventually(t, func() bool { return clientConn.(*DialerConn).closed == true }, timeout, interval) + }, + }, { + name: "Multiple connections", + tester: func(t *testing.T, ctx context.Context, l *Listener) { + log.Debug("Creating dialer") + d := NewDialer(defaultConfig, logger) + assert.NotNil(t, d) + + log.Debug("Dialing: creating 5 connections") + var wg sync.WaitGroup + wg.Add(5) + connChan := make(chan net.Conn, 5) + for i := 0; i < 5; i++ { + go func() { + defer wg.Done() + + clientConn, err := d.DialContext(ctx, addr) + assert.NoError(t, err) + + log.Debug("Echo test round 1") + echoTest(t, clientConn, "test1111") + + log.Debug("Echo test round 2") + echoTest(t, clientConn, "test2222") + + connChan <- clientConn + }() + } + + wg.Wait() + close(connChan) + + assert.Eventually(t, func() bool { return len(l.conns) == 5 }, timeout, interval) + + for c := range connChan { + c.Close() + } + + assert.Eventually(t, func() bool { return len(l.conns) == 0 }, timeout, interval) + }, + }, { + name: "Closing invalid resource fails", + tester: func(t *testing.T, ctx context.Context, l *Listener) { + uri := fmt.Sprintf("http://%s/whip/dummy-id", addr) + req, err := http.NewRequest("DELETE", 
uri, nil)
+			assert.NoError(t, err)
+			req.Header.Add("Authorization", "Bearer "+defaultConfig.BearerToken)
+
+			r, err := http.DefaultClient.Do(req)
+			assert.NoError(t, err)
+			assert.Equal(t, http.StatusNotFound, r.StatusCode)
+		},
+	}, {
+		name:   "Connecting with custom path",
+		config: &Config{WHIPEndpoint: "/custompath"},
+		tester: func(t *testing.T, ctx context.Context, l *Listener) {
+			log.Debug("Creating dialer")
+			d := NewDialer(Config{WHIPEndpoint: "/custompath"}, logger)
+			assert.NotNil(t, d)
+
+			log.Debug("Dialing")
+			clientConn, err := d.DialContext(ctx, addr)
+			assert.NoError(t, err)
+
+			log.Debug("Echo test round 1")
+			echoTest(t, clientConn, "test1")
+			log.Debug("Echo test round 2")
+			echoTest(t, clientConn, "test2")
+
+			assert.NoError(t, clientConn.Close(), "client conn close")
+		},
+	}, {
+		name: "Failed datachannel connection fails Dial",
+		tester: func(t *testing.T, ctx context.Context, l *Listener) {
+			log.Debug("Creating dialer")
+			// empty ICE servers and forcing relay policy will fail the dialer
+			d := NewDialer(Config{
+				ICEServers:         []webrtc.ICEServer{},
+				ICETransportPolicy: webrtc.ICETransportPolicyRelay,
+				BearerToken:        "whiptoken",
+			}, logger)
+			assert.NotNil(t, d)
+
+			// set aggressive timeouts
+			e := webrtc.SettingEngine{}
+			e.SetICETimeouts(500*time.Millisecond, 1000*time.Millisecond, time.Second)
+			d.WithSettingEngine(e)
+
+			log.Debug("Dialing")
+			_, err := d.DialContext(ctx, addr)
+			assert.Error(t, err)
+		},
+	},
+}
+
+func TestTesterConn(t *testing.T) {
+	for _, c := range testerTestCases {
+		var l *Listener
+
+		t.Run(c.name, func(t *testing.T) {
+			log.Infof("--------------------- %s ----------------------", c.name)
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
+			config := defaultConfig
+			if c.config != nil {
+				config = *c.config
+			}
+
+			log.Debug("Creating listener")
+			listener, err := NewListener(addr, config, logger)
+			assert.NoError(t, err)
+			l = listener
+			assert.NotNil(t, l)
+
+			log.Debug("Creating echo services")
+			go func() {
+				for {
+					conn, err := l.Accept()
+					if err != nil {
+						return
+					}
+
+					log.Debug("Accepting server connection")
+
+					// readloop
+					go func() {
+						buf := make([]byte, 100)
+						for {
+							n, err := conn.Read(buf)
+							if err != nil {
+								return
+							}
+
+							_, err = conn.Write(buf[:n])
+							assert.NoError(t, err)
+						}
+					}()
+				}
+			}()
+
+			c.tester(t, ctx, l)
+
+			l.Close() //nolint
+		})
+	}
+}
+
+func TestConfigEndpoint(t *testing.T) {
+	log.Debug("Creating listener")
+	l, err := NewListener(addr, defaultConfig, logger)
+	assert.NoError(t, err)
+	assert.NotNil(t, l)
+
+	uri := "http://" + addr + "/config"
+	req, err := http.NewRequest(http.MethodGet, uri, nil)
+	assert.NoError(t, err)
+	req.Header.Add("Content-Type", "application/json")
+	res, err := http.DefaultClient.Do(req)
+	assert.NoError(t, err)
+
+	config := Config{}
+	err = json.NewDecoder(res.Body).Decode(&config)
+	assert.NoError(t, err)
+	assert.Equal(t, defaultConfig.ICEServers, config.ICEServers)
+	assert.Equal(t, defaultConfig.ICETransportPolicy, config.ICETransportPolicy)
+	assert.Equal(t, defaultConfig.BearerToken, config.BearerToken)
+	assert.Equal(t, WhipEndpoint, config.WHIPEndpoint)
+
+	ss := []webrtc.ICEServer{{
+		URLs:       []string{"a", "b"},
+		Username:   "test-user-1",
+		Credential: "test-passwd-1",
+	}, {
+		URLs:       []string{"c", "d"},
+		Username:   "test-user-2",
+		Credential: "test-passwd-2",
+	}}
+
+	config = Config{
+		ICEServers:         ss,
+		ICETransportPolicy: webrtc.ICETransportPolicyRelay,
+		BearerToken:        "some-token",
+		WHIPEndpoint:       "will-be-ignored",
+	}
+
+	b, err := json.Marshal(config)
+	assert.NoError(t, err)
+	_, err = http.Post(uri, "application/json", bytes.NewReader(b))
+	assert.NoError(t, err)
+
+	req, err = http.NewRequest(http.MethodGet, uri, nil)
+	assert.NoError(t, err)
+	req.Header.Add("Content-Type", "application/json")
+	res, err = http.DefaultClient.Do(req)
+	assert.NoError(t, err)
+
+	newConfig := Config{}
+	err = json.NewDecoder(res.Body).Decode(&newConfig)
+	assert.NoError(t, err)
+	assert.Equal(t, config.ICEServers, newConfig.ICEServers)
+	assert.Equal(t, config.ICETransportPolicy, newConfig.ICETransportPolicy)
+	assert.Equal(t, config.BearerToken, newConfig.BearerToken)
+	assert.Equal(t, WhipEndpoint, newConfig.WHIPEndpoint)
+
+	l.Close() //nolint
+}
diff --git a/reconcile.go b/reconcile.go
index 6a41143b..20c4fa5b 100644
--- a/reconcile.go
+++ b/reconcile.go
@@ -4,23 +4,22 @@ import (
 	"fmt"
 
 	"github.com/l7mp/stunner/internal/object"
-
-	"github.com/l7mp/stunner/pkg/apis/v1alpha1"
+	stnrv1 "github.com/l7mp/stunner/pkg/apis/v1"
 )
 
 // Reconcile handles updates to the STUNner configuration. Some updates are destructive: in this
 // case the returned error contains the names of the objects (usually, listeners) that were
 // restarted during reconciliation (see the documentation of the corresponding STUNner objects for
 // when STUNner may restart after a reconciliation). Reconcile returns nil if no objects were
-// restarted, v1alpha1.ErrRestarted to indicate that a shutdown-restart cycle was performed for at
+// restarted, v1.ErrRestarted to indicate that a shutdown-restart cycle was performed for at
 // least one internal object (usually, a listener) for the new config (unless DryRun is enabled),
 // and an error if an error has occurred during reconciliation, in which case it will rollback the
 // last working configuration (unless SuppressRollback is on).
-func (s *Stunner) Reconcile(req v1alpha1.StunnerConfig) error { +func (s *Stunner) Reconcile(req *stnrv1.StunnerConfig) error { return s.reconcileWithRollback(req, false) } -func (s *Stunner) reconcileWithRollback(req v1alpha1.StunnerConfig, inRollback bool) error { +func (s *Stunner) reconcileWithRollback(req *stnrv1.StunnerConfig, inRollback bool) error { var errFinal error new, deleted, changed := 0, 0, 0 @@ -28,13 +27,13 @@ func (s *Stunner) reconcileWithRollback(req v1alpha1.StunnerConfig, inRollback b return err } - s.log.Debugf("reconciling STUNner for config: %s ", req.String()) + s.log.Debugf("Reconciling STUNner for config: %s ", req.String()) rollback := s.GetConfig() toBeStarted, toBeRestarted := []object.Object{}, []object.Object{} // admin - adminState, err := s.adminManager.PrepareReconciliation([]v1alpha1.Config{&req.Admin}, &req) + adminState, err := s.adminManager.PrepareReconciliation([]stnrv1.Config{&req.Admin}, req) if err != nil { return fmt.Errorf("error preparing reconciliation for admin config: %s", err.Error()) @@ -45,7 +44,7 @@ func (s *Stunner) reconcileWithRollback(req v1alpha1.StunnerConfig, inRollback b deleted += len(adminState.DeletedJobQueue) // auth - authState, err := s.authManager.PrepareReconciliation([]v1alpha1.Config{&req.Auth}, &req) + authState, err := s.authManager.PrepareReconciliation([]stnrv1.Config{&req.Auth}, req) if err != nil { return fmt.Errorf("error preparing reconciliation for auth config: %s", err.Error()) @@ -56,11 +55,11 @@ func (s *Stunner) reconcileWithRollback(req v1alpha1.StunnerConfig, inRollback b deleted += len(authState.DeletedJobQueue) // listener - lconf := make([]v1alpha1.Config, len(req.Listeners)) + lconf := make([]stnrv1.Config, len(req.Listeners)) for i := range req.Listeners { lconf[i] = &(req.Listeners[i]) } - listenerState, err := s.listenerManager.PrepareReconciliation(lconf, &req) + listenerState, err := s.listenerManager.PrepareReconciliation(lconf, req) if err != nil { return fmt.Errorf("error preparing reconciliation for listener config: %s", err.Error()) } @@ -70,11 +69,11 @@ func (s *Stunner) reconcileWithRollback(req v1alpha1.StunnerConfig, inRollback b deleted += len(listenerState.DeletedJobQueue) // cluster - cconf := make([]v1alpha1.Config, len(req.Clusters)) + cconf := make([]stnrv1.Config, len(req.Clusters)) for i := range req.Clusters { cconf[i] = &(req.Clusters[i]) } - clusterState, err := s.clusterManager.PrepareReconciliation(cconf, &req) + clusterState, err := s.clusterManager.PrepareReconciliation(cconf, req) if err != nil { return fmt.Errorf("error preparing reconciliation for cluster config: %s", err.Error()) } @@ -86,7 +85,7 @@ func (s *Stunner) reconcileWithRollback(req v1alpha1.StunnerConfig, inRollback b // find all objects (listeners) to be restarted and stop each if !s.dryRun { if err := s.stop(toBeRestarted); err != nil { - s.log.Errorf("could not stop object: %s", err.Error()) + s.log.Errorf("Could not stop object: %s", err.Error()) errFinal = err if !inRollback { goto rollback @@ -95,13 +94,13 @@ func (s *Stunner) reconcileWithRollback(req v1alpha1.StunnerConfig, inRollback b } } - s.log.Tracef("reconciliation preparation ready") + s.log.Tracef("Reconciliation preparation ready") // finish reconciliation // admin err = s.adminManager.FinishReconciliation(adminState) if err != nil { - s.log.Errorf("could not reconcile admin config: %s", err.Error()) + s.log.Errorf("Could not reconcile admin config: %s", err.Error()) errFinal = err if !inRollback { goto rollback @@ -110,13 +109,13 @@ 
func (s *Stunner) reconcileWithRollback(req v1alpha1.StunnerConfig, inRollback b } toBeStarted = append(toBeStarted, adminState.ToBeStarted...) - s.log.Infof("setting loglevel to %q", s.GetAdmin().LogLevel) + s.log.Infof("Setting loglevel to %q", s.GetAdmin().LogLevel) s.logger.SetLevel(s.GetAdmin().LogLevel) // auth err = s.authManager.FinishReconciliation(authState) if err != nil { - s.log.Errorf("could not reconcile auth config: %s", err.Error()) + s.log.Errorf("Could not reconcile auth config: %s", err.Error()) errFinal = err if !inRollback { goto rollback @@ -128,7 +127,7 @@ func (s *Stunner) reconcileWithRollback(req v1alpha1.StunnerConfig, inRollback b // listener err = s.listenerManager.FinishReconciliation(listenerState) if err != nil { - s.log.Errorf("could not reconcile listener config: %s", err.Error()) + s.log.Errorf("Could not reconcile listener config: %s", err.Error()) errFinal = err if !inRollback { goto rollback @@ -138,13 +137,13 @@ func (s *Stunner) reconcileWithRollback(req v1alpha1.StunnerConfig, inRollback b toBeStarted = append(toBeStarted, listenerState.ToBeStarted...) if len(s.listenerManager.Keys()) == 0 { - s.log.Warn("running with no listeners") + s.log.Warn("Running with no listeners") } // cluster err = s.clusterManager.FinishReconciliation(clusterState) if err != nil { - s.log.Errorf("could not reconcile cluster config: %s", err.Error()) + s.log.Errorf("Could not reconcile cluster config: %s", err.Error()) errFinal = err if !inRollback { goto rollback @@ -154,13 +153,13 @@ func (s *Stunner) reconcileWithRollback(req v1alpha1.StunnerConfig, inRollback b toBeStarted = append(toBeStarted, clusterState.ToBeStarted...) if len(s.clusterManager.Keys()) == 0 { - s.log.Warn("running with no clusters: all traffic will be dropped") + s.log.Warn("Running with no clusters: all traffic will be dropped") } // find all objects (listeners) to be started or restarted and start each if !s.dryRun { if err := s.start(toBeStarted, toBeRestarted); err != nil { - s.log.Errorf("could not start object: %s", err.Error()) + s.log.Errorf("Could not start object: %s", err.Error()) errFinal = err if !inRollback { goto rollback @@ -174,11 +173,11 @@ func (s *Stunner) reconcileWithRollback(req v1alpha1.StunnerConfig, inRollback b s.ready = true } - s.log.Infof("reconciliation ready: new objects: %d, changed objects: %d, "+ + s.log.Infof("Reconciliation ready: new objects: %d, changed objects: %d, "+ "deleted objects: %d, started objects: %d, restarted objects: %d", new, changed, deleted, len(toBeStarted), len(toBeRestarted)) - s.log.Info(s.Status()) + s.log.Infof("New dataplane status: %s", s.Status().String()) if len(toBeRestarted) > 0 { names := make([]string, len(toBeRestarted)) @@ -186,15 +185,15 @@ func (s *Stunner) reconcileWithRollback(req v1alpha1.StunnerConfig, inRollback b names[i] = fmt.Sprintf("%s: %s", n.ObjectType(), n.ObjectName()) } - return v1alpha1.ErrRestarted{Objects: names} + return stnrv1.ErrRestarted{Objects: names} } return nil rollback: if !s.suppressRollback { - s.log.Infof("rolling back to previous configuration: %s", rollback.String()) - return s.reconcileWithRollback(*rollback, true) + s.log.Infof("Rolling back to previous configuration: %s", rollback.String()) + return s.reconcileWithRollback(rollback, true) } return errFinal @@ -208,7 +207,7 @@ func (s *Stunner) stop(restarted []object.Object) error { return err } default: - s.log.Errorf("internal error: stop() is not implemented for object %q", + s.log.Errorf("Internal error: stop() is not implemented for 
object %q", o.ObjectName()) } } @@ -224,7 +223,7 @@ func (s *Stunner) start(started, restarted []object.Object) error { return err } default: - s.log.Errorf("internal error: start() is not implemented for object %q", + s.log.Errorf("Internal error: start() is not implemented for object %q", o.ObjectName()) } } diff --git a/reconcile_test.go b/reconcile_test.go index e8b21b73..89c02086 100644 --- a/reconcile_test.go +++ b/reconcile_test.go @@ -4,16 +4,17 @@ import ( "bytes" "fmt" "net" + // "strconv" "testing" "time" - "github.com/pion/transport/test" + "github.com/pion/transport/v3/test" "github.com/stretchr/testify/assert" "github.com/l7mp/stunner/internal/object" "github.com/l7mp/stunner/internal/resolver" - "github.com/l7mp/stunner/pkg/apis/v1alpha1" + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" a12n "github.com/l7mp/stunner/pkg/authentication" "github.com/l7mp/stunner/pkg/logger" ) @@ -30,30 +31,30 @@ const ( // ***************** type StunnerReconcileTestConfig struct { name string - config v1alpha1.StunnerConfig + config stnrv1.StunnerConfig tester func(t *testing.T, s *Stunner, err error) } var testReconcileDefault = []StunnerReconcileTestConfig{ { name: "reconcile-test: default admin", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", Addr: "127.0.0.1", Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -63,23 +64,23 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ assert.Len(t, s.adminManager.Keys(), 1, "adminManager keys") admin := s.GetAdmin() - assert.Equal(t, admin.Name, v1alpha1.DefaultStunnerName, "stunner name") + assert.Equal(t, admin.Name, stnrv1.DefaultStunnerName, "stunner name") // make sure we get the right loglevel, we may override this for debugging the tests - // assert.Equal(t, admin.LogLevel, v1alpha1.DefaultLogLevel, "stunner loglevel") + // assert.Equal(t, admin.LogLevel, stnrv1.DefaultLogLevel, "stunner loglevel") assert.Len(t, s.authManager.Keys(), 1, "authManager keys") auth := s.GetAuth() - assert.Equal(t, auth.Type, v1alpha1.AuthTypePlainText, "auth type ok") + assert.Equal(t, auth.Type, stnrv1.AuthTypeStatic, "auth type ok") assert.Equal(t, auth.Username, "user", "username ok") assert.Equal(t, auth.Password, "pass", "password ok") handler := s.NewAuthHandler() - key, ok := handler("user", v1alpha1.DefaultRealm, + key, ok := handler("user", stnrv1.DefaultRealm, &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 1234}) assert.True(t, ok, "authHandler key ok") assert.Equal(t, key, a12n.GenerateAuthKey("user", - v1alpha1.DefaultRealm, "pass"), "auth handler ok") + stnrv1.DefaultRealm, "pass"), "auth handler ok") assert.Len(t, s.listenerManager.Keys(), 1, "listenerManager keys") @@ -87,11 +88,9 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ assert.NotNil(t, l, "listener found") assert.IsType(t, l, &object.Listener{}, "listener type ok") - assert.Equal(t, l.Proto, v1alpha1.ListenerProtocolUDP, "listener proto ok") + assert.Equal(t, l.Proto, stnrv1.ListenerProtocolTURNUDP, "listener proto ok") 
assert.Equal(t, l.Addr.String(), "127.0.0.1", "listener address ok") - assert.Equal(t, l.Port, v1alpha1.DefaultPort, "listener port ok") - assert.Equal(t, l.MinPort, v1alpha1.DefaultMinRelayPort, "listener minport ok") - assert.Equal(t, l.MaxPort, v1alpha1.DefaultMaxRelayPort, "listener maxport ok") + assert.Equal(t, l.Port, stnrv1.DefaultPort, "listener port ok") assert.Len(t, l.Routes, 1, "listener route count ok") assert.Equal(t, l.Routes[0], "allow-any", "listener route name ok") @@ -100,10 +99,9 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ c := s.GetCluster("allow-any") assert.NotNil(t, c, "cluster found") assert.IsType(t, c, &object.Cluster{}, "cluster type ok") - assert.Equal(t, c.Type, v1alpha1.ClusterTypeStatic, "cluster mode ok") + assert.Equal(t, c.Type, stnrv1.ClusterTypeStatic, "cluster mode ok") assert.Len(t, c.Endpoints, 1, "cluster endpoint count ok") _, n, _ := net.ParseCIDR("0.0.0.0/0") - assert.IsType(t, c.Endpoints[0], *n, "cluster endpoint type ok") assert.Equal(t, c.Endpoints[0].String(), n.String(), "cluster endpoint ok") // listener uses the open cluster for routing @@ -124,22 +122,22 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ }, { name: "reconcile-test: empty credentials errs: user", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", Addr: "127.0.0.1", Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -150,22 +148,22 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ }, { name: "reconcile-test: empty credentials errs: passwd", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", Addr: "127.0.0.1", Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -176,19 +174,19 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ }, { name: "reconcile-test: empty listener is fine", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{}, - Clusters: []v1alpha1.ClusterConfig{{ + Listeners: []stnrv1.ListenerConfig{}, + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -200,22 +198,22 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ }, { name: "reconcile-test: empty listener name errs", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: 
v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Addr: "127.0.0.1", Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -226,23 +224,23 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ }, { name: "reconcile-test: empty cluster is fine", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", Addr: "127.0.0.1", Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{}, + Clusters: []stnrv1.ClusterConfig{}, }, tester: func(t *testing.T, s *Stunner, err error) { assert.NoError(t, err, "no restart needed") @@ -250,23 +248,23 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ }, { name: "reconcile-test: empty cluster name errs", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", Addr: "127.0.0.1", Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Endpoints: []string{"0.0.0.0/0"}, }}, }, @@ -278,24 +276,24 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ /// admin { name: "reconcile-test: reconcile name", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ Name: "new-name", LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", Addr: "127.0.0.1", Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -308,21 +306,21 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ assert.Len(t, s.adminManager.Keys(), 1, "adminManager keys") admin := s.GetAdmin() assert.Equal(t, admin.Name, "new-name", "stunner name") - // assert.Equal(t, admin.LogLevel, v1alpha1.DefaultLogLevel, "stunner loglevel") + // assert.Equal(t, admin.LogLevel, stnrv1.DefaultLogLevel, "stunner loglevel") assert.Len(t, s.authManager.Keys(), 1, "authManager keys") auth := s.GetAuth() - assert.Equal(t, auth.Type, v1alpha1.AuthTypePlainText, "auth type ok") + assert.Equal(t, auth.Type, stnrv1.AuthTypeStatic, "auth type ok") assert.Equal(t, 
auth.Username, "user", "username ok") assert.Equal(t, auth.Password, "pass", "password ok") handler := s.NewAuthHandler() - key, ok := handler("user", v1alpha1.DefaultRealm, + key, ok := handler("user", stnrv1.DefaultRealm, &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 1234}) assert.True(t, ok, "authHandler key ok") assert.Equal(t, key, a12n.GenerateAuthKey("user", - v1alpha1.DefaultRealm, "pass"), "auth handler ok") + stnrv1.DefaultRealm, "pass"), "auth handler ok") assert.Len(t, s.listenerManager.Keys(), 1, "listenerManager keys") @@ -330,11 +328,9 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ assert.NotNil(t, l, "listener found") assert.IsType(t, l, &object.Listener{}, "listener type ok") - assert.Equal(t, l.Proto, v1alpha1.ListenerProtocolUDP, "listener proto ok") + assert.Equal(t, l.Proto, stnrv1.ListenerProtocolTURNUDP, "listener proto ok") assert.Equal(t, l.Addr.String(), "127.0.0.1", "listener address ok") - assert.Equal(t, l.Port, v1alpha1.DefaultPort, "listener port ok") - assert.Equal(t, l.MinPort, v1alpha1.DefaultMinRelayPort, "listener minport ok") - assert.Equal(t, l.MaxPort, v1alpha1.DefaultMaxRelayPort, "listener maxport ok") + assert.Equal(t, l.Port, stnrv1.DefaultPort, "listener port ok") assert.Len(t, l.Routes, 1, "listener route count ok") assert.Equal(t, l.Routes[0], "allow-any", "listener route name ok") @@ -343,10 +339,9 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ c := s.GetCluster("allow-any") assert.NotNil(t, c, "cluster found") assert.IsType(t, c, &object.Cluster{}, "cluster type ok") - assert.Equal(t, c.Type, v1alpha1.ClusterTypeStatic, "cluster mode ok") + assert.Equal(t, c.Type, stnrv1.ClusterTypeStatic, "cluster mode ok") assert.Len(t, c.Endpoints, 1, "cluster endpoint count ok") _, n, _ := net.ParseCIDR("0.0.0.0/0") - assert.IsType(t, c.Endpoints[0], *n, "cluster endpoint type ok") assert.Equal(t, c.Endpoints[0].String(), n.String(), "cluster endpoint ok") // listener uses the open cluster for routing @@ -366,23 +361,23 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ }, { name: "reconcile-test: reconcile loglevel", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: "anything", }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", Addr: "127.0.0.1", Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -398,17 +393,17 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ assert.Len(t, s.authManager.Keys(), 1, "authManager keys") auth := s.GetAuth() - assert.Equal(t, auth.Type, v1alpha1.AuthTypePlainText, "auth type ok") + assert.Equal(t, auth.Type, stnrv1.AuthTypeStatic, "auth type ok") assert.Equal(t, auth.Username, "user", "username ok") assert.Equal(t, auth.Password, "pass", "password ok") handler := s.NewAuthHandler() - key, ok := handler("user", v1alpha1.DefaultRealm, + key, ok := handler("user", stnrv1.DefaultRealm, &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 1234}) assert.True(t, ok, "authHandler key ok") assert.Equal(t, key, a12n.GenerateAuthKey("user", - v1alpha1.DefaultRealm, "pass"), "auth handler ok") + stnrv1.DefaultRealm, "pass"), "auth handler ok") 
assert.Len(t, s.listenerManager.Keys(), 1, "listenerManager keys") @@ -416,11 +411,9 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ assert.NotNil(t, l, "listener found") assert.IsType(t, l, &object.Listener{}, "listener type ok") - assert.Equal(t, l.Proto, v1alpha1.ListenerProtocolUDP, "listener proto ok") + assert.Equal(t, l.Proto, stnrv1.ListenerProtocolTURNUDP, "listener proto ok") assert.Equal(t, l.Addr.String(), "127.0.0.1", "listener address ok") - assert.Equal(t, l.Port, v1alpha1.DefaultPort, "listener port ok") - assert.Equal(t, l.MinPort, v1alpha1.DefaultMinRelayPort, "listener minport ok") - assert.Equal(t, l.MaxPort, v1alpha1.DefaultMaxRelayPort, "listener maxport ok") + assert.Equal(t, l.Port, stnrv1.DefaultPort, "listener port ok") assert.Len(t, l.Routes, 1, "listener route count ok") assert.Equal(t, l.Routes[0], "allow-any", "listener route name ok") @@ -429,10 +422,9 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ c := s.GetCluster("allow-any") assert.NotNil(t, c, "cluster found") assert.IsType(t, c, &object.Cluster{}, "cluster type ok") - assert.Equal(t, c.Type, v1alpha1.ClusterTypeStatic, "cluster mode ok") + assert.Equal(t, c.Type, stnrv1.ClusterTypeStatic, "cluster mode ok") assert.Len(t, c.Endpoints, 1, "cluster endpoint count ok") _, n, _ := net.ParseCIDR("0.0.0.0/0") - assert.IsType(t, c.Endpoints[0], *n, "cluster endpoint type ok") assert.Equal(t, c.Endpoints[0].String(), n.String(), "cluster endpoint ok") // listener uses the open cluster for routing @@ -452,24 +444,24 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ }, { name: "reconcile-test: reconcile metrics_endpoint", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: "anything", MetricsEndpoint: "http://0.0.0.0:8080/metrics", }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", Addr: "127.0.0.1", Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -482,23 +474,23 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ assert.Len(t, s.adminManager.Keys(), 1, "adminManager keys") admin := s.GetAdmin() assert.Equal(t, admin.Name, "default-stunnerd", "stunner name") - // assert.Equal(t, admin.LogLevel, v1alpha1.DefaultLogLevel, "stunner loglevel") + // assert.Equal(t, admin.LogLevel, stnrv1.DefaultLogLevel, "stunner loglevel") assert.Equal(t, admin.MetricsEndpoint, "http://0.0.0.0:8080/metrics", "stunner metrics endpoint") assert.Len(t, s.authManager.Keys(), 1, "authManager keys") auth := s.GetAuth() - assert.Equal(t, auth.Type, v1alpha1.AuthTypePlainText, "auth type ok") + assert.Equal(t, auth.Type, stnrv1.AuthTypeStatic, "auth type ok") assert.Equal(t, auth.Username, "user", "username ok") assert.Equal(t, auth.Password, "pass", "password ok") handler := s.NewAuthHandler() - key, ok := handler("user", v1alpha1.DefaultRealm, + key, ok := handler("user", stnrv1.DefaultRealm, &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 1234}) assert.True(t, ok, "authHandler key ok") assert.Equal(t, key, a12n.GenerateAuthKey("user", - v1alpha1.DefaultRealm, "pass"), "auth handler ok") + stnrv1.DefaultRealm, "pass"), "auth handler ok") 
assert.Len(t, s.listenerManager.Keys(), 1, "listenerManager keys") @@ -506,11 +498,9 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ assert.NotNil(t, l, "listener found") assert.IsType(t, l, &object.Listener{}, "listener type ok") - assert.Equal(t, l.Proto, v1alpha1.ListenerProtocolUDP, "listener proto ok") + assert.Equal(t, l.Proto, stnrv1.ListenerProtocolTURNUDP, "listener proto ok") assert.Equal(t, l.Addr.String(), "127.0.0.1", "listener address ok") - assert.Equal(t, l.Port, v1alpha1.DefaultPort, "listener port ok") - assert.Equal(t, l.MinPort, v1alpha1.DefaultMinRelayPort, "listener minport ok") - assert.Equal(t, l.MaxPort, v1alpha1.DefaultMaxRelayPort, "listener maxport ok") + assert.Equal(t, l.Port, stnrv1.DefaultPort, "listener port ok") assert.Len(t, l.Routes, 1, "listener route count ok") assert.Equal(t, l.Routes[0], "allow-any", "listener route name ok") @@ -519,10 +509,9 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ c := s.GetCluster("allow-any") assert.NotNil(t, c, "cluster found") assert.IsType(t, c, &object.Cluster{}, "cluster type ok") - assert.Equal(t, c.Type, v1alpha1.ClusterTypeStatic, "cluster mode ok") + assert.Equal(t, c.Type, stnrv1.ClusterTypeStatic, "cluster mode ok") assert.Len(t, c.Endpoints, 1, "cluster endpoint count ok") _, n, _ := net.ParseCIDR("0.0.0.0/0") - assert.IsType(t, c.Endpoints[0], *n, "cluster endpoint type ok") assert.Equal(t, c.Endpoints[0].String(), n.String(), "cluster endpoint ok") // listener uses the open cluster for routing @@ -542,24 +531,24 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ }, /// auth { - name: "reconcile-test: reconcile plaintextauth name", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + name: "reconcile-test: reconcile staticauth name", + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "newuser", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", Addr: "127.0.0.1", Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -569,21 +558,21 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ assert.NoError(t, err, "no restart needed") auth := s.GetAuth() - assert.Equal(t, auth.Type, v1alpha1.AuthTypePlainText, "auth type ok") + assert.Equal(t, auth.Type, stnrv1.AuthTypeStatic, "auth type ok") assert.Equal(t, auth.Username, "newuser", "username ok") assert.Equal(t, auth.Password, "pass", "password ok") handler := s.NewAuthHandler() - key, ok := handler("newuser", v1alpha1.DefaultRealm, + key, ok := handler("newuser", stnrv1.DefaultRealm, &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 1234}) assert.True(t, ok, "authHandler key ok") assert.Equal(t, key, a12n.GenerateAuthKey("newuser", - v1alpha1.DefaultRealm, "pass"), "auth handler ok") + stnrv1.DefaultRealm, "pass"), "auth handler ok") assert.Len(t, s.adminManager.Keys(), 1, "adminManager keys") admin := s.GetAdmin() - assert.Equal(t, admin.Name, v1alpha1.DefaultStunnerName, "stunner name") + assert.Equal(t, admin.Name, stnrv1.DefaultStunnerName, "stunner name") // assert.Equal(t, admin.LogLevel, "anything", "stunner loglevel") assert.Len(t, s.listenerManager.Keys(), 1, "listenerManager keys") @@ -592,11 +581,9 @@ var 
testReconcileDefault = []StunnerReconcileTestConfig{ assert.NotNil(t, l, "listener found") assert.IsType(t, l, &object.Listener{}, "listener type ok") - assert.Equal(t, l.Proto, v1alpha1.ListenerProtocolUDP, "listener proto ok") + assert.Equal(t, l.Proto, stnrv1.ListenerProtocolTURNUDP, "listener proto ok") assert.Equal(t, l.Addr.String(), "127.0.0.1", "listener address ok") - assert.Equal(t, l.Port, v1alpha1.DefaultPort, "listener port ok") - assert.Equal(t, l.MinPort, v1alpha1.DefaultMinRelayPort, "listener minport ok") - assert.Equal(t, l.MaxPort, v1alpha1.DefaultMaxRelayPort, "listener maxport ok") + assert.Equal(t, l.Port, stnrv1.DefaultPort, "listener port ok") assert.Len(t, l.Routes, 1, "listener route count ok") assert.Equal(t, l.Routes[0], "allow-any", "listener route name ok") @@ -605,10 +592,9 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ c := s.GetCluster("allow-any") assert.NotNil(t, c, "cluster found") assert.IsType(t, c, &object.Cluster{}, "cluster type ok") - assert.Equal(t, c.Type, v1alpha1.ClusterTypeStatic, "cluster mode ok") + assert.Equal(t, c.Type, stnrv1.ClusterTypeStatic, "cluster mode ok") assert.Len(t, c.Endpoints, 1, "cluster endpoint count ok") _, n, _ := net.ParseCIDR("0.0.0.0/0") - assert.IsType(t, c.Endpoints[0], *n, "cluster endpoint type ok") assert.Equal(t, c.Endpoints[0].String(), n.String(), "cluster endpoint ok") // listener uses the open cluster for routing @@ -627,24 +613,24 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ }, }, { - name: "reconcile-test: reconcile plaintext auth passwd", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + name: "reconcile-test: reconcile static auth passwd", + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "newpass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", Addr: "127.0.0.1", Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -654,21 +640,21 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ assert.NoError(t, err, "no restart needed") auth := s.GetAuth() - assert.Equal(t, auth.Type, v1alpha1.AuthTypePlainText, "auth type ok") + assert.Equal(t, auth.Type, stnrv1.AuthTypeStatic, "auth type ok") assert.Equal(t, auth.Username, "user", "username ok") assert.Equal(t, auth.Password, "newpass", "password ok") handler := s.NewAuthHandler() - key, ok := handler("user", v1alpha1.DefaultRealm, + key, ok := handler("user", stnrv1.DefaultRealm, &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 1234}) assert.True(t, ok, "authHandler key ok") assert.Equal(t, key, a12n.GenerateAuthKey("user", - v1alpha1.DefaultRealm, "newpass"), "auth handler ok") + stnrv1.DefaultRealm, "newpass"), "auth handler ok") assert.Len(t, s.adminManager.Keys(), 1, "adminManager keys") admin := s.GetAdmin() - assert.Equal(t, admin.Name, v1alpha1.DefaultStunnerName, "stunner name") + assert.Equal(t, admin.Name, stnrv1.DefaultStunnerName, "stunner name") // assert.Equal(t, admin.LogLevel, "anything", "stunner loglevel") assert.Len(t, s.listenerManager.Keys(), 1, "listenerManager keys") @@ -677,11 +663,9 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ assert.NotNil(t, l, "listener found") 
assert.IsType(t, l, &object.Listener{}, "listener type ok") - assert.Equal(t, l.Proto, v1alpha1.ListenerProtocolUDP, "listener proto ok") + assert.Equal(t, l.Proto, stnrv1.ListenerProtocolTURNUDP, "listener proto ok") assert.Equal(t, l.Addr.String(), "127.0.0.1", "listener address ok") - assert.Equal(t, l.Port, v1alpha1.DefaultPort, "listener port ok") - assert.Equal(t, l.MinPort, v1alpha1.DefaultMinRelayPort, "listener minport ok") - assert.Equal(t, l.MaxPort, v1alpha1.DefaultMaxRelayPort, "listener maxport ok") + assert.Equal(t, l.Port, stnrv1.DefaultPort, "listener port ok") assert.Len(t, l.Routes, 1, "listener route count ok") assert.Equal(t, l.Routes[0], "allow-any", "listener route name ok") @@ -690,10 +674,9 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ c := s.GetCluster("allow-any") assert.NotNil(t, c, "cluster found") assert.IsType(t, c, &object.Cluster{}, "cluster type ok") - assert.Equal(t, c.Type, v1alpha1.ClusterTypeStatic, "cluster mode ok") + assert.Equal(t, c.Type, stnrv1.ClusterTypeStatic, "cluster mode ok") assert.Len(t, c.Endpoints, 1, "cluster endpoint count ok") _, n, _ := net.ParseCIDR("0.0.0.0/0") - assert.IsType(t, c.Endpoints[0], *n, "cluster endpoint type ok") assert.Equal(t, c.Endpoints[0].String(), n.String(), "cluster endpoint ok") // listener uses the open cluster for routing @@ -712,24 +695,24 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ }, }, { - name: "reconcile-test: reconcile longterm auth", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + name: "reconcile-test: reconcile ephemeral auth", + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "longterm", + Auth: stnrv1.AuthConfig{ + Type: "ephemeral", Credentials: map[string]string{ "secret": "newsecret", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", Addr: "127.0.0.1", Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -739,7 +722,7 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ assert.NoError(t, err, "no restart needed") auth := s.GetAuth() - assert.Equal(t, auth.Type, v1alpha1.AuthTypeLongTerm, "auth type ok") + assert.Equal(t, auth.Type, stnrv1.AuthTypeEphemeral, "auth type ok") assert.Equal(t, auth.Secret, "newsecret") duration, _ := time.ParseDuration("10h") @@ -748,16 +731,16 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ assert.NoError(t, err, "GetLongTermCredential") handler := s.NewAuthHandler() - key, ok := handler(username, v1alpha1.DefaultRealm, + key, ok := handler(username, stnrv1.DefaultRealm, &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 1234}) assert.True(t, ok, "authHandler key ok") - key2 := a12n.GenerateAuthKey(username, v1alpha1.DefaultRealm, passwd) + key2 := a12n.GenerateAuthKey(username, stnrv1.DefaultRealm, passwd) assert.Equal(t, key, key2, "authHandler key matches") assert.Len(t, s.adminManager.Keys(), 1, "adminManager keys") admin := s.GetAdmin() - assert.Equal(t, admin.Name, v1alpha1.DefaultStunnerName, "stunner name") + assert.Equal(t, admin.Name, stnrv1.DefaultStunnerName, "stunner name") // assert.Equal(t, admin.LogLevel, "anything", "stunner loglevel") assert.Len(t, s.listenerManager.Keys(), 1, "listenerManager keys") @@ -766,11 +749,9 @@ var testReconcileDefault = 
[]StunnerReconcileTestConfig{ assert.NotNil(t, l, "listener found") assert.IsType(t, l, &object.Listener{}, "listener type ok") - assert.Equal(t, l.Proto, v1alpha1.ListenerProtocolUDP, "listener proto ok") + assert.Equal(t, l.Proto, stnrv1.ListenerProtocolTURNUDP, "listener proto ok") assert.Equal(t, l.Addr.String(), "127.0.0.1", "listener address ok") - assert.Equal(t, l.Port, v1alpha1.DefaultPort, "listener port ok") - assert.Equal(t, l.MinPort, v1alpha1.DefaultMinRelayPort, "listener minport ok") - assert.Equal(t, l.MaxPort, v1alpha1.DefaultMaxRelayPort, "listener maxport ok") + assert.Equal(t, l.Port, stnrv1.DefaultPort, "listener port ok") assert.Len(t, l.Routes, 1, "listener route count ok") assert.Equal(t, l.Routes[0], "allow-any", "listener route name ok") @@ -779,10 +760,9 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ c := s.GetCluster("allow-any") assert.NotNil(t, c, "cluster found") assert.IsType(t, c, &object.Cluster{}, "cluster type ok") - assert.Equal(t, c.Type, v1alpha1.ClusterTypeStatic, "cluster mode ok") + assert.Equal(t, c.Type, stnrv1.ClusterTypeStatic, "cluster mode ok") assert.Len(t, c.Endpoints, 1, "cluster endpoint count ok") _, n, _ := net.ParseCIDR("0.0.0.0/0") - assert.IsType(t, c.Endpoints[0], *n, "cluster endpoint type ok") assert.Equal(t, c.Endpoints[0].String(), n.String(), "cluster endpoint ok") // listener uses the open cluster for routing @@ -803,27 +783,25 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ /// listener { name: "reconcile-test: reconcile existing listener", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ - Name: "default-listener", - Protocol: "tcp", - Addr: "127.0.0.2", - Port: 12345, - MinRelayPort: 10, - MaxRelayPort: 100, - Routes: []string{"none", "dummy"}, + Listeners: []stnrv1.ListenerConfig{{ + Name: "default-listener", + Protocol: "turn-tcp", + Addr: "127.0.0.2", + Port: 12345, + Routes: []string{"none", "dummy"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -831,7 +809,7 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ tester: func(t *testing.T, s *Stunner, err error) { // requires a restart! assert.Error(t, err, "restarted") - e, ok := err.(v1alpha1.ErrRestarted) + e, ok := err.(stnrv1.ErrRestarted) assert.True(t, ok, "restarted status") assert.Len(t, e.Objects, 1, "restarted object") assert.Contains(t, e.Objects, "listener: default-listener") @@ -842,11 +820,9 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ assert.NotNil(t, l, "listener found") assert.IsType(t, l, &object.Listener{}, "listener type ok") - assert.Equal(t, l.Proto, v1alpha1.ListenerProtocolTCP, "listener proto ok") + assert.Equal(t, l.Proto, stnrv1.ListenerProtocolTURNTCP, "listener proto ok") assert.Equal(t, l.Addr.String(), "127.0.0.2", "listener address ok") assert.Equal(t, l.Port, 12345, "listener port ok") - assert.Equal(t, l.MinPort, 10, "listener minport ok") - assert.Equal(t, l.MaxPort, 100, "listener maxport ok") assert.Len(t, l.Routes, 2, "listener route count ok") // sorted!!! 
assert.Equal(t, l.Routes[0], "dummy", "listener route name ok") @@ -854,7 +830,7 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ assert.Len(t, s.adminManager.Keys(), 1, "adminManager keys") admin := s.GetAdmin() - assert.Equal(t, admin.Name, v1alpha1.DefaultStunnerName, "stunner name") + assert.Equal(t, admin.Name, stnrv1.DefaultStunnerName, "stunner name") // assert.Equal(t, admin.LogLevel, "anything", "stunner loglevel") assert.Len(t, s.clusterManager.Keys(), 1, "clusterManager keys") @@ -862,10 +838,9 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ c := s.GetCluster("allow-any") assert.NotNil(t, c, "cluster found") assert.IsType(t, c, &object.Cluster{}, "cluster type ok") - assert.Equal(t, c.Type, v1alpha1.ClusterTypeStatic, "cluster mode ok") + assert.Equal(t, c.Type, stnrv1.ClusterTypeStatic, "cluster mode ok") assert.Len(t, c.Endpoints, 1, "cluster endpoint count ok") _, n, _ := net.ParseCIDR("0.0.0.0/0") - assert.IsType(t, c.Endpoints[0], *n, "cluster endpoint type ok") assert.Equal(t, c.Endpoints[0].String(), n.String(), "cluster endpoint ok") // listener uses the old cluster for routing @@ -885,27 +860,25 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ }, { name: "reconcile-test: reconcile new listener", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ - Name: "newlistener", - Protocol: "tcp", - Addr: "127.0.0.2", - Port: 1, - MinRelayPort: 10, - MaxRelayPort: 100, - Routes: []string{"none", "dummy"}, + Listeners: []stnrv1.ListenerConfig{{ + Name: "newlistener", + Protocol: "turn-tcp", + Addr: "127.0.0.2", + Port: 1, + Routes: []string{"none", "dummy"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -923,11 +896,9 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ assert.NotNil(t, l, "listener found") assert.IsType(t, l, &object.Listener{}, "listener type ok") - assert.Equal(t, l.Proto, v1alpha1.ListenerProtocolTCP, "listener proto ok") + assert.Equal(t, l.Proto, stnrv1.ListenerProtocolTURNTCP, "listener proto ok") assert.Equal(t, l.Addr.String(), "127.0.0.2", "listener address ok") assert.Equal(t, l.Port, 1, "listener port ok") - assert.Equal(t, l.MinPort, 10, "listener minport ok") - assert.Equal(t, l.MaxPort, 100, "listener maxport ok") assert.Len(t, l.Routes, 2, "listener route count ok") // sorted! 
assert.Equal(t, l.Routes[0], "dummy", "listener route name ok") @@ -936,10 +907,9 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ c := s.GetCluster("allow-any") assert.NotNil(t, c, "cluster found") assert.IsType(t, c, &object.Cluster{}, "cluster type ok") - assert.Equal(t, c.Type, v1alpha1.ClusterTypeStatic, "cluster mode ok") + assert.Equal(t, c.Type, stnrv1.ClusterTypeStatic, "cluster mode ok") assert.Len(t, c.Endpoints, 1, "cluster endpoint count ok") _, n, _ := net.ParseCIDR("0.0.0.0/0") - assert.IsType(t, c.Endpoints[0], *n, "cluster endpoint type ok") assert.Equal(t, c.Endpoints[0].String(), n.String(), "cluster endpoint ok") // listener uses the old cluster for routing @@ -959,27 +929,25 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ }, { name: "reconcile-test: empty TLS credentials errs", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ - Name: "newlistener", - Protocol: "tls", - Addr: "127.0.0.2", - Port: 1, - MinRelayPort: 10, - MaxRelayPort: 100, - Routes: []string{"none", "dummy"}, + Listeners: []stnrv1.ListenerConfig{{ + Name: "newlistener", + Protocol: "turn-tls", + Addr: "127.0.0.2", + Port: 1, + Routes: []string{"none", "dummy"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -990,31 +958,29 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ }, { name: "reconcile-test: reconcile additional listener", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", Addr: "127.0.0.1", Routes: []string{"allow-any"}, }, { - Name: "newlistener", - Protocol: "tcp", - Addr: "127.0.0.2", - Port: 1, - MinRelayPort: 10, - MaxRelayPort: 100, - Routes: []string{"none", "dummy"}, - }}, - Clusters: []v1alpha1.ClusterConfig{{ + Name: "newlistener", + Protocol: "turn-tcp", + Addr: "127.0.0.2", + Port: 1, + Routes: []string{"none", "dummy"}, + }}, + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -1028,21 +994,18 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ l := s.GetListener("default-listener") assert.NotNil(t, l, "listener found") assert.IsType(t, l, &object.Listener{}, "listener type ok") - assert.Equal(t, l.Proto, v1alpha1.ListenerProtocolUDP, "listener proto ok") + assert.Equal(t, l.Proto, stnrv1.ListenerProtocolTURNUDP, "listener proto ok") assert.Equal(t, l.Addr.String(), "127.0.0.1", "listener address ok") - assert.Equal(t, l.Port, v1alpha1.DefaultPort, "listener port ok") - assert.Equal(t, l.MinPort, v1alpha1.DefaultMinRelayPort, "listener minport ok") - assert.Equal(t, l.MaxPort, v1alpha1.DefaultMaxRelayPort, "listener maxport ok") + assert.Equal(t, l.Port, stnrv1.DefaultPort, "listener port ok") assert.Len(t, l.Routes, 1, "listener route count ok") assert.Equal(t, 
l.Routes[0], "allow-any", "listener route name ok") c := s.GetCluster("allow-any") assert.NotNil(t, c, "cluster found") assert.IsType(t, c, &object.Cluster{}, "cluster type ok") - assert.Equal(t, c.Type, v1alpha1.ClusterTypeStatic, "cluster mode ok") + assert.Equal(t, c.Type, stnrv1.ClusterTypeStatic, "cluster mode ok") assert.Len(t, c.Endpoints, 1, "cluster endpoint count ok") _, n, _ := net.ParseCIDR("0.0.0.0/0") - assert.IsType(t, c.Endpoints[0], *n, "cluster endpoint type ok") assert.Equal(t, c.Endpoints[0].String(), n.String(), "cluster endpoint ok") // listener uses the old cluster for routing @@ -1063,11 +1026,9 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ assert.NotNil(t, l, "listener found") assert.IsType(t, l, &object.Listener{}, "listener type ok") - assert.Equal(t, l.Proto, v1alpha1.ListenerProtocolTCP, "listener proto ok") + assert.Equal(t, l.Proto, stnrv1.ListenerProtocolTURNTCP, "listener proto ok") assert.Equal(t, l.Addr.String(), "127.0.0.2", "listener address ok") assert.Equal(t, l.Port, 1, "listener port ok") - assert.Equal(t, l.MinPort, 10, "listener minport ok") - assert.Equal(t, l.MaxPort, 100, "listener maxport ok") assert.Len(t, l.Routes, 2, "listener route count ok") // sorted! assert.Equal(t, l.Routes[0], "dummy", "listener route name ok") @@ -1090,34 +1051,32 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ }, { name: "reconcile-test: reconcile existing listener with TLS cert and add a new one", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", Addr: "127.0.0.1", - Protocol: "DTLS", + Protocol: "TURN-DTLS", Cert: dummyCert64, Key: dummyKey64, Routes: []string{"allow-any"}, }, { - Name: "newlistener", - Protocol: "tcp", - Addr: "127.0.0.2", - Port: 1, - MinRelayPort: 10, - MaxRelayPort: 100, - Routes: []string{"none", "dummy"}, - }}, - Clusters: []v1alpha1.ClusterConfig{{ + Name: "newlistener", + Protocol: "turn-tcp", + Addr: "127.0.0.2", + Port: 1, + Routes: []string{"none", "dummy"}, + }}, + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -1125,7 +1084,7 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ tester: func(t *testing.T, s *Stunner, err error) { // default-listener restarts assert.Error(t, err, "restarted") - e, ok := err.(v1alpha1.ErrRestarted) + e, ok := err.(stnrv1.ErrRestarted) assert.True(t, ok, "restarted status") assert.Len(t, e.Objects, 1, "restarted object") assert.Contains(t, e.Objects, "listener: default-listener") @@ -1135,23 +1094,20 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ l := s.GetListener("default-listener") assert.NotNil(t, l, "listener found") assert.IsType(t, l, &object.Listener{}, "listener type ok") - assert.Equal(t, l.Proto, v1alpha1.ListenerProtocolDTLS, "listener proto ok") + assert.Equal(t, l.Proto, stnrv1.ListenerProtocolTURNDTLS, "listener proto ok") assert.Equal(t, l.Addr.String(), "127.0.0.1", "listener address ok") assert.Equal(t, bytes.Compare(l.Cert, []byte("dummy-cert")), 0, "listener cert ok") assert.Equal(t, bytes.Compare(l.Key, []byte("dummy-key")), 0, "listener key ok") - assert.Equal(t, l.Port, 
v1alpha1.DefaultPort, "listener port ok") - assert.Equal(t, l.MinPort, v1alpha1.DefaultMinRelayPort, "listener minport ok") - assert.Equal(t, l.MaxPort, v1alpha1.DefaultMaxRelayPort, "listener maxport ok") + assert.Equal(t, l.Port, stnrv1.DefaultPort, "listener port ok") assert.Len(t, l.Routes, 1, "listener route count ok") assert.Equal(t, l.Routes[0], "allow-any", "listener route name ok") c := s.GetCluster("allow-any") assert.NotNil(t, c, "cluster found") assert.IsType(t, c, &object.Cluster{}, "cluster type ok") - assert.Equal(t, c.Type, v1alpha1.ClusterTypeStatic, "cluster mode ok") + assert.Equal(t, c.Type, stnrv1.ClusterTypeStatic, "cluster mode ok") assert.Len(t, c.Endpoints, 1, "cluster endpoint count ok") _, n, _ := net.ParseCIDR("0.0.0.0/0") - assert.IsType(t, c.Endpoints[0], *n, "cluster endpoint type ok") assert.Equal(t, c.Endpoints[0].String(), n.String(), "cluster endpoint ok") // listener uses the old cluster for routing @@ -1172,11 +1128,9 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ assert.NotNil(t, l, "listener found") assert.IsType(t, l, &object.Listener{}, "listener type ok") - assert.Equal(t, l.Proto, v1alpha1.ListenerProtocolTCP, "listener proto ok") + assert.Equal(t, l.Proto, stnrv1.ListenerProtocolTURNTCP, "listener proto ok") assert.Equal(t, l.Addr.String(), "127.0.0.2", "listener address ok") assert.Equal(t, l.Port, 1, "listener port ok") - assert.Equal(t, l.MinPort, 10, "listener minport ok") - assert.Equal(t, l.MaxPort, 100, "listener maxport ok") assert.Len(t, l.Routes, 2, "listener route count ok") // sorted! assert.Equal(t, l.Routes[0], "dummy", "listener route name ok") @@ -1199,34 +1153,32 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ }, { name: "reconcile-test: reconcile existing listener with TLS cert and add a new one", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", Addr: "127.0.0.1", - Protocol: "TLS", + Protocol: "TURN-TLS", Cert: dummyCert64, Key: dummyKey64, Routes: []string{"allow-any"}, }, { - Name: "newlistener", - Protocol: "tcp", - Addr: "127.0.0.2", - Port: 1, - MinRelayPort: 10, - MaxRelayPort: 100, - Routes: []string{"none", "dummy"}, - }}, - Clusters: []v1alpha1.ClusterConfig{{ + Name: "newlistener", + Protocol: "turn-tcp", + Addr: "127.0.0.2", + Port: 1, + Routes: []string{"none", "dummy"}, + }}, + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -1234,7 +1186,7 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ tester: func(t *testing.T, s *Stunner, err error) { // default-listener restarts assert.Error(t, err, "restarted") - e, ok := err.(v1alpha1.ErrRestarted) + e, ok := err.(stnrv1.ErrRestarted) assert.True(t, ok, "restarted status") assert.Len(t, e.Objects, 1, "restarted object") assert.Contains(t, e.Objects, "listener: default-listener") @@ -1244,23 +1196,20 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ l := s.GetListener("default-listener") assert.NotNil(t, l, "listener found") assert.IsType(t, l, &object.Listener{}, "listener type ok") - assert.Equal(t, l.Proto, v1alpha1.ListenerProtocolTLS, "listener proto ok") + assert.Equal(t, 
l.Proto, stnrv1.ListenerProtocolTURNTLS, "listener proto ok") assert.Equal(t, l.Addr.String(), "127.0.0.1", "listener address ok") assert.Equal(t, bytes.Compare(l.Cert, []byte("dummy-cert")), 0, "listener cert ok") assert.Equal(t, bytes.Compare(l.Key, []byte("dummy-key")), 0, "listener key ok") - assert.Equal(t, l.Port, v1alpha1.DefaultPort, "listener port ok") - assert.Equal(t, l.MinPort, v1alpha1.DefaultMinRelayPort, "listener minport ok") - assert.Equal(t, l.MaxPort, v1alpha1.DefaultMaxRelayPort, "listener maxport ok") + assert.Equal(t, l.Port, stnrv1.DefaultPort, "listener port ok") assert.Len(t, l.Routes, 1, "listener route count ok") assert.Equal(t, l.Routes[0], "allow-any", "listener route name ok") c := s.GetCluster("allow-any") assert.NotNil(t, c, "cluster found") assert.IsType(t, c, &object.Cluster{}, "cluster type ok") - assert.Equal(t, c.Type, v1alpha1.ClusterTypeStatic, "cluster mode ok") + assert.Equal(t, c.Type, stnrv1.ClusterTypeStatic, "cluster mode ok") assert.Len(t, c.Endpoints, 1, "cluster endpoint count ok") _, n, _ := net.ParseCIDR("0.0.0.0/0") - assert.IsType(t, c.Endpoints[0], *n, "cluster endpoint type ok") assert.Equal(t, c.Endpoints[0].String(), n.String(), "cluster endpoint ok") // listener uses the old cluster for routing @@ -1281,11 +1230,9 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ assert.NotNil(t, l, "listener found") assert.IsType(t, l, &object.Listener{}, "listener type ok") - assert.Equal(t, l.Proto, v1alpha1.ListenerProtocolTCP, "listener proto ok") + assert.Equal(t, l.Proto, stnrv1.ListenerProtocolTURNTCP, "listener proto ok") assert.Equal(t, l.Addr.String(), "127.0.0.2", "listener address ok") assert.Equal(t, l.Port, 1, "listener port ok") - assert.Equal(t, l.MinPort, 10, "listener minport ok") - assert.Equal(t, l.MaxPort, 100, "listener maxport ok") assert.Len(t, l.Routes, 2, "listener route count ok") // sorted! assert.Equal(t, l.Routes[0], "dummy", "listener route name ok") @@ -1306,21 +1253,73 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ }, }, + { + name: "reconcile-test: reconcile existing listener with new public IP and port", + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ + LogLevel: stunnerTestLoglevel, + }, + Auth: stnrv1.AuthConfig{ + Credentials: map[string]string{ + "username": "user", + "password": "pass", + }, + }, + Listeners: []stnrv1.ListenerConfig{{ + Name: "default-listener", + Addr: "127.0.0.1", + Protocol: "TURN-UDP", + PublicAddr: "127.0.0.2", + PublicPort: 33478, + Routes: []string{"allow-any"}, + }}, + Clusters: []stnrv1.ClusterConfig{{ + Name: "allow-any", + Endpoints: []string{"0.0.0.0/0"}, + }}, + }, + tester: func(t *testing.T, s *Stunner, err error) { + // does not require a restart! 
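+				// Only the advertised public address/port differs from the running
+				// config; the socket stays bound to 127.0.0.1:3478, so the listener
+				// is updated in place without a restart.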
+ assert.NoError(t, err, "restart") + + assert.Len(t, s.listenerManager.Keys(), 1, "listenerManager keys") + + l := s.GetListener("default-listener") + assert.NotNil(t, l, "listener found") + assert.IsType(t, l, &object.Listener{}, "listener type ok") + assert.Equal(t, l.Proto, stnrv1.ListenerProtocolTURNUDP, "listener proto ok") + assert.Equal(t, l.Addr.String(), "127.0.0.1", "listener address ok") + assert.Equal(t, l.Port, stnrv1.DefaultPort, "listener port ok") + assert.Equal(t, l.PublicAddr, "127.0.0.2", "listener public address ok") + assert.Equal(t, l.PublicPort, 33478, "listener public port ok") + assert.Len(t, l.Routes, 1, "listener route count ok") + assert.Equal(t, l.Routes[0], "allow-any", "listener route name ok") + + c := s.GetCluster("allow-any") + assert.NotNil(t, c, "cluster found") + assert.IsType(t, c, &object.Cluster{}, "cluster type ok") + assert.Equal(t, c.Type, stnrv1.ClusterTypeStatic, "cluster mode ok") + assert.Len(t, c.Endpoints, 1, "cluster endpoint count ok") + _, n, _ := net.ParseCIDR("0.0.0.0/0") + assert.Equal(t, c.Endpoints[0].String(), n.String(), "cluster endpoint ok") + }, + }, { name: "reconcile-test: reconcile deleted listener", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{}, - Clusters: []v1alpha1.ClusterConfig{{ + Listeners: []stnrv1.ListenerConfig{}, + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -1342,23 +1341,23 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ /// cluster { name: "reconcile-test: reconcile existing cluster", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", Addr: "127.0.0.1", Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"1.1.1.1", "2.2.2.2/8"}, }}, @@ -1371,13 +1370,10 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ c := s.GetCluster("allow-any") assert.NotNil(t, c, "cluster found") assert.IsType(t, c, &object.Cluster{}, "cluster type ok") - assert.Equal(t, c.Type, v1alpha1.ClusterTypeStatic, "cluster mode ok") + assert.Equal(t, c.Type, stnrv1.ClusterTypeStatic, "cluster mode ok") assert.Len(t, c.Endpoints, 2, "cluster endpoint count ok") - _, n, _ := net.ParseCIDR("1.1.1.1/32") - assert.IsType(t, c.Endpoints[0], *n, "cluster endpoint type ok") - assert.Equal(t, c.Endpoints[0].String(), n.String(), "cluster endpoint ok") - _, n, _ = net.ParseCIDR("2.2.2.2/8") - assert.IsType(t, c.Endpoints[1], *n, "cluster endpoint type ok") + assert.Equal(t, c.Endpoints[0].String(), "1.1.1.1", "cluster endpoint ok") + _, n, _ := net.ParseCIDR("2.2.2.2/8") assert.Equal(t, c.Endpoints[1].String(), n.String(), "cluster endpoint ok") l := s.GetListener("default-listener") @@ -1398,23 +1394,23 @@ var testReconcileDefault = 
[]StunnerReconcileTestConfig{ }, { name: "reconcile-test: reconcile new cluster", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", Addr: "127.0.0.1", Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "newcluster", Endpoints: []string{"1.1.1.1", "2.2.2.2/8"}, }}, @@ -1430,13 +1426,10 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ c = s.GetCluster("newcluster") assert.NotNil(t, c, "cluster found") assert.IsType(t, c, &object.Cluster{}, "cluster type ok") - assert.Equal(t, c.Type, v1alpha1.ClusterTypeStatic, "cluster mode ok") + assert.Equal(t, c.Type, stnrv1.ClusterTypeStatic, "cluster mode ok") assert.Len(t, c.Endpoints, 2, "cluster endpoint count ok") - _, n, _ := net.ParseCIDR("1.1.1.1/32") - assert.IsType(t, c.Endpoints[0], *n, "cluster endpoint type ok") - assert.Equal(t, c.Endpoints[0].String(), n.String(), "cluster endpoint ok") - _, n, _ = net.ParseCIDR("2.2.2.2/8") - assert.IsType(t, c.Endpoints[1], *n, "cluster endpoint type ok") + assert.Equal(t, c.Endpoints[0].String(), "1.1.1.1", "cluster endpoint ok") + _, n, _ := net.ParseCIDR("2.2.2.2/8") assert.Equal(t, c.Endpoints[1].String(), n.String(), "cluster endpoint ok") l := s.GetListener("default-listener") @@ -1456,25 +1449,81 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ net.ParseIP("3.0.0.0")), "route to 3.0.0.0 fails") }, }, + { + name: "reconcile-test: reconcile cluster with port range", + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ + LogLevel: stunnerTestLoglevel, + }, + Auth: stnrv1.AuthConfig{ + Credentials: map[string]string{ + "username": "user", + "password": "pass", + }, + }, + Listeners: []stnrv1.ListenerConfig{{ + Name: "default-listener", + Addr: "127.0.0.1", + Routes: []string{"allow-any"}, + }}, + Clusters: []stnrv1.ClusterConfig{{ + Name: "newcluster", + Endpoints: []string{"1.1.1.1:<1-2>", "2.2.2.2/8:<3-4>"}, + }}, + }, + tester: func(t *testing.T, s *Stunner, err error) { + assert.NoError(t, err, err) + + assert.Len(t, s.clusterManager.Keys(), 1, "clusterManager keys") + + c := s.GetCluster("allow-any") + assert.Nil(t, c, "cluster found") + + c = s.GetCluster("newcluster") + assert.NotNil(t, c, "cluster found") + assert.IsType(t, c, &object.Cluster{}, "cluster type ok") + assert.Equal(t, c.Type, stnrv1.ClusterTypeStatic, "cluster mode ok") + assert.Len(t, c.Endpoints, 2, "cluster endpoint count ok") + assert.Equal(t, c.Endpoints[0].String(), "1.1.1.1:<1-2>", "cluster endpoint ok") + assert.Equal(t, c.Endpoints[1].String(), "2.0.0.0/8:<3-4>", "cluster endpoint ok") + + l := s.GetListener("default-listener") + p := s.NewPermissionHandler(l) + assert.NotNil(t, p, "permission handler exists") + + // listener still uses the old cluster for routing + assert.False(t, p(&net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 1234}, + net.ParseIP("1.1.1.1")), "route to 1.1.1.1 ok") + assert.False(t, p(&net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 1234}, + net.ParseIP("1.1.1.2")), "route to 1.1.1.2 fails") + assert.False(t, p(&net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 1234}, + 
net.ParseIP("2.2.2.2")), "route to 2.2.2.2 fails") + assert.False(t, p(&net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 1234}, + net.ParseIP("2.128.3.3")), "route to 2.128.3.3 fails") + assert.False(t, p(&net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 1234}, + net.ParseIP("3.0.0.0")), "route to 3.0.0.0 fails") + }, + }, { name: "reconcile-test: reconcile additional cluster", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", Addr: "127.0.0.1", Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "newcluster", Endpoints: []string{"1.1.1.1", "2.2.2.2/8"}, }, { @@ -1490,10 +1539,9 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ c := s.GetCluster("allow-any") assert.NotNil(t, c, "cluster found") assert.IsType(t, c, &object.Cluster{}, "cluster type ok") - assert.Equal(t, c.Type, v1alpha1.ClusterTypeStatic, "cluster mode ok") + assert.Equal(t, c.Type, stnrv1.ClusterTypeStatic, "cluster mode ok") assert.Len(t, c.Endpoints, 1, "cluster endpoint count ok") _, n, _ := net.ParseCIDR("0.0.0.0/0") - assert.IsType(t, c.Endpoints[0], *n, "cluster endpoint type ok") assert.Equal(t, c.Endpoints[0].String(), n.String(), "cluster endpoint ok") l := s.GetListener("default-listener") @@ -1503,13 +1551,10 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ c = s.GetCluster("newcluster") assert.NotNil(t, c, "cluster found") assert.IsType(t, c, &object.Cluster{}, "cluster type ok") - assert.Equal(t, c.Type, v1alpha1.ClusterTypeStatic, "cluster mode ok") + assert.Equal(t, c.Type, stnrv1.ClusterTypeStatic, "cluster mode ok") assert.Len(t, c.Endpoints, 2, "cluster endpoint count ok") - _, n, _ = net.ParseCIDR("1.1.1.1/32") - assert.IsType(t, c.Endpoints[0], *n, "cluster endpoint type ok") - assert.Equal(t, c.Endpoints[0].String(), n.String(), "cluster endpoint ok") + assert.Equal(t, c.Endpoints[0].String(), "1.1.1.1", "cluster endpoint ok") _, n, _ = net.ParseCIDR("2.2.2.2/8") - assert.IsType(t, c.Endpoints[1], *n, "cluster endpoint type ok") assert.Equal(t, c.Endpoints[1].String(), n.String(), "cluster endpoint ok") // listener still uses the old open cluster for routing @@ -1527,23 +1572,23 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ }, { name: "reconcile-test: reconcile additional cluster and reroute", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", Addr: "127.0.0.1", Routes: []string{"newcluster"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "newcluster", Endpoints: []string{"1.1.1.1", "2.2.2.2/8"}, }, { @@ -1560,10 +1605,9 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ c := s.GetCluster("allow-any") assert.NotNil(t, c, "cluster found") assert.IsType(t, c, 
&object.Cluster{}, "cluster type ok") - assert.Equal(t, c.Type, v1alpha1.ClusterTypeStatic, "cluster mode ok") + assert.Equal(t, c.Type, stnrv1.ClusterTypeStatic, "cluster mode ok") assert.Len(t, c.Endpoints, 1, "cluster endpoint count ok") _, n, _ := net.ParseCIDR("0.0.0.0/0") - assert.IsType(t, c.Endpoints[0], *n, "cluster endpoint type ok") assert.Equal(t, c.Endpoints[0].String(), n.String(), "cluster endpoint ok") l := s.GetListener("default-listener") @@ -1573,13 +1617,10 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ c = s.GetCluster("newcluster") assert.NotNil(t, c, "cluster found") assert.IsType(t, c, &object.Cluster{}, "cluster type ok") - assert.Equal(t, c.Type, v1alpha1.ClusterTypeStatic, "cluster mode ok") + assert.Equal(t, c.Type, stnrv1.ClusterTypeStatic, "cluster mode ok") assert.Len(t, c.Endpoints, 2, "cluster endpoint count ok") - _, n, _ = net.ParseCIDR("1.1.1.1/32") - assert.IsType(t, c.Endpoints[0], *n, "cluster endpoint type ok") - assert.Equal(t, c.Endpoints[0].String(), n.String(), "cluster endpoint ok") + assert.Equal(t, c.Endpoints[0].String(), "1.1.1.1", "cluster endpoint ok") _, n, _ = net.ParseCIDR("2.2.2.2/8") - assert.IsType(t, c.Endpoints[1], *n, "cluster endpoint type ok") assert.Equal(t, c.Endpoints[1].String(), n.String(), "cluster endpoint ok") assert.True(t, p(&net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 1234}, @@ -1594,25 +1635,111 @@ var testReconcileDefault = []StunnerReconcileTestConfig{ net.ParseIP("3.0.0.0")), "route to 3.0.0.0 fails") }, }, + { + name: "reconcile-test: reconcile port-range", + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ + LogLevel: stunnerTestLoglevel, + }, + Auth: stnrv1.AuthConfig{ + Credentials: map[string]string{ + "username": "user", + "password": "pass", + }, + }, + Listeners: []stnrv1.ListenerConfig{{ + Name: "default-listener", + Addr: "127.0.0.1", + Routes: []string{"newcluster"}, + }}, + Clusters: []stnrv1.ClusterConfig{{ + Name: "newcluster", + Endpoints: []string{"1.1.1.1:<1-2>", "2.2.2.2/8:<3-4>"}, + }, { + Name: "allow-any", + Endpoints: []string{"0.0.0.0/0"}, + }}, + }, + tester: func(t *testing.T, s *Stunner, err error) { + // only routes have changed, we shouldn't need a restart + assert.NoError(t, err, err) + + assert.Len(t, s.clusterManager.Keys(), 2, "clusterManager keys") + + c := s.GetCluster("allow-any") + assert.NotNil(t, c, "cluster found") + assert.IsType(t, c, &object.Cluster{}, "cluster type ok") + assert.Equal(t, c.Type, stnrv1.ClusterTypeStatic, "cluster mode ok") + assert.Len(t, c.Endpoints, 1, "cluster endpoint count ok") + _, n, _ := net.ParseCIDR("0.0.0.0/0") + assert.Equal(t, c.Endpoints[0].String(), n.String(), "cluster endpoint ok") + + l := s.GetListener("default-listener") + p := s.NewPermissionHandler(l) + assert.NotNil(t, p, "permission handler exists") + + c = s.GetCluster("newcluster") + assert.NotNil(t, c, "cluster found") + assert.IsType(t, c, &object.Cluster{}, "cluster type ok") + assert.Equal(t, c.Type, stnrv1.ClusterTypeStatic, "cluster mode ok") + assert.Len(t, c.Endpoints, 2, "cluster endpoint count ok") + assert.Equal(t, c.Endpoints[0].String(), "1.1.1.1:<1-2>", "cluster endpoint ok") + assert.Equal(t, c.Endpoints[1].String(), "2.0.0.0/8:<3-4>", "cluster endpoint ok") + + assert.True(t, p(&net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 1234}, + net.ParseIP("1.1.1.1")), "route to 1.1.1.1 ok") + assert.False(t, p(&net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 1234}, + net.ParseIP("1.1.1.2")), "route to 1.1.1.2 
fails") + assert.True(t, p(&net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 1234}, + net.ParseIP("2.2.2.2")), "route to 2.2.2.2 ok") + assert.True(t, p(&net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 1234}, + net.ParseIP("2.128.3.3")), "route to 2.128.3.3 ok") + assert.False(t, p(&net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 1234}, + net.ParseIP("3.0.0.0")), "route to 3.0.0.0 fails") + + assert.True(t, c.Route(net.ParseIP("1.1.1.1")), "route to 1.1.1.1 ok") + assert.False(t, c.Route(net.ParseIP("1.1.1.2")), "route to 1.1.1.2 fails") + assert.True(t, c.Route(net.ParseIP("2.2.2.2")), "route to 2.2.2.2 ok") + assert.True(t, c.Route(net.ParseIP("2.128.3.3")), "route to 2.128.3.3 ok") + assert.False(t, c.Route(net.ParseIP("3.0.0.0")), "route to 3.0.0.0 fails") + + assert.True(t, c.Match(net.ParseIP("1.1.1.1"), 1), "match 1.1.1.1:1 ok") + assert.True(t, c.Match(net.ParseIP("1.1.1.1"), 2), "match 1.1.1.1:2 ok") + assert.False(t, c.Match(net.ParseIP("1.1.1.1"), 3), "match 1.1.1.1:3 fails") + + assert.False(t, c.Match(net.ParseIP("1.1.1.2"), 1), "match 1.1.1.2 fails") + + assert.True(t, c.Match(net.ParseIP("2.2.2.2"), 3), "match 2.2.2.2:3 ok") + assert.True(t, c.Match(net.ParseIP("2.2.2.2"), 4), "match 2.2.2.2:4 ok") + assert.False(t, c.Match(net.ParseIP("2.2.2.2"), 5), "match 2.2.2.2:4 fails") + + assert.True(t, c.Match(net.ParseIP("2.128.3.3"), 3), "match 2.128.3.3:3 ok") + assert.True(t, c.Match(net.ParseIP("2.128.3.3"), 4), "match 2.128.3.3:4 ok") + assert.False(t, c.Match(net.ParseIP("2.128.3.3"), 5), "match 2.128.3.3:5 ok") + + assert.False(t, c.Match(net.ParseIP("3.0.0.0"), 1), "match 3.0.0.0 fails") + }, + }, { name: "reconcile-test: reconcile deleted cluster", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", Addr: "127.0.0.1", Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{}, + Clusters: []stnrv1.ClusterConfig{}, }, tester: func(t *testing.T, s *Stunner, err error) { assert.NoError(t, err, err) @@ -1656,7 +1783,6 @@ func TestStunnerReconcile(t *testing.T) { log.Debug("creating a stunnerd") conf, err := NewDefaultConfig("turn://user:pass@127.0.0.1:3478") assert.NoError(t, err, err) - conf.Admin.LogLevel = stunnerTestLoglevel log.Debug("creating a stunnerd") @@ -1667,7 +1793,7 @@ func TestStunnerReconcile(t *testing.T) { }) log.Debug("starting stunnerd") - assert.NoError(t, s.Reconcile(*conf), "starting server") + assert.NoError(t, s.Reconcile(conf), "starting server") runningConf := s.GetConfig() assert.NotNil(t, runningConf, "default stunner get config ok") @@ -1690,7 +1816,7 @@ func TestStunnerReconcile(t *testing.T) { assert.True(t, conf.DeepEqual(runningConf), "default stunner config ok") - err = s.Reconcile(c.config) + err = s.Reconcile(&c.config) c.tester(t, s, err) s.Close() @@ -1706,7 +1832,7 @@ func TestStunnerReconcile(t *testing.T) { type StunnerTestReconcileE2EConfig struct { testName string - config v1alpha1.StunnerConfig + config stnrv1.StunnerConfig echoServerAddr string bindSuccess, allocateSuccess, echoResult, restart bool } @@ -1753,14 +1879,14 @@ func testStunnerReconcileWithVNet(t *testing.T, testcases []StunnerTestReconcile }) 
log.Debug("starting stunnerd") - assert.NoError(t, s.Reconcile(*conf), "starting server") + assert.NoError(t, s.Reconcile(conf), "starting server") for _, c := range testcases { t.Run(c.testName, func(t *testing.T) { log.Debugf("-------------- Running test: %s -------------", c.testName) log.Debug("reconciling server") - err := s.Reconcile(c.config) + err := s.Reconcile(&c.config) if c.restart { assert.ErrorContains(t, err, "restart", "starting server") } else { @@ -1791,19 +1917,19 @@ func testStunnerReconcileWithVNet(t *testing.T, testcases []StunnerTestReconcile var testReconcileE2E = []StunnerTestReconcileE2EConfig{ { testName: "initial E2E reconcile test: empty server", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{}, - Clusters: []v1alpha1.ClusterConfig{}, + Listeners: []stnrv1.ListenerConfig{}, + Clusters: []stnrv1.ClusterConfig{}, }, echoServerAddr: "1.2.3.5:5678", restart: false, @@ -1813,27 +1939,27 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, { testName: "adding a listener at the wrong port", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3480, Routes: []string{ "echo-server-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{}, + Clusters: []stnrv1.ClusterConfig{}, }, echoServerAddr: "1.2.3.5:5678", restart: false, @@ -1843,27 +1969,27 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, { testName: "adding a cluster to a listener at the wrong port", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3480, Routes: []string{ "echo-server-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Endpoints: []string{ "1.2.3.5", @@ -1878,20 +2004,20 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, { testName: "adding a listener at the right port", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp-ok", - Protocol: "udp", + Protocol: "turn-udp", 
Addr: "1.2.3.4", Port: 3478, Routes: []string{ @@ -1899,14 +2025,14 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, }, { Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3480, Routes: []string{ "echo-server-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Endpoints: []string{ "1.2.3.5", @@ -1921,20 +2047,20 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, { testName: "changing the port in the wrong listener", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp-ok", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ @@ -1942,14 +2068,14 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, }, { Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3479, Routes: []string{ "echo-server-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Endpoints: []string{ "1.2.3.5", @@ -1963,21 +2089,21 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ echoResult: true, }, { - testName: "changing plaintext credentials to a wrong passwd", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + testName: "changing static credentials to a wrong passwd", + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "dummy", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp-ok", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ @@ -1985,14 +2111,14 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, }, { Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3479, Routes: []string{ "echo-server-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Endpoints: []string{ "1.2.3.5", @@ -2006,21 +2132,21 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ echoResult: false, }, { - testName: "changing auth to longterm credentials errs", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + testName: "changing auth to ephemeral credentials errs", + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "longterm", + Auth: stnrv1.AuthConfig{ + Type: "ephemeral", Credentials: map[string]string{ "secret": "dummy", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp-ok", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ @@ -2028,14 +2154,14 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, }, { Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3479, Routes: []string{ 
"echo-server-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Endpoints: []string{ "1.2.3.5", @@ -2049,22 +2175,22 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ echoResult: false, }, { - testName: "reverting good plaintext credentials ok", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + testName: "reverting good static credentials ok", + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Realm: "stunner.l7mp.io", Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp-ok", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ @@ -2072,14 +2198,14 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, }, { Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3479, Routes: []string{ "echo-server-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Endpoints: []string{ "1.2.3.5", @@ -2094,21 +2220,21 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, { testName: "realm reset induces a server restart", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Realm: "dummy", Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp-ok", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ @@ -2116,14 +2242,14 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, }, { Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3479, Routes: []string{ "echo-server-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Endpoints: []string{ "1.2.3.5", @@ -2138,21 +2264,21 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, { testName: "reverting the realm induces another server restart", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Realm: "stunner.l7mp.io", Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp-ok", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ @@ -2160,14 +2286,14 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, }, { Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3479, Routes: []string{ "echo-server-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Endpoints: []string{ "1.2.3.5", @@ -2182,20 +2308,20 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, { testName: "adding a cluster to the wrong IP", - 
config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp-ok", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ @@ -2204,7 +2330,7 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, }, { Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3479, Routes: []string{ @@ -2212,7 +2338,7 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ "dummy-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Endpoints: []string{ "1.2.3.5", @@ -2230,20 +2356,20 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, { testName: "removing working cluster", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp-ok", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ @@ -2252,7 +2378,7 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, }, { Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3479, Routes: []string{ @@ -2260,7 +2386,7 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ "dummy-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "dummy-cluster", Endpoints: []string{}, }}, @@ -2273,20 +2399,20 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, { testName: "reintroducing good cluster to the wrong IP", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp-ok", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ @@ -2295,7 +2421,7 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, }, { Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3479, Routes: []string{ @@ -2303,7 +2429,7 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ "dummy-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Endpoints: []string{ "1.2.3.5", @@ -2321,20 +2447,20 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, { testName: "removing wrong listener", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ 
Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp-ok", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ @@ -2342,7 +2468,7 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ "dummy-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Endpoints: []string{ "1.2.3.5", @@ -2360,20 +2486,20 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, { testName: "correct the wrong cluster and remove the good one", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp-ok", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ @@ -2381,7 +2507,7 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ "dummy-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Endpoints: []string{ "1.2.3.10", @@ -2401,20 +2527,20 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, { testName: "removing wrong cluster and reverting the working one", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp-ok", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ @@ -2422,7 +2548,7 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ "dummy-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Endpoints: []string{ "1.2.3.5", @@ -2437,27 +2563,27 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, { testName: "removing dangling cluster ref", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp-ok", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ "echo-server-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Endpoints: []string{ "1.2.3.5", @@ -2470,22 +2596,92 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ allocateSuccess: true, echoResult: true, }, + { + testName: "adding port range to cluster ok", + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ + LogLevel: stunnerTestLoglevel, + }, + Auth: stnrv1.AuthConfig{ + Credentials: 
map[string]string{ + "username": "user", + "password": "pass", + }, + }, + Listeners: []stnrv1.ListenerConfig{{ + Name: "udp-ok", + Protocol: "turn-udp", + Addr: "1.2.3.4", + Port: 3478, + Routes: []string{ + "echo-server-cluster", + }, + }}, + Clusters: []stnrv1.ClusterConfig{{ + Name: "echo-server-cluster", + Endpoints: []string{ + "1.2.3.5:<5678-5678>", + }, + }}, + }, + echoServerAddr: "1.2.3.5:5678", + restart: false, + bindSuccess: true, + allocateSuccess: true, + echoResult: true, + }, + { + testName: "extensing port range still ok", + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ + LogLevel: stunnerTestLoglevel, + }, + Auth: stnrv1.AuthConfig{ + Credentials: map[string]string{ + "username": "user", + "password": "pass", + }, + }, + Listeners: []stnrv1.ListenerConfig{{ + Name: "udp-ok", + Protocol: "turn-udp", + Addr: "1.2.3.4", + Port: 3478, + Routes: []string{ + "echo-server-cluster", + }, + }}, + Clusters: []stnrv1.ClusterConfig{{ + Name: "echo-server-cluster", + Endpoints: []string{ + "1.2.3.5:<1-10000>", + }, + }}, + }, + echoServerAddr: "1.2.3.5:5678", + restart: false, + bindSuccess: true, + allocateSuccess: true, + echoResult: true, + }, { testName: "converting cluster to strict dns", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp-ok", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ @@ -2493,7 +2689,7 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ "dummy-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Type: "STRICT_DNS", Endpoints: []string{ @@ -2509,27 +2705,27 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, { testName: "rewiring to an open cluster", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp-ok", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ "open-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "open-cluster", Endpoints: []string{ "0.0.0.0/0", @@ -2544,27 +2740,27 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, { testName: "closing open cluster", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp-ok", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ "open-cluster", }, }}, - 
Clusters: []v1alpha1.ClusterConfig{}, + Clusters: []stnrv1.ClusterConfig{}, }, echoServerAddr: "1.2.3.5:5678", restart: false, @@ -2574,19 +2770,19 @@ var testReconcileE2E = []StunnerTestReconcileE2EConfig{ }, { testName: "closing listener", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{}, - Clusters: []v1alpha1.ClusterConfig{}, + Listeners: []stnrv1.ListenerConfig{}, + Clusters: []stnrv1.ClusterConfig{}, }, echoServerAddr: "1.2.3.5:5678", restart: false, @@ -2610,27 +2806,27 @@ var testReconcileRollback = map[string][]StunnerTestReconcileE2EConfig{ "reconcile protocol": { { testName: "base config", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ "echo-server-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Endpoints: []string{ "1.2.3.5", @@ -2644,29 +2840,32 @@ var testReconcileRollback = map[string][]StunnerTestReconcileE2EConfig{ echoResult: true, }, { - // tcp will fail on vnet: must rollback for the test to succeed - testName: "reconcile listener with a changed protocol", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + // this will trigger an error at a later stage of reconciliation that the + // validation phase cannot catch and cause a rollback + testName: "reconcile listener with an invalid TLS cert/key", + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user", "password": "pass", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", - Protocol: "tcp", + Protocol: "turn-tls", Addr: "1.2.3.4", Port: 3478, + Key: "ZHVtbXkK", // base64: dummy + Cert: "ZHVtbXkK", // base64: dummy Routes: []string{ "echo-server-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Endpoints: []string{ "1.2.3.5", diff --git a/relay.go b/relay.go new file mode 100644 index 00000000..5986113d --- /dev/null +++ b/relay.go @@ -0,0 +1,231 @@ +package stunner + +// code adopted from github.com/livekit/pkg/telemetry + +import ( + "errors" + "fmt" + "net" + "sync" + "time" + + "github.com/pion/logging" + "github.com/pion/transport/v3" + "k8s.io/utils/lru" + + "github.com/l7mp/stunner/internal/object" + "github.com/l7mp/stunner/internal/telemetry" + "github.com/l7mp/stunner/pkg/logger" +) + +const ClusterCacheSize = 512 + +var ( + errNilConn = errors.New("cannot allocate relay connection") + errTodo = errors.New("relay to Net.Conn not implemented") 
+) + +var ( + ErrPortProhibited = errors.New("peer port administratively prohibited") + ErrInvalidPeerProtocol = errors.New("unknown peer transport protocol") +) + +type PortRangeChecker = func(addr net.Addr) (*object.Cluster, bool) + +// RelayGen can be used to only allocate connections inside a defined target port +// range. A static ip address can be set. +type RelayGen struct { + // Listener is the listener on behalf of which the relay address generator is created. + Listener *object.Listener + + // RelayAddress is the IP returned to the user when the relay is created. + RelayAddress net.IP + + // Address is passed to Listen/ListenPacket when creating the Relay. + Address string + + // ClusterCache is a cache that is used to couple relayed packets to clusters. + ClusterCache *lru.Cache + + // PortRangeChecker is a callback to check whether a peer address is allowed by any of the + // clusters of the listener. + PortRangeChecker PortRangeChecker + + // Net is a pion/transport VNet, used for testing. + Net transport.Net + + // Logger is a logger factory we can use to generate per-listener relay loggers. + Logger logger.LoggerFactory + + telemetry *telemetry.Telemetry +} + +func NewRelayGen(l *object.Listener, t *telemetry.Telemetry, logger logger.LoggerFactory) *RelayGen { + return &RelayGen{ + Listener: l, + RelayAddress: l.Addr, + Address: "0.0.0.0", + ClusterCache: lru.New(ClusterCacheSize), + Net: l.Net, + Logger: logger, + telemetry: t, + } +} + +// Validate is called on server startup and confirms the RelayAddressGenerator is properly configured. +func (r *RelayGen) Validate() error { + return nil +} + +// AllocatePacketConn generates a new transport relay connection and returns the IP/Port to be +// returned to the client in the allocation response. +func (r *RelayGen) AllocatePacketConn(network string, requestedPort int) (net.PacketConn, net.Addr, error) { + if requestedPort <= 1 || requestedPort > 2<<16-1 { + requestedPort = 0 + } + + conn, err := r.Net.ListenPacket(network, fmt.Sprintf("%s:%d", r.Address, requestedPort)) + if err != nil { + return nil, nil, err + } + + conn = NewPortRangePacketConn(conn, r.PortRangeChecker, r.telemetry, + r.Logger.NewLogger(fmt.Sprintf("relay-%s", r.Listener.Name))) + + relayAddr, ok := conn.LocalAddr().(*net.UDPAddr) + if !ok { + return nil, nil, errNilConn + } + + relayAddr.IP = r.RelayAddress + return conn, relayAddr, nil +} + +// AllocateConn generates a new Conn to receive traffic on and the IP/Port to populate the +// allocation response with +func (g *RelayGen) AllocateConn(network string, requestedPort int) (net.Conn, net.Addr, error) { + return nil, nil, errTodo +} + +// GenPortRangeChecker finds the cluster that is responsible for routing the packet and checks +// whether the peer address is in the port range specified for the cluster. The RelayGen caches +// recent hits for simplicity. 
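+//
+// An illustrative wiring sketch (not code from this package; variable names are
+// placeholders): a RelayGen is created per listener and the checker generated
+// here is attached to it before relay connections are allocated:
+//
+//	gen := NewRelayGen(listener, tm, loggerFactory)
+//	gen.PortRangeChecker = s.GenPortRangeChecker(gen)
+//	conn, relayAddr, err := gen.AllocatePacketConn("udp4", 0)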
+func (s *Stunner) GenPortRangeChecker(g *RelayGen) PortRangeChecker { + return func(addr net.Addr) (*object.Cluster, bool) { + u, ok := addr.(*net.UDPAddr) + if !ok { + return nil, false + } + + ip := u.IP.String() + c, ok := g.ClusterCache.Get(ip) + var cluster *object.Cluster + if ok { + // cache hit + cluster = c.(*object.Cluster) + } else { + // route + for _, r := range g.Listener.Routes { + c := s.GetCluster(r) + if c != nil && c.Route(u.IP) { + cluster = c + g.ClusterCache.Add(ip, c) + break + } + } + } + + if cluster != nil { + return cluster, cluster.Match(u.IP, u.Port) + } + + return nil, false + } +} + +// PortRangePacketConn is a net.PacketConn that filters on the target port range and also handles +// telemetry. +type PortRangePacketConn struct { + net.PacketConn + checker PortRangeChecker + readDeadline time.Time + telemetry *telemetry.Telemetry + lock sync.Mutex + log logging.LeveledLogger +} + +// NewPortRangePacketConn decorates a PacketConn with filtering on a target port range. Errors are reported per listener name. +func NewPortRangePacketConn(c net.PacketConn, checker PortRangeChecker, t *telemetry.Telemetry, log logging.LeveledLogger) net.PacketConn { + // cluster add/sub connection is not tracked + // AddConnection(n, t) + r := PortRangePacketConn{ + PacketConn: c, + checker: checker, + telemetry: t, + log: log, + } + + return &r +} + +// WriteTo writes to the PacketConn. +func (c *PortRangePacketConn) WriteTo(p []byte, peerAddr net.Addr) (int, error) { + cluster, ok := c.checker(peerAddr) + if !ok { + return 0, ErrPortProhibited + } + + n, err := c.PacketConn.WriteTo(p, peerAddr) + if n > 0 { + c.telemetry.IncrementBytes(cluster.Name, telemetry.ClusterType, telemetry.Outgoing, uint64(n)) + c.telemetry.IncrementPackets(cluster.Name, telemetry.ClusterType, telemetry.Outgoing, 1) + } + + return n, err +} + +// ReadFrom reads from the PortRangePacketConn. Blocks until a packet from the speciifed port range +// is received and drops all other packets. 
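+// Packets arriving from peers outside the allowed port range are dropped and the
+// read is retried, so callers only ever see traffic that passed the checker; the
+// retry loop is bounded by the deadline set via SetReadDeadline.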
+func (c *PortRangePacketConn) ReadFrom(p []byte) (int, net.Addr, error) { + for { + var peerAddr net.Addr + + err := c.PacketConn.SetReadDeadline(c.readDeadline) + if err != nil { + return 0, peerAddr, err + } + + n, peerAddr, err := c.PacketConn.ReadFrom(p) + + // Return errors unconditionally: peerAddr will most probably not be valid anyway + // so it is not worth checking + if err != nil { + return n, peerAddr, err + } + + cluster, ok := c.checker(peerAddr) + if !ok { + continue + } + + if n > 0 { + c.telemetry.IncrementBytes(cluster.Name, telemetry.ClusterType, telemetry.Incoming, uint64(n)) + c.telemetry.IncrementPackets(cluster.Name, telemetry.ClusterType, telemetry.Incoming, 1) + } + + return n, peerAddr, nil + } +} + +func (c *PortRangePacketConn) SetReadDeadline(t time.Time) error { + c.lock.Lock() + defer c.lock.Unlock() + c.readDeadline = t + return nil +} + +func (c *PortRangePacketConn) Close() error { + // cluster add/sub connection is not tracked + // SubConnection(c.name, c.connType) + return c.PacketConn.Close() +} diff --git a/relay_test.go b/relay_test.go new file mode 100644 index 00000000..09e665cb --- /dev/null +++ b/relay_test.go @@ -0,0 +1,249 @@ +package stunner + +import ( + "net" + "testing" + "time" + + "github.com/pion/transport/v3/test" + "github.com/pion/transport/v3/vnet" + "github.com/stretchr/testify/assert" + + "github.com/l7mp/stunner/internal/object" + "github.com/l7mp/stunner/internal/telemetry" + "github.com/l7mp/stunner/pkg/logger" +) + +var connTestLoglevel string = "all:ERROR" + +// var connTestLoglevel string = stnrv1.DefaultLogLevel +// var connTestLoglevel string = "all:INFO" +// var connTestLoglevel string = "all:TRACE" +// var connTestLoglevel string = "all:TRACE,vnet:INFO,turn:ERROR,turnc:ERROR" + +var testCluster = object.Cluster{Name: "test-cluster"} + +func getChecker(minPort, maxPort int) PortRangeChecker { + return func(addr net.Addr) (*object.Cluster, bool) { + u, ok := addr.(*net.UDPAddr) + if !ok { + return nil, false + } + + return &testCluster, u.Port >= minPort && u.Port <= maxPort + } +} + +func TestPortRangePacketConn(t *testing.T) { + lim := test.TimeOut(time.Second * 30) + defer lim.Stop() + + report := test.CheckRoutines(t) + defer report() + + loggerFactory := logger.NewLoggerFactory(connTestLoglevel) + log := loggerFactory.NewLogger("test") + + log.Debug("Creating vnet") + nw, err := vnet.NewNet(&vnet.NetConfig{}) + if !assert.NoError(t, err, "should succeed") { + return + } + + tm, err := telemetry.New(telemetry.Callbacks{}, false, loggerFactory.NewLogger("metric")) + assert.NoError(t, err, "should succeed") + defer tm.Close() + + t.Run("LoopbackOnValidPort", func(t *testing.T) { + log.Debug("Creating base socket") + addr := "127.0.0.1:15000" + baseConn, err := nw.ListenPacket("udp", addr) + assert.NoError(t, err, "should succeed") + msg := "PING!" 
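+		// In this subtest the loopback peer port (15000) lies inside the checker's admitted
+		// 10000-20000 range, so both the write and the read below are expected to succeed.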
+
+		log.Debug("Creating filtered packet conn wrapper socket")
+		conn := NewPortRangePacketConn(baseConn, getChecker(10000, 20000), tm, log)
+		assert.NoError(t, err, "should create port-range filtered packetconn")
+
+		log.Debug("Sending packet")
+		udpAddr, err := net.ResolveUDPAddr("udp", addr)
+		assert.NoError(t, err, "should resolve UDP address")
+		n, err := conn.WriteTo([]byte(msg), udpAddr)
+		assert.NoError(t, err, "should succeed")
+		assert.Equal(t, len(msg), n, "should match")
+
+		log.Debug("Receiving packet")
+		buf := make([]byte, 1000)
+		n, remoteAddr, err := conn.ReadFrom(buf)
+		assert.NoError(t, err, "should succeed")
+		assert.Equal(t, len(msg), n, "should match")
+		assert.Equal(t, msg, string(buf[:n]), "should match")
+		assert.Equal(t, udpAddr.String(), remoteAddr.String(), "should match") //nolint:forcetypeassert
+
+		log.Debug("Closing connection")
+		assert.NoError(t, conn.Close(), "should succeed") // should close baseConn
+	})
+
+	t.Run("LoopbackOnInvalidPort", func(t *testing.T) {
+		log.Debug("Creating base socket")
+		addr := "127.0.0.1:25000"
+		baseConn, err := nw.ListenPacket("udp", addr)
+		assert.NoError(t, err, "should succeed")
+		msg := "PING!"
+
+		log.Debug("Creating filtered packet conn wrapper socket")
+		conn := NewPortRangePacketConn(baseConn, getChecker(10000, 20000), tm, log)
+		assert.NoError(t, err, "should create port-range filtered packetconn")
+
+		log.Debug("Sending packet")
+		udpAddr, err := net.ResolveUDPAddr("udp", addr)
+		assert.NoError(t, err, "should resolve UDP address")
+		n, err := conn.WriteTo([]byte(msg), udpAddr)
+		assert.Error(t, err, "should reject")
+		assert.Equal(t, 0, n, "should match")
+
+		log.Debug("Receiving packet")
+		buf := make([]byte, 1000)
+		// this would hang otherwise
+		assert.NoError(t, conn.SetReadDeadline(time.Now().Add(10*time.Millisecond)), "read deadline")
+		_, _, err = conn.ReadFrom(buf)
+		assert.Error(t, err, "should be rejected")
+
+		log.Debug("Closing connection")
+		assert.NoError(t, conn.Close(), "should succeed")
+	})
+
+	t.Run("LoopbackOnSinglePort", func(t *testing.T) {
+		log.Debug("Creating base socket")
+		addr := "127.0.0.1:15000"
+		baseConn, err := nw.ListenPacket("udp", addr)
+		assert.NoError(t, err, "should succeed")
+		msg := "PING!"
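+		// Here the admitted "range" collapses to the single port 15000, which still matches
+		// the loopback peer 127.0.0.1:15000 used below.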
+
+		log.Debug("Creating filtered packet conn wrapper socket")
+		conn := NewPortRangePacketConn(baseConn, getChecker(15000, 15000), tm, log)
+		assert.NoError(t, err, "should create port-range filtered packetconn")
+
+		log.Debug("Sending packet")
+		udpAddr, err := net.ResolveUDPAddr("udp", addr)
+		assert.NoError(t, err, "should resolve UDP address")
+		n, err := conn.WriteTo([]byte(msg), udpAddr)
+		assert.NoError(t, err, "should succeed")
+		assert.Equal(t, len(msg), n, "should match")
+
+		log.Debug("Receiving packet")
+		buf := make([]byte, 1000)
+		n, remoteAddr, err := conn.ReadFrom(buf)
+		assert.NoError(t, err, "should succeed")
+		assert.Equal(t, len(msg), n, "should match")
+		assert.Equal(t, msg, string(buf[:n]), "should match")
+		assert.Equal(t, udpAddr.String(), remoteAddr.String(), "should match") //nolint:forcetypeassert
+
+		log.Debug("Closing connection")
+		assert.NoError(t, conn.Close(), "should succeed") // should close baseConn
+	})
+}
+
+// BenchmarkPortRangePacketConn sends lots of invalid packets: this is mostly for testing the logger
+func BenchmarkPortRangePacketConn(b *testing.B) {
+	loggerFactory := logger.NewLoggerFactory(connTestLoglevel)
+	log := loggerFactory.NewLogger("test")
+	// relayLog := loggerFactory.WithRateLimiter(.25, 1).NewLogger("relay")
+	relayLog := log
+
+	log.Debug("Creating vnet")
+	nw, err := vnet.NewNet(&vnet.NetConfig{})
+	if err != nil {
+		b.Fatalf("Cannot allocate vnet: %s", err.Error())
+	}
+
+	tm, err := telemetry.New(telemetry.Callbacks{}, false, loggerFactory.NewLogger("metric"))
+	assert.NoError(b, err, "should succeed")
+	defer tm.Close()
+
+	log.Debug("Creating base socket")
+	addr := "127.0.0.1:25000"
+	baseConn, err := nw.ListenPacket("udp", addr)
+	if err != nil {
+		b.Fatalf("Cannot listen on vnet: %s", err.Error())
+	}
+	msg := "PING!"
+
+	log.Debug("Creating filtered packet conn wrapper socket")
+	conn := WithCounter(NewPortRangePacketConn(baseConn, getChecker(15000, 15000), tm, relayLog))
+	if err != nil {
+		b.Fatalf("Cannot create port-range packetconn: %s", err.Error())
+	}
+
+	udpAddr, err := net.ResolveUDPAddr("udp", addr)
+	if err != nil {
+		b.Fatalf("Cannot resolve UDP address: %s", err.Error())
+	}
+
+	// Run benchmark
+	buffer := make([]byte, 1024)
+	for j := 0; j < b.N; j++ {
+		_, err := conn.WriteTo([]byte(msg), udpAddr)
+		if err == nil {
+			b.Fatal("Conn should reject write to invalid port")
+		}
+
+		// should never receive: we drop everything
+		err = conn.SetReadDeadline(time.Now().Add(50 * time.Millisecond))
+		if err != nil {
+			b.Fatalf("Could not set read deadline: %s", err)
+		}
+
+		conn.ReadFrom(buffer) //nolint:errcheck
+	}
+
+	readCounter := conn.(*CounterPacketConn).ReadCounter()
+	if readCounter != 0 {
+		b.Fatalf("Read counter (%d) should be 0", readCounter)
+	}
+
+	writeCounter := conn.(*CounterPacketConn).WriteCounter()
+	if writeCounter != 0 {
+		b.Fatalf("Write counter (%d) should be 0", writeCounter)
+	}
+
+	log.Debug("Closing connection")
+	err = conn.Close()
+	if err != nil {
+		b.Fatalf("Cannot close connection: %s", err.Error())
+	}
+}
+
+// CounterPacketConn is a net.PacketConn that counts successful reads and writes.
+type CounterPacketConn struct {
+	net.PacketConn
+	readCounter, writeCounter int
+}
+
+// WithCounter decorates a PacketConn with a counter.
+func WithCounter(c net.PacketConn) net.PacketConn {
+	return &CounterPacketConn{
+		PacketConn: c,
+	}
+}
+
+// WriteTo writes to the PacketConn.
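+// On a successful write it also increments the embedded write counter.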
+func (c *CounterPacketConn) WriteTo(p []byte, peerAddr net.Addr) (int, error) { + n, err := c.PacketConn.WriteTo(p, peerAddr) + if err == nil { + c.writeCounter++ + } + return n, err +} + +// ReadFrom reads from the CounterPacketConn. +func (c *CounterPacketConn) ReadFrom(p []byte) (int, net.Addr, error) { + n, addr, err := c.PacketConn.ReadFrom(p) + if err == nil { + c.readCounter++ + } + return n, addr, err +} + +func (c *CounterPacketConn) ReadCounter() int { return c.readCounter } +func (c *CounterPacketConn) WriteCounter() int { return c.writeCounter } diff --git a/reuseaddr_unix.go b/reuseaddr_unix.go new file mode 100644 index 00000000..fa545c48 --- /dev/null +++ b/reuseaddr_unix.go @@ -0,0 +1,12 @@ +//go:build unix + +package stunner + +import "syscall" + +func reuseAddr(network, address string, conn syscall.RawConn) error { + return conn.Control(func(descriptor uintptr) { + _ = syscall.SetsockoptInt(int(descriptor), syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1) + // syscall.SetsockoptInt(int(descriptor), syscall.SOL_SOCKET, syscall.SO_REUSEPORT, 1) + }) +} diff --git a/reuseaddr_windows.go b/reuseaddr_windows.go new file mode 100644 index 00000000..eedc298c --- /dev/null +++ b/reuseaddr_windows.go @@ -0,0 +1,12 @@ +//go:build windows + +package stunner + +import "syscall" + +func reuseAddr(network, address string, conn syscall.RawConn) error { + return conn.Control(func(descriptor uintptr) { + _ = syscall.SetsockoptInt(syscall.Handle(descriptor), syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1) + // syscall.SetsockoptInt(syscall.Handle(descriptor), syscall.SOL_SOCKET, syscall.SO_REUSEPORT, 1) + }) +} diff --git a/server.go b/server.go index 0292a04e..e3eda178 100644 --- a/server.go +++ b/server.go @@ -5,15 +5,25 @@ import ( "fmt" "net" - "github.com/pion/dtls/v2" - "github.com/pion/turn/v2" + "github.com/pion/dtls/v3" + "github.com/pion/turn/v4" + "golang.org/x/time/rate" "github.com/l7mp/stunner/internal/object" "github.com/l7mp/stunner/internal/telemetry" "github.com/l7mp/stunner/internal/util" - "github.com/l7mp/stunner/pkg/apis/v1alpha1" + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" + "github.com/l7mp/stunner/pkg/logger" ) +// Number of log events per second reported at ERROR, WARN and INFO loglevel (logging at DEBUG and +// TRACE levels is not rate-limited). +var LogRateLimit rate.Limit = 1.0 + +// Burst size for rate-limited logging at ERROR, WARN and INFO loglevel (logging at DEBUG and TRACE +// levels is not rate-limited). +var LogBurst = 3 + // Start will start the TURN server that belongs to a listener. 
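+// The TURN server installed here logs through a rate-limited logger factory derived from the
+// package-level LogRateLimit and LogBurst settings above; a deployment that wants chattier
+// ERROR/WARN/INFO output could raise them before starting any listener, e.g. (illustrative
+// values only):
+//
+//	stunner.LogRateLimit = rate.Limit(10)
+//	stunner.LogBurst = 20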
func (s *Stunner) StartServer(l *object.Listener) error { s.log.Infof("listener %s (re)starting", l.String()) @@ -22,22 +32,16 @@ func (s *Stunner) StartServer(l *object.Listener) error { var pConns []turn.PacketConnConfig var lConns []turn.ListenerConfig - relay := &telemetry.RelayAddressGenerator{ - Name: l.Name, - RelayAddress: l.Addr, - Address: l.Addr.String(), - MinPort: uint16(l.MinPort), - MaxPort: uint16(l.MaxPort), - Net: l.Net, - } + relay := NewRelayGen(l, s.telemetry, s.logger) + relay.PortRangeChecker = s.GenPortRangeChecker(relay) permissionHandler := s.NewPermissionHandler(l) - addr := fmt.Sprintf("%s:%d", l.Addr.String(), l.Port) + addr := fmt.Sprintf("0.0.0.0:%d", l.Port) switch l.Proto { - case v1alpha1.ListenerProtocolUDP: - socketPool := util.NewPacketConnPool(l.Net, s.udpThreadNum) + case stnrv1.ListenerProtocolTURNUDP: + socketPool := util.NewPacketConnPool(l.Name, l.Net, s.udpThreadNum, s.telemetry) s.log.Infof("setting up UDP listener socket pool at %s with %d readloop threads", addr, socketPool.Size()) @@ -47,9 +51,8 @@ func (s *Stunner) StartServer(l *object.Listener) error { } for _, c := range conns { - udpListener := telemetry.NewPacketConn(c, l.Name, telemetry.ListenerType) conn := turn.PacketConnConfig{ - PacketConn: udpListener, + PacketConn: c, RelayAddressGenerator: relay, PermissionHandler: permissionHandler, } @@ -58,7 +61,7 @@ func (s *Stunner) StartServer(l *object.Listener) error { pConns = append(pConns, conn) } - case v1alpha1.ListenerProtocolTCP: + case stnrv1.ListenerProtocolTURNTCP: s.log.Debugf("setting up TCP listener at %s", addr) tcpListener, err := net.Listen("tcp", addr) @@ -66,7 +69,7 @@ func (s *Stunner) StartServer(l *object.Listener) error { return fmt.Errorf("failed to create TCP listener at %s: %s", addr, err) } - tcpListener = telemetry.NewListener(tcpListener, l.Name, telemetry.ListenerType) + tcpListener = telemetry.NewListener(tcpListener, l.Name, telemetry.ListenerType, s.telemetry) conn := turn.ListenerConfig{ Listener: tcpListener, @@ -78,7 +81,7 @@ func (s *Stunner) StartServer(l *object.Listener) error { l.Conns = append(l.Conns, conn) // cannot test this on vnet, no TLS in vnet.Net - case v1alpha1.ListenerProtocolTLS: + case stnrv1.ListenerProtocolTURNTLS: s.log.Debugf("setting up TLS/TCP listener at %s", addr) cer, err := tls.X509KeyPair(l.Cert, l.Key) @@ -94,7 +97,7 @@ func (s *Stunner) StartServer(l *object.Listener) error { return fmt.Errorf("failed to create TLS listener at %s: %s", addr, err) } - tlsListener = telemetry.NewListener(tlsListener, l.Name, telemetry.ListenerType) + tlsListener = telemetry.NewListener(tlsListener, l.Name, telemetry.ListenerType, s.telemetry) conn := turn.ListenerConfig{ Listener: tlsListener, @@ -105,7 +108,7 @@ func (s *Stunner) StartServer(l *object.Listener) error { lConns = append(lConns, conn) l.Conns = append(l.Conns, conn) - case v1alpha1.ListenerProtocolDTLS: + case stnrv1.ListenerProtocolTURNDTLS: s.log.Debugf("setting up DTLS/UDP listener at %s", addr) cer, err := tls.X509KeyPair(l.Cert, l.Key) @@ -115,7 +118,10 @@ func (s *Stunner) StartServer(l *object.Listener) error { } // for some reason dtls.Listen requires a UDPAddr and not an addr string - udpAddr := &net.UDPAddr{IP: l.Addr, Port: l.Port} + udpAddr, err := net.ResolveUDPAddr("udp", addr) + if err != nil { + return fmt.Errorf("failed to parse DTLS listener address %s: %s", addr, err) + } dtlsListener, err := dtls.Listen("udp", udpAddr, &dtls.Config{ Certificates: []tls.Certificate{cer}, // ExtendedMasterSecret: 
dtls.RequireExtendedMasterSecret, @@ -124,7 +130,7 @@ func (s *Stunner) StartServer(l *object.Listener) error { return fmt.Errorf("failed to create DTLS listener at %s: %s", addr, err) } - dtlsListener = telemetry.NewListener(dtlsListener, l.Name, telemetry.ListenerType) + dtlsListener = telemetry.NewListener(dtlsListener, l.Name, telemetry.ListenerType, s.telemetry) conn := turn.ListenerConfig{ Listener: dtlsListener, @@ -136,7 +142,7 @@ func (s *Stunner) StartServer(l *object.Listener) error { l.Conns = append(l.Conns, conn) default: - return fmt.Errorf("internal error: unknown listener protocol " + l.Proto.String()) + return fmt.Errorf("internal error: unknown listener protocol %q", l.Proto.String()) } // start the TURN server if there are actual listeners configured @@ -148,9 +154,9 @@ func (s *Stunner) StartServer(l *object.Listener) error { t, err := turn.NewServer(turn.ServerConfig{ Realm: s.GetRealm(), AuthHandler: s.NewAuthHandler(), - LoggerFactory: s.logger, PacketConnConfigs: pConns, ListenerConfigs: lConns, + LoggerFactory: logger.NewRateLimitedLoggerFactory(s.logger, LogRateLimit, LogBurst), }) if err != nil { return fmt.Errorf("cannot set up TURN server for listener %s: %w", diff --git a/server_unix_test.go b/server_unix_test.go index b94f32f5..a00d5424 100644 --- a/server_unix_test.go +++ b/server_unix_test.go @@ -8,8 +8,7 @@ import ( "testing" "time" - "github.com/l7mp/stunner/pkg/apis/v1alpha1" - + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" "github.com/l7mp/stunner/pkg/logger" ) @@ -18,29 +17,29 @@ const clientNum = 20 // multithreaded UDP tests var TestStunnerConfigsMultithreadedUDP = []TestStunnerConfigCase{ { - config: v1alpha1.StunnerConfig{ + config: stnrv1.StunnerConfig{ // udp, plaintext - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Type: "plaintext", Credentials: map[string]string{ "username": "user1", "password": "passwd1", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "127.0.0.1", Port: 23478, PublicAddr: "1.2.3.4", PublicPort: 3478, Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -55,33 +54,34 @@ func TestStunnerMultithreadedUDP(t *testing.T) { // Benchmark func RunBenchmarkServer(b *testing.B, proto string, udpThreadNum int) { - // loggerFactory := logger.NewLoggerFactory("all:TRACE") + //loggerFactory := logger.NewLoggerFactory("all:TRACE") loggerFactory := logger.NewLoggerFactory(stunnerTestLoglevel) log := loggerFactory.NewLogger("test") initSeq := []byte("init-data") testSeq := []byte("benchmark-data") - log.Debug("creating a stunnerd") + log.Debug("Creating a stunnerd") stunner := NewStunner(Options{ LogLevel: stunnerTestLoglevel, SuppressRollback: true, UDPListenerThreadNum: udpThreadNum, // ignored for anything but UDP }) + defer stunner.Close() - log.Debug("starting stunnerd") - err := stunner.Reconcile(v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + log.Debug("Starting stunnerd") + err := stunner.Reconcile(&stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Type: "plaintext", Credentials: 
map[string]string{ "username": "user1", "password": "passwd1", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "default-listener", Protocol: proto, Addr: "127.0.0.1", @@ -90,7 +90,7 @@ func RunBenchmarkServer(b *testing.B, proto string, udpThreadNum int) { Key: keyPem64, Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -100,7 +100,7 @@ func RunBenchmarkServer(b *testing.B, proto string, udpThreadNum int) { b.Fatalf("Failed to start stunnerd: %s", err) } - log.Debug("creating a sink") + log.Debug("Creating a sink") sinkAddr, err := net.ResolveUDPAddr("udp4", "0.0.0.0:65432") if err != nil { b.Fatalf("Failed to resolve sink address: %s", err) @@ -110,6 +110,7 @@ func RunBenchmarkServer(b *testing.B, proto string, udpThreadNum int) { if err != nil { b.Fatalf("Failed to allocate sink: %s", err) } + defer sink.Close() //nolint:errcheck go func() { buf := make([]byte, 1600) @@ -125,11 +126,14 @@ func RunBenchmarkServer(b *testing.B, proto string, udpThreadNum int) { }() log.Debug("creating a turncat client") - stunnerURI := fmt.Sprintf("turn://127.0.0.1:23478?transport=%s", proto) - clientProto := "tcp" - if proto == "udp" || proto == "dtls" { + clientProto, turnScheme := "tcp", "turn" + if proto == "turn-udp" || proto == "turn-dtls" { clientProto = "udp" } + if proto == "turn-tls" || proto == "turn-dtls" { + turnScheme = "turns" + } + stunnerURI := fmt.Sprintf("%s://127.0.0.1:23478?transport=%s", turnScheme, clientProto) testTurncatConfig := TurncatConfig{ ListenerAddr: fmt.Sprintf("%s://127.0.0.1:25000", clientProto), ServerAddr: stunnerURI, @@ -143,9 +147,10 @@ func RunBenchmarkServer(b *testing.B, proto string, udpThreadNum int) { if err != nil { b.Fatalf("Failed to create turncat client: %s", err) } + defer turncat.Close() // test with 20 clients - log.Debugf("creating %d senders", clientNum) + log.Debugf("Creating %d senders", clientNum) clients := make([]net.Conn, clientNum) for i := 0; i < clientNum; i++ { var client net.Conn @@ -183,12 +188,9 @@ func RunBenchmarkServer(b *testing.B, proto string, udpThreadNum int) { time.Sleep(750 * time.Millisecond) - turncat.Close() - stunner.Close() for i := 0; i < clientNum; i++ { clients[i].Close() } - sink.Close() //nolint:errcheck } // BenchmarkUDPServer will benchmark the STUNner UDP server with a different number of readloop @@ -196,7 +198,7 @@ func RunBenchmarkServer(b *testing.B, proto string, udpThreadNum int) { func BenchmarkUDPServer(b *testing.B) { for i := 1; i <= 4; i++ { b.Run(fmt.Sprintf("udp:thread_num=%d", i), func(b *testing.B) { - RunBenchmarkServer(b, "udp", i) + RunBenchmarkServer(b, "turn-udp", i) }) } } @@ -205,7 +207,7 @@ func BenchmarkUDPServer(b *testing.B) { // threads. Setup: `client --tcp--> turncat --tcp--> stunner --udp--> sink` func BenchmarkTCPServer(b *testing.B) { b.Run("tcp", func(b *testing.B) { - RunBenchmarkServer(b, "tcp", 0) + RunBenchmarkServer(b, "turn-tcp", 0) }) } @@ -213,7 +215,7 @@ func BenchmarkTCPServer(b *testing.B) { // threads. Setup: `client --tcp--> turncat --tls--> stunner --udp--> sink` func BenchmarkTLSServer(b *testing.B) { b.Run("tls", func(b *testing.B) { - RunBenchmarkServer(b, "tls", 0) + RunBenchmarkServer(b, "turn-tls", 0) }) } @@ -221,6 +223,6 @@ func BenchmarkTLSServer(b *testing.B) { // threads. 
Setup: `client --udp--> turncat --dtls--> stunner --udp--> sink` func BenchmarkDTLSServer(b *testing.B) { b.Run("dtls", func(b *testing.B) { - RunBenchmarkServer(b, "dtls", 0) + RunBenchmarkServer(b, "turn-dtls", 0) }) } diff --git a/stunner.go b/stunner.go index 52038ec6..47941324 100644 --- a/stunner.go +++ b/stunner.go @@ -3,30 +3,34 @@ package stunner import ( "fmt" - "strings" + "os" + "github.com/google/uuid" "github.com/pion/logging" - "github.com/pion/transport/v2" - "github.com/pion/transport/v2/stdnet" + "github.com/pion/transport/v3" + "github.com/pion/transport/v3/stdnet" "github.com/l7mp/stunner/internal/manager" "github.com/l7mp/stunner/internal/object" "github.com/l7mp/stunner/internal/resolver" "github.com/l7mp/stunner/internal/telemetry" - "github.com/l7mp/stunner/pkg/apis/v1alpha1" + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" "github.com/l7mp/stunner/pkg/logger" ) const DefaultLogLevel = "all:WARN" +var DefaultInstanceId = fmt.Sprintf("default/stunnerd-%s", uuid.New().String()) + // Stunner is an instance of the STUNner deamon. type Stunner struct { - version string + name, version string adminManager, authManager, listenerManager, clusterManager manager.Manager suppressRollback, dryRun bool resolver resolver.DnsResolver udpThreadNum int - logger *logger.LoggerFactory + telemetry *telemetry.Telemetry + logger logger.LoggerFactory log logging.LeveledLogger net transport.Net ready, shutdown bool @@ -54,13 +58,13 @@ func NewStunner(options Options) *Stunner { if options.Net == nil { net, err := stdnet.NewNet() // defaults to native operation if err != nil { - log.Error("could not create stdnet.NewNet") + log.Error("Could not create vnet") return nil } vnet = net } else { vnet = options.Net - log.Warn("vnet is enabled") + log.Warn("Virtual net (vnet) is enabled") } udpThreadNum := 0 @@ -68,8 +72,18 @@ func NewStunner(options Options) *Stunner { udpThreadNum = options.UDPListenerThreadNum } + id := options.Name + if id == "" { + if h, err := os.Hostname(); err != nil { + id = DefaultInstanceId + } else { + id = fmt.Sprintf("default/stunnerd-%s", h) + } + } + s := &Stunner{ - version: v1alpha1.ApiVersion, + name: id, + version: stnrv1.ApiVersion, logger: logger, log: log, suppressRollback: options.SuppressRollback, @@ -80,7 +94,7 @@ func NewStunner(options Options) *Stunner { } s.adminManager = manager.NewManager("admin-manager", - object.NewAdminFactory(options.DryRun, s.NewReadinessHandler(), logger), logger) + object.NewAdminFactory(options.DryRun, s.NewReadinessHandler(), s.NewStatusHandler(), logger), logger) s.authManager = manager.NewManager("auth-manager", object.NewAuthFactory(logger), logger) s.listenerManager = manager.NewManager("listener-manager", @@ -88,18 +102,30 @@ func NewStunner(options Options) *Stunner { s.clusterManager = manager.NewManager("cluster-manager", object.NewClusterFactory(r, logger), logger) + telemetryCallbacks := telemetry.Callbacks{ + GetAllocationCount: func() int64 { return s.GetActiveConnections() }, + } + t, err := telemetry.New(telemetryCallbacks, s.dryRun, logger.NewLogger("metrics")) + if err != nil { + log.Errorf("Could not initialize metric provider: %s", err.Error()) + return nil + } + s.telemetry = t + if !s.dryRun { s.resolver.Start() - telemetry.Init() - // telemetry.RegisterMetrics(s.log, func() float64 { return s.GetActiveConnections() }) } - // TODO: remove this when STUNner gains self-managed dataplanes s.ready = true return s } +// GetId returns the id of the current stunnerd 
instance. +func (s *Stunner) GetId() string { + return s.name +} + // GetVersion returns the STUNner API version. func (s *Stunner) GetVersion() string { return s.version @@ -119,7 +145,7 @@ func (s *Stunner) Shutdown() { // GetAdmin returns the admin object underlying STUNner. func (s *Stunner) GetAdmin() *object.Admin { - a, found := s.adminManager.Get(v1alpha1.DefaultAdminName) + a, found := s.adminManager.Get(stnrv1.DefaultAdminName) if !found { panic("internal error: no Admin found") } @@ -128,7 +154,7 @@ func (s *Stunner) GetAdmin() *object.Admin { // GetAuth returns the authenitation object underlying STUNner. func (s *Stunner) GetAuth() *object.Auth { - a, found := s.authManager.Get(v1alpha1.DefaultAuthName) + a, found := s.authManager.Get(stnrv1.DefaultAuthName) if !found { panic("internal error: no Auth found") } @@ -186,30 +212,43 @@ func (s *Stunner) AllocationCount() int { return n } -// Status returns a short status description of the running STUNner instance. -func (s *Stunner) Status() string { - listeners := s.listenerManager.Keys() - ls := make([]string, len(listeners)) - for i, l := range listeners { - ls[i] = s.GetListener(l).String() +// Status returns the status for the running STUNner instance. +func (s *Stunner) Status() stnrv1.Status { + status := stnrv1.StunnerStatus{ApiVersion: s.version} + if admin := s.GetAdmin(); admin != nil { + status.Admin = admin.Status().(*stnrv1.AdminStatus) + } + if auth := s.GetAuth(); auth != nil { + status.Auth = auth.Status().(*stnrv1.AuthStatus) + } + + ls := s.listenerManager.Keys() + status.Listeners = make([]*stnrv1.ListenerStatus, len(ls)) + for i, lName := range ls { + if l := s.GetListener(lName); l != nil { + status.Listeners[i] = l.Status().(*stnrv1.ListenerStatus) + } } - str := "NONE" - if len(ls) > 0 { - str = strings.Join(ls, ", ") + + cs := s.clusterManager.Keys() + status.Clusters = make([]*stnrv1.ClusterStatus, len(cs)) + for i, cName := range cs { + if c := s.GetCluster(cName); c != nil { + status.Clusters[i] = c.Status().(*stnrv1.ClusterStatus) + } } - status := "READY" + status.AllocationCount = s.AllocationCount() + stat := "READY" if !s.ready { - status = "NOT-READY" + stat = "NOT-READY" } if s.shutdown { - status = "TERMINATING" + stat = "TERMINATING" } + status.Status = stat - auth := s.GetAuth() - return fmt.Sprintf("status: %s, realm: %s, authentication: %s, listeners: %s"+ - ", active allocations: %d", status, auth.Realm, auth.Type.String(), str, - s.AllocationCount()) + return &status } // Close stops the STUNner daemon, cleans up any internal state, and closes all connections @@ -244,13 +283,15 @@ func (s *Stunner) Close() { } } - // telemetry.UnregisterMetrics(s.log) - if !s.dryRun { - telemetry.Close() + if err := s.telemetry.Close(); err != nil { // blocks until finished + s.log.Errorf("Could not shutdown metric provider cleanly: %s", err.Error()) } s.resolver.Close() } // GetActiveConnections returns the number of active downstream (listener-side) TURN allocations. 
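+// It wraps AllocationCount and backs the GetAllocationCount telemetry callback registered in
+// NewStunner.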
-func (s *Stunner) GetActiveConnections() float64 { return 0.0 } +func (s *Stunner) GetActiveConnections() int64 { + count := s.AllocationCount() + return int64(count) +} diff --git a/stunner_test.go b/stunner_test.go index 1244d6b1..b10e6b54 100644 --- a/stunner_test.go +++ b/stunner_test.go @@ -3,6 +3,7 @@ package stunner import ( "crypto/tls" "encoding/base64" + "encoding/json" "fmt" "net" "net/http" @@ -11,28 +12,36 @@ import ( "testing" "time" - "github.com/pion/dtls/v2" + "github.com/pion/dtls/v3" "github.com/pion/logging" - "github.com/pion/transport/v2" - "github.com/pion/transport/v2/stdnet" - "github.com/pion/transport/v2/test" - "github.com/pion/transport/v2/vnet" - "github.com/pion/turn/v2" + "github.com/pion/transport/v3" + "github.com/pion/transport/v3/stdnet" + "github.com/pion/transport/v3/test" + "github.com/pion/transport/v3/vnet" + "github.com/pion/turn/v4" "github.com/stretchr/testify/assert" "github.com/l7mp/stunner/internal/resolver" + telemetrytester "github.com/l7mp/stunner/internal/telemetry/tester" "github.com/l7mp/stunner/pkg/logger" - "github.com/l7mp/stunner/pkg/apis/v1alpha1" + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" + stnrv1a1 "github.com/l7mp/stunner/pkg/apis/v1alpha1" a12n "github.com/l7mp/stunner/pkg/authentication" + cfgclient "github.com/l7mp/stunner/pkg/config/client" ) -var stunnerTestLoglevel string = "all:ERROR" +const ( + // timeout = 200 * time.Millisecond + // interval = 50 * time.Millisecond -// var stunnerTestLoglevel string = v1alpha1.DefaultLogLevel -// var stunnerTestLoglevel string = "all:INFO" -// var stunnerTestLoglevel string = "all:TRACE" -// var stunnerTestLoglevel string = "all:TRACE,vnet:INFO,turn:ERROR,turnc:ERROR" + stunnerTestLoglevel string = "all:ERROR" + + // stunnerTestLoglevel string = stnrv1.DefaultLogLevel + // stunnerTestLoglevel string = "all:INFO" + // stunnerTestLoglevel string = "all:TRACE" + // stunnerTestLoglevel string = "all:TRACE,vnet:INFO,turn:ERROR,turnc:ERROR" +) var certPem, keyPem, _ = GenerateSelfSignedKey() var certPem64 = base64.StdEncoding.EncodeToString(certPem) @@ -151,8 +160,10 @@ func stunnerEchoTest(conf echoTestConfig) { _, err = conn.WriteTo([]byte("Hello"), echoConn.LocalAddr()) assert.NoError(t, err, err) - _, from, err2 := conn.ReadFrom(buf) + n, from, err2 := conn.ReadFrom(buf) assert.NoError(t, err2, err2) + assert.Equal(t, n, len("Hello"), "message OK") + assert.Equal(t, []byte("Hello"), buf[:n], "message OK") // verify the message was received from the relay address assert.Equal(t, echoConn.LocalAddr().String(), from.String(), @@ -163,15 +174,14 @@ func stunnerEchoTest(conf echoTestConfig) { } else { // should fail _, err = conn.WriteTo([]byte("Hello"), echoConn.LocalAddr()) - assert.Errorf(t, err, "got error message %s", err.Error()) + assert.Errorf(t, err, "got error message %s", err) } assert.NoError(t, conn.Close(), "cannot close relay connection") assert.NoError(t, echoConn.Close(), "cannot close echo server connection") } } - time.Sleep(150 * time.Millisecond) + time.Sleep(50 * time.Millisecond) client.Close() - } // ***************** @@ -261,35 +271,35 @@ func buildVNet(logger logging.LoggerFactory) (*VNet, error) { *********************************************/ type TestStunnerConfigCase struct { - config v1alpha1.StunnerConfig + config stnrv1.StunnerConfig uri string } var TestStunnerConfigsWithLocalhost = []TestStunnerConfigCase{ { - config: v1alpha1.StunnerConfig{ - // udp, plaintext - ApiVersion: "v1alpha1", - 
Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + // udp, static + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "plaintext", + Auth: stnrv1.AuthConfig{ + Type: "static", Credentials: map[string]string{ "username": "user1", "password": "passwd1", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "127.0.0.1", Port: 23478, PublicAddr: "1.2.3.4", PublicPort: 3478, Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -297,28 +307,28 @@ var TestStunnerConfigsWithLocalhost = []TestStunnerConfigCase{ uri: "turn:1.2.3.4:3478?transport=udp", }, { - config: v1alpha1.StunnerConfig{ - // udp, longterm - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + // udp, ephemeral + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "longterm", + Auth: stnrv1.AuthConfig{ + Type: "ephemeral", Credentials: map[string]string{ "secret": "my-secret", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "127.0.0.1", Port: 23478, PublicAddr: "1.2.3.4", PublicPort: 3478, Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -326,29 +336,29 @@ var TestStunnerConfigsWithLocalhost = []TestStunnerConfigCase{ uri: "turn:1.2.3.4:3478?transport=udp", }, { - config: v1alpha1.StunnerConfig{ - // tcp, plaintext - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + // tcp, static + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "plaintext", + Auth: stnrv1.AuthConfig{ + Type: "static", Credentials: map[string]string{ "username": "user1", "password": "passwd1", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "tcp", - Protocol: "tcp", + Protocol: "turn-tcp", Addr: "127.0.0.1", Port: 23478, PublicAddr: "1.2.3.4", PublicPort: 3478, Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -356,28 +366,28 @@ var TestStunnerConfigsWithLocalhost = []TestStunnerConfigCase{ uri: "turn:1.2.3.4:3478?transport=tcp", }, { - config: v1alpha1.StunnerConfig{ - // tcp, longterm - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + // tcp, ephemeral + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "longterm", + Auth: stnrv1.AuthConfig{ + Type: "ephemeral", Credentials: map[string]string{ "secret": "my-secret", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "tcp", - Protocol: "tcp", + Protocol: "turn-tcp", Addr: "127.0.0.1", Port: 23478, PublicAddr: "1.2.3.4", PublicPort: 3478, Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -385,22 +395,22 @@ var 
TestStunnerConfigsWithLocalhost = []TestStunnerConfigCase{ uri: "turn:1.2.3.4:3478?transport=tcp", }, { - config: v1alpha1.StunnerConfig{ - // tls, plaintext - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + // tls, static + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "plaintext", + Auth: stnrv1.AuthConfig{ + Type: "static", Credentials: map[string]string{ "username": "user1", "password": "passwd1", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "tls", - Protocol: "tls", + Protocol: "turn-tls", Addr: "127.0.0.1", PublicAddr: "1.2.3.4", PublicPort: 3478, @@ -409,7 +419,7 @@ var TestStunnerConfigsWithLocalhost = []TestStunnerConfigCase{ Key: keyPem64, Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -417,21 +427,21 @@ var TestStunnerConfigsWithLocalhost = []TestStunnerConfigCase{ uri: "turns:1.2.3.4:3478?transport=tcp", }, { - config: v1alpha1.StunnerConfig{ - // tls, longterm - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + // tls, ephemeral + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "longterm", + Auth: stnrv1.AuthConfig{ + Type: "ephemeral", Credentials: map[string]string{ "secret": "my-secret", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "tls", - Protocol: "tls", + Protocol: "turn-tls", Addr: "127.0.0.1", Port: 23478, PublicAddr: "1.2.3.4", @@ -440,7 +450,7 @@ var TestStunnerConfigsWithLocalhost = []TestStunnerConfigCase{ Key: keyPem64, Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -448,22 +458,22 @@ var TestStunnerConfigsWithLocalhost = []TestStunnerConfigCase{ uri: "turns:1.2.3.4:3478?transport=tcp", }, { - config: v1alpha1.StunnerConfig{ - // dtls, plaintext - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + // dtls, static + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "plaintext", + Auth: stnrv1.AuthConfig{ + Type: "static", Credentials: map[string]string{ "username": "user1", "password": "passwd1", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "dtls", - Protocol: "dtls", + Protocol: "turn-dtls", Addr: "127.0.0.1", PublicAddr: "1.2.3.4", PublicPort: 3478, @@ -472,33 +482,33 @@ var TestStunnerConfigsWithLocalhost = []TestStunnerConfigCase{ Key: keyPem64, Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, }, uri: "turns:1.2.3.4:3478?transport=udp", }, - // // dtls, longterm + // // dtls, ephemeral // { - // ApiVersion: "v1alpha1", - // Admin: v1alpha1.AdminConfig{ + // ApiVersion: stnrv1.ApiVersion, + // Admin: stnrv1.AdminConfig{ // LogLevel: stunnerTestLoglevel, // }, - // Auth: v1alpha1.AuthConfig{ - // Type: "longterm", + // Auth: stnrv1.AuthConfig{ + // Type: "ephemeral", // Credentials: map[string]string{ // "secret": "my-secret", // }, // }, - // Listeners: []v1alpha1.ListenerConfig{{ + // Listeners: 
[]stnrv1.ListenerConfig{{ // Name: "dtls", - // Protocol: "dtls", + // Protocol: "turn-dtls", // Addr: "127.0.0.1", // Port: 23478, // Routes: []string{"allow-any"}, // }}, - // Clusters: []v1alpha1.ClusterConfig{{ + // Clusters: []stnrv1.ClusterConfig{{ // Name: "allow-any", // Endpoints: []string{"0.0.0.0/0"}, // }}, @@ -551,7 +561,7 @@ func testStunnerLocalhost(t *testing.T, udpThreadNum int, tests []TestStunnerCon // assert.False(t, stunner.IsReady(), "lifecycle 1: not-ready") log.Debug("starting stunnerd") - assert.NoError(t, stunner.Reconcile(c), "starting server") + assert.NoError(t, stunner.Reconcile(&c), "starting server") assert.False(t, stunner.shutdown, "lifecycle 2: alive") assert.True(t, stunner.ready, "lifecycle 2: ready") @@ -559,10 +569,10 @@ func testStunnerLocalhost(t *testing.T, udpThreadNum int, tests []TestStunnerCon var u, p string switch auth { - case "plaintext": + case "plaintext", "static": u = "user1" p = "passwd1" - case "longterm": + case "longterm", "ephemeral": u = a12n.GenerateTimeWindowedUsername(time.Now(), time.Minute, "") p2, err := a12n.GetLongTermCredential(u, "my-secret") assert.NoError(t, err, err) @@ -576,14 +586,14 @@ func testStunnerLocalhost(t *testing.T, udpThreadNum int, tests []TestStunnerCon log.Debug("creating a client") var lconn net.PacketConn switch proto { - case "udp": - lconn, err = net.ListenPacket("udp4", "0.0.0.0:0") + case "turn-udp": + lconn, err = net.ListenPacket("udp", "0.0.0.0:0") assert.NoError(t, err, "cannot create UDP client socket") - case "tcp": + case "turn-tcp": conn, cErr := net.Dial("tcp", stunnerAddr) assert.NoError(t, cErr, "cannot create TCP client socket") lconn = turn.NewSTUNConn(conn) - case "tls": + case "turn-tls": cer, err := tls.X509KeyPair(certPem, keyPem) assert.NoError(t, err, "cannot create certificate for TLS client socket") conn, err := tls.Dial("tcp", stunnerAddr, &tls.Config{ @@ -593,7 +603,7 @@ func testStunnerLocalhost(t *testing.T, udpThreadNum int, tests []TestStunnerCon }) assert.NoError(t, err, "cannot create TLS client socket") lconn = turn.NewSTUNConn(conn) - case "dtls": + case "turn-dtls": cer, err := tls.X509KeyPair(certPem, keyPem) assert.NoError(t, err, "cannot create certificate for DTLS client socket") // for some reason dtls.Listen requires a UDPAddr and not an addr string @@ -640,40 +650,41 @@ func testStunnerLocalhost(t *testing.T, udpThreadNum int, tests []TestStunnerCon // ***************** // // type StunnerClusterConfig struct { -// config v1alpha1.StunnerConfig +// config stnrv1.StunnerConfig // echoServerAddr string // result bool // } type StunnerTestClusterConfig struct { testName string - config v1alpha1.StunnerConfig + config stnrv1.StunnerConfig echoServerAddr string result bool + tester func(h *telemetrytester.Tester) } var testClusterConfigsWithVNet = []StunnerTestClusterConfig{ { testName: "open ok", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "plaintext", + Auth: stnrv1.AuthConfig{ + Type: "static", Credentials: map[string]string{ "username": "user1", "password": "passwd1", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{"echo-server-cluster"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: 
[]stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Type: "STATIC", Endpoints: []string{ @@ -686,28 +697,28 @@ var testClusterConfigsWithVNet = []StunnerTestClusterConfig{ }, { testName: "default cluster type static ok", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "plaintext", + Auth: stnrv1.AuthConfig{ + Type: "static", Credentials: map[string]string{ "username": "user1", "password": "passwd1", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ "echo-server-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Endpoints: []string{ "1.2.3.5", @@ -719,28 +730,28 @@ var testClusterConfigsWithVNet = []StunnerTestClusterConfig{ }, { testName: "static endpoint ok", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "plaintext", + Auth: stnrv1.AuthConfig{ + Type: "static", Credentials: map[string]string{ "username": "user1", "password": "passwd1", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ "echo-server-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Type: "STATIC", Endpoints: []string{ @@ -753,28 +764,28 @@ var testClusterConfigsWithVNet = []StunnerTestClusterConfig{ }, { testName: "static endpoint with wrong peer addr: fail", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "plaintext", + Auth: stnrv1.AuthConfig{ + Type: "static", Credentials: map[string]string{ "username": "user1", "password": "passwd1", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ "echo-server-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Type: "STATIC", Endpoints: []string{ @@ -787,21 +798,21 @@ var testClusterConfigsWithVNet = []StunnerTestClusterConfig{ }, { testName: "static endpoint with multiple routes ok", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "plaintext", + Auth: stnrv1.AuthConfig{ + Type: "static", Credentials: map[string]string{ "username": "user1", "password": "passwd1", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ @@ -809,7 +820,7 @@ var testClusterConfigsWithVNet = []StunnerTestClusterConfig{ "dummy_cluster", 
}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Type: "STATIC", Endpoints: []string{ @@ -828,21 +839,21 @@ var testClusterConfigsWithVNet = []StunnerTestClusterConfig{ }, { testName: "static endpoint with multiple routes and wrong peer addr fail", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "plaintext", + Auth: stnrv1.AuthConfig{ + Type: "static", Credentials: map[string]string{ "username": "user1", "password": "passwd1", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ @@ -850,7 +861,7 @@ var testClusterConfigsWithVNet = []StunnerTestClusterConfig{ "echo-server-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Type: "STATIC", Endpoints: []string{ @@ -869,28 +880,28 @@ var testClusterConfigsWithVNet = []StunnerTestClusterConfig{ }, { testName: "static endpoint with multiple ips ok", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "plaintext", + Auth: stnrv1.AuthConfig{ + Type: "static", Credentials: map[string]string{ "username": "user1", "password": "passwd1", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ "echo-server-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Type: "STATIC", Endpoints: []string{ @@ -907,28 +918,28 @@ var testClusterConfigsWithVNet = []StunnerTestClusterConfig{ }, { testName: "static endpoint with multiple ips with wrong peer addr fail", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "plaintext", + Auth: stnrv1.AuthConfig{ + Type: "static", Credentials: map[string]string{ "username": "user1", "password": "passwd1", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ "echo-server-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Type: "STATIC", Endpoints: []string{ @@ -944,28 +955,28 @@ var testClusterConfigsWithVNet = []StunnerTestClusterConfig{ }, { testName: "strict_dns ok", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "plaintext", + Auth: stnrv1.AuthConfig{ + Type: "static", Credentials: map[string]string{ "username": "user1", "password": "passwd1", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp", - 
Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ "echo-server-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Type: "STRICT_DNS", Endpoints: []string{ @@ -978,28 +989,28 @@ var testClusterConfigsWithVNet = []StunnerTestClusterConfig{ }, { testName: "strict_dns cluster and wrong peer addr fail", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "plaintext", + Auth: stnrv1.AuthConfig{ + Type: "static", Credentials: map[string]string{ "username": "user1", "password": "passwd1", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ "echo-server-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Type: "STRICT_DNS", Endpoints: []string{ @@ -1012,28 +1023,28 @@ var testClusterConfigsWithVNet = []StunnerTestClusterConfig{ }, { testName: "strict_dns cluster with multiple domains ok", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "plaintext", + Auth: stnrv1.AuthConfig{ + Type: "static", Credentials: map[string]string{ "username": "user1", "password": "passwd1", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ "echo-server-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "echo-server-cluster", Type: "STRICT_DNS", Endpoints: []string{ @@ -1047,21 +1058,21 @@ var testClusterConfigsWithVNet = []StunnerTestClusterConfig{ }, { testName: "multiple strict_dns clusters ok", - config: v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: stunnerTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "plaintext", + Auth: stnrv1.AuthConfig{ + Type: "static", Credentials: map[string]string{ "username": "user1", "password": "passwd1", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp", - Protocol: "udp", + Protocol: "turn-udp", Addr: "1.2.3.4", Port: 3478, Routes: []string{ @@ -1069,7 +1080,7 @@ var testClusterConfigsWithVNet = []StunnerTestClusterConfig{ "echo-server-cluster", }, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "stunner-cluster", Type: "STRICT_DNS", Endpoints: []string{ @@ -1123,15 +1134,15 @@ func TestStunnerClusterWithVNet(t *testing.T) { }) log.Debug("starting stunnerd") - assert.NoError(t, stunner.Reconcile(c.config), "starting server") + assert.NoError(t, stunner.Reconcile(&c.config), "starting server") var u, p string auth := c.config.Auth.Type switch auth { - case "plaintext": + case "plaintext", "static": u = "user1" p = "passwd1" - case "longterm": + case "longterm", "ephemeral": u, p, err = turn.GenerateLongTermCredentials("my-secret", time.Minute) assert.NoError(t, 
err, err) default: @@ -1154,6 +1165,593 @@ func TestStunnerClusterWithVNet(t *testing.T) { } } +// ***************** +// Port range filtering tests with VNet +// ***************** +var testPortRangeConfigsWithVNet = []StunnerTestClusterConfig{ + { + testName: "static endpoint with peer address in the admitted port range ok", + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ + LogLevel: stunnerTestLoglevel, + }, + Auth: stnrv1.AuthConfig{ + Type: "static", + Credentials: map[string]string{ + "username": "user1", + "password": "passwd1", + }, + }, + Listeners: []stnrv1.ListenerConfig{{ + Name: "udp", + Protocol: "turn-udp", + Addr: "1.2.3.4", + Port: 3478, + Routes: []string{ + "echo-server-cluster", + }, + }}, + Clusters: []stnrv1.ClusterConfig{{ + Name: "echo-server-cluster", + Type: "STATIC", + Endpoints: []string{ + "1.2.3.5:<5670-5680>", + }, + }}, + }, + echoServerAddr: "1.2.3.5:5678", + result: true, + tester: func(h *telemetrytester.Tester) { + // stunner_listener_connections_total + assert.Equal(h, 1, h.CollectAndCount("stunner_listener_connections_total")) // name: udp + assert.Equal(h, 1, h.CollectAndGetInt("stunner_listener_connections_total", "name", "udp")) + assert.Equal(h, 1, h.CollectAndGetInt("stunner_listener_connections_total", "name", "udp")) + + // stunner_listener_connections + assert.Equal(h, 1, h.CollectAndCount("stunner_listener_connections")) // name: udp + assert.Equal(h, 1, h.CollectAndGetInt("stunner_listener_connections", "name", "udp")) + assert.Equal(h, 1, h.CollectAndGetInt("stunner_listener_connections", "name", "udp")) + + // stunner_listener_packets_total + assert.Equal(h, 2, h.CollectAndCount("stunner_listener_packets_total")) + assert.Greater(h, h.CollectAndGetInt("stunner_listener_packets_total", "name", "udp", "direction", "rx"), 20) + assert.Greater(h, h.CollectAndGetInt("stunner_listener_packets_total", "name", "udp", "direction", "tx"), 20) + + // stunner_listener_bytes_total + assert.Equal(h, 2, h.CollectAndCount("stunner_listener_bytes_total")) + assert.Greater(h, h.CollectAndGetInt("stunner_listener_bytes_total", "name", "udp", "direction", "rx"), 20) + assert.Greater(h, h.CollectAndGetInt("stunner_listener_bytes_total", "name", "udp", "direction", "tx"), 20) + + // stunner_cluster_packets_total + assert.Equal(h, 2, h.CollectAndCount("stunner_cluster_packets_total")) + assert.Greater(h, h.CollectAndGetInt("stunner_cluster_packets_total", "name", "echo-server-cluster", "direction", "rx"), 20) + assert.Greater(h, h.CollectAndGetInt("stunner_cluster_packets_total", "name", "echo-server-cluster", "direction", "tx"), 20) + + // stunner_cluster_bytes_total + assert.Equal(h, 2, h.CollectAndCount("stunner_cluster_bytes_total")) + assert.Greater(h, h.CollectAndGetInt("stunner_cluster_bytes_total", "name", "echo-server-cluster", "direction", "rx"), 20) + assert.Greater(h, h.CollectAndGetInt("stunner_cluster_bytes_total", "name", "echo-server-cluster", "direction", "tx"), 20) + }, + }, + { + testName: "static endpoint with peer address matching singleton admitted port ok", + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ + LogLevel: stunnerTestLoglevel, + }, + Auth: stnrv1.AuthConfig{ + Type: "static", + Credentials: map[string]string{ + "username": "user1", + "password": "passwd1", + }, + }, + Listeners: []stnrv1.ListenerConfig{{ + Name: "udp", + Protocol: "turn-udp", + Addr: "1.2.3.4", + Port: 3478, + Routes: []string{ + "echo-server-cluster", + }, + }}, + Clusters: 
[]stnrv1.ClusterConfig{ + { + Name: "dummy-cluster", + Type: "STATIC", + Endpoints: []string{"1.2.3.6:<5678-5678>"}, + }, { + Name: "echo-server-cluster", + Type: "STATIC", + Endpoints: []string{"1.2.3.5:<5678-5678>"}, + }, + }, + }, + echoServerAddr: "1.2.3.5:5678", + result: true, + tester: func(h *telemetrytester.Tester) { + // stunner_listener_connections_total + assert.Equal(h, 1, h.CollectAndCount("stunner_listener_connections_total")) // name: udp + assert.Equal(h, 1, h.CollectAndGetInt("stunner_listener_connections_total", "name", "udp")) + assert.Equal(h, 1, h.CollectAndGetInt("stunner_listener_connections_total", "name", "udp")) + + // stunner_listener_connections + assert.Equal(h, 1, h.CollectAndCount("stunner_listener_connections")) // name: udp + assert.Equal(h, 1, h.CollectAndGetInt("stunner_listener_connections", "name", "udp")) + assert.Equal(h, 1, h.CollectAndGetInt("stunner_listener_connections", "name", "udp")) + + // stunner_listener_packets_total + assert.Equal(h, 2, h.CollectAndCount("stunner_listener_packets_total")) + assert.Greater(h, h.CollectAndGetInt("stunner_listener_packets_total", "name", "udp", "direction", "rx"), 200) + assert.Greater(h, h.CollectAndGetInt("stunner_listener_packets_total", "name", "udp", "direction", "tx"), 200) + + // stunner_listener_bytes_total + assert.Equal(h, 2, h.CollectAndCount("stunner_listener_bytes_total")) + assert.Greater(h, h.CollectAndGetInt("stunner_listener_bytes_total", "name", "udp", "direction", "rx"), 2000) + assert.Greater(h, h.CollectAndGetInt("stunner_listener_bytes_total", "name", "udp", "direction", "tx"), 2000) + + // stunner_cluster_packets_total + assert.Equal(h, 2, h.CollectAndCount("stunner_cluster_packets_total")) + assert.Greater(h, h.CollectAndGetInt("stunner_cluster_packets_total", "name", "echo-server-cluster", "direction", "rx"), 200) + assert.Greater(h, h.CollectAndGetInt("stunner_cluster_packets_total", "name", "echo-server-cluster", "direction", "tx"), 200) + + // stunner_cluster_bytes_total + assert.Equal(h, 2, h.CollectAndCount("stunner_cluster_bytes_total")) + assert.Greater(h, h.CollectAndGetInt("stunner_cluster_bytes_total", "name", "echo-server-cluster", "direction", "rx"), 2000) + assert.Greater(h, h.CollectAndGetInt("stunner_cluster_bytes_total", "name", "echo-server-cluster", "direction", "tx"), 2000) + }, + }, + { + testName: "static endpoint with peer address in rejected port range fails", + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ + LogLevel: stunnerTestLoglevel, + }, + Auth: stnrv1.AuthConfig{ + Type: "static", + Credentials: map[string]string{ + "username": "user1", + "password": "passwd1", + }, + }, + Listeners: []stnrv1.ListenerConfig{{ + Name: "udp", + Protocol: "turn-udp", + Addr: "1.2.3.4", + Port: 3478, + Routes: []string{ + "echo-server-cluster", + }, + }}, + Clusters: []stnrv1.ClusterConfig{{ + Name: "echo-server-cluster", + Type: "STATIC", + Endpoints: []string{ + "1.2.3.5:<1-5677>", + }, + }}, + }, + echoServerAddr: "1.2.3.5:5678", + result: false, + tester: func(h *telemetrytester.Tester) { + // stunner_listener_connections_total + assert.Equal(h, 1, h.CollectAndCount("stunner_listener_connections_total")) // name: udp + assert.Equal(h, 1, h.CollectAndGetInt("stunner_listener_connections_total", "name", "udp")) + assert.Equal(h, 1, h.CollectAndGetInt("stunner_listener_connections_total", "name", "udp")) + + // stunner_listener_connections + assert.Equal(h, 1, h.CollectAndCount("stunner_listener_connections")) // name: udp + 
assert.Equal(h, 1, h.CollectAndGetInt("stunner_listener_connections", "name", "udp")) + assert.Equal(h, 1, h.CollectAndGetInt("stunner_listener_connections", "name", "udp")) + + // stunner_listener_packets_total + assert.Equal(h, 2, h.CollectAndCount("stunner_listener_packets_total")) + assert.Greater(h, h.CollectAndGetInt("stunner_listener_packets_total", "name", "udp", "direction", "rx"), 500) // signaling+data + assert.Less(h, h.CollectAndGetInt("stunner_listener_packets_total", "name", "udp", "direction", "tx"), 50) // just signaling + + // stunner_listener_bytes_total + assert.Equal(h, 2, h.CollectAndCount("stunner_listener_bytes_total")) + assert.Greater(h, h.CollectAndGetInt("stunner_listener_bytes_total", "name", "udp", "direction", "rx"), 1000) // signaling+data + assert.Less(h, h.CollectAndGetInt("stunner_listener_bytes_total", "name", "udp", "direction", "tx"), 1000) // just signaling + + // stunner_cluster_packets_total + assert.Equal(h, 0, h.CollectAndCount("stunner_cluster_packets_total")) + assert.Equal(h, 0, h.CollectAndGetInt("stunner_cluster_packets_total", "name", "echo-server-cluster", "direction", "rx")) // fail + assert.Equal(h, 0, h.CollectAndGetInt("stunner_cluster_packets_total", "name", "echo-server-cluster", "direction", "tx")) // fail + + // stunner_cluster_bytes_total + assert.Equal(h, 0, h.CollectAndCount("stunner_cluster_bytes_total")) + assert.Equal(h, 0, h.CollectAndGetInt("stunner_cluster_bytes_total", "name", "echo-server-cluster", "direction", "rx")) // fail + assert.Equal(h, 0, h.CollectAndGetInt("stunner_cluster_bytes_total", "name", "echo-server-cluster", "direction", "tx")) // fail + }, + }, + { + testName: "static endpoint with peer address in rejected singleton port fails", + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ + LogLevel: stunnerTestLoglevel, + }, + Auth: stnrv1.AuthConfig{ + Type: "static", + Credentials: map[string]string{ + "username": "user1", + "password": "passwd1", + }, + }, + Listeners: []stnrv1.ListenerConfig{{ + Name: "udp", + Protocol: "turn-udp", + Addr: "1.2.3.4", + Port: 3478, + Routes: []string{ + "echo-server-cluster", + }, + }}, + Clusters: []stnrv1.ClusterConfig{{ + Name: "echo-server-cluster", + Type: "STATIC", + Endpoints: []string{ + "1.2.3.5:<5677-5677>", + }, + }}, + }, + echoServerAddr: "1.2.3.5:5678", + result: false, + tester: func(h *telemetrytester.Tester) { + // stunner_listener_connections_total + assert.Equal(h, 1, h.CollectAndCount("stunner_listener_connections_total")) // name: udp + assert.Equal(h, 1, h.CollectAndGetInt("stunner_listener_connections_total", "name", "udp")) + assert.Equal(h, 1, h.CollectAndGetInt("stunner_listener_connections_total", "name", "udp")) + + // stunner_listener_connections + assert.Equal(h, 1, h.CollectAndCount("stunner_listener_connections")) // name: udp + assert.Equal(h, 1, h.CollectAndGetInt("stunner_listener_connections", "name", "udp")) + assert.Equal(h, 1, h.CollectAndGetInt("stunner_listener_connections", "name", "udp")) + + // stunner_listener_packets_total + assert.Equal(h, 2, h.CollectAndCount("stunner_listener_packets_total")) + assert.Greater(h, h.CollectAndGetInt("stunner_listener_packets_total", "name", "udp", "direction", "rx"), 500) // signaling+data + assert.Less(h, h.CollectAndGetInt("stunner_listener_packets_total", "name", "udp", "direction", "tx"), 50) // just signaling + + // stunner_listener_bytes_total + assert.Equal(h, 2, h.CollectAndCount("stunner_listener_bytes_total")) + assert.Greater(h, 
h.CollectAndGetInt("stunner_listener_bytes_total", "name", "udp", "direction", "rx"), 1000) // signaling+data + assert.Less(h, h.CollectAndGetInt("stunner_listener_bytes_total", "name", "udp", "direction", "tx"), 1000) // just signaling + + // stunner_cluster_packets_total + assert.Equal(h, 0, h.CollectAndCount("stunner_cluster_packets_total")) + assert.Equal(h, 0, h.CollectAndGetInt("stunner_cluster_packets_total", "name", "echo-server-cluster", "direction", "rx")) // fail + assert.Equal(h, 0, h.CollectAndGetInt("stunner_cluster_packets_total", "name", "echo-server-cluster", "direction", "tx")) // fail + + // stunner_cluster_bytes_total + assert.Equal(h, 0, h.CollectAndCount("stunner_cluster_bytes_total")) + assert.Equal(h, 0, h.CollectAndGetInt("stunner_cluster_bytes_total", "name", "echo-server-cluster", "direction", "rx")) // fail + assert.Equal(h, 0, h.CollectAndGetInt("stunner_cluster_bytes_total", "name", "echo-server-cluster", "direction", "tx")) // fail + }, + }, + { + testName: "strict_dns with default port range ok", + config: stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ + LogLevel: stunnerTestLoglevel, + }, + Auth: stnrv1.AuthConfig{ + Type: "static", + Credentials: map[string]string{ + "username": "user1", + "password": "passwd1", + }, + }, + Listeners: []stnrv1.ListenerConfig{{ + Name: "udp", + Protocol: "turn-udp", + Addr: "1.2.3.4", + Port: 3478, + Routes: []string{ + "echo-server-cluster", + }, + }}, + Clusters: []stnrv1.ClusterConfig{ + { + Name: "dummy-cluster", + Type: "STATIC", + Endpoints: []string{"1.2.3.6"}, + }, { + Name: "echo-server-cluster", + Type: "STRICT_DNS", + Endpoints: []string{"echo-server.l7mp.io"}, + }, + }, + }, + echoServerAddr: "1.2.3.5:5678", + result: true, + tester: func(h *telemetrytester.Tester) { + // stunner_listener_connections_total + assert.Equal(h, 1, h.CollectAndCount("stunner_listener_connections_total")) // name: udp + assert.Equal(h, 1, h.CollectAndGetInt("stunner_listener_connections_total", "name", "udp")) + assert.Equal(h, 1, h.CollectAndGetInt("stunner_listener_connections_total", "name", "udp")) + + // stunner_listener_connections + assert.Equal(h, 1, h.CollectAndCount("stunner_listener_connections")) // name: udp + assert.Equal(h, 1, h.CollectAndGetInt("stunner_listener_connections", "name", "udp")) + assert.Equal(h, 1, h.CollectAndGetInt("stunner_listener_connections", "name", "udp")) + + // stunner_listener_packets_total + assert.Equal(h, 2, h.CollectAndCount("stunner_listener_packets_total")) + assert.Greater(h, h.CollectAndGetInt("stunner_listener_packets_total", "name", "udp", "direction", "rx"), 200) + assert.Greater(h, h.CollectAndGetInt("stunner_listener_packets_total", "name", "udp", "direction", "tx"), 200) + + // stunner_listener_bytes_total + assert.Equal(h, 2, h.CollectAndCount("stunner_listener_bytes_total")) + assert.Greater(h, h.CollectAndGetInt("stunner_listener_bytes_total", "name", "udp", "direction", "rx"), 2000) + assert.Greater(h, h.CollectAndGetInt("stunner_listener_bytes_total", "name", "udp", "direction", "tx"), 2000) + + // stunner_cluster_packets_total + assert.Equal(h, 2, h.CollectAndCount("stunner_cluster_packets_total")) + assert.Greater(h, h.CollectAndGetInt("stunner_cluster_packets_total", "name", "echo-server-cluster", "direction", "rx"), 200) + assert.Greater(h, h.CollectAndGetInt("stunner_cluster_packets_total", "name", "echo-server-cluster", "direction", "tx"), 200) + + // stunner_cluster_bytes_total + assert.Equal(h, 2, 
h.CollectAndCount("stunner_cluster_bytes_total")) + assert.Greater(h, h.CollectAndGetInt("stunner_cluster_bytes_total", "name", "echo-server-cluster", "direction", "rx"), 2000) + assert.Greater(h, h.CollectAndGetInt("stunner_cluster_bytes_total", "name", "echo-server-cluster", "direction", "tx"), 2000) + }, + }, + // TODO: implement port-range filtering for DNS clusters + // { + // testName: "strict_dns with prohibited port range fails", + // config: stnrv1.StunnerConfig{ + // ApiVersion: stnrv1.ApiVersion, + // Admin: stnrv1.AdminConfig{ + // LogLevel: stunnerTestLoglevel, + // }, + // Auth: stnrv1.AuthConfig{ + // Type: "static", + // Credentials: map[string]string{ + // "username": "user1", + // "password": "passwd1", + // }, + // }, + // Listeners: []stnrv1.ListenerConfig{{ + // Name: "udp", + // Protocol: "turn-udp", + // Addr: "1.2.3.4", + // Port: 3478, + // Routes: []string{ + // "echo-server-cluster", + // }, + // }}, + // Clusters: []stnrv1.ClusterConfig{{ + // Name: "echo-server-cluster", + // Type: "STRICT_DNS", + // MinRelayPort: 1, + // MaxRelayPort: 1, + // Endpoints: []string{ + // "echo-server.l7mp.io", + // }, + // }}, + // }, + // echoServerAddr: "1.2.3.5:5678", + // result: false, + // tester: func(t *testing.T) { + // c := telemetry.ListenerConnsTotal + // assert.Equal(t, 1, testutil.CollectAndCount(c), "ListenerConnsTotal") + // assert.Equal(t, float64(1), testutil.ToFloat64(c.WithLabelValues("udp"))) + + // g := telemetry.ListenerConnsActive + // assert.Equal(t, 1, testutil.CollectAndCount(g), "ListenerConnsTotal") + // assert.Equal(t, float64(1), testutil.ToFloat64(g.WithLabelValues("udp"))) + + // c = telemetry.ListenerPacketsTotal + // assert.Equal(t, 2, testutil.CollectAndCount(c), "ListenerConnsTotal") + // assert.GreaterOrEqual(t, testutil.ToFloat64(c.WithLabelValues("udp", "rx")), float64(500)) // signaling+data + // assert.LessOrEqual(t, testutil.ToFloat64(c.WithLabelValues("udp", "tx")), float64(50)) // just signaling + + // c = telemetry.ListenerBytesTotal + // assert.Equal(t, 2, testutil.CollectAndCount(c), "ListenerConnsTotal") + // assert.GreaterOrEqual(t, testutil.ToFloat64(c.WithLabelValues("udp", "rx")), float64(1000)) // signaling+data + // assert.LessOrEqual(t, testutil.ToFloat64(c.WithLabelValues("udp", "tx")), float64(1000)) // just signaling + + // c = telemetry.ClusterPacketsTotal + // assert.Equal(t, 0, testutil.CollectAndCount(c), "ListenerConnsTotal") + // assert.Equal(t, float64(0), testutil.ToFloat64(c.WithLabelValues("echo-server-cluster", "rx"))) + // assert.Equal(t, float64(0), testutil.ToFloat64(c.WithLabelValues("echo-server-cluster", "tx"))) + + // c = telemetry.ClusterBytesTotal + // assert.Equal(t, 0, testutil.CollectAndCount(c), "ListenerConnsTotal") + // assert.Equal(t, float64(0), testutil.ToFloat64(c.WithLabelValues("echo-server-cluster", "rx"))) + // assert.Equal(t, float64(0), testutil.ToFloat64(c.WithLabelValues("echo-server-cluster", "tx"))) + // }, + // }, +} + +func TestStunnerPortRangeWithVNet(t *testing.T) { + lim := test.TimeOut(time.Second * 60) + defer lim.Stop() + + report := test.CheckRoutines(t) + defer report() + + loggerFactory := logger.NewLoggerFactory(stunnerTestLoglevel) + log := loggerFactory.NewLogger("test") + + // log rate-limiter settings + LogRateLimit = 2 + LogBurst = 1 + + for _, c := range testPortRangeConfigsWithVNet { + t.Run(c.testName, func(t *testing.T) { + log.Debugf("-------------- Running test: %s -------------", c.testName) + + // patch in the vnet + log.Debug("building virtual network") + 
v, err := buildVNet(loggerFactory) + assert.NoError(t, err, err) + + log.Debug("setting up the mock DNS") + mockDns := resolver.NewMockResolver(map[string]([]string){ + "stunner.l7mp.io": []string{"1.2.3.4"}, + "echo-server.l7mp.io": []string{"1.2.3.5"}, + "dummy.l7mp.io": []string{"1.2.3.10"}, + }, loggerFactory) + + log.Debug("creating a stunnerd") + stunner := NewStunner(Options{ + LogLevel: stunnerTestLoglevel, + SuppressRollback: true, + DryRun: false, + Resolver: mockDns, + Net: v.podnet, + }) + + log.Debug("starting stunnerd") + assert.NoError(t, stunner.Reconcile(&c.config), "starting server") + + var u, p string + auth := c.config.Auth.Type + switch auth { + case "plaintext", "static": + u = "user1" + p = "passwd1" + case "longterm", "ephemeral": + u, p, err = turn.GenerateLongTermCredentials("my-secret", time.Minute) + assert.NoError(t, err, err) + default: + assert.NoError(t, fmt.Errorf("internal error: unknown auth type in test")) + } + + log.Debug("creating a client") + lconn, err := v.wan.ListenPacket("udp4", "0.0.0.0:0") + assert.NoError(t, err, "cannot create client listening socket") + + testConfig := echoTestConfig{t, v.podnet, v.wan, stunner, + "stunner.l7mp.io:3478", lconn, u, p, net.IPv4(5, 6, 7, 8), + c.echoServerAddr, true, true, c.result, loggerFactory} + stunnerEchoFloodTest(testConfig) + + if c.tester != nil { + c.tester(telemetrytester.New(stunner.telemetry, t)) + } + + assert.NoError(t, lconn.Close(), "cannot close TURN client connection") + stunner.Close() + assert.NoError(t, v.Close(), "cannot close VNet") + }) + } +} + +func stunnerEchoFloodTest(conf echoTestConfig) { + t := conf.t + t.Helper() + log := conf.loggerFactory.NewLogger("test") + + client, err := turn.NewClient(&turn.ClientConfig{ + STUNServerAddr: conf.stunnerAddr, + TURNServerAddr: conf.stunnerAddr, + Username: conf.user, + Password: conf.pass, + Conn: conf.lconn, + Net: conf.wan, + LoggerFactory: conf.loggerFactory, + }) + + assert.NoError(t, err, "cannot create TURN client") + assert.NoError(t, client.Listen(), "cannot listen on TURN client") + defer client.Close() + + log.Debug("sending a binding request") + // reflAddr, err := bindingRequestWithTimeout(client, 10000 * time.Millisecond) + reflAddr, err := client.SendBindingRequest() + if conf.bindSuccess == false { + assert.Error(t, err, "binding request failed") + } else { + assert.NoError(t, err, "binding request ok") + log.Debugf("mapped-address: %v", reflAddr.String()) + udpAddr := reflAddr.(*net.UDPAddr) + + // The mapped-address should have IP address that was assigned to the LAN router. 
+ assert.True(t, udpAddr.IP.Equal(conf.natAddr), "wrong srfx address") + + log.Debug("sending an allocate request") + conn, err := client.Allocate() + if conf.allocateSuccess == false { + assert.Error(t, err, err) + } else { + assert.NoError(t, err, err) + + // log.Debugf("laddr: %s", conn.LocalAddr().String()) + + log.Debugf("creating echo-server listener socket at: %s", conn.LocalAddr().String()) + echoConn, err := conf.podnet.ListenPacket("udp4", conf.echoServerAddr) + assert.NoError(t, err, "creating echo socket") + + // assert.NotNil(t, err, "echo socket not nil") + + go func() { + buf := make([]byte, 1600) + for { + n, from, err2 := echoConn.ReadFrom(buf) + if err2 != nil { + break + } + + // verify the message was received from the relay address + assert.Equal(t, conn.LocalAddr().String(), from.String(), + "message should be received from the relay address") + assert.Equal(t, "Hello", string(buf[:n]), "wrong message payload") + + // echo the data + _, err2 = echoConn.WriteTo(buf[:n], from) + assert.NoError(t, err2, err2) + } + }() + + buf := make([]byte, 1600) + if conf.echoSuccess == true { + for i := 0; i < 500; i++ { + log.Debug("sending \"Hello\"") + _, err = conn.WriteTo([]byte("Hello"), echoConn.LocalAddr()) + assert.NoError(t, err, err) + + n, from, err2 := conn.ReadFrom(buf) + assert.NoError(t, err2, err2) + assert.Equal(t, n, len("Hello"), "message OK") + assert.Equal(t, []byte("Hello"), buf[:n], "message OK") + + // verify the message was received from the relay address + assert.Equal(t, echoConn.LocalAddr().String(), from.String(), + "message should be received from the relay address") + + time.Sleep(2 * time.Millisecond) + } + } else { + // should fail but it does not: client does not get feedback on + // server-side port filtering + for i := 0; i < 500; i++ { + log.Debug("sending \"Hello\"") + _, err = conn.WriteTo([]byte("Hello"), echoConn.LocalAddr()) + assert.NoError(t, err, err) + + // read should time out + assert.NoError(t, conn.SetReadDeadline(time.Now().Add(2*time.Millisecond)), "read deadline") + _, _, err2 := conn.ReadFrom(buf) + assert.Error(t, err2, "deadline exceeded") + } + } + assert.NoError(t, conn.Close(), "cannot close relay connection") + assert.NoError(t, echoConn.Close(), "cannot close echo server connection") + } + } + + time.Sleep(150 * time.Millisecond) + client.Close() +} + /******************************************** * * lifecycle + health check tests @@ -1279,21 +1877,21 @@ func TestStunnerLifecycle(t *testing.T) { assert.Error(t, err, "no default readiness check for empty server") log.Debug("starting stunnerd with an empty stunner config") - conf := v1alpha1.StunnerConfig{ - ApiVersion: v1alpha1.ApiVersion, - Admin: v1alpha1.AdminConfig{LogLevel: stunnerTestLoglevel}, - Auth: v1alpha1.AuthConfig{ + conf := stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{LogLevel: stunnerTestLoglevel}, + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user-1", "password": "pass-1", }, }, - Listeners: []v1alpha1.ListenerConfig{}, - Clusters: []v1alpha1.ClusterConfig{}, + Listeners: []stnrv1.ListenerConfig{}, + Clusters: []stnrv1.ClusterConfig{}, } log.Debug("reconciling empty server") - err = s.Reconcile(conf) + err = s.Reconcile(&conf) assert.NoError(t, err, "reconcile empty server") status, err := doLivenessCheck("http://127.0.0.1:8086") @@ -1310,7 +1908,7 @@ func TestStunnerLifecycle(t *testing.T) { log.Debug("reconciling server") conf.Admin.HealthCheckEndpoint = c.hcEndpoint - err := 
s.Reconcile(conf) + err := s.Reconcile(&conf) assert.NoError(t, err, "cannot reconcile") // obtain hc address @@ -1328,7 +1926,7 @@ func TestStunnerLifecycle(t *testing.T) { port := u.Port() if port == "" { - port = strconv.Itoa(v1alpha1.DefaultHealthCheckPort) + port = strconv.Itoa(stnrv1.DefaultHealthCheckPort) } hc := fmt.Sprintf("http://%s:%s", addr, port) @@ -1344,7 +1942,7 @@ func TestStunnerLifecycle(t *testing.T) { // make sure health-check is running h := "0.0.0.0" conf.Admin.HealthCheckEndpoint = &h - assert.NoError(t, s.Reconcile(conf), "cannot reconcile") + assert.NoError(t, s.Reconcile(&conf), "cannot reconcile") status, err = doLivenessCheck("http://127.0.0.1:8086") assert.NoError(t, err, "liveness test before graceful-shutdown: running") @@ -1450,21 +2048,21 @@ func TestStunnerMetrics(t *testing.T) { // assert.False(t, s.IsReady(), "empty server not ready") log.Debug("starting stunnerd with an empty stunner config") - conf := v1alpha1.StunnerConfig{ - ApiVersion: v1alpha1.ApiVersion, - Admin: v1alpha1.AdminConfig{LogLevel: stunnerTestLoglevel}, - Auth: v1alpha1.AuthConfig{ + conf := stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{LogLevel: stunnerTestLoglevel}, + Auth: stnrv1.AuthConfig{ Credentials: map[string]string{ "username": "user-1", "password": "pass-1", }, }, - Listeners: []v1alpha1.ListenerConfig{}, - Clusters: []v1alpha1.ClusterConfig{}, + Listeners: []stnrv1.ListenerConfig{}, + Clusters: []stnrv1.ClusterConfig{}, } log.Debug("reconciling empty server") - err := s.Reconcile(conf) + err := s.Reconcile(&conf) assert.NoError(t, err, "reconcile empty server") assert.True(t, s.IsReady(), "server ready") @@ -1475,7 +2073,7 @@ func TestStunnerMetrics(t *testing.T) { log.Debug("reconciling server") conf.Admin.MetricsEndpoint = c.mcEndpoint - err := s.Reconcile(conf) + err := s.Reconcile(&conf) assert.NoError(t, err, "cannot reconcile") // obtain metric address @@ -1489,7 +2087,7 @@ func TestStunnerMetrics(t *testing.T) { port := u.Port() if port == "" { - port = strconv.Itoa(v1alpha1.DefaultMetricsPort) + port = strconv.Itoa(stnrv1.DefaultMetricsPort) } path := u.EscapedPath() @@ -1530,3 +2128,148 @@ func doLivenessCheck(uri string) (bool, error) { func doReadinessCheck(uri string) (bool, error) { return doHttp(uri + "/ready") } + +// ***************** +// v1alpha1 API compatibility tests +// ***************** +type TestConfigV1Alpha1 struct { + testName string + config []byte + echoServerAddr string + result bool +} + +var testConfigsV1Alpha1 = []TestConfigV1Alpha1{ + { + testName: "open ok", + config: []byte(`{"version":"v1alpha1","admin":{"loglevel":"all:ERROR"},"auth":{"type":"plaintext","credentials":{"password":"passwd1","username":"user1"}},"listeners":[{"name":"udp","protocol":"turn-udp","address":"1.2.3.4","port":3478,"routes":["echo-server-cluster"]}],"clusters":[{"name":"echo-server-cluster","type":"STATIC","endpoints":["1.2.3.5"]}]}`), + echoServerAddr: "1.2.3.5:5678", + result: true, + }, + { + testName: "default cluster type static ok", + config: []byte(`{"version":"v1alpha1","admin":{"loglevel":"all:ERROR"},"auth":{"type":"plaintext","credentials":{"password":"passwd1","username":"user1"}},"listeners":[{"name":"udp","protocol":"turn-udp","address":"1.2.3.4","port":3478,"routes":["echo-server-cluster"]}],"clusters":[{"name":"echo-server-cluster","endpoints":["1.2.3.5"]}]}`), + echoServerAddr: "1.2.3.5:5678", + result: true, + }, + { + testName: "static endpoint ok", + config: 
[]byte(`{"version":"v1alpha1","admin":{"loglevel":"all:ERROR"},"auth":{"type":"plaintext","credentials":{"password":"passwd1","username":"user1"}},"listeners":[{"name":"udp","protocol":"turn-udp","address":"1.2.3.4","port":3478,"routes":["echo-server-cluster"]}],"clusters":[{"name":"echo-server-cluster","type":"STATIC","endpoints":["1.2.3.5"]}]}`), + echoServerAddr: "1.2.3.5:5678", + result: true, + }, + { + testName: "static endpoint with multiple routes ok", + config: []byte(`{"version":"v1alpha1","admin":{"loglevel":"all:ERROR"},"auth":{"type":"plaintext","credentials":{"password":"passwd1","username":"user1"}},"listeners":[{"name":"udp","protocol":"turn-udp","address":"1.2.3.4","port":3478,"routes":["echo-server-cluster","dummy_cluster"]}],"clusters":[{"name":"echo-server-cluster","type":"STATIC","endpoints":["1.2.3.5"]},{"name":"dummy_cluster","type":"STATIC","endpoints":["9.8.7.6"]}]}`), + echoServerAddr: "1.2.3.5:5678", + result: true, + }, + { + testName: "longterm endpoint with multiple routes ok", + config: []byte(`{"version":"v1alpha1","admin":{"loglevel":"all:ERROR"},"auth":{"type":"longterm","credentials":{"secret":"my-secret"}},"listeners":[{"name":"udp","protocol":"turn-udp","public_address":"1.2.3.4","public_port":3478,"address":"127.0.0.1","port":3478,"routes":["allow-any"]}],"clusters":[{"name":"allow-any","endpoints":["0.0.0.0/0"]}]}`), + echoServerAddr: "1.2.3.5:5678", + result: true, + }, +} + +func TestStunnerConfigV1Alpha1(t *testing.T) { + lim := test.TimeOut(time.Second * 60) + defer lim.Stop() + + report := test.CheckRoutines(t) + defer report() + + loggerFactory := logger.NewLoggerFactory(stunnerTestLoglevel) + log := loggerFactory.NewLogger("test") + + for _, c := range testConfigsV1Alpha1 { + t.Run(c.testName, func(t *testing.T) { + log.Debugf("-------------- Running test: %s -------------", c.testName) + + // patch in the vnet + log.Debug("building virtual network") + v, err := buildVNet(loggerFactory) + assert.NoError(t, err, err) + + log.Debug("creating a stunnerd") + stunner := NewStunner(Options{ + LogLevel: stunnerTestLoglevel, + SuppressRollback: true, + Net: v.podnet, + }) + + log.Debug("parsing config to v1alpha1 format") + a := stnrv1a1.StunnerConfig{} + assert.NoError(t, json.Unmarshal(c.config, &a), "parsing config file to v1alpha1 format") + + assert.Equal(t, stnrv1a1.ApiVersion, a.ApiVersion, "version") + assert.Equal(t, "all:ERROR", a.Admin.LogLevel, "loglevel") + // expect the old names + assert.True(t, a.Auth.Type == "plaintext" || a.Auth.Type == "longterm", "loglevel") + assert.Len(t, a.Listeners, 1, "listeners len") + assert.Equal(t, "udp", a.Listeners[0].Name, "listener name") + assert.Equal(t, "turn-udp", a.Listeners[0].Protocol, "listener proto") + assert.Equal(t, 3478, a.Listeners[0].Port, "listener port") + assert.True(t, len(a.Clusters) > 0, "clusters len") + + log.Debug("converting config to v1 format") + a = stnrv1a1.StunnerConfig{} + assert.NoError(t, json.Unmarshal(c.config, &a), "parsing config file to v1alpha1 format") + config, err := stnrv1a1.ConvertToV1(&a) + assert.NoError(t, err, "convert loaded v1alpha1 config to v1") + + assert.Equal(t, stnrv1.ApiVersion, config.ApiVersion, "version") + assert.Equal(t, "all:ERROR", config.Admin.LogLevel, "loglevel") + // expect the new names + assert.True(t, config.Auth.Type == "static" || config.Auth.Type == "ephemeral", "loglevel") + assert.Len(t, config.Listeners, 1, "listeners len") + assert.Equal(t, "udp", config.Listeners[0].Name, "listener name") + assert.Equal(t, "turn-udp", 
config.Listeners[0].Protocol, "listener proto") + assert.Equal(t, 3478, config.Listeners[0].Port, "listener port") + assert.True(t, len(config.Clusters) > 0, "clusters len") + + log.Debug("parsing config directly to v1 format") + config, err = cfgclient.ParseConfig(c.config) + assert.NoError(t, err, "load v1alpha1 config ") + + assert.Equal(t, stnrv1.ApiVersion, config.ApiVersion, "version") + assert.Equal(t, "all:ERROR", config.Admin.LogLevel, "loglevel") + // expect the new names + assert.True(t, config.Auth.Type == "static" || config.Auth.Type == "ephemeral", "loglevel") + assert.Len(t, config.Listeners, 1, "listeners len") + assert.Equal(t, "udp", config.Listeners[0].Name, "listener name") + assert.Equal(t, "turn-udp", config.Listeners[0].Protocol, "listener proto") + assert.Equal(t, 3478, config.Listeners[0].Port, "listener port") + assert.True(t, len(config.Clusters) > 0, "clusters len") + + log.Debug("starting stunnerd") + assert.NoError(t, stunner.Reconcile(config), "starting server") + + var u, p string + auth := config.Auth.Type + switch auth { + case "plaintext", "static": + u = "user1" + p = "passwd1" + case "longterm", "ephemeral": + u, p, err = turn.GenerateLongTermCredentials("my-secret", time.Minute) + assert.NoError(t, err, err) + default: + assert.NoError(t, fmt.Errorf("internal error: unknown auth type in test")) + } + + log.Debug("creating a client") + lconn, err := v.wan.ListenPacket("udp4", "0.0.0.0:0") + assert.NoError(t, err, "cannot create client listening socket") + + testConfig := echoTestConfig{t, v.podnet, v.wan, stunner, + "stunner.l7mp.io:3478", lconn, u, p, net.IPv4(5, 6, 7, 8), + c.echoServerAddr, true, true, c.result, loggerFactory} + stunnerEchoTest(testConfig) + + assert.NoError(t, lconn.Close(), "cannot close TURN client connection") + stunner.Close() + assert.NoError(t, v.Close(), "cannot close VNet") + }) + } +} diff --git a/tools.go b/tools.go new file mode 100644 index 00000000..2940fbb0 --- /dev/null +++ b/tools.go @@ -0,0 +1,7 @@ +//go:build tools + +package stunner + +import ( + _ "github.com/deepmap/oapi-codegen/v2/cmd/oapi-codegen" +) diff --git a/turncat.go b/turncat.go index 7f836d3a..7dd9dcce 100644 --- a/turncat.go +++ b/turncat.go @@ -5,16 +5,17 @@ import ( "crypto/tls" "fmt" "net" + "net/url" "os" "strings" "sync" - "github.com/pion/dtls/v2" + "github.com/pion/dtls/v3" "github.com/pion/logging" - "github.com/pion/turn/v2" + "github.com/pion/turn/v4" "github.com/l7mp/stunner/internal/util" - "github.com/l7mp/stunner/pkg/apis/v1alpha1" + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" ) const UDP_PACKET_SIZE = 1500 @@ -34,6 +35,8 @@ type TurncatConfig struct { Realm string // AuthGet specifies the function to generate auth tokens. AuthGen AuthGen + // ServerName is the SNI used for virtual hosting (unless it is an IP address). + ServerName string // InsecureMode controls whether self-signed TLS certificates are accepted by the TURN // client. InsecureMode bool @@ -52,6 +55,7 @@ type Turncat struct { connTrack map[string]*connection // Conntrack table. lock *sync.Mutex // Sync access to the conntrack state. authGen AuthGen // Generate auth tokens. 
+ serverName string insecure bool loggerFactory logging.LoggerFactory log logging.LeveledLogger @@ -78,8 +82,7 @@ func NewTurncat(config *TurncatConfig) (*Turncat, error) { log.Tracef("Resolving TURN server address: %s", config.ServerAddr) server, sErr := ParseUri(config.ServerAddr) if sErr != nil { - return nil, fmt.Errorf("error resolving server address %s: %s", - config.ServerAddr, sErr.Error()) + return nil, fmt.Errorf("error resolving server address %s: %w", config.ServerAddr, sErr) } if server.Address == "" || server.Port == 0 { return nil, fmt.Errorf("error resolving TURN server address %s: empty address (\"%s\") "+ @@ -87,30 +90,32 @@ func NewTurncat(config *TurncatConfig) (*Turncat, error) { } log.Tracef("Resolving listener address: %s", config.ListenerAddr) - listener, lErr := ParseUri(config.ListenerAddr) - if lErr != nil { - return nil, fmt.Errorf("error resolving listener address %s: %s", - config.ListenerAddr, lErr.Error()) + // special case the "-" client address + if config.ListenerAddr == "-" { + config.ListenerAddr = "file://stdin" } - if listener.Port == 0 { - return nil, fmt.Errorf("error resolving listener address %s: invalid port (%d)", - config.ListenerAddr, listener.Port) + listener, lErr := url.Parse(config.ListenerAddr) + if lErr != nil { + return nil, fmt.Errorf("error parsing listener address %q: %w", config.ListenerAddr, lErr) } + listenerProtocol := strings.ToLower(listener.Scheme) log.Tracef("Resolving peer address: %s", config.PeerAddr) - peer, pErr := ParseUri(config.PeerAddr) + peer, pErr := url.Parse(config.PeerAddr) if pErr != nil { - return nil, fmt.Errorf("error resolving peer address %s: %s", - config.PeerAddr, pErr.Error()) + return nil, fmt.Errorf("error parsing peer address %q: %w", config.PeerAddr, pErr) + } + // default to UDP + peerAddress, err := net.ResolveUDPAddr("udp", peer.Host) + if err != nil { + return nil, fmt.Errorf("error resolving peer address %q: %w", config.PeerAddr, err) } - if peer.Address == "" || peer.Port == 0 || !strings.HasPrefix(peer.Protocol, "udp") { - return nil, fmt.Errorf("error resolving peer address %s: invalid protocol (\"%s\"), "+ - "empty address (\"%s\") or invalid port (%d)", config.PeerAddr, - peer.Protocol, peer.Address, peer.Port) + if peerAddress == nil || peerAddress.IP == nil { + return nil, fmt.Errorf("empty IP address in peer URL %q", config.PeerAddr) } if config.Realm == "" { - config.Realm = v1alpha1.DefaultRealm + config.Realm = stnrv1.DefaultRealm } // a global listener connection for the local tunnel endpoint @@ -119,45 +124,57 @@ func NewTurncat(config *TurncatConfig) (*Turncat, error) { var listenerConn interface{} listenerConf := &net.ListenConfig{Control: reuseAddr} - switch listener.Protocol { + var listenerAddress net.Addr + switch listenerProtocol { case "file": listenerConn = util.NewFileConn(os.Stdin) case "udp", "udp4", "udp6", "unixgram", "ip", "ip4", "ip6": - l, err := listenerConf.ListenPacket(context.Background(), listener.Addr.Network(), - listener.Addr.String()) + addr, err := net.ResolveUDPAddr("udp", listener.Host) + if err != nil { + return nil, fmt.Errorf("error resolving listener address %q: %w", config.ListenerAddr, err) + } + + l, err := listenerConf.ListenPacket(context.Background(), addr.Network(), addr.String()) if err != nil { return nil, fmt.Errorf("cannot create listening client packet socket at %s: %s", config.ListenerAddr, err) } + listenerAddress = addr listenerConn = l case "tcp", "tcp4", "tcp6", "unix", "unixpacket": - l, err := 
listenerConf.Listen(context.Background(), listener.Addr.Network(), listener.Addr.String()) + addr, err := net.ResolveTCPAddr("tcp", listener.Host) + if err != nil { + return nil, fmt.Errorf("error resolving listener address %q: %w", config.ListenerAddr, err) + } + + l, err := listenerConf.Listen(context.Background(), addr.Network(), addr.String()) if err != nil { return nil, fmt.Errorf("cannot create listening client socket at %s: %s", config.ListenerAddr, err) } + listenerAddress = addr listenerConn = l default: - return nil, fmt.Errorf("unknown client protocol %s for client %s", - listener.Addr.Network(), config.ListenerAddr) + return nil, fmt.Errorf("unknown client protocol %s", listenerProtocol) } t := &Turncat{ - listenerAddr: listener.Addr, + listenerAddr: listenerAddress, serverAddr: server.Addr, serverProto: server.Protocol, - peerAddr: peer.Addr, + peerAddr: peerAddress, listenerConn: listenerConn, connTrack: make(map[string]*connection), lock: new(sync.Mutex), realm: config.Realm, authGen: config.AuthGen, + serverName: config.ServerName, insecure: config.InsecureMode, loggerFactory: loggerFactory, log: log, } - switch t.listenerAddr.Network() { + switch listenerProtocol { case "udp", "udp4", "udp6", "unixgram", "ip", "ip4", "ip6": // client connection is a packet conn, write our own Listen/Accept loop for UDP // main loop: for every new packet we create a new connection and connect it back to the client @@ -169,12 +186,13 @@ func NewTurncat(config *TurncatConfig) (*Turncat, error) { // client connection is file go t.runListenFile() default: - t.log.Errorf("internal error: unknown client protocol %s for client %s:%s", - t.listenerAddr.Network(), t.listenerAddr.Network(), t.listenerAddr.String()) + t.log.Errorf("Internal error: unknown client protocol %q for client %s:%s", + listenerAddress.Network(), listenerAddress.Network(), listenerAddress.String()) } - log.Infof("Turncat client listening on %s, TURN server: %s, peer: %s", - config.ListenerAddr, config.ServerAddr, config.PeerAddr) + log.Infof("Client listening on %s, TURN server: %s, peer: %s:%s", + config.ListenerAddr, config.ServerAddr, + peerAddress.Network(), peerAddress.String()) return t, nil } @@ -182,7 +200,7 @@ func NewTurncat(config *TurncatConfig) (*Turncat, error) { // Close terminates all relay connections created via turncat and deletes the relay. Errors in this // phase are not critical and not propagated back to the caller. 
func (t *Turncat) Close() { - t.log.Info("closing Turncat") + t.log.Info("Closing Turncat") // close all active connections for _, conn := range t.connTrack { @@ -192,72 +210,72 @@ func (t *Turncat) Close() { // close the global listener socket switch t.listenerConn.(type) { case net.Listener: - t.log.Tracef("closing turncat listener connection") + t.log.Tracef("Closing turncat listener connection") l := t.listenerConn.(net.Listener) if err := l.Close(); err != nil { - t.log.Warnf("error closing listener connection: %s", err.Error()) + t.log.Warnf("Error closing listener connection: %s", err.Error()) } case net.PacketConn: - t.log.Tracef("closing turncat packet listener connection") + t.log.Tracef("Closing turncat packet listener connection") l := t.listenerConn.(net.PacketConn) if err := l.Close(); err != nil { - t.log.Warnf("error closing listener packet connection: %s", err.Error()) + t.log.Warnf("Error closing listener packet connection: %s", err.Error()) } case *util.FileConn: // do nothing default: - t.log.Error("internal error: unknown listener socket type") + t.log.Error("Internal error: unknown listener socket type") } } // Generate a new connection by opening a UDP connection to the server func (t *Turncat) newConnection(clientConn net.Conn) (*connection, error) { clientAddr := clientConn.RemoteAddr() - t.log.Debugf("new connection from client %s", clientAddr.String()) + t.log.Debugf("New connection from client %s", clientAddr.String()) conn := new(connection) conn.clientAddr = clientAddr conn.clientConn = clientConn - t.log.Tracef("Setting up TURN client to server %s:%s", - t.serverAddr.Network(), t.serverAddr.String()) + t.log.Tracef("Setting up TURN client to server %s:%s", t.serverAddr.Network(), t.serverAddr.String()) user, passwd, errAuth := t.authGen() if errAuth != nil { - return nil, fmt.Errorf("cannot generate username/password pair for client %s:%s: %s", + return nil, fmt.Errorf("failed to generate username/password pair for client %s:%s: %s", clientAddr.Network(), clientAddr.String(), errAuth) } // connection for the TURN client var turnConn net.PacketConn - switch t.serverProto { - case "udp", "udp4", "udp6", "unixgram", "ip", "ip4", "ip6": + switch strings.ToLower(t.serverProto) { + case "turn-udp": t, err := net.ListenPacket(t.serverAddr.Network(), "0.0.0.0:0") if err != nil { - return nil, fmt.Errorf("cannot allocate TURN listening packet socket for client %s:%s: %s", + return nil, fmt.Errorf("failed to allocate TURN listening packet socket for client %s:%s: %s", clientAddr.Network(), clientAddr.String(), err) } turnConn = t - case "tcp", "tcp4", "tcp6", "unix", "unixpacket": + case "turn-tcp": c, err := net.Dial(t.serverAddr.Network(), t.serverAddr.String()) if err != nil { - return nil, fmt.Errorf("cannot allocate TURN socket for client %s:%s: %s", + return nil, fmt.Errorf("failed to allocate TURN socket for client %s:%s: %s", clientAddr.Network(), clientAddr.String(), err) } turnConn = turn.NewSTUNConn(c) - case "tls": + case "turn-tls": // cert, err := tls.LoadX509KeyPair(certFile.Name(), keyFile.Name()) // assert.NoError(t, err, "cannot create certificate for TLS client socket") c, err := tls.Dial("tcp", t.serverAddr.String(), &tls.Config{ MinVersion: tls.VersionTLS10, + ServerName: t.serverName, InsecureSkipVerify: t.insecure, }) if err != nil { - return nil, fmt.Errorf("cannot allocate TURN/TLS socket for client %s:%s: %s", + return nil, fmt.Errorf("failed to allocate TURN/TLS socket for client %s:%s: %s", clientAddr.Network(), clientAddr.String(), err) } 
turnConn = turn.NewSTUNConn(c) - case "dtls": + case "turn-dtls": // cert, err := tls.LoadX509KeyPair(certFile.Name(), keyFile.Name()) // assert.NoError(t, err, "cannot create certificate for DTLS client socket") udpAddr, _ := net.ResolveUDPAddr("udp", t.serverAddr.String()) @@ -265,13 +283,13 @@ func (t *Turncat) newConnection(clientConn net.Conn) (*connection, error) { InsecureSkipVerify: t.insecure, }) if err != nil { - return nil, fmt.Errorf("cannot allocate TURN/DTLS socket for client %s:%s: %s", + return nil, fmt.Errorf("failed to allocate TURN/DTLS socket for client %s:%s: %s", clientAddr.Network(), clientAddr.String(), err) } turnConn = turn.NewSTUNConn(conn) default: - return nil, fmt.Errorf("unknown TURN server protocol %s for client %s:%s", - t.serverAddr.Network(), clientAddr.Network(), clientAddr.String()) + return nil, fmt.Errorf("unknown TURN server protocol %q for client %s:%s", + t.serverProto, clientAddr.Network(), clientAddr.String()) } turnClient, err := turn.NewClient(&turn.ClientConfig{ @@ -285,7 +303,7 @@ func (t *Turncat) newConnection(clientConn net.Conn) (*connection, error) { }) if err != nil { turnConn.Close() - return nil, fmt.Errorf("cannot allocate TURN client for client %s:%s: %s", + return nil, fmt.Errorf("failed to allocate TURN client for client %s:%s: %s", clientAddr.Network(), clientAddr.String(), err) } conn.turnConn = turnConn @@ -293,7 +311,7 @@ func (t *Turncat) newConnection(clientConn net.Conn) (*connection, error) { // Start the TURN client if err = turnClient.Listen(); err != nil { turnConn.Close() - return nil, fmt.Errorf("cannot listen on TURN client: %s", err) + return nil, fmt.Errorf("failed to listen on TURN client: %s", err) } conn.turnClient = turnClient @@ -302,14 +320,14 @@ func (t *Turncat) newConnection(clientConn net.Conn) (*connection, error) { serverConn, serverErr := turnClient.Allocate() if serverErr != nil { turnClient.Close() - return nil, fmt.Errorf("could not allocate new TURN relay transport for client %s:%s: %s", + return nil, fmt.Errorf("failed to allocate TURN relay transport for client %s:%s: %s", clientAddr.Network(), clientAddr.String(), serverErr.Error()) } conn.serverConn = serverConn // The relayConn's local address is actually the transport // address assigned on the TURN server. 
- t.log.Infof("new connection: client-address=%s, relayed-address=%s", + t.log.Infof("New connection: client-address=%s, relayed-address=%s", clientAddr.String(), conn.serverConn.LocalAddr().String()) return conn, nil @@ -329,14 +347,14 @@ func (t *Turncat) deleteConnection(conn *connection) { delete(t.connTrack, caddr) t.lock.Unlock() - t.log.Infof("closing client connection to %s", caddr) + t.log.Infof("Closing client connection to %s", caddr) if err := conn.clientConn.Close(); err != nil { - t.log.Warnf("error closing client connection for %s:%s: %s", + t.log.Warnf("Error closing client connection for %s:%s: %s", conn.clientAddr.Network(), conn.clientAddr.String(), err.Error()) } if err := conn.serverConn.Close(); err != nil { - t.log.Warnf("error closing relayed TURN server connection for %s:%s: %s", + t.log.Warnf("Error closing relayed TURN server connection for %s:%s: %s", conn.clientAddr.Network(), conn.clientAddr.String(), err.Error()) } @@ -353,7 +371,7 @@ func (t *Turncat) runConnection(conn *connection) { n, peerAddr, readErr := conn.serverConn.ReadFrom(buffer[0:]) if readErr != nil { if !util.IsClosedErr(readErr) { - t.log.Debugf("cannot read from TURN relay connection for client %s:%s: %s", + t.log.Debugf("Cannot read from TURN relay connection for client %s:%s: %s", conn.clientAddr.Network(), conn.clientAddr.String(), readErr.Error()) t.deleteConnection(conn) } @@ -362,7 +380,7 @@ func (t *Turncat) runConnection(conn *connection) { // TODO: not sure if this is the recommended way to compare net.Addrs if peerAddr.Network() != t.peerAddr.Network() || peerAddr.String() != t.peerAddr.String() { - t.log.Debugf("received packet of %d bytes from unknown peer %s:%s (expected: "+ + t.log.Debugf("Received packet of %d bytes from unknown peer %s:%s (expected: "+ "%s:%s) on TURN relay connection for client %s:%s: ignoring", n, peerAddr.Network(), peerAddr.String(), t.peerAddr.Network(), t.peerAddr.String(), @@ -370,12 +388,12 @@ func (t *Turncat) runConnection(conn *connection) { continue } - t.log.Tracef("forwarding packet of %d bytes from peer %s:%s on TURN relay connection "+ + t.log.Tracef("Forwarding packet of %d bytes from peer %s:%s on TURN relay connection "+ "for client %s:%s", n, peerAddr.Network(), peerAddr.String(), conn.clientAddr.Network(), conn.clientAddr.String()) if _, writeErr := conn.clientConn.Write(buffer[0:n]); writeErr != nil { - t.log.Debugf("cannot write to client connection for client %s:%s: %s", + t.log.Debugf("Cannot write to client connection for client %s:%s: %s", conn.clientAddr.Network(), conn.clientAddr.String(), writeErr.Error()) t.deleteConnection(conn) return @@ -390,19 +408,19 @@ func (t *Turncat) runConnection(conn *connection) { n, readErr := conn.clientConn.Read(buffer[0:]) if readErr != nil { if !util.IsClosedErr(readErr) { - t.log.Debugf("cannot read from client connection for client %s:%s (likely hamrless): %s", + t.log.Debugf("Cannot read from client connection for client %s:%s (likely harmless): %s", conn.clientAddr.Network(), conn.clientAddr.String(), readErr.Error()) t.deleteConnection(conn) } return } - t.log.Tracef("forwarding packet of %d bytes from client %s:%s to peer %s:%s on TURN relay connection", + t.log.Tracef("Forwarding packet of %d bytes from client %s:%s to peer %s:%s on TURN relay connection", n, conn.clientAddr.Network(), conn.clientAddr.String(), t.peerAddr.Network(), t.peerAddr.String()) if _, writeErr := conn.serverConn.WriteTo(buffer[0:n], t.peerAddr); writeErr != nil { - t.log.Debugf("cannot write to TURN relay 
connection for client %s (likely hamrless): %s", + t.log.Debugf("Cannot write to TURN relay connection for client %s (likely harmless): %s", conn.clientAddr.String(), writeErr.Error()) t.deleteConnection(conn) return @@ -414,7 +432,7 @@ func (t *Turncat) runConnection(conn *connection) { func (t *Turncat) runListenPacket() { listenerConn, ok := t.listenerConn.(net.PacketConn) if !ok { - t.log.Error("cannot listen on client connection: expected net.PacketConn") + t.log.Error("Cannot listen on client connection: expected net.PacketConn") // terminate go routine return } @@ -424,7 +442,7 @@ func (t *Turncat) runListenPacket() { n, clientAddr, err := listenerConn.ReadFrom(buffer[0:]) if err != nil { if !util.IsClosedErr(err) { - t.log.Warnf("cannot read from listener connection: %s", err.Error()) + t.log.Warnf("Cannot read from listener connection: %s", err.Error()) } return } @@ -434,15 +452,15 @@ func (t *Turncat) runListenPacket() { caddr := fmt.Sprintf("%s:%s", clientAddr.Network(), clientAddr.String()) trackConn, found := t.connTrack[caddr] if !found { - t.log.Tracef("new client connection: read initial packet of %d bytes on listener"+ + t.log.Tracef("New client connection: read initial packet of %d bytes on listener"+ "connnection from client %s", n, caddr) // create per-client connection, connect back to client, then call runConnection - t.log.Tracef("connnecting back to client %s", caddr) + t.log.Tracef("Connecting back to client %s", caddr) dialer := &net.Dialer{LocalAddr: t.listenerAddr, Control: reuseAddr} clientConn, clientErr := dialer.Dial(clientAddr.Network(), clientAddr.String()) if clientErr != nil { - t.log.Warnf("cannot connect back to client %s:%s: %s", + t.log.Warnf("Cannot connect back to client %s:%s: %s", clientAddr.Network(), clientAddr.String(), clientErr.Error()) continue } @@ -450,8 +468,7 @@ func (t *Turncat) runListenPacket() { conn, err := t.newConnection(clientConn) if err != nil { t.lock.Unlock() - t.log.Warnf("relay setup failed for client %s, dropping client connection", - caddr) + t.log.Warnf("Relay setup failed for client %s: %s", caddr, err.Error()) continue } @@ -464,7 +481,7 @@ func (t *Turncat) runListenPacket() { // and send the packet out if _, err := conn.serverConn.WriteTo(buffer[0:n], t.peerAddr); err != nil { - t.log.Warnf("cannot write initial packet to TURN relay connection for client %s: %s", + t.log.Warnf("Cannot write initial packet to TURN relay connection for client %s: %s", caddr, err.Error()) t.deleteConnection(conn) continue @@ -475,7 +492,7 @@ func (t *Turncat) runListenPacket() { // packets are left stuck in the global listener socket t.lock.Unlock() - t.log.Debugf("received packet from a known client %s on the global listener connection, sender too fast?", + t.log.Debugf("Received packet from a known client %s on the global listener connection, sender too fast?", caddr) // send out anyway if _, err := trackConn.serverConn.WriteTo(buffer[0:n], t.peerAddr); err != nil { diff --git a/turncat_test.go b/turncat_test.go index 7b59e29b..f776522a 100644 --- a/turncat_test.go +++ b/turncat_test.go @@ -3,23 +3,23 @@ package stunner import ( "fmt" "net" + "net/url" "strings" "testing" "time" "github.com/pion/logging" - "github.com/pion/transport/test" - "github.com/pion/turn/v2" + "github.com/pion/transport/v3/test" + "github.com/pion/turn/v4" "github.com/stretchr/testify/assert" + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" "github.com/l7mp/stunner/pkg/logger" - - "github.com/l7mp/stunner/pkg/apis/v1alpha1" ) var 
turncatTestLoglevel string = "all:ERROR" -//var turncatTestLoglevel string = "all:TRACE" +// var turncatTestLoglevel string = "all:TRACE" var sharedSecret = "my-secret" var defaultDuration = "10m" @@ -38,7 +38,7 @@ type turncatEchoTestConfig struct { // client lconn net.Conn // peer - peer *StunnerUri + peer net.Addr loggerFactory logging.LoggerFactory } @@ -46,8 +46,7 @@ func turncatEchoTest(conf turncatEchoTestConfig) { t := conf.t log := conf.loggerFactory.NewLogger("test") - peerAddr := fmt.Sprintf("%s:%d", conf.peer.Address, conf.peer.Port) - echoConn, err := net.ListenPacket("udp4", peerAddr) + echoConn, err := net.ListenPacket(conf.peer.Network(), conf.peer.String()) assert.NoError(t, err, "cannot allocate echo server connection") go func() { @@ -112,32 +111,33 @@ func TestTurncatPlaintext(t *testing.T) { SuppressRollback: true, }) - err := stunner.Reconcile(v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ - LogLevel: turncatTestLoglevel, + err := stunner.Reconcile(&stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ + LogLevel: turncatTestLoglevel, + MetricsEndpoint: "", }, - Auth: v1alpha1.AuthConfig{ + Auth: stnrv1.AuthConfig{ Type: "plaintext", Credentials: map[string]string{ "username": "user1", "password": "passwd1", }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp-listener-23478", - Protocol: "udp", + Protocol: "turn-udp", Addr: "127.0.0.1", Port: 23478, Routes: []string{"allow-any"}, }, { Name: "tcp-listener-23478", - Protocol: "tcp", + Protocol: "turn-tcp", Addr: "127.0.0.1", Port: 23478, Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -178,35 +178,32 @@ func TestTurncatPlaintext(t *testing.T) { } for _, c := range testTurncatConfigs { - listener, err := ParseUri(c.ListenerAddr) + listener, err := url.Parse(c.ListenerAddr) assert.NoError(t, err, "cannot parse turncat listener URI") server, err := ParseUri(c.ServerAddr) assert.NoError(t, err, "cannot parse server URI") testName := fmt.Sprintf("TestTurncat_NewTurncat_Plaintext_client:%s_server:%s", - listener.Protocol, server.Protocol) + listener.Scheme, server.Protocol) t.Run(testName, func(t *testing.T) { log.Debugf("-------------- Running test: %s -------------", testName) - peer, err := ParseUri(c.PeerAddr) - assert.NoError(t, err, "cannot parse peer URI") log.Debug("creating turncat relay") turncat, err := NewTurncat(&c) assert.NoError(t, err, "cannot create turncat relay") - lconn, err := net.Dial(listener.Protocol, - fmt.Sprintf("%s:%d", listener.Address, listener.Port)) + lconn, err := net.Dial(turncat.listenerAddr.Network(), turncat.listenerAddr.String()) assert.NoError(t, err, "cannot create client socket") - if strings.HasPrefix(listener.Protocol, "tcp") { + if strings.HasPrefix(turncat.listenerAddr.Network(), "tcp") { // prevent "addess already in use" errors: close sends RST assert.NoError(t, lconn.(*net.TCPConn).SetLinger(0), "cannnot set TCP linger") } - testConfig := turncatEchoTestConfig{t, stunner, lconn, peer, logger} + testConfig := turncatEchoTestConfig{t, stunner, lconn, turncat.peerAddr, logger} turncatEchoTest(testConfig) turncat.Close() @@ -230,31 +227,31 @@ func TestTurncatLongterm(t *testing.T) { LogLevel: turncatTestLoglevel, SuppressRollback: true, }) - err := stunner.Reconcile(v1alpha1.StunnerConfig{ - ApiVersion: "v1alpha1", - Admin: v1alpha1.AdminConfig{ + err := 
stunner.Reconcile(&stnrv1.StunnerConfig{ + ApiVersion: stnrv1.ApiVersion, + Admin: stnrv1.AdminConfig{ LogLevel: turncatTestLoglevel, }, - Auth: v1alpha1.AuthConfig{ - Type: "longterm", + Auth: stnrv1.AuthConfig{ + Type: "ephemeral", Credentials: map[string]string{ "secret": sharedSecret, }, }, - Listeners: []v1alpha1.ListenerConfig{{ + Listeners: []stnrv1.ListenerConfig{{ Name: "udp-listener-23478", - Protocol: "udp", + Protocol: "turn-udp", Addr: "127.0.0.1", Port: 23478, Routes: []string{"allow-any"}, }, { Name: "tcp-listener-23478", - Protocol: "tcp", + Protocol: "turn-tcp", Addr: "127.0.0.1", Port: 23478, Routes: []string{"allow-any"}, }}, - Clusters: []v1alpha1.ClusterConfig{{ + Clusters: []stnrv1.ClusterConfig{{ Name: "allow-any", Endpoints: []string{"0.0.0.0/0"}, }}, @@ -295,35 +292,32 @@ func TestTurncatLongterm(t *testing.T) { } for _, c := range testTurncatConfigs { - listener, err := ParseUri(c.ListenerAddr) + listener, err := url.Parse(c.ListenerAddr) assert.NoError(t, err, "cannot parse turncat listener URI") server, err := ParseUri(c.ServerAddr) assert.NoError(t, err, "cannot parse server URI") - testName := fmt.Sprintf("TestTurncat_NewTurncat_Longterm_client:%s_server:%s", - listener.Protocol, server.Protocol) + testName := fmt.Sprintf("TestTurncat_NewTurncat_Ephemeral_client:%s_server:%s", + listener.Scheme, server.Protocol) t.Run(testName, func(t *testing.T) { log.Debugf("-------------- Running test: %s -------------", testName) - peer, err := ParseUri(c.PeerAddr) - assert.NoError(t, err, "cannot parse peer URI") log.Debug("creating turncat relay") turncat, err := NewTurncat(&c) assert.NoError(t, err, "cannot create turncat relay") - lconn, err := net.Dial(listener.Protocol, - fmt.Sprintf("%s:%d", listener.Address, listener.Port)) + lconn, err := net.Dial(turncat.listenerAddr.Network(), turncat.listenerAddr.String()) assert.NoError(t, err, "cannot create client socket") - if strings.HasPrefix(listener.Protocol, "tcp") { + if strings.HasPrefix(turncat.listenerAddr.Network(), "tcp") { // prevent "addess already in use" errors: close sends RST assert.NoError(t, lconn.(*net.TCPConn).SetLinger(0), "cannnot set TCP linger") } - testConfig := turncatEchoTestConfig{t, stunner, lconn, peer, logger} + testConfig := turncatEchoTestConfig{t, stunner, lconn, turncat.peerAddr, logger} turncatEchoTest(testConfig) turncat.Close() diff --git a/uri.go b/uri.go index f61bed00..3258419e 100644 --- a/uri.go +++ b/uri.go @@ -7,10 +7,9 @@ import ( "os" "strconv" "strings" - "syscall" "github.com/l7mp/stunner/internal/util" - "github.com/l7mp/stunner/pkg/apis/v1alpha1" + stnrv1 "github.com/l7mp/stunner/pkg/apis/v1" ) // StunnerUri is the specification of a STUNner listener URI @@ -35,7 +34,7 @@ func ParseUri(uri string) (*StunnerUri, error) { u, err := url.Parse(uri) if err != nil { - return nil, fmt.Errorf("Invalid URI '%s': %s", uri, err) + return nil, fmt.Errorf("invalid URI '%s': %s", uri, err) } s.Address = u.Hostname() @@ -45,28 +44,20 @@ func ParseUri(uri string) (*StunnerUri, error) { s.Password = password } - proto := strings.ToLower(u.Scheme) - if proto == "turn" { - q := u.Query() - if len(q["transport"]) > 0 { - proto = strings.ToLower(q["transport"][0]) - } else { - proto = "udp" - } + proto, err := getStunnerProtoForURI(u) + if err != nil { + return nil, err } s.Protocol = proto - port, _ := strconv.Atoi(u.Port()) - s.Port = port - - switch proto { - case "udp", "udp4", "udp6", "dtls": + switch strings.ToLower(proto) { + case "udp", "udp4", "udp6", "dtls", "turn-udp", 
"turn-dtls": a, err := net.ResolveUDPAddr("udp", s.Address+":"+u.Port()) if err != nil { return nil, err } s.Addr = a - case "tcp", "tcp4", "tcp6", "tls": + case "tcp", "tcp4", "tcp6", "tls", "turn-tcp", "turn-tls": a, err := net.ResolveTCPAddr("tcp", s.Address+":"+u.Port()) if err != nil { return nil, err @@ -88,50 +79,47 @@ func ParseUri(uri string) (*StunnerUri, error) { return nil, fmt.Errorf("invalid protocol: %s", proto) } - return &s, nil -} + defaultPort := 3478 + if strings.ToLower(proto) == "turn-tls" || strings.ToLower(proto) == "turn-dtls" { + defaultPort = 443 + } -// GetUriFromListener returns a standard TURN URI from a listener config -func GetUriFromListener(req *v1alpha1.ListenerConfig) (string, error) { - proto, err := v1alpha1.NewListenerProtocol(req.Protocol) + port, err := strconv.Atoi(u.Port()) if err != nil { - return "", err + port = defaultPort } + s.Port = port - service, protocol := "", "" - switch proto { - case v1alpha1.ListenerProtocolUDP: - service = "turn" - protocol = "udp" - case v1alpha1.ListenerProtocolTCP: - service = "turn" - protocol = "tcp" - case v1alpha1.ListenerProtocolDTLS: - service = "turns" - protocol = "udp" - case v1alpha1.ListenerProtocolTLS: - service = "turns" - protocol = "tcp" - } + return &s, nil +} - addr := req.PublicAddr - if addr == "" { - // fallback to server addr - addr = req.Addr +func (u *StunnerUri) String() string { + req := stnrv1.ListenerConfig{ + Protocol: u.Protocol, + PublicAddr: u.Address, + PublicPort: u.Port, } - port := req.PublicPort - if port == 0 { - // fallback to server addr - port = req.Port + uri, err := GetStandardURLFromListener(&req) + if err != nil { + return "" } - uri := fmt.Sprintf("%s:%s:%d?transport=%s", service, addr, port, protocol) - return uri, nil + return uri +} + +// GetUriFromListener returns a standard TURN URI as per RFC7065from a listener config. +func GetUriFromListener(req *stnrv1.ListenerConfig) (string, error) { + return req.GetListenerURI(true) +} + +// GetStandardURLFromListener returns a standard URL (that can be parsed using net/url) from a listener config. 
+func GetStandardURLFromListener(req *stnrv1.ListenerConfig) (string, error) { + return req.GetListenerURI(false) } // GetUriFromListener returns a standard TURN URI from a listener config -func GetTurnUris(req *v1alpha1.StunnerConfig) ([]string, error) { +func GetTurnUris(req *stnrv1.StunnerConfig) ([]string, error) { ret := []string{} for i := range req.Listeners { uri, err := GetUriFromListener(&req.Listeners[i]) @@ -144,9 +132,36 @@ func GetTurnUris(req *v1alpha1.StunnerConfig) ([]string, error) { return ret, nil } -func reuseAddr(network, address string, conn syscall.RawConn) error { - return conn.Control(func(descriptor uintptr) { - _ = syscall.SetsockoptInt(int(descriptor), syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1) - // syscall.SetsockoptInt(int(descriptor), syscall.SOL_SOCKET, syscall.SO_REUSEPORT, 1) - }) +func getStunnerProtoForURI(u *url.URL) (string, error) { + scheme := strings.ToLower(u.Scheme) + if scheme == "" { + scheme = "turn" + } + + proto := "udp" + q := u.Query() + if len(q["transport"]) > 0 { + proto = strings.ToLower(q["transport"][0]) + } + + // fully specified protocol names (ignore "turns" scheme for compatibility) + switch proto { + case "tls": + return "TURN-TLS", nil + case "dtls": + return "TURN-DTLS", nil + } + + // using RFC7065 compatible URIs + if scheme == "turn" && proto == "udp" { + return "TURN-UDP", nil + } else if scheme == "turn" && proto == "tcp" { + return "TURN-TCP", nil + } else if scheme == "turns" && proto == "udp" { + return "TURN-DTLS", nil + } else if scheme == "turns" && proto == "tcp" { + return "TURN-TLS", nil + } + + return "", fmt.Errorf("invalid scheme/protocol in URI %q", u.String()) }
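
Note (illustration only, not part of the patch): the reworked ParseUri/getStunnerProtoForURI above map RFC 7065-style TURN URIs onto STUNner's protocol names (TURN-UDP, TURN-TCP, TURN-TLS, TURN-DTLS) and fall back to port 3478, or 443 for TURN-TLS/TURN-DTLS, when the URI carries no explicit port. Below is a minimal Go sketch of that mapping; the example URIs and the main() wrapper are invented for demonstration, and the import path simply follows the module path used in this repo's own import statements.

package main

import (
	"fmt"

	"github.com/l7mp/stunner"
)

func main() {
	// turn: with transport=udp (or no transport query) resolves to TURN-UDP; turns: with
	// transport=tcp resolves to TURN-TLS and, lacking an explicit port, defaults to 443.
	for _, uri := range []string{
		"turn://user1:passwd1@1.2.3.4:3478?transport=udp",
		"turns://1.2.3.4?transport=tcp",
	} {
		u, err := stunner.ParseUri(uri)
		if err != nil {
			fmt.Println("parse error:", err)
			continue
		}
		fmt.Printf("%s -> protocol=%s, address=%s, port=%d\n", uri, u.Protocol, u.Address, u.Port)
	}
}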