From c680f5a2b36a98029b2b20aed9def302f7a78f1e Mon Sep 17 00:00:00 2001 From: Binbin Li Date: Tue, 30 Apr 2024 12:07:23 +0000 Subject: [PATCH] test: add e2e tests for multi-tenancy --- Makefile | 8 +- internal/context/utils.go | 8 +- internal/context/utils_test.go | 2 +- internal/logger/logger.go | 2 + .../samples/constraint.yaml | 11 ++ .../multi-tenancy-validation/template.yaml | 48 ++++++++ test/bats/azure-test.bats | 40 +++---- test/bats/base-test.bats | 107 ++++++++++-------- test/bats/high-availability.bats | 4 +- test/bats/plugin-test.bats | 52 +++++++-- 10 files changed, 192 insertions(+), 90 deletions(-) create mode 100644 library/multi-tenancy-validation/samples/constraint.yaml create mode 100644 library/multi-tenancy-validation/template.yaml diff --git a/Makefile b/Makefile index 70366ad1c..dbf711cd6 100644 --- a/Makefile +++ b/Makefile @@ -125,13 +125,13 @@ delete-ratify: .PHONY: deploy-demo-constraints deploy-demo-constraints: - kubectl apply -f ./library/default/template.yaml - kubectl apply -f ./library/default/samples/constraint.yaml + kubectl apply -f ./library/multi-tenancy-validation/template.yaml + kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml .PHONY: delete-demo-constraints delete-demo-constraints: - kubectl delete -f ./library/default/template.yaml - kubectl delete -f ./library/default/samples/constraint.yaml + kubectl delete -f ./library/multi-tenancy-validation/template.yaml + kubectl delete -f ./library/multi-tenancy-validation/samples/constraint.yaml .PHONY: deploy-rego-policy deploy-rego-policy: diff --git a/internal/context/utils.go b/internal/context/utils.go index 39c8ae5b7..1f77a87c0 100644 --- a/internal/context/utils.go +++ b/internal/context/utils.go @@ -22,16 +22,16 @@ import ( type contextKey string -const contextKeyNamespace = contextKey("namespace") +const ContextKeyNamespace = contextKey("namespace") // SetContextWithNamespace embeds namespace to the context. func SetContextWithNamespace(ctx context.Context, namespace string) context.Context { - return context.WithValue(ctx, contextKeyNamespace, namespace) + return context.WithValue(ctx, ContextKeyNamespace, namespace) } // GetNamespace returns the embedded namespace from the context. func GetNamespace(ctx context.Context) string { - namespace := ctx.Value(contextKeyNamespace) + namespace := ctx.Value(ContextKeyNamespace) if namespace == nil { return "" } @@ -40,7 +40,7 @@ func GetNamespace(ctx context.Context) string { // CreateCacheKey creates a new cache key prefixed with embedded namespace. 
func CreateCacheKey(ctx context.Context, key string) string { - namespace := ctx.Value(contextKeyNamespace) + namespace := ctx.Value(ContextKeyNamespace) if namespace == nil { return key } diff --git a/internal/context/utils_test.go b/internal/context/utils_test.go index 1b2d94a0a..8a52fafab 100644 --- a/internal/context/utils_test.go +++ b/internal/context/utils_test.go @@ -28,7 +28,7 @@ const ( func TestSetContext(t *testing.T) { ctx := context.Background() ctx = SetContextWithNamespace(ctx, testNamespace) - namespace := ctx.Value(contextKeyNamespace).(string) + namespace := ctx.Value(ContextKeyNamespace).(string) if namespace != testNamespace { t.Fatalf("expected namespace %s, got %s", testNamespace, namespace) } diff --git a/internal/logger/logger.go b/internal/logger/logger.go index 83b76c1fe..b833fa275 100644 --- a/internal/logger/logger.go +++ b/internal/logger/logger.go @@ -23,6 +23,7 @@ import ( logstash "github.com/bshuster-repo/logrus-logstash-hook" re "github.com/deislabs/ratify/errors" + icontext "github.com/deislabs/ratify/internal/context" dcontext "github.com/docker/distribution/context" "github.com/google/uuid" "github.com/sirupsen/logrus" @@ -93,6 +94,7 @@ func InitContext(ctx context.Context, r *http.Request) context.Context { // GetLogger returns a logger with provided values. func GetLogger(ctx context.Context, opt Option) dcontext.Logger { + ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, icontext.ContextKeyNamespace)) ctx = context.WithValue(ctx, ContextKeyComponentType, opt.ComponentType) return dcontext.GetLogger(ctx, ContextKeyComponentType) } diff --git a/library/multi-tenancy-validation/samples/constraint.yaml b/library/multi-tenancy-validation/samples/constraint.yaml new file mode 100644 index 000000000..cb483c322 --- /dev/null +++ b/library/multi-tenancy-validation/samples/constraint.yaml @@ -0,0 +1,11 @@ +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: RatifyVerification +metadata: + name: ratify-constraint +spec: + enforcementAction: deny + match: + kinds: + - apiGroups: [""] + kinds: ["Pod"] + namespaces: ["default", "new-namespace"] diff --git a/library/multi-tenancy-validation/template.yaml b/library/multi-tenancy-validation/template.yaml new file mode 100644 index 000000000..425de64fd --- /dev/null +++ b/library/multi-tenancy-validation/template.yaml @@ -0,0 +1,48 @@ +apiVersion: templates.gatekeeper.sh/v1beta1 +kind: ConstraintTemplate +metadata: + name: ratifyverification +spec: + crd: + spec: + names: + kind: RatifyVerification + targets: + - target: admission.k8s.gatekeeper.sh + rego: | + package ratifyverification + + # Get data from Ratify + remote_data := response { + images := [img | img = concat("", ["[",input.review.object.metadata.namespace,"]",input.review.object.spec.containers[_].image])] + images_init := [img | img = concat("", ["[",input.review.object.metadata.namespace,"]",input.review.object.spec.initContainers[_].image])] + images_ephemeral := [img | img = concat("", ["[",input.review.object.metadata.namespace,"]",input.review.object.spec.ephemeralContainers[_].image])] + other_images := array.concat(images_init, images_ephemeral) + all_images := array.concat(other_images, images) + response := external_data({"provider": "ratify-provider", "keys": all_images}) + } + + # Base Gatekeeper violation + violation[{"msg": msg}] { + general_violation[{"result": msg}] + } + + # Check if there are any system errors + general_violation[{"result": result}] { + err := remote_data.system_error + err != "" + result := 
sprintf("System error calling external data provider: %s", [err]) + } + + # Check if there are errors for any of the images + general_violation[{"result": result}] { + count(remote_data.errors) > 0 + result := sprintf("Error validating one or more images: %s", remote_data.errors) + } + + # Check if the success criteria is true + general_violation[{"result": result}] { + subject_validation := remote_data.responses[_] + subject_validation[1].isSuccess == false + result := sprintf("Subject failed verification: %s", [subject_validation[0]]) + } diff --git a/test/bats/azure-test.bats b/test/bats/azure-test.bats index 1a4f94169..2ef2b4b7d 100644 --- a/test/bats/azure-test.bats +++ b/test/bats/azure-test.bats @@ -54,9 +54,9 @@ SLEEP_TIME=1 } # configure the default template/constraint - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success # verify that the image can be run with a root cert, root verification cert should have been configured on deployment @@ -87,10 +87,10 @@ SLEEP_TIME=1 wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod demo1 --namespace default --force --ignore-not-found=true' } - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success sleep 5 run kubectl run demo --namespace default --image=${TEST_REGISTRY}/notation:signed @@ -106,10 +106,10 @@ SLEEP_TIME=1 wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod cosign-demo2 --namespace default --force --ignore-not-found=true' } - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success sleep 5 run kubectl apply -f ./test/bats/tests/config/config_v1beta1_verifier_cosign_akv.yaml @@ -130,10 +130,10 @@ SLEEP_TIME=1 wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete verifiers.config.ratify.deislabs.io/verifier-license-checker --namespace default --ignore-not-found=true' } - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success sleep 5 @@ -156,10 +156,10 @@ SLEEP_TIME=1 wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod sbom2 --namespace default --force --ignore-not-found=true' } - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success sleep 5 @@ -186,10 +186,10 @@ SLEEP_TIME=1 wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod schemavalidator2 --namespace default --force --ignore-not-found=true' } - run kubectl apply -f 
./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success sleep 5 @@ -216,10 +216,10 @@ SLEEP_TIME=1 wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod all-in-one --namespace default --force --ignore-not-found=true' } - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success sleep 5 @@ -262,23 +262,23 @@ SLEEP_TIME=1 @test "configmap update test" { skip "Skipping test for now as we are no longer watching for configfile update in a K8s environment.This test ensures we are watching config file updates in a non-kub scenario" - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success sleep 5 run kubectl run demo2 --image=${TEST_REGISTRY}/notation:signed assert_success run kubectl get configmaps ratify-configuration --namespace=gatekeeper-system -o yaml >currentConfig.yaml - run kubectl delete -f ./library/default/samples/constraint.yaml + run kubectl delete -f ./library/multi-tenancy-validation/samples/constraint.yaml wait_for_process ${WAIT_TIME} ${SLEEP_TIME} "kubectl replace --namespace=gatekeeper-system -f ${BATS_TESTS_DIR}/configmap/invalidconfigmap.yaml" echo "Waiting for 150 second for configuration update" sleep 150 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success run kubectl run demo3 --image=${TEST_REGISTRY}/notation:signed echo "Current time after validate : $(date +"%T")" @@ -307,10 +307,10 @@ SLEEP_TIME=1 echo "cleaning up" wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod mutate-demo --namespace default --ignore-not-found=true' } - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success sleep 5 run kubectl run mutate-demo --namespace default --image=${TEST_REGISTRY}/notation:signed diff --git a/test/bats/base-test.bats b/test/bats/base-test.bats index 8b40dca23..204c10f7c 100644 --- a/test/bats/base-test.bats +++ b/test/bats/base-test.bats @@ -28,10 +28,10 @@ RATIFY_NAMESPACE=gatekeeper-system wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod initcontainer-pod --namespace default --force --ignore-not-found=true' wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod initcontainer-pod1 --namespace default --force --ignore-not-found=true' } - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success 
sleep 5 # validate key management provider status property shows success @@ -45,7 +45,7 @@ RATIFY_NAMESPACE=gatekeeper-system # validate initContainers image run kubectl apply -f ./test/testdata/pod_initContainers_signed.yaml --namespace default assert_success - + run kubectl apply -f ./test/testdata/pod_initContainers_unsigned.yaml --namespace default assert_failure @@ -79,10 +79,10 @@ RATIFY_NAMESPACE=gatekeeper-system wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod demo --namespace default --force --ignore-not-found=true' wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod demo1 --namespace default --force --ignore-not-found=true' } - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success sleep 5 @@ -97,47 +97,56 @@ RATIFY_NAMESPACE=gatekeeper-system } @test "notation test with certs across namespace" { - skip "cluster-wide verifiers cannot access KMPs in specific namespace, need to add another test for namespaced verifiers accessing namespaced KMPs once we support multi-tenancy" teardown() { echo "cleaning up" wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod demo --namespace default --force --ignore-not-found=true' wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod demo1 --namespace default --force --ignore-not-found=true' - + # restore cert store in ratify namespace - run bash -c "kubectl get namespacedkeymanagementproviders.config.ratify.deislabs.io/ratify-notation-inline-cert-0 -o yaml -n default > kmprovider.yaml" - run kubectl delete namespacedkeymanagementproviders.config.ratify.deislabs.io/ratify-notation-inline-cert-0 -n default - sed 's/default/gatekeeper-system/' kmprovider.yaml > kmproviderNewNS.yaml - run kubectl apply -f kmproviderNewNS.yaml + run kubectl apply -f clusterkmprovider.yaml assert_success # restore the original notation verifier for other tests wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl apply -f ./config/samples/clustered/verifier/config_v1beta1_verifier_notation.yaml' + + # delete new namespace + run kubectl delete namespace new-namespace + assert_success } - run kubectl apply -f ./library/default/template.yaml + + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f ./library/default/samples/constraint.yaml - + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success sleep 5 - - # apply the key management provider to default namespace - run bash -c "kubectl get namespacedkeymanagementproviders.config.ratify.deislabs.io/ratify-notation-inline-cert-0 -o yaml -n ${RATIFY_NAMESPACE} > kmprovider.yaml" + + # create a new namespace. 
+ run kubectl create namespace new-namespace + assert_success + sleep 3 + + # apply the key management provider to new namespace + run bash -c "kubectl get keymanagementproviders.config.ratify.deislabs.io/ratify-notation-inline-cert-0 -o yaml > clusterkmprovider.yaml" assert_success - sed 's/gatekeeper-system/default/' kmprovider.yaml > kmproviderNewNS.yaml - run kubectl apply -f kmproviderNewNS.yaml + sed 's/KeyManagementProvider/NamespacedKeyManagementProvider/' clusterkmprovider.yaml > namespacedkmprovider.yaml + run kubectl apply -f namespacedkmprovider.yaml -n new-namespace assert_success - run kubectl delete namespacedkeymanagementproviders.config.ratify.deislabs.io/ratify-notation-inline-cert-0 -n ${RATIFY_NAMESPACE} + + # delete the cluster-wide key management provider + run kubectl delete keymanagementproviders.config.ratify.deislabs.io/ratify-notation-inline-cert-0 assert_success - - # configure the notation verifier to use inline certificate store with specific namespace - run kubectl apply -f ./config/samples/clustered/verifier/config_v1beta1_verifier_notation_specificnskmprovider.yaml + + # configure the notation verifier to use inline certificate store in new namespace. + sed 's/default\//new-namespace\//' ./config/samples/clustered/verifier/config_v1beta1_verifier_notation_specificnskmprovider.yaml > verifier-new-namespace.yaml + run kubectl apply -f verifier-new-namespace.yaml assert_success + sleep 3 - run kubectl run demo --namespace default --image=registry:5000/notation:signed + run kubectl run demo --namespace new-namespace --image=registry:5000/notation:signed assert_success - run kubectl run demo1 --namespace default --image=registry:5000/notation:unsigned + run kubectl run demo1 --namespace new-namespace --image=registry:5000/notation:unsigned assert_failure } @@ -147,10 +156,10 @@ RATIFY_NAMESPACE=gatekeeper-system wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod cosign-demo-key --namespace default --force --ignore-not-found=true' wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod cosign-demo-unsigned --namespace default --force --ignore-not-found=true' } - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success sleep 5 @@ -172,10 +181,10 @@ RATIFY_NAMESPACE=gatekeeper-system run kubectl replace -f ./config/samples/clustered/verifier/config_v1beta1_verifier_cosign_legacy.yaml sleep 5 - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success sleep 5 @@ -243,7 +252,7 @@ RATIFY_NAMESPACE=gatekeeper-system assert_success # apply a invalid verifier CR, validate status with error - sed 's/licensechecker/invalidlicensechecker/' ./config/samples/clustered/verifier/config_v1beta1_verifier_complete_licensechecker.yaml > invalidVerifier.yaml + sed 's/licensechecker/invalidlicensechecker/' ./config/samples/clustered/verifier/config_v1beta1_verifier_complete_licensechecker.yaml >invalidVerifier.yaml run kubectl apply -f invalidVerifier.yaml assert_success run bash -c "kubectl describe verifiers.config.ratify.deislabs.io/verifier-license-checker -n 
${RATIFY_NAMESPACE} | grep 'Brieferror: Original Error:'" @@ -265,7 +274,7 @@ RATIFY_NAMESPACE=gatekeeper-system } # apply a invalid verifier CR, validate status with error - sed 's/:v1/:invalid/' ./config/samples/clustered/store/config_v1beta1_store_dynamic.yaml > invalidstore.yaml + sed 's/:v1/:invalid/' ./config/samples/clustered/store/config_v1beta1_store_dynamic.yaml >invalidstore.yaml run kubectl apply -f invalidstore.yaml assert_success # wait for download of image @@ -276,23 +285,23 @@ RATIFY_NAMESPACE=gatekeeper-system @test "configmap update test" { skip "Skipping test for now as we are no longer watching for configfile update in a K8s environment. This test ensures we are watching config file updates in a non-kub scenario" - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success sleep 5 run kubectl run demo2 --image=registry:5000/notation:signed assert_success run kubectl get configmaps ratify-configuration --namespace=${RATIFY_NAMESPACE} -o yaml >currentConfig.yaml - run kubectl delete -f ./library/default/samples/constraint.yaml + run kubectl delete -f ./library/multi-tenancy-validation/samples/constraint.yaml wait_for_process ${WAIT_TIME} ${SLEEP_TIME} "kubectl replace --namespace=${RATIFY_NAMESPACE} -f ${BATS_TESTS_DIR}/configmap/invalidconfigmap.yaml" echo "Waiting for 150 second for configuration update" sleep 150 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success run kubectl run demo3 --image=registry:5000/notation:signed echo "Current time after validate : $(date +"%T")" @@ -307,10 +316,10 @@ RATIFY_NAMESPACE=gatekeeper-system wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod mutate-demo --namespace default --ignore-not-found=true' } - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success sleep 5 run kubectl run mutate-demo --namespace default --image=registry:5000/notation:signed @@ -372,9 +381,9 @@ RATIFY_NAMESPACE=gatekeeper-system } # configure the default template/constraint - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success # verify that the image cannot be run due to an invalid cert @@ -408,10 +417,10 @@ RATIFY_NAMESPACE=gatekeeper-system wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod demo1 --namespace default --force --ignore-not-found=true' wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete certificatestores.config.ratify.deislabs.io/ratify-notation-inline-cert-0 --namespace ${RATIFY_NAMESPACE} --ignore-not-found=true' } - # configure the default template/constraint - run kubectl apply -f ./library/default/template.yaml + # configure the default template/constraint + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success - run kubectl 
apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success # validate key management provider status property shows success @@ -422,7 +431,7 @@ RATIFY_NAMESPACE=gatekeeper-system sleep 10 - # apply an invalid cert in an inline certificate store + # apply an invalid cert in an inline certificate store run kubectl apply -f ./test/bats/tests/config/config_v1beta1_certstore_inline_invalid.yaml -n ${RATIFY_NAMESPACE} assert_success # validate key management provider status property shows success @@ -442,10 +451,10 @@ RATIFY_NAMESPACE=gatekeeper-system wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl replace -f ./config/samples/clustered/store/config_v1beta1_store_oras_http.yaml' } - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success sleep 5 # apply store CRD with K8s secret auth provier enabled @@ -469,9 +478,9 @@ RATIFY_NAMESPACE=gatekeeper-system } # configure the default template/constraint - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success # add the root certificate as an inline key management provider @@ -520,10 +529,10 @@ RATIFY_NAMESPACE=gatekeeper-system sleep 100 # verify that the verification succeeds - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success sleep 5 run kubectl run demo --namespace default --image=registry:5000/notation:signed diff --git a/test/bats/high-availability.bats b/test/bats/high-availability.bats index cf6d246ce..a175019aa 100644 --- a/test/bats/high-availability.bats +++ b/test/bats/high-availability.bats @@ -25,10 +25,10 @@ SLEEP_TIME=1 wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod demo --namespace default --force --ignore-not-found=true' wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod demo2 --namespace default --force --ignore-not-found=true' } - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success sleep 5 # validate key management provider status property shows success diff --git a/test/bats/plugin-test.bats b/test/bats/plugin-test.bats index 233da79fa..bebdf3c10 100644 --- a/test/bats/plugin-test.bats +++ b/test/bats/plugin-test.bats @@ -67,10 +67,10 @@ SLEEP_TIME=1 wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete verifiers.config.ratify.deislabs.io/verifier-license-checker --namespace default --ignore-not-found=true' } - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f 
./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success sleep 5 run kubectl apply -f ./config/samples/clustered/verifier/config_v1beta1_verifier_partial_licensechecker.yaml @@ -92,10 +92,10 @@ SLEEP_TIME=1 wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod sbom2 --namespace default --force --ignore-not-found=true' } - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success sleep 5 @@ -129,10 +129,10 @@ SLEEP_TIME=1 wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod schemavalidator2 --namespace default --force --ignore-not-found=true' } - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success sleep 5 @@ -157,10 +157,10 @@ SLEEP_TIME=1 wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod vulnerabilityreport2 --namespace default --force --ignore-not-found=true' } - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success sleep 5 @@ -185,10 +185,42 @@ SLEEP_TIME=1 wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod all-in-one --namespace default --force --ignore-not-found=true' } - run kubectl apply -f ./library/default/template.yaml + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml assert_success sleep 5 - run kubectl apply -f ./library/default/samples/constraint.yaml + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml + assert_success + sleep 5 + + run kubectl apply -f ./config/samples/clustered/verifier/config_v1beta1_verifier_cosign.yaml + sleep 5 + run kubectl apply -f ./config/samples/clustered/verifier/config_v1beta1_verifier_sbom.yaml + sleep 5 + run kubectl apply -f ./config/samples/clustered/verifier/config_v1beta1_verifier_complete_licensechecker.yaml + run kubectl apply -f ./config/samples/clustered/verifier/config_v1beta1_verifier_schemavalidator.yaml + sleep 5 + + # wait for the httpserver cache to be invalidated + sleep 15 + run kubectl run all-in-one --namespace default --image=registry:5000/all:v0 + assert_success +} + +@test "namespaced sbom/notary/cosign/licensechecker/schemavalidator verifiers test" { + skip "not ready" + teardown() { + echo "cleaning up" + wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete verifiers.config.ratify.deislabs.io/verifier-license-checker --namespace default --ignore-not-found=true' + wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete verifiers.config.ratify.deislabs.io/verifier-sbom --namespace default --ignore-not-found=true' + # Skipping test for now until expected usage/configuration of this plugin can be verified + # wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete verifiers.config.ratify.deislabs.io/verifier-schemavalidator --namespace default --ignore-not-found=true' + 
wait_for_process ${WAIT_TIME} ${SLEEP_TIME} 'kubectl delete pod all-in-one --namespace default --force --ignore-not-found=true' + } + + run kubectl apply -f ./library/multi-tenancy-validation/template.yaml + assert_success + sleep 5 + run kubectl apply -f ./library/multi-tenancy-validation/samples/constraint.yaml assert_success sleep 5
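
---

Reviewer note: the sketch below is a minimal, self-contained illustration (not part of the patch) of the two conventions these e2e tests rely on: the exported `ContextKeyNamespace` used to embed a namespace in a `context.Context` (mirroring `internal/context/utils.go` in this diff), and the `[namespace]image` key format that the new `library/multi-tenancy-validation/template.yaml` Rego assembles before calling `external_data` against `ratify-provider`. Only `ContextKeyNamespace`, `SetContextWithNamespace`, and `GetNamespace` correspond to identifiers in the diff; the lowercase helper names and the printed output here are illustrative assumptions, and the exact cache-key string produced by `CreateCacheKey` is not shown in the hunk, so it is not reproduced.

```go
package main

import (
	"context"
	"fmt"
)

// contextKey mirrors the unexported key type in internal/context/utils.go;
// the patch exports the namespace key as ContextKeyNamespace so other
// packages (e.g. internal/logger) can read it from the request context.
type contextKey string

const ContextKeyNamespace = contextKey("namespace")

// setContextWithNamespace mirrors SetContextWithNamespace from the diff:
// it embeds the tenant namespace into the context.
func setContextWithNamespace(ctx context.Context, namespace string) context.Context {
	return context.WithValue(ctx, ContextKeyNamespace, namespace)
}

// getNamespace mirrors GetNamespace from the diff: it returns the embedded
// namespace, or "" when none was set (cluster-wide resources).
func getNamespace(ctx context.Context) string {
	if ns, ok := ctx.Value(ContextKeyNamespace).(string); ok {
		return ns
	}
	return ""
}

// externalDataKey builds the "[namespace]image" key that the new Rego
// template produces with concat("", ["[", namespace, "]", image]) for each
// container, initContainer, and ephemeralContainer image in the admission
// review.
func externalDataKey(namespace, image string) string {
	return fmt.Sprintf("[%s]%s", namespace, image)
}

func main() {
	// A pod admitted in the namespace created by the bats test.
	ctx := setContextWithNamespace(context.Background(), "new-namespace")

	// The logger change in this diff surfaces this value in log entries;
	// here we simply print it to show the round trip.
	fmt.Println("namespace from context:", getNamespace(ctx))

	// Key for the signed test image, as the constraint template would send
	// it to the ratify-provider external data call.
	fmt.Println("external data key:",
		externalDataKey(getNamespace(ctx), "registry:5000/notation:signed"))
}
```

The `namespaces: ["default", "new-namespace"]` match in the new sample constraint is what allows the base-test to exercise a NamespacedKeyManagementProvider and verifier in `new-namespace` while the other suites keep running against `default`.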