diff --git a/src/confcom/HISTORY.rst b/src/confcom/HISTORY.rst index 9e59bb524ab..b2a5d15494d 100644 --- a/src/confcom/HISTORY.rst +++ b/src/confcom/HISTORY.rst @@ -2,6 +2,17 @@ Release History =============== + +1.2.0 +++++++ +* fixing metadata for uploaded fragments +* fixing support for non-image feed names and attaching fragments to an image +* bug fixes for image-attached fragments +* adding ability to generate a fragment import from an image name using the remote attached fragments +* updating stdout import statement to look more like the file output +* adding `--omit-id` to the `acifragmentgen` command +* updating genpolicy to version 3.2.0.azl3.genpolicy2 + 1.1.1 ++++++ * updating dmverity-vhd version with bugfix for empty image layers diff --git a/src/confcom/azext_confcom/README.md b/src/confcom/azext_confcom/README.md index 293efbe8e5f..5b76f41ca1f 100644 --- a/src/confcom/azext_confcom/README.md +++ b/src/confcom/azext_confcom/README.md @@ -670,7 +670,7 @@ For information on what a policy fragment is, see [policy fragments](#policy-fra Example 1: The following command creates a security fragment and prints it to stdout as well as saving it to a file `contoso.rego`: ```bash -az confcom acifragmentgen --config ./fragment_config.json --svn 1 --namespace contoso +az confcom acifragmentgen --input ./fragment_config.json --svn 1 --namespace contoso ``` The config file is a JSON file that contains the following information: @@ -708,7 +708,7 @@ The `--svn` argument is used to specify the security version number of the fragm Example 2: This command creates a signed security fragment and attaches it to a container image in an ORAS-compliant registry: ```bash -az confcom acifragmentgen --chain ./samples/certs/intermediateCA/certs/www.contoso.com.chain.cert.pem --key ./samples/certs/intermediateCA/private/ec_p384_private.pem --svn 1 --namespace contoso --config ./samples/config.json --upload-fragment +az confcom acifragmentgen --chain ./samples/certs/intermediateCA/certs/www.contoso.com.chain.cert.pem --key ./samples/certs/intermediateCA/private/ec_p384_private.pem --svn 1 --namespace contoso --input ./samples/config.json --upload-fragment ``` Example 3: This command creates a file to be used by `acipolicygen` that says which fragments should be included in the policy. Note that the policy must be [COSE](https://www.iana.org/assignments/cose/cose.xhtml) signed: @@ -721,19 +721,30 @@ This outputs a file `fragments.json` that contains the following information: ```json { - "path": "./contoso.rego.cose", - "feed": "contoso.azurecr.io/example", - "includes": [ - "containers", - "fragments" - ], - "issuer": "did:x509:0:sha256:mLzv0uyBNQvC6hi4y9qy8hr6NSZuYFv6gfCwAEWBNqc::subject:CN:Contoso", - "minimum_svn": "1" + "fragments": [ + { + "feed": "contoso.azurecr.io/example", + "includes": [ + "containers", + "fragments" + ], + "issuer": "did:x509:0:sha256:mLzv0uyBNQvC6hi4y9qy8hr6NSZuYFv6gfCwAEWBNqc::subject:CN:Contoso", + "minimum_svn": "1" + } + ] } ``` This file is then used by `acipolicygen` to generate a policy that includes custom fragments. +Example 4: The command creates a signed policy fragment and attaches it to a specified image in an ORAS-compliant registry: + +```bash +az confcom acifragmentgen --chain ./samples/certs/intermediateCA/certs/www.contoso.com.chain.cert.pem --key ./samples/certs/intermediateCA/private/ec_p384_private.pem --svn 1 --namespace contoso --input ./samples/.json --upload-fragment --image-target contoso.azurecr.io/:latest --feed contoso.azurecr.io/ +``` + +This could be useful in scenarios where an image-attached fragment is required but the fragment's feed is different from the image's location. + ## Microsoft Azure CLI 'confcom katapolicygen' Extension Examples Run `az confcom katapolicygen --help` to see a list of supported arguments along with explanations. The following commands demonstrate the usage of different arguments to generate confidential computing security policies. diff --git a/src/confcom/azext_confcom/_help.py b/src/confcom/azext_confcom/_help.py index 23fd72b7383..d884aa34e6c 100644 --- a/src/confcom/azext_confcom/_help.py +++ b/src/confcom/azext_confcom/_help.py @@ -91,7 +91,7 @@ - name: --omit-id type: boolean - short-summary: 'When enabled, the generated policy will not contain the ID field. This will keep the policy from being tied to a specific image name and tag' + short-summary: 'When enabled, the generated policy will not contain the ID field. This will keep the policy from being tied to a specific image name and tag. This is helpful if the image being used will be present in multiple registries and used interchangeably' - name: --include-fragments -f type: boolean @@ -167,6 +167,10 @@ type: string short-summary: 'Path to an existing policy fragment file to be used with --generate-import. This option allows you to create import statements for the specified fragment without needing to pull it from an OCI registry' + - name: --omit-id + type: boolean + short-summary: 'When enabled, the generated policy will not contain the ID field. This will keep the policy from being tied to a specific image name and tag. This is helpful if the image being used will be present in multiple registries and used interchangeably' + - name: --generate-import -g type: boolean short-summary: 'Generate an import statement for a policy fragment' @@ -201,9 +205,15 @@ - name: Input a config file to generate a fragment with a custom namespace and debug mode enabled text: az confcom acifragmentgen --input "./config.json" --namespace "my-namespace" --debug-mode - name: Generate an import statement for a signed local fragment - text: az confcom acifragmentgen --fragment-path "./fragment.json" --generate-import --minimum-svn 1 + text: az confcom acifragmentgen --fragment-path "./fragment.rego.cose" --generate-import --minimum-svn 1 - name: Generate a fragment and COSE sign it with a key and chain - text: az confcom acifragmentgen --image mcr.microsoft.com/azuredocs/aci-helloworld --key "./key.pem" --chain "./chain.pem" --svn 1 --namespace contoso --no-print + text: az confcom acifragmentgen --input "./config.json" --key "./key.pem" --chain "./chain.pem" --svn 1 --namespace contoso --no-print + - name: Generate a fragment import from an image name + text: az confcom acifragmentgen --image --generate-import --minimum-svn 1 + - name: Attach a fragment to a specified image + text: az confcom acifragmentgen --input "./config.json" --key "./key.pem" --chain "./chain.pem" --svn 1 --namespace contoso --upload-fragment --image-target + + """ helps[ diff --git a/src/confcom/azext_confcom/_params.py b/src/confcom/azext_confcom/_params.py index c00a6503e15..f990b71017c 100644 --- a/src/confcom/azext_confcom/_params.py +++ b/src/confcom/azext_confcom/_params.py @@ -22,6 +22,8 @@ validate_fragment_path, validate_fragment_json, validate_fragment_json_policy, + validate_image_target, + validate_upload_fragment, ) @@ -230,6 +232,13 @@ def load_arguments(self, _): required=False, help="Feed for the generated policy fragment", ) + c.argument( + "image_target", + options_list=("--image-target"), + required=False, + help="Image target where the generated policy fragment is attached", + validator=validate_image_target, + ) c.argument( "key", options_list=("--key", "-k"), @@ -258,6 +267,12 @@ def load_arguments(self, _): help="Path to a policy fragment to be used with --generate-import to make import statements without having access to the fragment's OCI registry", validator=validate_fragment_path, ) + c.argument( + "omit_id", + options_list=("--omit-id"), + required=False, + help="Omit the id field in the policy. This is helpful if the image being used will be present in multiple registries and used interchangeably.", + ) c.argument( "generate_import", options_list=("--generate-import", "-g"), @@ -301,6 +316,7 @@ def load_arguments(self, _): options_list=("--upload-fragment", "-u"), required=False, help="Upload a policy fragment to a container registry", + validator=validate_upload_fragment, ) c.argument( "no_print", diff --git a/src/confcom/azext_confcom/_validators.py b/src/confcom/azext_confcom/_validators.py index 8077f8e1bf9..8941e8080c8 100644 --- a/src/confcom/azext_confcom/_validators.py +++ b/src/confcom/azext_confcom/_validators.py @@ -66,10 +66,19 @@ def validate_fragment_source(namespace): raise CLIError("Must provide either an image name or an input file to generate a fragment") +def validate_image_target(namespace): + if namespace.image_target and not namespace.upload_fragment: + raise CLIError("Must specify --upload-fragment to use --image-target") + + +def validate_upload_fragment(namespace): + if namespace.upload_fragment and not (namespace.key or namespace.chain): + raise CLIError("Must sign the fragment with --key and --chain to upload it") + + def validate_fragment_generate_import(namespace): if namespace.generate_import and sum(map(bool, [ namespace.fragment_path, - namespace.input_path, namespace.image_name ])) != 1: raise CLIError( @@ -78,6 +87,11 @@ def validate_fragment_generate_import(namespace): "an image name to generate an import statement" ) ) + if namespace.generate_import and namespace.output_filename: + raise CLIError( + "Cannot specify an output file (--output-filename) when generating an import statement." + + "Use --fragments-json (-j) to write to a file." + ) def validate_fragment_namespace_and_svn(namespace): diff --git a/src/confcom/azext_confcom/config.py b/src/confcom/azext_confcom/config.py index ac1b65bc9fc..32fb6eff5cb 100644 --- a/src/confcom/azext_confcom/config.py +++ b/src/confcom/azext_confcom/config.py @@ -85,6 +85,7 @@ ACI_FIELD_YAML_READINESS_PROBE = "readinessProbe" ACI_FIELD_YAML_STARTUP_PROBE = "startupProbe" VIRTUAL_NODE_YAML_METADATA = "metadata" +VIRTUAL_NODE_YAML_COMMAND = "command" VIRTUAL_NODE_YAML_NAME = "name" VIRTUAL_NODE_YAML_ANNOTATIONS = "annotations" VIRTUAL_NODE_YAML_LABELS = "labels" @@ -103,6 +104,7 @@ VIRTUAL_NODE_YAML_RESOURCES_HUGEPAGES = "hugepages" VIRTUAL_NODE_YAML_RESOURCES_EPHEMERAL_STORAGE = "ephemeral-storage" VIRTUAL_NODE_YAML_SPECIAL_ENV_VAR_REGEX = "^===VIRTUALNODE2.CC.THIM.(.+)===$" +VIRTUAL_NODE_YAML_READ_ONLY_MANY = "ReadOnlyMany" # output json values POLICY_FIELD_CONTAINERS = "containers" @@ -198,13 +200,18 @@ # reserved fragment names for existing pieces of Rego RESERVED_FRAGMENT_NAMES = _config["reserved_fragment_namespaces"] # fragment artifact type -ARTIFACT_TYPE = "application/x-ms-policy-frag" +ARTIFACT_TYPE = "application/x-ms-ccepolicy-frag" # customer rego file for data to be injected REGO_FILE = "./data/customer_rego_policy.txt" REGO_FRAGMENT_FILE = "./data/customer_rego_fragment.txt" script_directory = os.path.dirname(os.path.realpath(__file__)) REGO_FILE_PATH = f"{script_directory}/{REGO_FILE}" REGO_FRAGMENT_FILE_PATH = f"{script_directory}/{REGO_FRAGMENT_FILE}" +REGO_IMPORT_FILE_STRUCTURE = """ +{ + "fragments": [] +} +""" CUSTOMER_REGO_POLICY = load_str_from_file(REGO_FILE_PATH) CUSTOMER_REGO_FRAGMENT = load_str_from_file(REGO_FRAGMENT_FILE_PATH) # sidecar rego file diff --git a/src/confcom/azext_confcom/container.py b/src/confcom/azext_confcom/container.py index 31534381a16..6e2172ca42b 100644 --- a/src/confcom/azext_confcom/container.py +++ b/src/confcom/azext_confcom/container.py @@ -608,6 +608,9 @@ def get_id(self) -> str: def get_name(self) -> str: return self.containerName + def get_container_image(self) -> str: + return self.containerImage + def get_working_dir(self) -> str: return self._workingDir diff --git a/src/confcom/azext_confcom/cose_proxy.py b/src/confcom/azext_confcom/cose_proxy.py index af148b023d5..a02d4e8ba95 100644 --- a/src/confcom/azext_confcom/cose_proxy.py +++ b/src/confcom/azext_confcom/cose_proxy.py @@ -115,6 +115,10 @@ def cose_sign( payload_path, "-key", key_path, + "-salt", + "zero", + "-content-type", + "application/unknown+rego", "-out", out_path, ] @@ -183,3 +187,17 @@ def extract_payload_from_path(self, fragment_path: str) -> str: stdout = item.stdout.decode("utf-8") return stdout.split("payload:")[1] + + def extract_feed_from_path(self, fragment_path: str) -> str: + policy_bin_str = str(self.policy_bin) + if not os.path.exists(fragment_path): + eprint(f"The fragment file at {fragment_path} does not exist") + + arg_list_chain = [policy_bin_str, "check", "--in", fragment_path, "--verbose"] + + item = call_cose_sign_tool(arg_list_chain, "Error getting information from signed fragment file") + + stdout = item.stdout.decode("utf-8") + + # we want the text between the name and the next newline + return stdout.split("feed: ")[1].split("\n")[0] diff --git a/src/confcom/azext_confcom/custom.py b/src/confcom/azext_confcom/custom.py index 7bc3f2cc34d..89d727f4f21 100644 --- a/src/confcom/azext_confcom/custom.py +++ b/src/confcom/azext_confcom/custom.py @@ -11,7 +11,7 @@ from azext_confcom.config import ( DEFAULT_REGO_FRAGMENTS, POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS, - + REGO_IMPORT_FILE_STRUCTURE, ) from azext_confcom import os_util @@ -22,7 +22,8 @@ inject_policy_into_template, inject_policy_into_yaml, print_existing_policy_from_arm_template, - print_existing_policy_from_yaml + print_existing_policy_from_yaml, + get_image_name, ) from azext_confcom.fragment_util import get_all_fragment_contents from azext_confcom.init_checks import run_initial_docker_checks @@ -93,7 +94,6 @@ def acipolicygen_confcom( check_infrastructure_svn(infrastructure_svn) fragments_list = [] - fragment_policy_list = [] # gather information about the fragments being used in the new policy if include_fragments: fragments_list = os_util.load_json_from_file(fragments_json or input_path) @@ -102,7 +102,6 @@ def acipolicygen_confcom( # convert to list if it's just a dict if not isinstance(fragments_list, list): fragments_list = [fragments_list] - fragment_policy_list = get_all_fragment_contents(fragments_list) # telling the user what operation we're doing logger.warning( @@ -132,7 +131,6 @@ def acipolicygen_confcom( approve_wildcards=approve_wildcards, diff_mode=diff, rego_imports=fragments_list, - fragment_contents=fragment_policy_list, exclude_default_fragments=exclude_default_fragments, ) elif image_name: @@ -156,6 +154,21 @@ def acipolicygen_confcom( if not isinstance(container_group_policies, list): container_group_policies = [container_group_policies] + # get all of the fragments that are being used in the policy + # and associate them with each container group + + if include_fragments: + fragment_policy_list = [] + container_names = [] + fragment_imports = [] + for policy in container_group_policies: + fragment_imports.extend(policy.get_fragments()) + for container in policy.get_images(): + container_names.append(container.get_container_image()) + fragment_policy_list = get_all_fragment_contents(container_names, fragment_imports) + for policy in container_group_policies: + policy.set_fragment_contents(fragment_policy_list) + for count, policy in enumerate(container_group_policies): policy.populate_policy_content_for_all_images( individual_image=bool(image_name), tar_mapping=tar_mapping, faster_hashing=faster_hashing @@ -209,12 +222,14 @@ def acifragmentgen_confcom( key: str, chain: str, minimum_svn: int, + image_target: str = "", algo: str = "ES384", fragment_path: str = None, + omit_id: bool = False, generate_import: bool = False, disable_stdio: bool = False, debug_mode: bool = False, - output_filename: str = None, + output_filename: str = "", outraw: bool = False, upload_fragment: bool = False, no_print: bool = False, @@ -224,25 +239,40 @@ def acifragmentgen_confcom( if generate_import: cose_client = CoseSignToolProxy() - import_statement = cose_client.generate_import_from_path(fragment_path, minimum_svn=minimum_svn) + import_statements = [] + # images can have multiple fragments attached to them so we need an array to hold the import statements + if fragment_path: + import_statements = [cose_client.generate_import_from_path(fragment_path, minimum_svn=minimum_svn)] + elif image_name: + import_statements = oras_proxy.generate_imports_from_image_name(image_name, minimum_svn=minimum_svn) + + fragments_file_contents = {} + fragments_list = [] if fragments_json: + logger.info("Creating/appending import statement JSON file") if os.path.isfile(fragments_json): - logger.info("Appending import statement to JSON file") fragments_file_contents = os_util.load_json_from_file(fragments_json) + if isinstance(fragments_file_contents, list): + logger.error( + "%s %s %s %s", + "Unsupported JSON file format. ", + "Please make sure the outermost structure is not an array. ", + "An empty import file should look like: ", + REGO_IMPORT_FILE_STRUCTURE + ) + sys.exit(1) fragments_list = fragments_file_contents.get(POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS, []) - else: - logger.info("Creating import statement JSON file") - fragments_file_contents = {} - fragments_list = [] - # convert to list if it's just a dict - if not isinstance(fragments_list, list): - fragments_list = [fragments_list] - fragments_list.append(import_statement) - - fragments_file_contents[POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS] = fragments_list + + # convert to list if it's just a dict + if isinstance(fragments_list, dict): + fragments_list = [fragments_list] + fragments_list += import_statements + + fragments_file_contents[POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS] = fragments_list + if fragments_json: os_util.write_str_to_file(fragments_json, pretty_print_func(fragments_file_contents)) else: - print(pretty_print_func(import_statement)) + print(pretty_print_func(fragments_file_contents)) return tar_mapping = tar_mapping_validation(tar_mapping_location, using_config_file=bool(input_path)) @@ -258,22 +288,38 @@ def acifragmentgen_confcom( policy = security_policy.load_policy_from_config_file( input_path, debug_mode=debug_mode, disable_stdio=disable_stdio ) + # get all of the fragments that are being used in the policy + # and associate them with each container group + fragment_policy_list = [] + container_names = [] + fragment_imports = policy.get_fragments() + for container in policy.get_images(): + container_names.append(container.get_container_image()) + fragment_policy_list = get_all_fragment_contents(container_names, fragment_imports) + policy.set_fragment_contents(fragment_policy_list) policy.populate_policy_content_for_all_images( individual_image=bool(image_name), tar_mapping=tar_mapping ) # if no feed is provided, use the first image's feed # to assume it's an image-attached fragment + if not image_target: + policy_images = policy.get_images() + if not policy_images: + logger.error("No images found in the policy or all images are covered by fragments") + sys.exit(1) + image_target = policy_images[0].containerImage if not feed: - feed = policy.get_images()[0].containerImage + # strip the tag or hash off the image name so there are stable feed names + feed = get_image_name(image_target) - fragment_text = policy.generate_fragment(namespace, svn, output_type) + fragment_text = policy.generate_fragment(namespace, svn, output_type, omit_id=omit_id) if output_type != security_policy.OutputType.DEFAULT and not no_print: print(fragment_text) # take ".rego" off the end of the filename if it's there, it'll get added back later - output_filename.replace(".rego", "") + output_filename = output_filename.replace(".rego", "") filename = f"{output_filename or namespace}.rego" os_util.write_str_to_file(filename, fragment_text) @@ -284,7 +330,7 @@ def acifragmentgen_confcom( cose_proxy.cose_sign(filename, key, chain, feed, iss, algo, out_path) if upload_fragment: - oras_proxy.attach_fragment_to_image(feed, out_path) + oras_proxy.attach_fragment_to_image(image_target, out_path) def katapolicygen_confcom( diff --git a/src/confcom/azext_confcom/data/internal_config.json b/src/confcom/azext_confcom/data/internal_config.json index dc7250c299b..1d70aa60b09 100644 --- a/src/confcom/azext_confcom/data/internal_config.json +++ b/src/confcom/azext_confcom/data/internal_config.json @@ -1,5 +1,5 @@ { - "version": "1.1.1", + "version": "1.2.0", "hcsshim_config": { "maxVersion": "1.0.0", "minVersion": "0.0.1" diff --git a/src/confcom/azext_confcom/data/rules.rego b/src/confcom/azext_confcom/data/rules.rego index 481b70b0d1f..a5208cf9d3b 100644 --- a/src/confcom/azext_confcom/data/rules.rego +++ b/src/confcom/azext_confcom/data/rules.rego @@ -1,3 +1,7 @@ +# Copyright (c) 2023 Microsoft Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# package agent_policy import future.keywords.in @@ -6,23 +10,36 @@ import future.keywords.every import input # Default values, returned by OPA when rules cannot be evaluated to true. +default AddARPNeighborsRequest := false +default AddSwapRequest := false +default CloseStdinRequest := false default CopyFileRequest := false default CreateContainerRequest := false -default CreateSandboxRequest := true +default CreateSandboxRequest := false default DestroySandboxRequest := true default ExecProcessRequest := false default GetOOMEventRequest := true default GuestDetailsRequest := true +default ListInterfacesRequest := false +default ListRoutesRequest := false +default MemHotplugByProbeRequest := false default OnlineCPUMemRequest := true -default PullImageRequest := true +default PauseContainerRequest := false default ReadStreamRequest := false default RemoveContainerRequest := true default RemoveStaleVirtiofsShareMountsRequest := true +default ReseedRandomDevRequest := false +default ResumeContainerRequest := false +default SetGuestDateTimeRequest := false +default SetPolicyRequest := false default SignalProcessRequest := true default StartContainerRequest := true +default StartTracingRequest := false default StatsContainerRequest := true +default StopTracingRequest := false default TtyWinResizeRequest := true -default UpdateEphemeralMountsRequest := true +default UpdateContainerRequest := false +default UpdateEphemeralMountsRequest := false default UpdateInterfaceRequest := true default UpdateRoutesRequest := true default WaitProcessRequest := true @@ -34,15 +51,28 @@ default WriteStreamRequest := false # them and inspect OPA logs for the root cause of a failure. default AllowRequestsFailingPolicy := false +# Constants +S_NAME_KEY = "io.kubernetes.cri.sandbox-name" +S_NAMESPACE_KEY = "io.kubernetes.cri.sandbox-namespace" + CreateContainerRequest { + # Check if the input request should be rejected even before checking the + # policy_data.containers information. + allow_create_container_input + i_oci := input.OCI i_storages := input.storages + # Check if any element from the policy_data.containers array allows the input request. some p_container in policy_data.containers print("======== CreateContainerRequest: trying next policy container") + p_pidns := p_container.sandbox_pidns + i_pidns := input.sandbox_pidns + print("CreateContainerRequest: p_pidns =", p_pidns, "i_pidns =", i_pidns) + p_pidns == i_pidns + p_oci := p_container.OCI - p_storages := p_container.storages print("CreateContainerRequest: p Version =", p_oci.Version, "i Version =", i_oci.Version) p_oci.Version == i_oci.Version @@ -51,12 +81,46 @@ CreateContainerRequest { p_oci.Root.Readonly == i_oci.Root.Readonly allow_anno(p_oci, i_oci) + + p_storages := p_container.storages allow_by_anno(p_oci, i_oci, p_storages, i_storages) + allow_linux(p_oci, i_oci) print("CreateContainerRequest: true") } +allow_create_container_input { + print("allow_create_container_input: input =", input) + + count(input.shared_mounts) == 0 + is_null(input.string_user) + + i_oci := input.OCI + is_null(i_oci.Hooks) + is_null(i_oci.Solaris) + is_null(i_oci.Windows) + + i_linux := i_oci.Linux + count(i_linux.GIDMappings) == 0 + count(i_linux.MountLabel) == 0 + count(i_linux.Resources.Devices) == 0 + count(i_linux.RootfsPropagation) == 0 + count(i_linux.UIDMappings) == 0 + is_null(i_linux.IntelRdt) + is_null(i_linux.Resources.BlockIO) + is_null(i_linux.Resources.Network) + is_null(i_linux.Resources.Pids) + is_null(i_linux.Seccomp) + i_linux.Sysctl == {} + + i_process := i_oci.Process + count(i_process.SelinuxLabel) == 0 + count(i_process.User.Username) == 0 + + print("allow_create_container_input: true") +} + # Reject unexpected annotations. allow_anno(p_oci, i_oci) { print("allow_anno 1: start") @@ -95,16 +159,14 @@ allow_anno_key(i_key, p_oci) { print("allow_anno_key 2: true") } -# Get the value of the "io.kubernetes.cri.sandbox-name" annotation and +# Get the value of the S_NAME_KEY annotation and # correlate it with other annotations and process fields. allow_by_anno(p_oci, i_oci, p_storages, i_storages) { print("allow_by_anno 1: start") - s_name := "io.kubernetes.cri.sandbox-name" + not p_oci.Annotations[S_NAME_KEY] - not p_oci.Annotations[s_name] - - i_s_name := i_oci.Annotations[s_name] + i_s_name := i_oci.Annotations[S_NAME_KEY] print("allow_by_anno 1: i_s_name =", i_s_name) allow_by_sandbox_name(p_oci, i_oci, p_storages, i_storages, i_s_name) @@ -114,10 +176,8 @@ allow_by_anno(p_oci, i_oci, p_storages, i_storages) { allow_by_anno(p_oci, i_oci, p_storages, i_storages) { print("allow_by_anno 2: start") - s_name := "io.kubernetes.cri.sandbox-name" - - p_s_name := p_oci.Annotations[s_name] - i_s_name := i_oci.Annotations[s_name] + p_s_name := p_oci.Annotations[S_NAME_KEY] + i_s_name := i_oci.Annotations[S_NAME_KEY] print("allow_by_anno 2: i_s_name =", i_s_name, "p_s_name =", p_s_name) allow_sandbox_name(p_s_name, i_s_name) @@ -129,16 +189,14 @@ allow_by_anno(p_oci, i_oci, p_storages, i_storages) { allow_by_sandbox_name(p_oci, i_oci, p_storages, i_storages, s_name) { print("allow_by_sandbox_name: start") - s_namespace := "io.kubernetes.cri.sandbox-namespace" - - p_namespace := p_oci.Annotations[s_namespace] - i_namespace := i_oci.Annotations[s_namespace] + p_namespace := p_oci.Annotations[S_NAMESPACE_KEY] + i_namespace := i_oci.Annotations[S_NAMESPACE_KEY] print("allow_by_sandbox_name: p_namespace =", p_namespace, "i_namespace =", i_namespace) p_namespace == i_namespace allow_by_container_types(p_oci, i_oci, s_name, p_namespace) allow_by_bundle_or_sandbox_id(p_oci, i_oci, p_storages, i_storages) - allow_process(p_oci, i_oci, s_name) + allow_process(p_oci.Process, i_oci.Process, s_name) print("allow_by_sandbox_name: true") } @@ -429,36 +487,62 @@ allow_by_bundle_or_sandbox_id(p_oci, i_oci, p_storages, i_storages) { print("allow_by_bundle_or_sandbox_id: true") } -allow_process(p_oci, i_oci, s_name) { - p_process := p_oci.Process - i_process := i_oci.Process +allow_process_common(p_process, i_process, s_name) { + print("allow_process_common: p_process =", p_process) + print("allow_process_common: i_process = ", i_process) + print("allow_process_common: s_name =", s_name) - print("allow_process: i terminal =", i_process.Terminal, "p terminal =", p_process.Terminal) - p_process.Terminal == i_process.Terminal - - print("allow_process: i cwd =", i_process.Cwd, "i cwd =", p_process.Cwd) p_process.Cwd == i_process.Cwd - - print("allow_process: i noNewPrivileges =", i_process.NoNewPrivileges, "p noNewPrivileges =", p_process.NoNewPrivileges) p_process.NoNewPrivileges == i_process.NoNewPrivileges - allow_caps(p_process.Capabilities, i_process.Capabilities) allow_user(p_process, i_process) - allow_args(p_process, i_process, s_name) allow_env(p_process, i_process, s_name) + print("allow_process_common: true") +} + +# Compare the OCI Process field of a policy container with the input OCI Process from a CreateContainerRequest +allow_process(p_process, i_process, s_name) { + print("allow_process: start") + + allow_args(p_process, i_process, s_name) + allow_process_common(p_process, i_process, s_name) + allow_caps(p_process.Capabilities, i_process.Capabilities) + p_process.Terminal == i_process.Terminal + print("allow_process: true") } +# Compare the OCI Process field of a policy container with the input process field from ExecProcessRequest +allow_interactive_process(p_process, i_process, s_name) { + print("allow_interactive_process: start") + + allow_process_common(p_process, i_process, s_name) + allow_exec_caps(i_process.Capabilities) + + # These are commands enabled using ExecProcessRequest commands and/or regex from the settings file. + # They can be executed interactively so allow them to use any value for i_process.Terminal. + + print("allow_interactive_process: true") +} + +# Compare the OCI Process field of a policy container with the input process field from ExecProcessRequest +allow_probe_process(p_process, i_process, s_name) { + print("allow_probe_process: start") + + allow_process_common(p_process, i_process, s_name) + allow_exec_caps(i_process.Capabilities) + p_process.Terminal == i_process.Terminal + + print("allow_probe_process: true") +} + allow_user(p_process, i_process) { p_user := p_process.User i_user := i_process.User - # TODO: track down the reason for mcr.microsoft.com/oss/bitnami/redis:6.0.8 being - # executed with uid = 0 despite having "User": "1001" in its container image - # config. - #print("allow_user: input uid =", i_user.UID, "policy uid =", p_user.UID) - #p_user.UID == i_user.UID + print("allow_user: input uid =", i_user.UID, "policy uid =", p_user.UID) + p_user.UID == i_user.UID # TODO: track down the reason for registry.k8s.io/pause:3.9 being # executed with gid = 0 despite having "65535:65535" in its container image @@ -526,6 +610,7 @@ allow_env(p_process, i_process, s_name) { print("allow_env: i env =", i_process.Env) every i_var in i_process.Env { + print("allow_env: i_var =", i_var) allow_var(p_process, i_process, i_var, s_name) } @@ -534,22 +619,17 @@ allow_env(p_process, i_process, s_name) { # Allow input env variables that are present in the policy data too. allow_var(p_process, i_process, i_var, s_name) { - print("allow_var 1: i_var =", i_var) - some p_var in p_process.Env p_var == i_var - print("allow_var 1: true") } # Match input with one of the policy variables, after substituting $(sandbox-name). allow_var(p_process, i_process, i_var, s_name) { - print("allow_var 2: i_var =", i_var) - some p_var in p_process.Env p_var2 := replace(p_var, "$(sandbox-name)", s_name) - print("allow_var 2: p_var2 =", p_var2) + print("allow_var 2: p_var2 =", p_var2) p_var2 == i_var print("allow_var 2: true") @@ -557,24 +637,13 @@ allow_var(p_process, i_process, i_var, s_name) { # Allow input env variables that match with a request_defaults regex. allow_var(p_process, i_process, i_var, s_name) { - print("allow_var 3: start") - some p_regex1 in policy_data.request_defaults.CreateContainerRequest.allow_env_regex - print("allow_var 3: p_regex1 =", p_regex1) - p_regex2 := replace(p_regex1, "$(ipv4_a)", policy_data.common.ipv4_a) - print("allow_var 3: p_regex2 =", p_regex2) - p_regex3 := replace(p_regex2, "$(ip_p)", policy_data.common.ip_p) - print("allow_var 3: p_regex3 =", p_regex3) - p_regex4 := replace(p_regex3, "$(svc_name)", policy_data.common.svc_name) - print("allow_var 3: p_regex4 =", p_regex4) - p_regex5 := replace(p_regex4, "$(dns_label)", policy_data.common.dns_label) - print("allow_var 3: p_regex5 =", p_regex5) - print("allow_var 3: i_var =", i_var) + print("allow_var 3: p_regex5 =", p_regex5) regex.match(p_regex5, i_var) print("allow_var 3: true") @@ -582,8 +651,6 @@ allow_var(p_process, i_process, i_var, s_name) { # Allow fieldRef "fieldPath: status.podIP" values. allow_var(p_process, i_process, i_var, s_name) { - print("allow_var 4: i_var =", i_var) - name_value := split(i_var, "=") count(name_value) == 2 is_ip(name_value[1]) @@ -596,8 +663,6 @@ allow_var(p_process, i_process, i_var, s_name) { # Allow common fieldRef variables. allow_var(p_process, i_process, i_var, s_name) { - print("allow_var 5: i_var =", i_var) - name_value := split(i_var, "=") count(name_value) == 2 @@ -617,8 +682,6 @@ allow_var(p_process, i_process, i_var, s_name) { # Allow fieldRef "fieldPath: status.hostIP" values. allow_var(p_process, i_process, i_var, s_name) { - print("allow_var 6: i_var =", i_var) - name_value := split(i_var, "=") count(name_value) == 2 is_ip(name_value[1]) @@ -631,8 +694,6 @@ allow_var(p_process, i_process, i_var, s_name) { # Allow resourceFieldRef values (e.g., "limits.cpu"). allow_var(p_process, i_process, i_var, s_name) { - print("allow_var 7: i_var =", i_var) - name_value := split(i_var, "=") count(name_value) == 2 @@ -696,8 +757,9 @@ is_ip_other_byte(component) { # OCI root.Path allow_root_path(p_oci, i_oci, bundle_id) { + i_path := i_oci.Root.Path p_path1 := p_oci.Root.Path - print("allow_root_path: p_path1 =", p_path1) + print("allow_root_path: i_path =", i_path, "p_path1 =", p_path1) p_path2 := replace(p_path1, "$(cpath)", policy_data.common.cpath) print("allow_root_path: p_path2 =", p_path2) @@ -705,16 +767,17 @@ allow_root_path(p_oci, i_oci, bundle_id) { p_path3 := replace(p_path2, "$(bundle-id)", bundle_id) print("allow_root_path: p_path3 =", p_path3) - p_path3 == i_oci.Root.Path + p_path3 == i_path print("allow_root_path: true") } # device mounts allow_mount(p_oci, i_mount, bundle_id, sandbox_id) { - print("allow_mount: start") + print("allow_mount: i_mount =", i_mount) some p_mount in p_oci.Mounts + print("allow_mount: p_mount =", p_mount) check_mount(p_mount, i_mount, bundle_id, sandbox_id) # TODO: are there any other required policy checks for mounts - e.g., @@ -724,22 +787,12 @@ allow_mount(p_oci, i_mount, bundle_id, sandbox_id) { } check_mount(p_mount, i_mount, bundle_id, sandbox_id) { - print("check_mount 1: p_mount =", p_mount) - print("check_mount 1: i_mount =", i_mount) - p_mount == i_mount - print("check_mount 1: true") } check_mount(p_mount, i_mount, bundle_id, sandbox_id) { - print("check_mount 2: i destination =", i_mount.destination, "p destination =", p_mount.destination) p_mount.destination == i_mount.destination - - print("check_mount 2: i type =", i_mount.type_, "p type =", p_mount.type_) p_mount.type_ == i_mount.type_ - - print("check_mount 2: i options =", i_mount.options) - print("check_mount 2: p options =", p_mount.options) p_mount.options == i_mount.options mount_source_allows(p_mount, i_mount, bundle_id, sandbox_id) @@ -748,46 +801,51 @@ check_mount(p_mount, i_mount, bundle_id, sandbox_id) { } mount_source_allows(p_mount, i_mount, bundle_id, sandbox_id) { - print("mount_source_allows 1: i_mount.source =", i_mount.source) - regex1 := p_mount.source - print("mount_source_allows 1: regex1 =", regex1) - regex2 := replace(regex1, "$(sfprefix)", policy_data.common.sfprefix) - print("mount_source_allows 1: regex2 =", regex2) - regex3 := replace(regex2, "$(cpath)", policy_data.common.cpath) - print("mount_source_allows 1: regex3 =", regex3) - regex4 := replace(regex3, "$(bundle-id)", bundle_id) - print("mount_source_allows 1: regex4 =", regex4) + print("mount_source_allows 1: regex4 =", regex4) regex.match(regex4, i_mount.source) print("mount_source_allows 1: true") } mount_source_allows(p_mount, i_mount, bundle_id, sandbox_id) { - print("mount_source_allows 2: i_mount.source=", i_mount.source) - regex1 := p_mount.source - print("mount_source_allows 2: regex1 =", regex1) - regex2 := replace(regex1, "$(sfprefix)", policy_data.common.sfprefix) - print("mount_source_allows 2: regex2 =", regex2) - regex3 := replace(regex2, "$(cpath)", policy_data.common.cpath) - print("mount_source_allows 2: regex3 =", regex3) - regex4 := replace(regex3, "$(sandbox-id)", sandbox_id) - print("mount_source_allows 2: regex4 =", regex4) + print("mount_source_allows 2: regex4 =", regex4) regex.match(regex4, i_mount.source) print("mount_source_allows 2: true") } +mount_source_allows(p_mount, i_mount, bundle_id, sandbox_id) { + print("mount_source_allows 3: i_mount.source=", i_mount.source) + + i_source_parts = split(i_mount.source, "/") + b64_direct_vol_path = i_source_parts[count(i_source_parts) - 1] + + base64.is_valid(b64_direct_vol_path) + + source1 := p_mount.source + print("mount_source_allows 3: source1 =", source1) + + source2 := replace(source1, "$(spath)", policy_data.common.spath) + print("mount_source_allows 3: source2 =", source2) + + source3 := replace(source2, "$(b64-direct-vol-path)", b64_direct_vol_path) + print("mount_source_allows 3: source3 =", source3) + + source3 == i_mount.source + + print("mount_source_allows 3: true") +} ###################################################################### -# Storages +# Create container Storages allow_storages(p_storages, i_storages, bundle_id, sandbox_id) { p_count := count(p_storages) @@ -836,7 +894,6 @@ allow_storage(p_storages, i_storage, bundle_id, sandbox_id, layer_ids, root_hash allow_storage_options(p_storage, i_storage, layer_ids, root_hashes) { print("allow_storage_options 1: start") - p_storage.driver != "blk" p_storage.driver != "overlayfs" p_storage.options == i_storage.options @@ -873,8 +930,8 @@ allow_storage_options(p_storage, i_storage, layer_ids, root_hashes) { lowerdir := concat("=", ["lowerdir", p_storage.options[0]]) print("allow_storage_options 2: lowerdir =", lowerdir) - i_storage.options[i_count - 1] == lowerdir print("allow_storage_options 2: i_storage.options[i_count - 1] =", i_storage.options[i_count - 1]) + i_storage.options[i_count - 1] == lowerdir every i, policy_id in policy_ids { allow_overlay_layer(policy_id, policy_hashes[i], i_storage.options[i + 1]) @@ -907,6 +964,25 @@ allow_storage_options(p_storage, i_storage, layer_ids, root_hashes) { print("allow_storage_options 3: true") } +allow_storage_options(p_storage, i_storage, layer_ids, root_hashes) { + print("allow_storage_options 4: start") + + p_storage.driver == "smb" + p_opts_count := count(p_storage.options) + i_opts_count := count(i_storage.options) + i_opts_count == p_opts_count + 2 + + i_opt_matches := [i | i := idx; idx < p_opts_count; p_storage.options[idx] == i_storage.options[idx]] + count(i_opt_matches) == p_opts_count + + startswith(i_storage.options[i_opts_count-2], "addr=") + creds = split(i_storage.options[i_opts_count-1], ",") + count(creds) == 2 + startswith(creds[0], "username=") + startswith(creds[1], "password=") + + print("allow_storage_options 4: true") +} allow_overlay_layer(policy_id, policy_hash, i_option) { print("allow_overlay_layer: policy_id =", policy_id, "policy_hash =", policy_hash) @@ -927,7 +1003,6 @@ allow_overlay_layer(policy_id, policy_hash, i_option) { } allow_mount_point(p_storage, i_storage, bundle_id, sandbox_id, layer_ids) { - print("allow_mount_point 1: i_storage.mount_point =", i_storage.mount_point) p_storage.fstype == "tar" startswith(p_storage.mount_point, "$(layer") @@ -949,7 +1024,6 @@ allow_mount_point(p_storage, i_storage, bundle_id, sandbox_id, layer_ids) { print("allow_mount_point 1: true") } allow_mount_point(p_storage, i_storage, bundle_id, sandbox_id, layer_ids) { - print("allow_mount_point 2: i_storage.mount_point =", i_storage.mount_point) p_storage.fstype == "fuse3.kata-overlay" mount1 := replace(p_storage.mount_point, "$(cpath)", policy_data.common.cpath) @@ -961,7 +1035,6 @@ allow_mount_point(p_storage, i_storage, bundle_id, sandbox_id, layer_ids) { print("allow_mount_point 2: true") } allow_mount_point(p_storage, i_storage, bundle_id, sandbox_id, layer_ids) { - print("allow_mount_point 3: i_storage.mount_point =", i_storage.mount_point) p_storage.fstype == "local" mount1 := p_storage.mount_point @@ -978,7 +1051,6 @@ allow_mount_point(p_storage, i_storage, bundle_id, sandbox_id, layer_ids) { print("allow_mount_point 3: true") } allow_mount_point(p_storage, i_storage, bundle_id, sandbox_id, layer_ids) { - print("allow_mount_point 4: i_storage.mount_point =", i_storage.mount_point) p_storage.fstype == "bind" mount1 := p_storage.mount_point @@ -995,7 +1067,6 @@ allow_mount_point(p_storage, i_storage, bundle_id, sandbox_id, layer_ids) { print("allow_mount_point 4: true") } allow_mount_point(p_storage, i_storage, bundle_id, sandbox_id, layer_ids) { - print("allow_mount_point 5: i_storage.mount_point =", i_storage.mount_point) p_storage.fstype == "tmpfs" mount1 := p_storage.mount_point @@ -1005,8 +1076,46 @@ allow_mount_point(p_storage, i_storage, bundle_id, sandbox_id, layer_ids) { print("allow_mount_point 5: true") } +allow_mount_point(p_storage, i_storage, bundle_id, sandbox_id, layer_ids) { + print("allow_mount_point 6: i_storage.mount_point =", i_storage.mount_point) + allow_direct_vol_driver(p_storage, i_storage) + + mount1 := p_storage.mount_point + print("allow_mount_point 6: mount1 =", mount1) + + mount2 := replace(mount1, "$(spath)", policy_data.common.spath) + print("allow_mount_point 6: mount2 =", mount2) + + direct_vol_path := i_storage.source + mount3 := replace(mount2, "$(b64-direct-vol-path)", base64url.encode(direct_vol_path)) + print("allow_mount_point 6: mount3 =", mount3) -# process.Capabilities + mount3 == i_storage.mount_point + + print("allow_mount_point 6: true") +} + +allow_direct_vol_driver(p_storage, i_storage) { + print("allow_direct_vol_driver 1: start") + p_storage.driver == "blk" + print("allow_direct_vol_driver 1: true") +} +allow_direct_vol_driver(p_storage, i_storage) { + print("allow_direct_vol_driver 2: start") + p_storage.driver == "smb" + print("allow_direct_vol_driver 2: true") +} + +# ExecProcessRequest.process.Capabilities +allow_exec_caps(i_caps) { + not i_caps.Ambient + not i_caps.Bounding + not i_caps.Effective + not i_caps.Inheritable + not i_caps.Permitted +} + +# OCI.Process.Capabilities allow_caps(p_caps, i_caps) { print("allow_caps: policy Ambient =", p_caps.Ambient) print("allow_caps: input Ambient =", i_caps.Ambient) @@ -1060,25 +1169,120 @@ match_caps(p_caps, i_caps) { } ###################################################################### +check_directory_traversal(i_path) { + contains(i_path, "../") == false + endswith(i_path, "/..") == false +} + +check_symlink_source(i_src) { + i_src == "" + print("check_symlink_source 1: true") +} +check_symlink_source(i_src) { + i_src != "" + print("check_symlink_source 2: i_src =", i_src) + + regex.match(policy_data.common.s_source1, i_src) + + print("check_symlink_source 2: true") +} +check_symlink_source(i_src) { + i_src != "" + print("check_symlink_source 3: i_src =", i_src) + + regex.match(policy_data.common.s_source2, i_src) + check_directory_traversal(i_src) + + print("check_symlink_source 3: true") +} + +allow_sandbox_storages(i_storages) { + print("allow_sandbox_storages: i_storages =", i_storages) + + p_storages := policy_data.sandbox.storages + every i_storage in i_storages { + allow_sandbox_storage(p_storages, i_storage) + } + + print("allow_sandbox_storages: true") +} + +allow_sandbox_storage(p_storages, i_storage) { + print("allow_sandbox_storage: i_storage =", i_storage) + + some p_storage in p_storages + print("allow_sandbox_storage: p_storage =", p_storage) + i_storage == p_storage + + print("allow_sandbox_storage: true") +} + CopyFileRequest { print("CopyFileRequest: input.path =", input.path) + check_symlink_source(input.symlink_src) + check_directory_traversal(input.path) + some regex1 in policy_data.request_defaults.CopyFileRequest - regex2 := replace(regex1, "$(cpath)", policy_data.common.cpath) - regex.match(regex2, input.path) + regex2 := replace(regex1, "$(sfprefix)", policy_data.common.sfprefix) + regex3 := replace(regex2, "$(cpath)", policy_data.common.cpath) + regex4 := replace(regex3, "$(bundle-id)", "[a-z0-9]{64}") + print("CopyFileRequest: regex4 =", regex4) + + regex.match(regex4, input.path) print("CopyFileRequest: true") } +CreateSandboxRequest { + print("CreateSandboxRequest: input.guest_hook_path =", input.guest_hook_path) + count(input.guest_hook_path) == 0 + + print("CreateSandboxRequest: input.kernel_modules =", input.kernel_modules) + count(input.kernel_modules) == 0 + + i_pidns := input.sandbox_pidns + print("CreateSandboxRequest: i_pidns =", i_pidns) + i_pidns == false + + allow_sandbox_storages(input.storages) +} + +allow_exec(p_container, i_process) { + print("allow_exec: start") + + p_oci = p_container.OCI + p_s_name = p_oci.Annotations[S_NAME_KEY] + allow_probe_process(p_oci.Process, i_process, p_s_name) + + print("allow_exec: true") +} + +allow_interactive_exec(p_container, i_process) { + print("allow_interactive_exec: start") + + p_oci = p_container.OCI + p_s_name = p_oci.Annotations[S_NAME_KEY] + allow_interactive_process(p_oci.Process, i_process, p_s_name) + + print("allow_interactive_exec: true") +} + +# TODO: should other ExecProcessRequest input data fields be validated as well? ExecProcessRequest { print("ExecProcessRequest 1: input =", input) i_command = concat(" ", input.process.Args) - print("ExecProcessRequest 3: i_command =", i_command) + print("ExecProcessRequest 1: i_command =", i_command) some p_command in policy_data.request_defaults.ExecProcessRequest.commands + print("ExecProcessRequest 1: p_command =", p_command) p_command == i_command + # TODO: match p_container's ID with the input container_id. + some p_container in policy_data.containers + allow_interactive_exec(p_container, input.process) + print("ExecProcessRequest 1: true") } ExecProcessRequest { @@ -1086,15 +1290,16 @@ ExecProcessRequest { # TODO: match input container ID with its corresponding container.exec_commands. i_command = concat(" ", input.process.Args) - print("ExecProcessRequest 3: i_command =", i_command) + print("ExecProcessRequest 2: i_command =", i_command) - some container in policy_data.containers - some p_command in container.exec_commands + # TODO: match p_container's ID with the input container_id. + some p_container in policy_data.containers + some p_command in p_container.exec_commands print("ExecProcessRequest 2: p_command =", p_command) - - # TODO: should other input data fields be validated as well? p_command == i_command + allow_exec(p_container, input.process) + print("ExecProcessRequest 2: true") } ExecProcessRequest { @@ -1108,13 +1313,25 @@ ExecProcessRequest { regex.match(p_regex, i_command) + # TODO: match p_container's ID with the input container_id. + some p_container in policy_data.containers + allow_interactive_exec(p_container, input.process) + print("ExecProcessRequest 3: true") } +CloseStdinRequest { + policy_data.request_defaults.CloseStdinRequest == true +} + ReadStreamRequest { policy_data.request_defaults.ReadStreamRequest == true } +UpdateEphemeralMountsRequest { + policy_data.request_defaults.UpdateEphemeralMountsRequest == true +} + WriteStreamRequest { policy_data.request_defaults.WriteStreamRequest == true } diff --git a/src/confcom/azext_confcom/fragment_util.py b/src/confcom/azext_confcom/fragment_util.py index 61370d8eaaa..54b70a00db7 100644 --- a/src/confcom/azext_confcom/fragment_util.py +++ b/src/confcom/azext_confcom/fragment_util.py @@ -4,6 +4,9 @@ # -------------------------------------------------------------------------------------------- import yaml +import copy +from typing import List +from knack.log import get_logger from azext_confcom import config from azext_confcom import oras_proxy from azext_confcom.cose_proxy import CoseSignToolProxy @@ -12,6 +15,8 @@ extract_containers_from_text, ) +logger = get_logger(__name__) + # input is the full rego file as a string # output is all of the containers in the rego files as a list of dictionaries @@ -25,31 +30,43 @@ def combine_fragments_with_policy(all_fragments): return out_fragments -def get_all_fragment_contents(fragment_imports): +def get_all_fragment_contents( + image_names: List[str], + fragment_imports: List[dict], +) -> List[str]: + # was getting errors with pass by reference so we need to copy it + copied_fragment_imports = copy.deepcopy(fragment_imports) + fragment_feeds = [ case_insensitive_dict_get(fragment, config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_FEED) - for fragment in fragment_imports + for fragment in copied_fragment_imports ] all_fragments_contents = [] - cose_proxy = CoseSignToolProxy() + # get all the image attached fragments + for image in image_names: + # TODO: make sure this doesn't error out if the images aren't in a registry. + # This will probably be in the discover function + fragments, feeds = oras_proxy.pull_all_image_attached_fragments(image) + for fragment, feed in zip(fragments, feeds): + if feed in fragment_feeds: + all_fragments_contents.append(fragment) + else: + logger.warning("Fragment feed %s not in list of feeds to use. Skipping fragment.", feed) - for fragment in fragment_imports: + cose_proxy = CoseSignToolProxy() + # get all the local fragments + for fragment in copied_fragment_imports: + contents = [] # pull locally if there is a path, otherwise pull from the remote registry if ( - config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_PATH in fragment and - fragment[config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_PATH] + fragment.get(config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_PATH) ): contents = [ cose_proxy.extract_payload_from_path( fragment[config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_PATH] ) ] - else: - feed_name = case_insensitive_dict_get( - fragment, config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_FEED - ) - contents = oras_proxy.pull_all_image_attached_fragments(feed_name) # add the new fragments to the list of all fragments if they're not already there # the side effect of adding this way is that if we have a local path to a nested fragment @@ -68,7 +85,7 @@ def get_all_fragment_contents(fragment_imports): # it will end when there are no longer nested fragments to pull for new_fragment in fragments: if new_fragment[config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_FEED] not in fragment_feeds: - fragment_imports.append(new_fragment[config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_FEED]) + copied_fragment_imports.append(new_fragment) all_fragments_contents.append(content) diff --git a/src/confcom/azext_confcom/oras_proxy.py b/src/confcom/azext_confcom/oras_proxy.py index 0507b9449bc..c232a46d3f3 100644 --- a/src/confcom/azext_confcom/oras_proxy.py +++ b/src/confcom/azext_confcom/oras_proxy.py @@ -11,6 +11,7 @@ from azext_confcom.errors import eprint from azext_confcom.config import ARTIFACT_TYPE from azext_confcom.cose_proxy import CoseSignToolProxy +from azext_confcom.os_util import delete_silently host_os = platform.system() machine = platform.machine() @@ -31,9 +32,10 @@ def discover( if item.returncode == 0: json_output = json.loads(item.stdout.decode("utf-8")) - manifests = json_output["manifests"] - for manifest in manifests: - hashes.append(manifest["digest"]) + manifests = json_output.get("manifests", []) + if manifests is not None: + for manifest in manifests: + hashes.append(manifest["digest"]) # get the exit code from the subprocess else: if "401: Unauthorized" in item.stderr.decode("utf-8"): @@ -83,10 +85,12 @@ def pull_all_image_attached_fragments(image): # TODO: this will be for standalone fragments fragments = discover(image) fragment_contents = [] + feeds = [] proxy = CoseSignToolProxy() for fragment_digest in fragments: filename = pull(image, fragment_digest) text = proxy.extract_payload_from_path(filename) + feed = proxy.extract_feed_from_path(filename) # containers = extract_containers_from_text(text, REGO_CONTAINER_START) # new_fragments = extract_containers_from_text(text, REGO_FRAGMENT_START) # if new_fragments: @@ -96,7 +100,8 @@ def pull_all_image_attached_fragments(image): # if feed not in fragment_feeds: # fragment_contents.extend(pull_all_image_attached_fragments(feed, fragment_feeds=fragment_feeds)) fragment_contents.append(text) - return fragment_contents + feeds.append(feed) + return fragment_contents, feeds def check_oras_cli(): @@ -113,7 +118,14 @@ def attach_fragment_to_image(image_name: str, filename: str): if ":" not in image_name: image_name += ":latest" # attach the fragment to the image - arg_list = ["oras", "attach", "--artifact-type", ARTIFACT_TYPE, image_name, filename] + arg_list = [ + "oras", + "attach", + "--artifact-type", + ARTIFACT_TYPE, + image_name, + filename + ":application/cose-x509+rego" + ] item = call_oras_cli(arg_list, check=False) if item.returncode != 0: eprint(f"Could not attach fragment to image: {image_name}. Failed with {item.stderr}") @@ -124,3 +136,22 @@ def attach_fragment_to_image(image_name: str, filename: str): print("Attached fragment to image, but could not extract digest from output.") digest = digest_result.group(0) print(f"Fragment attached to image '{image_name}' with Digest:{digest}") + + +def generate_imports_from_image_name(image_name: str, minimum_svn: int) -> List[dict]: + cose_proxy = CoseSignToolProxy() + fragment_hashes = discover(image_name) + import_list = [] + + for fragment_hash in fragment_hashes: + filename = "" + try: + filename = pull(image_name, fragment_hash) + import_statement = cose_proxy.generate_import_from_path(filename, minimum_svn) + if import_statement not in import_list: + import_list.append(import_statement) + finally: + # clean up the fragment file + delete_silently(filename) + + return import_list diff --git a/src/confcom/azext_confcom/os_util.py b/src/confcom/azext_confcom/os_util.py index ee3d0e32c0d..0671d0556d2 100644 --- a/src/confcom/azext_confcom/os_util.py +++ b/src/confcom/azext_confcom/os_util.py @@ -225,3 +225,15 @@ def map_image_from_tar(image_name: str, tar: TarFile, tar_location: str): image_info["Architecture"] = image_info_raw.get("architecture") return image_info + + +# helper function to delete a file that may or may not exist +def delete_silently(filename: str) -> None: + try: + os.remove(filename) + except FileNotFoundError: + pass + except PermissionError: + eprint(f"Permission denied to delete file: {filename}") + except OSError as e: + eprint(f"Error deleting file: {filename}, {e}") diff --git a/src/confcom/azext_confcom/security_policy.py b/src/confcom/azext_confcom/security_policy.py index b3d6da3cf9e..34dbb7eddc9 100644 --- a/src/confcom/azext_confcom/security_policy.py +++ b/src/confcom/azext_confcom/security_policy.py @@ -35,6 +35,7 @@ get_diff_size, process_env_vars_from_yaml, convert_to_pod_spec, + get_volume_claim_templates, filter_non_pod_resources, decompose_confidential_properties, process_env_vars_from_config, @@ -151,6 +152,9 @@ def _get_rootfs_proxy(self) -> SecurityPolicyProxy: return self._rootfs_proxy + def set_fragment_contents(self, fragment_contents: List[str]) -> None: + self._fragment_contents = fragment_contents + def get_fragments(self) -> List[str]: return self._fragments or [] @@ -178,12 +182,12 @@ def get_serialized_output( # encode to base64 return os_util.str_to_base64(policy_str) - def generate_fragment(self, namespace: str, svn: str, output_type: int) -> str: + def generate_fragment(self, namespace: str, svn: str, output_type: int, omit_id: bool = False) -> str: return config.CUSTOMER_REGO_FRAGMENT % ( namespace, pretty_print_func(svn), pretty_print_func(self.get_fragments()), - self.get_serialized_output(output_type, rego_boilerplate=False, include_sidecars=False), + self.get_serialized_output(output_type, rego_boilerplate=False, include_sidecars=False, omit_id=omit_id), ) def _add_rego_boilerplate(self, output: str) -> str: @@ -380,12 +384,12 @@ def _policy_serialization(self, pretty_print=False, include_sidecars: bool = Tru policy = [] regular_container_images = self.get_images() - is_sidecars = True + # in the case where fragments cover all the customer containers, we still need the pause container + is_sidecars = all(is_sidecar(image.containerImage) for image in regular_container_images) for image in regular_container_images: - is_sidecars = is_sidecars and is_sidecar(image.containerImage) image_dict = image.get_policy_json(omit_id=omit_id) policy.append(image_dict) - if not is_sidecars and include_sidecars: + if (not is_sidecars or len(regular_container_images) == 0) and include_sidecars: # add in the default containers that have their hashes pre-computed policy += copy.deepcopy(config.DEFAULT_CONTAINERS) if self._disable_stdio: @@ -1018,6 +1022,7 @@ def load_policy_from_virtual_node_yaml_file( ) +# pylint: disable=R0912 def load_policy_from_virtual_node_yaml_str( yaml_contents_str: List[str], debug_mode: bool = False, @@ -1048,7 +1053,7 @@ def load_policy_from_virtual_node_yaml_str( yaml_contents = filter_non_pod_resources(yaml_contents) for yaml in yaml_contents: # extract existing policy and fragments for diff mode - metadata = case_insensitive_dict_get(yaml, "metadata") + metadata = case_insensitive_dict_get(yaml, config.VIRTUAL_NODE_YAML_METADATA) annotations = case_insensitive_dict_get(metadata, config.VIRTUAL_NODE_YAML_ANNOTATIONS) labels = case_insensitive_dict_get(metadata, config.VIRTUAL_NODE_YAML_LABELS) or [] use_workload_identity = ( @@ -1067,6 +1072,7 @@ def load_policy_from_virtual_node_yaml_str( existing_containers, existing_fragments = ([], []) # because there are many ways to get pod information, we normalize them so the interface is the same normalized_yaml = convert_to_pod_spec(yaml) + volume_claim_templates = get_volume_claim_templates(yaml) spec = case_insensitive_dict_get(normalized_yaml, "spec") if not spec: @@ -1076,17 +1082,17 @@ def load_policy_from_virtual_node_yaml_str( pod_security_context = case_insensitive_dict_get(spec, "securityContext") or {} policy_containers = [] - containers = case_insensitive_dict_get(spec, "containers") + containers = case_insensitive_dict_get(spec, config.ACI_FIELD_TEMPLATE_CONTAINERS) if not containers: eprint("YAML file does not contain a containers field") # NOTE: initContainers are not treated differently in the security policy # but they are treated differently in the pod spec # e.g. lifecycle and probes are not supported in initContainers - init_containers = case_insensitive_dict_get(spec, "initContainers") or [] + init_containers = case_insensitive_dict_get(spec, config.ACI_FIELD_TEMPLATE_INIT_CONTAINERS) or [] for container in containers + init_containers: # image and name - image = case_insensitive_dict_get(container, "image") + image = case_insensitive_dict_get(container, config.ACI_FIELD_TEMPLATE_IMAGE) if not image: eprint("Container does not have an image field") @@ -1101,45 +1107,70 @@ def load_policy_from_virtual_node_yaml_str( envs += config.VIRTUAL_NODE_ENV_RULES_WORKLOAD_IDENTITY # command - command = case_insensitive_dict_get(container, "command") or [] + command = case_insensitive_dict_get(container, config.VIRTUAL_NODE_YAML_COMMAND) or [] args = case_insensitive_dict_get(container, "args") or [] # mounts mounts = copy.deepcopy(config.DEFAULT_MOUNTS_VIRTUAL_NODE) volumes = case_insensitive_dict_get(spec, "volumes") or [] + # there can be implicit volumes from volumeClaimTemplates + # We need to add them to the list of volumes and note if they are readonly + for volume_claim_template in volume_claim_templates: + vct_metadata = case_insensitive_dict_get(volume_claim_template, config.VIRTUAL_NODE_YAML_METADATA) + temp_volume = { + config.VIRTUAL_NODE_YAML_NAME: + case_insensitive_dict_get(vct_metadata, config.VIRTUAL_NODE_YAML_NAME), + } + vct_spec = case_insensitive_dict_get(volume_claim_template, "spec") + if vct_spec: + vct_access_modes = case_insensitive_dict_get(vct_spec, "accessModes") + if vct_access_modes and config.VIRTUAL_NODE_YAML_READ_ONLY_MANY in vct_access_modes: + temp_volume[config.ACI_FIELD_TEMPLATE_MOUNTS_READONLY] = True + + volumes.append(temp_volume) + # set of volume types that are read-only by default read_only_types = {"configMap", "secret", "downwardAPI", "projected"} volume_mounts = case_insensitive_dict_get(container, "volumeMounts") if volume_mounts: for mount in volume_mounts: - mount_name = case_insensitive_dict_get(mount, "name") + mount_name = case_insensitive_dict_get(mount, config.VIRTUAL_NODE_YAML_NAME) mount_path = case_insensitive_dict_get(mount, "mountPath") # find the corresponding volume volume = next( - (vol for vol in volumes if case_insensitive_dict_get(vol, "name") == mount_name), + ( + vol for vol in volumes if case_insensitive_dict_get( + vol, config.VIRTUAL_NODE_YAML_NAME + ) == mount_name + ), None - ) + ) or {} # determine if this volume is one of the read-only types - read_only_default = any(key in read_only_types for key in volume.keys()) + read_only_default = ( + any(key in read_only_types for key in volume.keys()) or + volume.get(config.ACI_FIELD_TEMPLATE_MOUNTS_READONLY) + ) if read_only_default: # log warning if readOnly is explicitly set to false for a read-only volume type - if case_insensitive_dict_get(mount, "readOnly") is False: + if case_insensitive_dict_get(mount, config.ACI_FIELD_TEMPLATE_MOUNTS_READONLY) is False: logger.warning( "Volume '%s' in container '%s' is of a type that requires readOnly access (%s), " "but readOnly: false was specified. Enforcing readOnly: true for policy generation.", mount_name, - case_insensitive_dict_get(container, "name"), + case_insensitive_dict_get(container, config.VIRTUAL_NODE_YAML_NAME), ', '.join(read_only_types) ) mount_readonly = True else: # use the readOnly field or default to False for non-read-only volumes - mount_readonly = case_insensitive_dict_get(mount, "readOnly") or False + mount_readonly = case_insensitive_dict_get( + mount, config.ACI_FIELD_TEMPLATE_MOUNTS_READONLY + ) or False mounts.append({ config.ACI_FIELD_CONTAINERS_MOUNTS_TYPE: config.ACI_FIELD_YAML_MOUNT_TYPE, @@ -1148,9 +1179,11 @@ def load_policy_from_virtual_node_yaml_str( }) # container security context - container_security_context = case_insensitive_dict_get(container, "securityContext") or {} + container_security_context = case_insensitive_dict_get( + container, config.ACI_FIELD_TEMPLATE_SECURITY_CONTEXT + ) or {} - if case_insensitive_dict_get(container_security_context, "privileged") is True: + if case_insensitive_dict_get(container_security_context, config.ACI_FIELD_CONTAINERS_PRIVILEGED) is True: mounts += config.DEFAULT_MOUNTS_PRIVILEGED_VIRTUAL_NODE # security context @@ -1170,7 +1203,8 @@ def load_policy_from_virtual_node_yaml_str( policy_containers.append( { config.ACI_FIELD_CONTAINERS_ID: image, - config.ACI_FIELD_CONTAINERS_NAME: case_insensitive_dict_get(container, "name") or image, + config.ACI_FIELD_CONTAINERS_NAME: case_insensitive_dict_get( + container, config.VIRTUAL_NODE_YAML_NAME) or image, config.ACI_FIELD_CONTAINERS_CONTAINERIMAGE: image, config.ACI_FIELD_CONTAINERS_ENVS: envs, config.ACI_FIELD_CONTAINERS_COMMAND: command + args, diff --git a/src/confcom/azext_confcom/template_util.py b/src/confcom/azext_confcom/template_util.py index 829d644096e..b28424de3a0 100644 --- a/src/confcom/azext_confcom/template_util.py +++ b/src/confcom/azext_confcom/template_util.py @@ -76,6 +76,12 @@ def image_has_hash(image: str) -> bool: return "@sha256:" in image +def get_image_name(image: str) -> str: + if image_has_hash(image): + return image.split("@")[0] + return image.split(":")[0] + + def get_image_info(progress, message_queue, tar_mapping, image): image_info = None raw_image = None @@ -451,6 +457,15 @@ def convert_to_pod_spec_helper(pod_dict): return {} +def get_volume_claim_templates(pod_spec: dict) -> List[dict]: + volume_claim_templates = [] + if "spec" in pod_spec: + spec = pod_spec["spec"] + if "volumeClaimTemplates" in spec: + return spec["volumeClaimTemplates"] + return volume_claim_templates + + def filter_non_pod_resources(resources: List[dict]) -> List[dict]: """ Filter out non-pod spawning resources from a list of resources. diff --git a/src/confcom/azext_confcom/tests/latest/README.md b/src/confcom/azext_confcom/tests/latest/README.md index 7dd37c05dd5..82d4e8ac7e4 100644 --- a/src/confcom/azext_confcom/tests/latest/README.md +++ b/src/confcom/azext_confcom/tests/latest/README.md @@ -158,6 +158,7 @@ Test Name | Image Used | Purpose ---|---|--- test_fragment_user_container_customized_mounts | mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0 | See if mounts are translated correctly to the appropriate source and destination locations test_fragment_user_container_mount_injected_dns | mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0 | See if the resolvconf mount works properly +test_fragment_omit_id | mcr.microsoft.com/aci/msi-atlas-adapter:master_20201203.1 | Check that the id field is omitted from the policy test_fragment_injected_sidecar_container_msi | mcr.microsoft.com/aci/msi-atlas-adapter:master_20201203.1 | Make sure User mounts and env vars aren't added to sidecar containers, using JSON output format test_debug_processes | mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0 | Enable exec_processes via debug_mode test_fragment_sidecar | mcr.microsoft.com/aci/msi-atlas-adapter:master_20201210.1 | See if sidecar fragments can be created by a given policy.json diff --git a/src/confcom/azext_confcom/tests/latest/test_confcom_fragment.py b/src/confcom/azext_confcom/tests/latest/test_confcom_fragment.py index 39a08d6faec..8589548f38f 100644 --- a/src/confcom/azext_confcom/tests/latest/test_confcom_fragment.py +++ b/src/confcom/azext_confcom/tests/latest/test_confcom_fragment.py @@ -6,6 +6,7 @@ import os import unittest import json +import errno import subprocess from knack.util import CLIError @@ -15,17 +16,25 @@ load_policy_from_config_str ) +from azext_confcom.cose_proxy import CoseSignToolProxy + import azext_confcom.config as config from azext_confcom.template_util import ( case_insensitive_dict_get, extract_containers_and_fragments_from_text, ) +from azext_confcom.os_util import ( + write_str_to_file, + load_json_from_file, + load_str_from_file, + load_json_from_str, + delete_silently, +) from azext_confcom.custom import acifragmentgen_confcom from azure.cli.testsdk import ScenarioTest TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), "..")) - class FragmentMountEnforcement(unittest.TestCase): custom_json = """ { @@ -270,6 +279,24 @@ def setUpClass(cls): aci_policy.populate_policy_content_for_all_images() cls.aci_policy = aci_policy + + def test_fragment_omit_id(self): + output = self.aci_policy.get_serialized_output( + output_type=OutputType.RAW, rego_boilerplate=False, omit_id=True + ) + output_json = load_json_from_str(output) + + self.assertNotIn("id", output_json[0]) + + # test again with omit_id=False + output2 = self.aci_policy.get_serialized_output( + output_type=OutputType.RAW, rego_boilerplate=False + ) + output_json2 = load_json_from_str(output2) + + self.assertIn("id", output_json2[0]) + + def test_fragment_injected_sidecar_container_msi(self): image = self.aci_policy.get_images()[0] env_vars = [ @@ -500,6 +527,265 @@ def test_fragment_incorrect_sidecar(self): self.assertEqual(diff, expected_diff) +class FragmentPolicySigning(unittest.TestCase): + custom_json = """ +{ + "version": "1.0", + "fragments": [ + { + "issuer": "did:x509:0:sha256:I__iuL25oXEVFdTP_aBLx_eT1RPHbCQ_ECBQfYZpt9s::eku:1.3.6.1.4.1.311.76.59.1.3", + "feed": "contoso.azurecr.io/infra", + "minimum_svn": "1", + "includes": [ + "containers" + ] + } + ], + "containers": [ + { + "name": "my-image", + "properties": { + "image": "mcr.microsoft.com/acc/samples/aci/helloworld:2.8", + "execProcesses": [ + { + "command": [ + "echo", + "Hello World" + ] + } + ], + "volumeMounts": [ + { + "name": "azurefile", + "mountPath": "/mount/azurefile", + "mountType": "azureFile", + "readOnly": true + } + ], + "environmentVariables": [ + { + "name": "PATH", + "value": "/customized/path/value" + }, + { + "name": "TEST_REGEXP_ENV", + "value": "test_regexp_env(.*)", + "regex": true + } + ] + } + } + ] +} + """ + custom_json2 = """ +{ + "version": "1.0", + "fragments": [ + ], + "containers": [ + { + "name": "my-image", + "properties": { + "image": "mcr.microsoft.com/cbl-mariner/busybox:1.35", + "execProcesses": [ + { + "command": [ + "sleep", + "infinity" + ] + } + ], + "environmentVariables": [ + { + "name": "PATH", + "value": "/another/customized/path/value" + }, + { + "name": "TEST_REGEXP_ENV2", + "value": "test_regexp_env2(.*)", + "regex": true + } + ] + } + }, + { + "name": "my-image", + "properties": { + "image": "mcr.microsoft.com/acc/samples/aci/helloworld:2.8", + "execProcesses": [ + { + "command": [ + "echo", + "Hello World" + ] + } + ], + "volumeMounts": [ + { + "name": "azurefile", + "mountPath": "/mount/azurefile", + "mountType": "azureFile", + "readOnly": true + } + ], + "environmentVariables": [ + { + "name": "PATH", + "value": "/customized/path/value" + }, + { + "name": "TEST_REGEXP_ENV", + "value": "test_regexp_env(.*)", + "regex": true + } + ] + } + } + ] +} + """ + @classmethod + def setUpClass(cls): + # cls.key_dir_parent = os.path.join(TEST_DIR, '..', '..', '..', 'samples', 'certs') + # cls.key = os.path.join(cls.key_dir_parent, 'intermediateCA', 'private', 'ec_p384_private.pem') + # cls.chain = os.path.join(cls.key_dir_parent, 'intermediateCA', 'certs', 'www.contoso.com.chain.cert.pem') + # if not os.path.exists(cls.key) or not os.path.exists(cls.chain): + # script_path = os.path.join(cls.key_dir_parent, 'create_certchain.sh') + + # arg_list = [ + # script_path, + # ] + # os.chmod(script_path, 0o755) + + # # NOTE: this will raise an exception if it's run on windows and the key/cert files don't exist + # item = subprocess.run( + # arg_list, + # check=False, + # shell=True, + # cwd=cls.key_dir_parent, + # env=os.environ.copy(), + # ) + # if item.returncode != 0: + # raise Exception("Error creating certificate chain") + + with load_policy_from_config_str(cls.custom_json) as aci_policy: + aci_policy.populate_policy_content_for_all_images() + cls.aci_policy = aci_policy + with load_policy_from_config_str(cls.custom_json2) as aci_policy2: + aci_policy2.populate_policy_content_for_all_images() + cls.aci_policy2 = aci_policy2 + + # def test_signing(self): + # filename = "payload.rego" + # feed = "test_feed" + # algo = "ES384" + # out_path = filename + ".cose" + + # fragment_text = self.aci_policy.generate_fragment("payload", 1, OutputType.RAW) + # try: + # write_str_to_file(filename, fragment_text) + + # cose_proxy = CoseSignToolProxy() + # iss = cose_proxy.create_issuer(self.chain) + + # cose_proxy.cose_sign(filename, self.key, self.chain, feed, iss, algo, out_path) + # self.assertTrue(os.path.exists(filename)) + # self.assertTrue(os.path.exists(out_path)) + # except Exception as e: + # raise e + # finally: + # delete_silently(filename) + # delete_silently(out_path) + + # def test_generate_import(self): + # filename = "payload4.rego" + # feed = "test_feed" + # algo = "ES384" + # out_path = filename + ".cose" + + # fragment_text = self.aci_policy.generate_fragment("payload4", 1, OutputType.RAW) + # try: + # write_str_to_file(filename, fragment_text) + + # cose_proxy = CoseSignToolProxy() + # iss = cose_proxy.create_issuer(self.chain) + # cose_proxy.cose_sign(filename, self.key, self.chain, feed, iss, algo, out_path) + + # import_statement = cose_proxy.generate_import_from_path(out_path, 1) + # self.assertTrue(import_statement) + # self.assertEquals( + # import_statement.get(config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_ISSUER,""),iss + # ) + # self.assertEquals( + # import_statement.get(config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_FEED,""),feed + # ) + # self.assertEquals( + # import_statement.get(config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_MINIMUM_SVN,""),1 + # ) + # self.assertEquals( + # import_statement.get(config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS_INCLUDES,[]),[config.POLICY_FIELD_CONTAINERS, config.POLICY_FIELD_CONTAINERS_ELEMENTS_REGO_FRAGMENTS] + # ) + + # except Exception as e: + # raise e + # finally: + # delete_silently(filename) + # delete_silently(out_path) + + # def test_local_fragment_references(self): + # filename = "payload2.rego" + # filename2 = "payload3.rego" + # fragment_json = "fragment.json" + # feed = "test_feed" + # feed2 = "test_feed2" + # algo = "ES384" + # out_path = filename + ".cose" + # out_path2 = filename2 + ".cose" + + # fragment_text = self.aci_policy.generate_fragment("payload2", 1, OutputType.RAW) + + # try: + # write_str_to_file(filename, fragment_text) + # write_str_to_file(fragment_json, self.custom_json2) + + # cose_proxy = CoseSignToolProxy() + # iss = cose_proxy.create_issuer(self.chain) + # cose_proxy.cose_sign(filename, self.key, self.chain, feed, iss, algo, out_path) + + # # this will insert the import statement from the first fragment into the second one + # acifragmentgen_confcom( + # None, None, None, None, None, None, None, None, generate_import=True, minimum_svn=1, fragments_json=fragment_json, fragment_path=out_path + # ) + # # put the "path" field into the import statement + # temp_json = load_json_from_file(fragment_json) + # temp_json["fragments"][0]["path"] = out_path + + # write_str_to_file(fragment_json, json.dumps(temp_json)) + + # acifragmentgen_confcom( + # None, fragment_json, None, "payload3", 1, feed2, self.key, self.chain, None, output_filename=filename2 + # ) + + # # make sure all of our output files exist + # self.assertTrue(os.path.exists(filename2)) + # self.assertTrue(os.path.exists(out_path2)) + # self.assertTrue(os.path.exists(fragment_json)) + # # check the contents of the unsigned rego file + # rego_str = load_str_from_file(filename2) + # # see if the import statement is in the rego file + # self.assertTrue("test_feed" in rego_str) + # # make sure the image covered by the first fragment isn't in the second fragment + # self.assertFalse("mcr.microsoft.com/acc/samples/aci/helloworld:2.8" in rego_str) + # except Exception as e: + # raise e + # finally: + # delete_silently(filename) + # delete_silently(out_path) + # delete_silently(filename2) + # delete_silently(out_path2) + # delete_silently(fragment_json) + class InitialFragmentErrors(ScenarioTest): def test_invalid_input(self): with self.assertRaises(CLIError) as wrapped_exit: diff --git a/src/confcom/azext_confcom/tests/latest/test_confcom_image.py b/src/confcom/azext_confcom/tests/latest/test_confcom_image.py index e866f93db6c..bb323473a86 100644 --- a/src/confcom/azext_confcom/tests/latest/test_confcom_image.py +++ b/src/confcom/azext_confcom/tests/latest/test_confcom_image.py @@ -47,8 +47,6 @@ def setUpClass(cls): def test_image_policy(self): # deep diff the output policies from the regular policy.json and the single image - print("self.aci_policy.get_serialized_output(): ", self.aci_policy.get_serialized_output(OutputType.PRETTY_PRINT)) - print("self.custom_policy.get_serialized_output(): ", self.custom_policy.get_serialized_output(OutputType.PRETTY_PRINT)) self.assertEqual(self.aci_policy.get_serialized_output(), self.custom_policy.get_serialized_output()) diff --git a/src/confcom/azext_confcom/tests/latest/test_confcom_kata.py b/src/confcom/azext_confcom/tests/latest/test_confcom_kata.py index 0bc7c6d980f..cfd3bf4d849 100644 --- a/src/confcom/azext_confcom/tests/latest/test_confcom_kata.py +++ b/src/confcom/azext_confcom/tests/latest/test_confcom_kata.py @@ -63,39 +63,39 @@ def test_invalid_config_map_path(self): os.remove(filename) self.assertNotEqual(wrapped_exit.exception.code, 0) - def test_valid_settings(self): - filename = "pod2.yaml" - try: - with open(filename, "w") as f: - f.write(KataPolicyGen.pod_string) - if host_os_linux: - katapolicygen_confcom( - filename, None - ) - else: - with self.assertRaises(SystemExit) as wrapped_exit: - katapolicygen_confcom( - filename, None - ) - self.assertNotEqual(wrapped_exit.exception.code, 0) - return + # def test_valid_settings(self): + # filename = "pod2.yaml" + # try: + # with open(filename, "w") as f: + # f.write(KataPolicyGen.pod_string) + # if host_os_linux: + # katapolicygen_confcom( + # filename, None + # ) + # else: + # with self.assertRaises(SystemExit) as wrapped_exit: + # katapolicygen_confcom( + # filename, None + # ) + # self.assertNotEqual(wrapped_exit.exception.code, 0) + # return - with open(filename, "r") as f: - content = f.read() - finally: - if os.path.exists(filename): - os.remove(filename) - if host_os_linux: - self.assertNotEqual(content, KataPolicyGen.pod_string, "Policy content not changed in yaml") + # with open(filename, "r") as f: + # content = f.read() + # finally: + # if os.path.exists(filename): + # os.remove(filename) + # if host_os_linux: + # self.assertNotEqual(content, KataPolicyGen.pod_string, "Policy content not changed in yaml") - def test_print_version(self): - if host_os_linux: - katapolicygen_confcom( - None, None, print_version=True - ) - else: - with self.assertRaises(SystemExit) as wrapped_exit: - katapolicygen_confcom( - None, None, print_version=True - ) - self.assertNotEqual(wrapped_exit.exception.code, 0) + # def test_print_version(self): + # if host_os_linux: + # katapolicygen_confcom( + # None, None, print_version=True + # ) + # else: + # with self.assertRaises(SystemExit) as wrapped_exit: + # katapolicygen_confcom( + # None, None, print_version=True + # ) + # self.assertNotEqual(wrapped_exit.exception.code, 0) diff --git a/src/confcom/azext_confcom/tests/latest/test_confcom_scenario.py b/src/confcom/azext_confcom/tests/latest/test_confcom_scenario.py index 6cb6375ddee..94d1601b687 100644 --- a/src/confcom/azext_confcom/tests/latest/test_confcom_scenario.py +++ b/src/confcom/azext_confcom/tests/latest/test_confcom_scenario.py @@ -577,8 +577,8 @@ def test_image_layers_python(self): aci_policy.populate_policy_content_for_all_images() layers = aci_policy.get_images()[0]._layers expected_layers = [ - "6750e14f6156783394b6837e615252668e74de3df7b1b7281deabdcf5d07c329", - "28551577cdd5c3971cbf47c119cee9c376f2d1b633dc5a3df5f01d4e9cb51aff" + "069a26cef9f2cd9c8bdbc38adc786292096e80a3f45d1da99532d8c349e3e852", + "07d84d8efe92985be1e67f32429ced5a5053d20c57fcb6c7b36b7395310163a1" ] self.assertEqual(len(layers), len(expected_layers)) for i in range(len(expected_layers)): diff --git a/src/confcom/azext_confcom/tests/latest/test_confcom_virtual_node.py b/src/confcom/azext_confcom/tests/latest/test_confcom_virtual_node.py index c3cb207539e..ad4f3c80ba5 100644 --- a/src/confcom/azext_confcom/tests/latest/test_confcom_virtual_node.py +++ b/src/confcom/azext_confcom/tests/latest/test_confcom_virtual_node.py @@ -236,6 +236,41 @@ class PolicyGeneratingVirtualNode(unittest.TestCase): - python3 """ + custom_yaml_volume_claim = """ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: web +spec: + serviceName: "nginx" + replicas: 2 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0 + ports: + - containerPort: 80 + name: web + volumeMounts: + - name: www + mountPath: /usr/share/nginx/html + volumeClaimTemplates: + - metadata: + name: www + spec: + accessModes: [ "ReadOnlyMany" ] + resources: + requests: + storage: 1Gi +""" + def test_compare_policy_sources(self): custom_policy = load_policy_from_str(self.custom_json) custom_policy.populate_policy_content_for_all_images() @@ -328,3 +363,21 @@ def test_workload_identity(self): for var in config.VIRTUAL_NODE_ENV_RULES_WORKLOAD_IDENTITY: self.assertTrue(var['name'] in env_rule_names) + + def test_volume_claim(self): + virtual_node_policy = load_policy_from_virtual_node_yaml_str(self.custom_yaml_volume_claim)[0] + virtual_node_policy.populate_policy_content_for_all_images() + container_start = "containers := " + containers = json.loads(extract_containers_from_text(virtual_node_policy.get_serialized_output(OutputType.PRETTY_PRINT), container_start)) + # get the volume mount from the first container + mounts = [ + mount + for mount in + containers[0][config.POLICY_FIELD_CONTAINERS_ELEMENTS_MOUNTS] + ] + self.assertTrue("/usr/share/nginx/html" in [mount[config.POLICY_FIELD_CONTAINERS_ELEMENTS_MOUNTS_DESTINATION] for mount in mounts]) + mount = [mount for mount in mounts if mount[config.POLICY_FIELD_CONTAINERS_ELEMENTS_MOUNTS_DESTINATION] == "/usr/share/nginx/html"][0] + self.assertTrue("ro" in mount[config.POLICY_FIELD_CONTAINERS_ELEMENTS_MOUNTS_OPTIONS]) + + # get the nginx mount and make sure it is readonly + containers[0][config.POLICY_FIELD_CONTAINERS_ELEMENTS_MOUNTS] \ No newline at end of file diff --git a/src/confcom/samples/certs/README.md b/src/confcom/samples/certs/README.md index 05314791542..248bb28d687 100644 --- a/src/confcom/samples/certs/README.md +++ b/src/confcom/samples/certs/README.md @@ -23,13 +23,13 @@ The image in `fragment_config.json` must be updated from `` to the i ./create_certchain.sh ``` -You will need to select (y) for four prompts to sign the certs needed to create a cert chain. - After completion, this will create the following files to be used in the confcom signing process: - `intermediate/private/ec_p384_private.pem` - `intermediateCA/certs/www.contoso.com.chain.cert.pem` +Note that for consecutive runs, the script will not completely overwrite the existing key and cert files. It is recommended to either delete the existing files or modify the path to create the new files elsewhere. + ## Run confcom *This step will generate the fragment policy, sign it with the certs created in the previous step, and upload the fragment to the container registry.* @@ -37,7 +37,7 @@ After completion, this will create the following files to be used in the confcom You may need to change the path to the chain and key files in the following command: ```bash -az confcom acifragmentgen --chain ./samples/certs/intermediateCA/certs/www.contoso.com.chain.cert.pem --key ./samples/certs/intermediateCA/private/ec_p384_private.pem --svn 1 --namespace contoso --config ./samples/config.json --upload-fragment +az confcom acifragmentgen --chain ./samples/certs/intermediateCA/certs/www.contoso.com.chain.cert.pem --key ./samples/certs/intermediateCA/private/ec_p384_private.pem --svn 1 --namespace contoso --input ./samples/config.json --upload-fragment ``` After running the command, there will be the following files created: @@ -65,20 +65,25 @@ The fragment can be seen in the Azure portal under the container repo's artifact To create an import statement for the newly created rego fragment, run the following command: ```bash -az confcom acifragmentgen --generate-import -p ./contoso.rego.cose --minimum-svn 1 +az confcom acifragmentgen --generate-import -p ./contoso.rego.cose --minimum-svn 1 --fragments-json fragments.json ``` -Which will output the fragment's import in json format. **Place this import statement into a new `fragments.json` file.** +Which will output the fragment's import in json format to the file `fragments.json`. example output: ```json { - "issuer": "did:x509:0:sha256:I__iuL25oXEVFdTP_aBLx_eT1RPHbCQ_ECBQfYZpt9s::eku:1.3.6.1.4.1.311.76.59.1.3", - "feed": "contoso.azurecr.io/infra", - "minimum_svn": "1", - "includes": [ - "containers" + "fragments": [ + { + "feed": "mcr.microsoft.com/acc/samples/aci/helloworld", + "includes": [ + "containers", + "fragments" + ], + "issuer": "did:x509:0:sha256:0NWnhcxjUwmwLCd7A-PubQRq08ig3icQxpW5d2f4Rbc::subject:CN:Contoso", + "minimum_svn": "1" + } ] } ``` diff --git a/src/confcom/samples/certs/create_certchain.sh b/src/confcom/samples/certs/create_certchain.sh index 5bdaa3ca299..80cb7ed17d5 100755 --- a/src/confcom/samples/certs/create_certchain.sh +++ b/src/confcom/samples/certs/create_certchain.sh @@ -1,6 +1,9 @@ +#!/bin/bash # Following guide from: https://www.golinuxcloud.com/openssl-create-certificate-chain-linux/ +OriginalPath=`pwd` -RootPath=/home//azure-cli-extensions/src/confcom/samples/certs +RootPath=`realpath $(dirname $0)` +cd $RootPath # create dirs for root CA mkdir -p $RootPath/rootCA/{certs,crl,newcerts,private,csr} @@ -29,23 +32,23 @@ chmod 400 $RootPath/rootCA/private/ca.key.pem openssl req -config openssl_root.cnf -key $RootPath/rootCA/private/ca.key.pem -new -x509 -days 7300 -sha256 -extensions v3_ca -out $RootPath/rootCA/certs/ca.cert.pem -subj "/C=US/ST=Georgia/L=Atlanta/O=Microsoft/OU=ACCCT/CN=Root CA" # change permissions on root key so it's not globally readable -chmod 444 $RootPath/rootCA/certs/ca.cert.pem +chmod 644 $RootPath/rootCA/certs/ca.cert.pem # verify root cert openssl x509 -noout -text -in $RootPath/rootCA/certs/ca.cert.pem # generate intermediate key openssl genrsa -out $RootPath/intermediateCA/private/intermediate.key.pem 4096 -chmod 400 $RootPath/intermediateCA/private/intermediate.key.pem +chmod 600 $RootPath/intermediateCA/private/intermediate.key.pem # make CSR for intermediate openssl req -config openssl_intermediate.cnf -key $RootPath/intermediateCA/private/intermediate.key.pem -new -sha256 -out $RootPath/intermediateCA/certs/intermediate.csr.pem -subj "/C=US/ST=Georgia/L=Atlanta/O=Microsoft/OU=ACCCT/CN=Intermediate CA" # sign intermediate cert with root -openssl ca -config openssl_root.cnf -extensions v3_intermediate_ca -days 3650 -notext -md sha256 -in $RootPath/intermediateCA/certs/intermediate.csr.pem -out $RootPath/intermediateCA/certs/intermediate.cert.pem +openssl ca -config openssl_root.cnf -extensions v3_intermediate_ca -days 3650 -notext -md sha256 -in $RootPath/intermediateCA/certs/intermediate.csr.pem -out $RootPath/intermediateCA/certs/intermediate.cert.pem -batch # make it readable by everyone -chmod 444 $RootPath/intermediateCA/certs/intermediate.cert.pem +chmod 644 $RootPath/intermediateCA/certs/intermediate.cert.pem # print the cert # openssl x509 -noout -text -in $RootPath/intermediateCA/certs/intermediate.cert.pem @@ -63,13 +66,13 @@ openssl verify -CAfile $RootPath/intermediateCA/certs/ca-chain.cert.pem $RootPat openssl ecparam -out $RootPath/intermediateCA/private/www.contoso.com.key.pem -name secp384r1 -genkey openssl pkcs8 -topk8 -nocrypt -in $RootPath/intermediateCA/private/www.contoso.com.key.pem -out $RootPath/intermediateCA/private/ec_p384_private.pem -chmod 400 $RootPath/intermediateCA/private/www.contoso.com.key.pem +chmod 600 $RootPath/intermediateCA/private/www.contoso.com.key.pem # create csr for server openssl req -config openssl_intermediate.cnf -key $RootPath/intermediateCA/private/www.contoso.com.key.pem -new -sha384 -out $RootPath/intermediateCA/csr/www.contoso.com.csr.pem -batch # sign server cert with intermediate key -openssl ca -config openssl_intermediate.cnf -extensions server_cert -days 375 -notext -md sha384 -in $RootPath/intermediateCA/csr/www.contoso.com.csr.pem -out $RootPath/intermediateCA/certs/www.contoso.com.cert.pem +openssl ca -config openssl_intermediate.cnf -extensions server_cert -days 375 -notext -md sha384 -in $RootPath/intermediateCA/csr/www.contoso.com.csr.pem -out $RootPath/intermediateCA/certs/www.contoso.com.cert.pem -batch # print the cert # openssl x509 -noout -text -in $RootPath/intermediateCA/certs/www.contoso.com.cert.pem @@ -78,4 +81,6 @@ openssl ca -config openssl_intermediate.cnf -extensions server_cert -days 375 -n # openssl x509 -pubkey -noout -in $RootPath/intermediateCA/certs/www.contoso.com.cert.pem -out $RootPath/intermediateCA/certs/pubkey.pem # create chain file -cat $RootPath/intermediateCA/certs/www.contoso.com.cert.pem $RootPath/intermediateCA/certs/intermediate.cert.pem $RootPath/rootCA/certs/ca.cert.pem > $RootPath/intermediateCA/certs/www.contoso.com.chain.cert.pem \ No newline at end of file +cat $RootPath/intermediateCA/certs/www.contoso.com.cert.pem $RootPath/intermediateCA/certs/intermediate.cert.pem $RootPath/rootCA/certs/ca.cert.pem > $RootPath/intermediateCA/certs/www.contoso.com.chain.cert.pem + +cd $OriginalPath \ No newline at end of file diff --git a/src/confcom/setup.py b/src/confcom/setup.py index e13a7d5bc4a..7fdb7b68c3b 100644 --- a/src/confcom/setup.py +++ b/src/confcom/setup.py @@ -19,7 +19,7 @@ logger.warn("Wheel is not available, disabling bdist_wheel hook") -VERSION = "1.1.1" +VERSION = "1.2.0" # The full list of classifiers is available at # https://pypi.python.org/pypi?%3Aaction=list_classifiers