diff --git a/config/registry/helm/index.md b/config/registry/helm/index.md
new file mode 100644
index 000000000..07b71d0a7
--- /dev/null
+++ b/config/registry/helm/index.md
@@ -0,0 +1,14 @@
+---
+title: "helm"
+linkTitle: "helm"
+date: 2022-04-11
+# we don't need to expose this in the documentation as a cloud provider
+# this is an implementation detail when using byok
+draft: true
+weight: 1
+description: Bring Your Own Kubernetes
+---
+
+# Helm provider: Bring Your Own Kubernetes
+
+Use Opta with an existing Kubernetes cluster.
diff --git a/config/registry/helm/index.yaml b/config/registry/helm/index.yaml
new file mode 100644
index 000000000..3d67128eb
--- /dev/null
+++ b/config/registry/helm/index.yaml
@@ -0,0 +1,24 @@
+required_providers:
+  helm:
+    source: "hashicorp/helm"
+    version: "2.4.1"
+backend:
+  local:
+    # this path is consistent with the terraform generator
+    path: "./tfstate/{layer_name}.tfstate"
+validator:
+  name: str()
+  org_name: regex('^[a-z0-9-]{,15}$', name="Valid identifier, regex='[a-z0-9-]{,15}'")
+  modules: list(module())
+  input_variables: list(map(), required=False)
+service_validator:
+  - name: str()
+    modules: list(module())
+    input_variables: list(map(), required=False)
+module_aliases:
+  k8s-service: local-k8s-service
+
+output_providers:
+  helm:
+    kubernetes:
+      config_path: "{kubeconfig}"
diff --git a/examples/README.md b/examples/README.md
index f1383181c..19c949190 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -4,6 +4,7 @@ This directory holds a compiled list of advanced, standalone, usages of Opta, mo
 - [airflow](/examples/airflow): Deploy Apache Airflow on AWS
 - [aws-lambda](/examples/aws-lambda): Deploy Apache Airflow on AWS
+- [byok-eks](/examples/byok-eks): Bring Your Own Kubernetes; use Opta with an existing EKS cluster
 - [flyte](/examples/flyte): Deploy Flyte on AWS
 - [full-stack-example](/examples/full-stack-example): Deploy a a todo list (including frontend, api, database, monitoring) locally or on AWS
 - [ghost](/examples/ghost): Deploy Ghost app on AWS
diff --git a/examples/byo-eks/README.md b/examples/byo-eks/README.md
deleted file mode 100644
index fddfe2bd5..000000000
--- a/examples/byo-eks/README.md
+++ /dev/null
@@ -1,111 +0,0 @@
-# Bring Your Own Cluster
-
-This is an example of using [Opta](https://github.com/run-x/opta) with an existing EKS cluster.
-
-# What does this do?
-
-If you already have an EKS cluster, and would like to try out Opta, follow these instructions to configure your cluster to work with Opta.
-
-If you don't have an EKS cluster, Opta can also create it, check [Getting Started](https://docs.opta.dev/getting-started/) instead.
-
-# What is included?
-
-By running terraform on an existing EKS cluster, your cluster will be configured to have the target [Network Architecture](https://docs.opta.dev/features/networking/network_overview/).
-
-The following components will be installed:
-
-- [Ingress Nginx](https://github.com/kubernetes/ingress-nginx) to expose services to the public
-- [Linkerd](https://linkerd.io/) as the service mesh.
-- [AWS Load Balancer Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller/) to manage the ELB for the Kubernetes cluster.
-
-Here is the break down of the terraform files:
-
-    .
-    └── terraform
-        ├──aws-lb-iam-policy.json            # The IAM policy for the load balancer
-        └──aws-load-balancer-controller.tf   # Create IAM role and install the AWS Load Balancer Controller
-        └──data.tf                           # Data fetched from providers
-        └──ingress-nginx.tf                  # Install the Nginx Ingres Controller
-        └──linkerd.tf                        # Install Linkerd
-        └──outputs.tf                        # Terraform outputs
-        └──providers.tf                      # Terraform providers
-        └──variables.tf                      # Terraform variables
-
-# Requirements
-
-To configure the cluster (this guide), you need to use an AWS user with permissions to create AWS policies and roles, and admin permission on the target EKS cluster.
-
-# Configure the cluster for Opta
-
-This step configures the networking stack (nginx/linkerd/load balancer) on an existing EKS cluster.
-
-- Init terraform
-```shell
-cd ./terraform
-terraform init
-```
-
-- [Optional] Configure a Terraform backend. By default, Terraform stores the state as a local file on disk. If you want to use a different backend such as S3, add this file locally.
-```terraform
-# ./terraform/backend.tf
-terraform {
-  backend "s3" {
-    bucket = "mybucket"
-    key    = "path/to/my/key"
-    region = "us-east-1"
-  }
-}
-```
-Check this [page](https://www.terraform.io/language/settings/backends) for more information or other backends.
-
-- Run terraformm plan
-```
-terraform plan -var kubeconfig=~/.kube/config -var cluster_name=my-cluster -var oidc_provider_url=https://oidc.eks.... -out=tf.plan
-
-Plan: XX to add, 0 to change, 0 to destroy.
-```
-
-For the target EKS cluster:
-- For `cluster_name`, run `aws eks list-clusters` to see the availables clusters.
-- For `oidc_provider_url`, see `OpenID Connect provider URL` in the EKS cluster page in the AWS console. For more information, check the [official documentation](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html)
-- For `kubeconfig`, check the [official documentation](https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html) if you don't have one yet.
-
-
-At this time, nothing was changed yet, you can review what will be created by terraform.
-
-- Run terraformm apply
-```
-terraform apply tf.plan
-
-Apply complete! Resources: XX added, 0 changed, 0 destroyed.
-
-Outputs:
-
-load_balancer_raw_dns = "xxx"
-
-```
-
-Note the load balancer DNS, this is the public endpoint to access your Kubernetes cluster.
-
-# Additional cluster configuration
-
-These steps are not automated with the terraform step, but you can configure them using these guides.
-- Configure DNS
-  - Follow this guide: [Routing traffic to an ELB load balancer](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer.html)
-  - Using the TODO..
-- Configure a public certificate:
-  - Follow this guide: [Requesting a public certificate](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-public.html)
-  - Using the certificate ARN, run the terraform commands with `-var load_balancer_cert_arn=...`
-
-# Deploy Kubernetes services with Opta
-
-Coming soon.
-
-# Uninstallation
-
-- Run terraformm destroy to remove the configuration added for Opta
-
-```
-terraform destroy -var kubeconfig=~/.kube/config -var cluster_name=my-cluster -var oidc_provider_url=https://oidc.eks....
-``` - diff --git a/examples/byok-eks/README.md b/examples/byok-eks/README.md new file mode 100644 index 000000000..9bab46bb4 --- /dev/null +++ b/examples/byok-eks/README.md @@ -0,0 +1,199 @@ +# Bring Your Own Cluster + +This is an example of using [Opta](https://github.com/run-x/opta) with an existing EKS cluster. + +# What does this do? + +This example provides Terraform files to configure Linkerd and Ingress Nginx controller in your EKS cluster to have the target [Network Architecture](https://docs.opta.dev/features/networking/network_overview/). +Once EKS is configured, you will be able to use Opta to deploy your service to Kubernetes. +Opta will generate the Terraform files and Helm chart for you, you only need to maintain the Opta file. + +![bring your own kubernetes](./img/byok.png) + + +# When to use this instead of full Opta? + +- Use this guide if you already have an EKS cluster, and would like to use Opta to deploy your Kubernetes services. +- If you don't have an EKS cluser, Opta can create it, check [Getting Started](https://docs.opta.dev/getting-started/) instead. + + +# What is included? + +The following components will be installed: + +- [Ingress Nginx](https://github.com/kubernetes/ingress-nginx) to expose services to the public +- [Linkerd](https://linkerd.io/) as the service mesh. +- [AWS Load Balancer Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller/) to manage the ELB for the Kubernetes cluster. + +Here is the break down of the terraform files: + + . + └── terraform + ├──aws-lb-iam-policy.json # The IAM policy for the load balancer + └──aws-load-balancer-controller.tf # Create IAM role and install the AWS Load Balancer Controller + └──data.tf # Data fetched from providers + └──ingress-nginx.tf # Install the Nginx Ingres Controller + └──linkerd.tf # Install Linkerd + └──outputs.tf # Terraform outputs + └──providers.tf # Terraform providers + └──variables.tf # Terraform variables + +# Requirements + +To configure the cluster (this guide), you need to use an AWS user with permissions to create AWS policies and roles, and admin permission on the target EKS cluster. + +Additionally, Opta only supports Linkerd for the service mesh at this time. + +# Configure the cluster for Opta + +This step configures the networking stack (nginx/linkerd/load balancer) on an existing EKS cluster. + +- Init terraform +```shell +cd ./terraform +terraform init +``` + +- [Optional] Configure a Terraform backend. By default, Terraform stores the state as a local file on disk. If you want to use a different backend such as S3, add this file locally. +```terraform +# ./terraform/backend.tf +terraform { + backend "s3" { + bucket = "mybucket" + key = "path/to/my/key" + region = "us-east-1" + } +} +``` +Check this [page](https://www.terraform.io/language/settings/backends) for more information or other backends. + +- Run terraformm plan +``` +terraform plan -var kubeconfig=~/.kube/config -var cluster_name=my-cluster -var oidc_provider_url=https://oidc.eks.... -out=tf.plan + +Plan: XX to add, 0 to change, 0 to destroy. +``` + +For the target EKS cluster: +- For `cluster_name`, run `aws eks list-clusters` to see the availables clusters. +- For `oidc_provider_url`, see `OpenID Connect provider URL` in the EKS cluster page in the AWS console. 
+
+
+At this point, nothing has been changed yet; you can review what Terraform will create.
+
+- Run terraform apply
+```
+terraform apply tf.plan
+
+Apply complete! Resources: XX added, 0 changed, 0 destroyed.
+
+Outputs:
+
+load_balancer_raw_dns = "xxx"
+
+```
+
+Note the load balancer DNS name; this is the public endpoint for accessing your Kubernetes cluster.
+
+# Additional cluster configuration
+
+These steps are not automated by the Terraform step, but you can complete them using these guides.
+- Configure DNS
+  - Follow this guide: [Routing traffic to an ELB load balancer](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer.html)
+- Configure a public certificate:
+  - Follow this guide: [Requesting a public certificate](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-public.html)
+  - Using the certificate ARN, run the terraform commands with `-var load_balancer_cert_arn=...`
+
+# Configure your service to build and push a Docker image
+
+Depending on which CI system you would like to use, configure your service to build and push a Docker image on each commit.
+Here are some pointers for popular systems: [Github Actions](https://github.com/marketplace/actions/build-and-push-docker-images), [CircleCI](https://circleci.com/docs/2.0/ecs-ecr/), [Jenkins](https://www.jenkins.io/doc/book/pipeline/docker/), [GitLab](https://docs.gitlab.com/ee/ci/docker/using_docker_build.html)
+
+# Deploy a service to Kubernetes with Opta
+
+Instead of defining a Helm chart folder for each service, you define a single Opta file; Opta takes care of generating the Helm chart and uses Terraform to apply the changes.
+
+1. Create the opta file
+   ```yaml
+   # hello.yaml
+   name: hello
+   org_name: runx
+   input_variables:
+     - name: image
+   modules:
+     - type: k8s-service
+       name: hello
+       port:
+         http: 80
+       image: "{vars.image}"
+       healthcheck_path: "/"
+       public_uri: "/hello"
+   ```
+
+2. Run opta to deploy your service to your Kubernetes cluster
+   ```shell
+   # when running in CI, the image would be the one that was just pushed
+   opta apply -c hello.yaml --var image=ghcr.io/run-x/hello-opta/hello-opta:main
+
+   ╒══════════╤══════════════════════════╤══════════╤════════╤══════════╕
+   │ module   │ resource                 │ action   │ risk   │ reason   │
+   ╞══════════╪══════════════════════════╪══════════╪════════╪══════════╡
+   │ hello    │ helm_release.k8s-service │ create   │ LOW    │ creation │
+   ╘══════════╧══════════════════════════╧══════════╧════════╧══════════╛
+
+   ...
+
+   Apply complete! Resources: 1 added, 0 changed, 0 destroyed.
+
+   Outputs:
+
+   # the image that is deployed will be confirmed here
+   current_image = "ghcr.io/run-x/hello-opta/hello-opta:main"
+
+   Opta updates complete!
+   ```
+   This step will:
+   - Generate the Helm chart for your service
+   - Use the Terraform Helm provider to release the service
+   - Generate the Terraform state files in `tfstate`; please commit these files after each apply.
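+
+   To double-check the release, you can list the Helm releases that Opta created and the state files it generated (assuming the release lands in the `hello` namespace, as the `kubectl` output in the next step suggests):
+   ```shell
+   # the namespace below follows the hello.yaml example above
+   helm list -n hello
+   ls ./tfstate
+   ```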
+
+3. Test that your service is deployed, using `kubectl` or by sending a request to the public endpoint with `curl`:
+```shell
+kubectl -n hello get svc,pod
+NAME            TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
+service/hello   ClusterIP   172.20.160.83   <none>        80/TCP    6m42s
+
+NAME                                           READY   STATUS    RESTARTS   AGE
+pod/hello-hello-k8s-service-6d55889d55-cvjn8   2/2     Running   0          6m42s
+
+# use "load_balancer_raw_dns" from the step "Configure the cluster for Opta"
+curl -k https://$load_balancer_raw_dns/hello
+Hello from Opta!
+```
+
+# Delete a service from Kubernetes
+
+Use the `opta destroy` command to delete your service.
+All the Kubernetes resources for this service will be deleted.
+
+```shell
+opta destroy -c hello.yaml
+
+╒══════════╤══════════════════════════╤══════════╤════════╤══════════╕
+│ module   │ resource                 │ action   │ risk   │ reason   │
+╞══════════╪══════════════════════════╪══════════╪════════╪══════════╡
+│ hello    │ helm_release.k8s-service │ delete   │ HIGH   │ N/A      │
+╘══════════╧══════════════════════════╧══════════╧════════╧══════════╛
+
+Apply complete! Resources: 0 added, 0 changed, 1 destroyed.
+
+```
+# Uninstall Opta configuration from EKS
+
+- Run `terraform destroy` to remove the configuration added for Opta
+
+```
+terraform destroy -var kubeconfig=~/.kube/config -var cluster_name=my-cluster -var oidc_provider_url=https://oidc.eks....
+```
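+
+If you also deployed services with Opta, their Terraform state lives in the local `tfstate` directory (the default local backend path introduced by this change). Once the services are destroyed, you can remove it as well; adjust the path if you configured a different backend:
+```shell
+# remove the locally stored Terraform state generated by opta apply
+rm -rf ./tfstate
+```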
diff --git a/examples/byok-eks/img/byok.png b/examples/byok-eks/img/byok.png
new file mode 100644
index 000000000..429d81d2f
Binary files /dev/null and b/examples/byok-eks/img/byok.png differ
diff --git a/examples/byo-eks/terraform/aws-lb-iam-policy.json b/examples/byok-eks/terraform/aws-lb-iam-policy.json
similarity index 100%
rename from examples/byo-eks/terraform/aws-lb-iam-policy.json
rename to examples/byok-eks/terraform/aws-lb-iam-policy.json
diff --git a/examples/byo-eks/terraform/aws-load-balancer-controller.tf b/examples/byok-eks/terraform/aws-load-balancer-controller.tf
similarity index 100%
rename from examples/byo-eks/terraform/aws-load-balancer-controller.tf
rename to examples/byok-eks/terraform/aws-load-balancer-controller.tf
diff --git a/examples/byo-eks/terraform/data.tf b/examples/byok-eks/terraform/data.tf
similarity index 100%
rename from examples/byo-eks/terraform/data.tf
rename to examples/byok-eks/terraform/data.tf
diff --git a/examples/byo-eks/terraform/ingress-nginx.tf b/examples/byok-eks/terraform/ingress-nginx.tf
similarity index 100%
rename from examples/byo-eks/terraform/ingress-nginx.tf
rename to examples/byok-eks/terraform/ingress-nginx.tf
diff --git a/examples/byo-eks/terraform/linkerd.tf b/examples/byok-eks/terraform/linkerd.tf
similarity index 100%
rename from examples/byo-eks/terraform/linkerd.tf
rename to examples/byok-eks/terraform/linkerd.tf
diff --git a/examples/byo-eks/terraform/outputs.tf b/examples/byok-eks/terraform/outputs.tf
similarity index 100%
rename from examples/byo-eks/terraform/outputs.tf
rename to examples/byok-eks/terraform/outputs.tf
diff --git a/examples/byo-eks/terraform/providers.tf b/examples/byok-eks/terraform/providers.tf
similarity index 100%
rename from examples/byo-eks/terraform/providers.tf
rename to examples/byok-eks/terraform/providers.tf
diff --git a/examples/byo-eks/terraform/variables.tf b/examples/byok-eks/terraform/variables.tf
similarity index 100%
rename from examples/byo-eks/terraform/variables.tf
rename to examples/byok-eks/terraform/variables.tf
diff --git a/opta/commands/apply.py b/opta/commands/apply.py
index 9beb76334..e463c78de 100644
--- a/opta/commands/apply.py
+++ b/opta/commands/apply.py
@@ -20,6 +20,7 @@
 from opta.core.cloud_client import CloudClient
 from opta.core.gcp import GCP
 from opta.core.generator import gen, gen_opta_resource_tags
+from opta.core.helm_cloud_client import HelmCloudClient
 from opta.core.kubernetes import cluster_exist, tail_module_log, tail_namespace_events
 from opta.core.local import Local
 from opta.core.plan_displayer import PlanDisplayer
@@ -180,6 +181,8 @@ def _apply(
         if local:  # boolean passed via cli
             pass
         cloud_client = Local(layer)
+    elif layer.cloud == "helm":
+        cloud_client = HelmCloudClient(layer)
     else:
         raise Exception(f"Cannot handle upload config for cloud {layer.cloud}")
diff --git a/opta/commands/destroy.py b/opta/commands/destroy.py
index 0d8d0c3ba..32e7aa50c 100644
--- a/opta/commands/destroy.py
+++ b/opta/commands/destroy.py
@@ -170,6 +170,8 @@ def _fetch_children_layers(layer: "Layer") -> List[str]:
         opta_configs = _azure_get_configs(layer)
     elif layer.cloud == "local":
         opta_configs = _local_get_configs(layer)
+    elif layer.cloud == "helm":
+        return []
     else:
         raise Exception(f"Not handling deletion for cloud {layer.cloud}")
diff --git a/opta/core/helm_cloud_client.py b/opta/core/helm_cloud_client.py
new file mode 100644
index 000000000..e05ad02d1
--- /dev/null
+++ b/opta/core/helm_cloud_client.py
@@ -0,0 +1,57 @@
+import os
+from typing import TYPE_CHECKING, Dict, Optional
+
+from opta.constants import REGISTRY
+from opta.core.cloud_client import CloudClient
+from opta.exceptions import LocalNotImplemented
+from opta.nice_subprocess import nice_run
+from opta.utils import logger
+
+if TYPE_CHECKING:
+    from opta.layer import Layer, StructuredConfig
+
+
+class HelmCloudClient(CloudClient):
+    def __init__(self, layer: "Layer"):
+        super().__init__(layer)
+
+    def get_remote_config(self) -> Optional["StructuredConfig"]:
+        return None
+
+    def upload_opta_config(self) -> None:
+        if "local" in REGISTRY[self.layer.cloud]["backend"]:
+            providers = self.layer.gen_providers(0)
+            local_path = providers["terraform"]["backend"]["local"]["path"]
+            real_path = os.path.dirname(os.path.realpath(local_path))
+            logger.warning(
+                f"The terraform state is stored locally, make sure to keep the files in {real_path}"
+            )
+        return None
+
+    def delete_opta_config(self) -> None:
+        return None
+
+    def delete_remote_state(self) -> None:
+        return None
+
+    def get_terraform_lock_id(self) -> str:
+        return ""
+
+    def get_all_remote_configs(self) -> Dict[str, Dict[str, "StructuredConfig"]]:
+        raise LocalNotImplemented(
+            "get_all_remote_configs: Feature Unsupported for the helm provider"
+        )
+
+    def set_kube_config(self) -> None:
+        # do nothing; the user brings their own kubeconfig
+        pass
+
+    def cluster_exist(self) -> bool:
+        # "kubectl version" returns an error code if it can't connect to a cluster
+        nice_run(["kubectl", "version"], check=True)
+        return True
+
+    def get_kube_context_name(self) -> str:
+        return nice_run(
+            ["kubectl", "config", "current-context"], check=True, capture_output=True
+        ).stdout.strip()
diff --git a/opta/core/terraform.py b/opta/core/terraform.py
index cdfd33902..9248d6713 100644
--- a/opta/core/terraform.py
+++ b/opta/core/terraform.py
@@ -195,6 +195,8 @@ def verify_storage(cls, layer: "Layer") -> bool:
             return cls._azure_verify_storage(layer)
         elif layer.cloud == "local":
             return cls._local_verify_storage(layer)
+        elif layer.cloud == "helm":
+            return True
         else:
             raise Exception(f"Can not verify state storage for cloud {layer.cloud}")
@@ -389,6 +391,16 @@ def download_state(cls, layer: "Layer") -> bool:
             except Exception:
                 UserErrors(f"Could copy local state file to {state_file}")
+        elif layer.cloud == "helm":
providers["terraform"]["backend"]: + try: + tf_file = providers["terraform"]["backend"]["local"]["path"] + if os.path.exists(tf_file): + copyfile(tf_file, state_file) + else: + return False + except Exception: + UserErrors(f"Could not copy terraform state file to {state_file}") else: raise UserErrors("Need to get state from S3 or GCS or Azure storage") @@ -830,6 +842,9 @@ def delete_state_storage(cls, layer: "Layer") -> None: cloud_client = Azure(layer) elif layer.cloud == "local": cloud_client = Local(layer) + elif layer.cloud == "helm": + # There is no opta managed storage to delete + return else: raise Exception( f"Can not handle opta config deletion for cloud {layer.cloud}" diff --git a/opta/core/validator.py b/opta/core/validator.py index c9ac020ed..91e2fad89 100644 --- a/opta/core/validator.py +++ b/opta/core/validator.py @@ -58,6 +58,10 @@ class LocalModule(Module): cloud = "local" +class HelmModule(Module): + cloud = "helm" + + class Opta(Validator): """Opta Yaml Validator""" @@ -126,6 +130,12 @@ class LocalOpta(Opta): service_schema_dicts = REGISTRY["local"]["service_validator"] +class HelmOpta(Opta): + extra_validators = [HelmModule] + environment_schema_dict = REGISTRY["helm"]["validator"] + service_schema_dicts = REGISTRY["helm"]["service_validator"] + + def _get_yamale_errors( data: Any, schema_path: str, extra_validators: Optional[List[Type[Validator]]] = None ) -> List[str]: @@ -157,6 +167,8 @@ def _get_yamale_errors( azure_validators[AureOpta.tag] = AureOpta local_validators = DefaultValidators.copy() local_validators[LocalOpta.tag] = LocalOpta +helm_validators = DefaultValidators.copy() +helm_validators[HelmOpta.tag] = HelmOpta with NamedTemporaryFile(mode="w") as f: yaml.dump(REGISTRY["validator"], f) @@ -176,6 +188,9 @@ def _get_yamale_errors( local_main_schema = yamale.make_schema( f.name, validators=local_validators, parser="ruamel" ) + helm_main_schema = yamale.make_schema( + f.name, validators=helm_validators, parser="ruamel" + ) def _print_errors(errors: List[str]) -> None: @@ -201,6 +216,7 @@ def validate_yaml( "google": gcp_main_schema, "azurerm": azure_main_schema, "local": local_main_schema, + "helm": helm_main_schema, } DEFAULT_SCHEMA = vanilla_main_schema data = yamale.make_data(config_file_path, parser="ruamel") diff --git a/opta/layer.py b/opta/layer.py index 30321cf36..cf615e9de 100644 --- a/opta/layer.py +++ b/opta/layer.py @@ -21,6 +21,7 @@ from google.auth import default from google.auth.exceptions import DefaultCredentialsError from google.oauth2 import service_account +from kubernetes.config.kube_config import KUBE_CONFIG_DEFAULT_LOCATION from modules.base import ModuleProcessor from modules.runx.runx import RunxProcessor @@ -29,6 +30,7 @@ from opta.core.azure import Azure from opta.core.cloud_client import CloudClient from opta.core.gcp import GCP +from opta.core.helm_cloud_client import HelmCloudClient from opta.core.local import Local from opta.core.validator import validate_yaml from opta.crash_reporter import CURRENT_CRASH_REPORTER @@ -136,8 +138,17 @@ def __init__( self.original_spec = original_spec self.parent = parent self.path = path - if parent is None and org_name is None: - raise UserErrors("Config must have org name or a parent who has an org name") + self.cloud: str + if parent is None: + if len(providers) == 0: + # no parent, no provider = we are in helm (byok) mode + self.cloud = "helm" + # read the provider from the registry instead - the opta file doesn't define any with byok + providers = REGISTRY[self.cloud]["output_providers"] + 
+            elif org_name is None:
+                raise UserErrors(
+                    "Config must have org name or a parent who has an org name"
+                )
         self.org_name = org_name
         if self.parent and self.org_name is None:
             self.org_name = self.parent.org_name
@@ -145,7 +156,6 @@ def __init__(
         total_base_providers = deep_merge(
             self.providers, self.parent.providers if self.parent else {}
         )
-        self.cloud: str
         if "google" in total_base_providers and "aws" in total_base_providers:
             raise UserErrors(
                 "You can have AWS as the cloud provider, or google, but not both"
             )
         elif "azurerm" in total_base_providers:
             self.cloud = "azurerm"
         elif "local" in total_base_providers:
             self.cloud = "local"
-        else:
+
+        if not hasattr(self, "cloud"):
             raise UserErrors(
                 "No cloud provider (AWS, GCP, or Azure) found, \n"
                 + " or did you miss providing the --local flag for local deployment?"
             )
@@ -195,6 +206,8 @@ def get_cloud_client(self) -> CloudClient:
             return Azure(self)
         elif self.cloud == "local":
             return Local(self)
+        elif self.cloud == "helm":
+            return HelmCloudClient(self)
         else:
             raise Exception(
                 f"Unknown cloud {self.cloud}. Can not handle getting the cloud client"
             )
@@ -532,6 +545,7 @@ def metadata_hydration(self) -> Dict[Any, Any]:
             "layer_name": self.name,
             "state_storage": self.state_storage(),
             "env": self.get_env(),
+            "kubeconfig": KUBE_CONFIG_DEFAULT_LOCATION,
             **provider_hydration,
         }
@@ -639,7 +653,7 @@ def gen_providers(self, module_idx: int, clean: bool = True) -> Dict[Any, Any]:
             providers = deep_merge(providers, self.parent.providers)
         for cloud, provider in providers.items():
             provider = self.handle_special_providers(cloud, provider, clean)
-            ret["provider"][cloud] = provider
+            ret["provider"][cloud] = hydrate(provider, hydration)
             if cloud in REGISTRY:
                 ret["terraform"] = hydrate(
                     {x: REGISTRY[cloud][x] for x in ["required_providers", "backend"]},
diff --git a/opta/registry.py b/opta/registry.py
index 330cc4aa5..8dbade71a 100644
--- a/opta/registry.py
+++ b/opta/registry.py
@@ -25,7 +25,7 @@ def make_registry_dict() -> Dict[Any, Any]:
     with open(os.path.join(registry_path, "index.md"), "r") as f:
         registry_dict["text"] = f.read()
     module_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "modules")
-    for cloud in ["aws", "azurerm", "google", "local"]:
+    for cloud in ["aws", "azurerm", "google", "local", "helm"]:
         cloud_path = os.path.join(registry_path, cloud)
         cloud_dict = yaml.load(open(os.path.join(cloud_path, "index.yaml")))
         cloud_dict["modules"] = {}
@@ -35,6 +35,9 @@ def make_registry_dict() -> Dict[Any, Any]:
             alt_cloudname = "azure"
         elif cloud == "google":
             alt_cloudname = "gcp"
+        elif cloud == "helm":
+            # use the local modules since byok is cloud agnostic; we don't want to create cloud resources such as IAM roles
+            alt_cloudname = "local"
         else:
             alt_cloudname = cloud
         cloud_dict["modules"] = {**_make_module_registry_dict(module_path, alt_cloudname)}
diff --git a/tests/fixtures/sample_opta_files/byok_service.yaml b/tests/fixtures/sample_opta_files/byok_service.yaml
new file mode 100644
index 000000000..0ac03989c
--- /dev/null
+++ b/tests/fixtures/sample_opta_files/byok_service.yaml
@@ -0,0 +1,10 @@
+name: hello
+org_name: opta-tests
+modules:
+  - type: k8s-service
+    name: hello
+    port:
+      http: 80
+    image: ghcr.io/run-x/hello-opta/hello-opta:main
+    healthcheck_path: "/"
+    public_uri: "/hello"
diff --git a/tests/test_byok.py b/tests/test_byok.py
new file mode 100644
index 000000000..aa1896fb6
--- /dev/null
+++ b/tests/test_byok.py
@@ -0,0 +1,46 @@
+# type: ignore
+import os
+
+from modules.local_k8s_service.local_k8s_service import LocalK8sServiceProcessor
+from opta.layer import Layer
+
+
+class TestByok:
+    def test_all_good(self):
+        layer = Layer.load_from_yaml(
+            os.path.join(
+                os.getcwd(),
+                "tests",
+                "fixtures",
+                "sample_opta_files",
+                "byok_service.yaml",
+            ),
+            None,
+        )
+        idx = len(layer.modules)
+        app_module = layer.get_module("hello", idx)
+        LocalK8sServiceProcessor(app_module, layer).process(idx)
+
+        assert app_module.data["type"] == "k8s-service"
+        assert app_module.data["name"] == "hello"
+        assert app_module.data["image"] == "ghcr.io/run-x/hello-opta/hello-opta:main"
+        assert app_module.data["public_uri"] == ["all/hello"]
+        assert app_module.data["env_name"] == "hello"
+        assert app_module.data["module_name"] == "hello"
+
+        # check that the helm provider is generated correctly
+        assert layer.cloud == "helm"
+        assert layer.providers == {
+            "helm": {"kubernetes": {"config_path": "{kubeconfig}"}}
+        }
+        os.environ["KUBECONFIG"] = "~/.kube/custom-config"
+        gen_provider = layer.gen_providers(0)
+        assert gen_provider["provider"] == {
+            "helm": {"kubernetes": {"config_path": "~/.kube/config"}}
+        }
+        assert gen_provider["terraform"]["required_providers"] == {
+            "helm": {"source": "hashicorp/helm", "version": "2.4.1"}
+        }
+        assert gen_provider["terraform"]["backend"] == {
+            "local": {"path": "./tfstate/hello.tfstate"}
+        }
diff --git a/tests/test_layer.py b/tests/test_layer.py
index 907f5e8d5..34c523631 100644
--- a/tests/test_layer.py
+++ b/tests/test_layer.py
@@ -43,6 +43,7 @@ def test_hydration_aws(self, mocker: MockFixture):
         assert layer.metadata_hydration() == {
             "aws": SimpleNamespace(region="us-east-1", account_id="011111111111"),
             "env": "dummy-parent",
+            "kubeconfig": "~/.kube/config",
             "layer_name": "dummy-config-1",
             "parent": SimpleNamespace(
                 kms_account_key_arn="${data.terraform_remote_state.parent.outputs.kms_account_key_arn}",
@@ -83,6 +84,7 @@ def test_hydration_gcp(self, mocker: MockFixture):
         )
         assert layer.metadata_hydration() == {
             "env": "gcp-dummy-parent",
+            "kubeconfig": "~/.kube/config",
             "google": SimpleNamespace(region="us-central1", project="jds-throwaway-1"),
             "layer_name": "gcp-dummy-config",
             "parent": SimpleNamespace(