From f929add97f907f66c3680dd8e22aed276deca444 Mon Sep 17 00:00:00 2001
From: xblack
Date: Thu, 14 Nov 2024 11:50:26 +0530
Subject: [PATCH] fetch additional chart dependencies and install via helm

---
 deployment.yaml | 18 ++++++++----
 src/index.ts    | 75 ++++++++++++++++++++++++++++++++++++---------------------
 2 files changed, 57 insertions(+), 36 deletions(-)

diff --git a/deployment.yaml b/deployment.yaml
index c5dfde3..ee6345f 100644
--- a/deployment.yaml
+++ b/deployment.yaml
@@ -21,7 +21,7 @@ spec:
             [
               "sh",
               "-c",
-              "until nc -z $POSTGRES_HOST ${POSTGRES_PORT:-5432} && nc -z $REDIS_HOST ${REDIS_PORT:-6379}; do echo waiting for postgres and redis; if ! nc -z $POSTGRES_HOST ${POSTGRES_PORT:-5432}; then echo postgres not ready; fi; if ! nc -z $REDIS_HOST ${REDIS_PORT:-6379}; then echo redis not ready; fi; sleep 2; done;",
+              "until nc -z $POSTGRES_HOST ${POSTGRES_PORT} && nc -z $REDIS_HOST ${REDIS_PORT}; do echo waiting for postgres and redis; if ! nc -z $POSTGRES_HOST ${POSTGRES_PORT:-5432}; then echo postgres not ready; fi; if ! nc -z $REDIS_HOST ${REDIS_PORT:-6379}; then echo redis not ready; fi; sleep 2; done;",
            ]
          env:
            - name: POSTGRES_HOST
@@ -234,7 +234,7 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: redis-gravity
-  namespace:
+  namespace: ${NAMESPACE}
 spec:
   replicas: 1
   selector:
@@ -347,7 +347,13 @@ metadata:
   name: gravity-job-agent-role
 rules:
 - apiGroups: [""]
-  resources: ["pods", "pods/log", "pods/exec", "services", "secrets", "configmaps", "persistentvolumeclaims"]
+  resources: ["pods", "pods/log", "pods/exec", "services", "secrets", "configmaps", "persistentvolumeclaims", "serviceaccounts"]
+  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+- apiGroups: [""]
+  resources: ["secrets"]
+  verbs: ["get", "list", "watch"]
+- apiGroups: [""]
+  resources: ["namespaces"]
   verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
 - apiGroups: ["apps"]
   resources: ["deployments", "statefulsets", "replicasets"]
@@ -356,11 +362,11 @@ rules:
   resources: ["jobs"]
   verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
 - apiGroups: ["networking.k8s.io"]
-  resources: ["ingresses"]
+  resources: ["ingresses", "networkpolicies"]
   verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
 - apiGroups: ["rbac.authorization.k8s.io"]
   resources: ["roles", "rolebindings"]
   verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
 - apiGroups: ["policy"]
-  resources: ["podsecuritypolicies"]
-  verbs: ["use"]
\ No newline at end of file
+  resources: ["podsecuritypolicies", "poddisruptionbudgets"]
+  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
\ No newline at end of file
diff --git a/src/index.ts b/src/index.ts
index 1408c31..aa1360d 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -19,6 +19,11 @@ interface ServiceChange {
     lastCommitSha?: string
 }
 
+interface PipelineCharts {
+    charts: any[],
+    branch: string
+}
+
 interface DeployRun {
     id: string;
     name: string;
@@ -630,42 +635,12 @@ const processJob = async () => {
             const serviceContext = path.join(gitRepoPath, service.servicePath)
             const dockerfilePath = path.join(serviceContext, 'Dockerfile')
 
-            // let dockerBuildCommand = ""
-
-            // let cacheExists = false
-            // const imageCacheBasePath = process.env.ENV === "production" ? "/image-cache" : "./image-cache"
-            // try {
-            //     await customExec(deploymentRunId, "DOCKER_IMAGE_BUILD", serviceName, `ls ${imageCacheBasePath}/${owner}-${serviceName}-latest.tar`)
-            //     cacheExists = true
-            // } catch (error) {
-            //     syncLogsToGravityViaWebsocket(deploymentRunId, "DOCKER_IMAGE_BUILD", serviceName, `No cache found for ${owner}/${serviceName}:latest, proceeding with fresh build`)
-            // }
-
-            // if (cacheExists) {
-            //     syncLogsToGravityViaWebsocket(deploymentRunId, "DOCKER_IMAGE_BUILD", serviceName, `Cache found for ${owner}/${serviceName}:latest, using it for build`)
-            //     if (process.env.ENV === "developement") {
-            //         await customExec(deploymentRunId, "DOCKER_CACHE_LOAD", serviceName, `${dockerBuildCli} load -i ${imageCacheBasePath}/${owner}-${serviceName}-latest.tar`)
-            //     } else {
-            //         dockerBuildCommand = `${dockerBuildCli} ${process.env.ENV === "production" ? "bud --isolation chroot" : "build"} --platform=linux/amd64 -t ${owner}/${serviceName}:latest -f ${dockerfilePath} ${serviceContext} ${process.env.ENV === "production" ? `--cache-from=type=tar,dest=${imageCacheBasePath}/${owner}-${serviceName}-latest.tar` : `--cache-from ${owner}/${serviceName}:latest`}`
-            //     }
-            // } else {
-            //     if (process.env.ENV === "developement") {
-            //         dockerBuildCommand = `${dockerBuildCli} build --platform=linux/amd64 -t ${owner}/${serviceName}:latest -f ${dockerfilePath} ${serviceContext}`
-            //     } else {
-            //         dockerBuildCommand = `${dockerBuildCli} ${process.env.ENV === "production" ? "bud --isolation chroot" : "build"} --platform=linux/amd64 -t ${owner}/${serviceName}:latest -f ${dockerfilePath} ${serviceContext} --cache-to=type=tar,dest=${imageCacheBasePath}/${owner}-${serviceName}-latest.tar`
-            //     }
-            // }
-
             const dockerBuildCommand = `${dockerBuildCli} ${process.env.ENV === "production" ? "bud --isolation chroot" : "build"} --platform=linux/amd64 -t ${owner}/${serviceName}:latest -f ${dockerfilePath} ${serviceContext}`
 
             await customExec(deploymentRunId, "DOCKER_IMAGE_BUILD", serviceName, dockerBuildCommand)
 
             sendSlackNotification("Docker Build Completed", `Docker build completed for ${serviceName} / ${lastRunBranch} in ${repository}`)
 
-            // if (process.env.ENV === "developement") {
-            //     await customExec(deploymentRunId, "DOCKER_IMAGE_CACHE", serviceName, `${dockerBuildCli} save -o ${imageCacheBasePath}/${owner}-${serviceName}-latest.tar ${owner}/${serviceName}:latest`)
-            // }
-
 
             // Continue with existing AWS deployment logic
             const newValuesFiles: string[] = []
@@ -893,6 +868,46 @@ const processJob = async () => {
                 [JSON.stringify(newValuesFiles), "COMPLETED", deploymentRunId]
             )
 
+            const matchedAllowedRegex = process.env.GIT_BRANCHES_ALLOWED?.split(',').find(allowedBranch => {
+                // Check for exact match first
+                if (allowedBranch === lastRunBranch) {
+                    return allowedBranch;
+                }
+                // Check for wildcard pattern match
+                if (allowedBranch.endsWith('.*')) {
+                    const prefix = allowedBranch.slice(0, -2).replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
+                    const pattern = new RegExp(`^${prefix}.*$`);
+                    if (pattern.test(lastRunBranch)) {
+                        return allowedBranch;
+                    }
+                }
+            });
+
+
+            if (matchedAllowedRegex && process.env.GRAVITY_API_URL) {
+                syncLogsToGravityViaWebsocket(deploymentRunId, "CHART_DEPENDENCIES", serviceName, `Fetching chart dependencies for branch: ${matchedAllowedRegex}`, false)
+                const pipelineCharts: PipelineCharts = await axios.post(`${process.env.GRAVITY_API_URL}/api/v1/pipeline-charts`, {
+                    awsAccountId: process.env.AWS_ACCOUNT_ID,
+                    env: process.env.ENV,
+                    branch: lastRunBranch
+                })
+
+                syncLogsToGravityViaWebsocket(deploymentRunId, "CHART_DEPENDENCIES", serviceName, `Found following charts: ${JSON.stringify(pipelineCharts?.charts)}`, false)
+                if (pipelineCharts?.charts?.length > 0) {
+                    // save the new values file locally and pass in helm command
+                    const tempDir = os.tmpdir()
+                    const valuesFilePath = path.join(tempDir, `${serviceName}-values-${lastRunBranch}.yaml`)
+                    fs.writeFileSync(valuesFilePath, JSON.stringify(newValuesFiles))
+
+                    syncLogsToGravityViaWebsocket(deploymentRunId, "CHART_DEPLOYMENT", serviceName, `Deploying charts: ${JSON.stringify(pipelineCharts?.charts)}`, false)
+                    await Promise.all(pipelineCharts?.charts?.map(async (chart: any) => {
+                        syncLogsToGravityViaWebsocket(deploymentRunId, "CHART_DEPLOYMENT", serviceName, `Deploying chart: ${JSON.stringify(chart)}`, false)
+                        const helmChartInstallCommand = `helm upgrade --install ${chart.name} ${chart.repository} --namespace ${lastRunBranch} --version ${chart.version} -f ${valuesFilePath}`
+                        await customExec(deploymentRunId, "CHART_DEPLOYMENT", serviceName, helmChartInstallCommand, true)
+                    }))
+                }
+            }
+
             syncLogsToGravityViaWebsocket(deploymentRunId, "PIPELINE_COMPLETED", serviceName, JSON.stringify({ newValuesFiles }), false)
 
             // Cleanup