Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Develop #4

Merged
merged 7 commits into from
Nov 13, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -40,4 +40,4 @@ jobs:
file: ./Dockerfile
push: true
tags: ${{ github.ref == 'refs/heads/main' && 'gravitycloud/gravity-ci-cd-agent:latest' || 'gravitycloud/gravity-ci-cd-agent:dev' }}
platforms: linux/amd64
platforms: linux/amd64,linux/arm64
20 changes: 12 additions & 8 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
FROM node:20-bullseye
FROM node:20-bookworm

# Install dependencies for Buildah and Docker
RUN apt-get update \
Expand All @@ -12,24 +12,28 @@ RUN apt-get update \
python3-pip python3-dev unzip \
iptables

# Add the official repositories for Buildah
RUN . /etc/os-release \
&& echo "deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_$VERSION_ID/ /" > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list \
&& curl -L https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Debian_$VERSION_ID/Release.key | apt-key add -
RUN echo "deb http://deb.debian.org/debian sid main" | tee /etc/apt/sources.list.d/sid.list

# Install Buildah
RUN apt-get update \
&& apt-get -y install buildah
&& apt-get -t sid install -y buildah

# Verify Buildah installation
RUN buildah --version
RUN echo "Buildah version: $(buildah --version)"

# Install AWS CLI
RUN pip3 install awscli --upgrade
RUN pip3 install awscli --upgrade --break-system-packages

# Set AWS CLI pager to empty
RUN aws configure set cli_pager ""

# Install Helm
RUN curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 \
&& chmod 700 get_helm.sh \
&& ./get_helm.sh

RUN helm version --short

# Create the working directory
WORKDIR /usr/src/app

Expand Down
62 changes: 53 additions & 9 deletions deployment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: gravity-ci-cd-agent
namespace: gravity
namespace: ${NAMESPACE}
spec:
replicas: 1
selector:
Expand Down Expand Up @@ -87,14 +87,15 @@ apiVersion: batch/v1
kind: Job
metadata:
name: gravity-job-agent
namespace: gravity
namespace: ${NAMESPACE}
spec:
template:
metadata:
labels:
app: gravity-job-agent
spec:
restartPolicy: OnFailure
serviceAccountName: gravity-job-agent-sa
containers:
- name: gravity-job-agent
image: gravitycloud/gravity-ci-cd-agent:latest
Expand Down Expand Up @@ -175,7 +176,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: postgres-gravity
namespace: gravity
namespace: ${NAMESPACE}
spec:
replicas: 1
selector:
Expand Down Expand Up @@ -220,7 +221,7 @@ apiVersion: v1
kind: Service
metadata:
name: postgres-gravity-service
namespace: gravity
namespace: ${NAMESPACE}
spec:
selector:
app: postgres-gravity
Expand All @@ -233,7 +234,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: redis-gravity
namespace: gravity
namespace: ${NAMESPACE}
spec:
replicas: 1
selector:
Expand Down Expand Up @@ -273,7 +274,7 @@ apiVersion: v1
kind: Service
metadata:
name: redis-gravity-service
namespace: gravity
namespace: ${NAMESPACE}
spec:
selector:
app: redis-gravity
Expand All @@ -286,7 +287,7 @@ apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: redis-gravity-pvc
namespace: gravity
namespace: ${NAMESPACE}
spec:
accessModes:
- ReadWriteOnce
Expand All @@ -299,7 +300,7 @@ apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: postgres-gravity-pvc
namespace: gravity
namespace: ${NAMESPACE}
spec:
accessModes:
- ReadWriteOnce
Expand All @@ -312,11 +313,54 @@ apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: agent-gravity-pvc
namespace: gravity
namespace: ${NAMESPACE}
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
storageClassName: standard
---
# ServiceAccount for the gravity job agent; referenced via
# serviceAccountName in the gravity-job-agent Job pod spec.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: gravity-job-agent-sa
  namespace: ${NAMESPACE}
---
# Binds the agent ServiceAccount to the cluster-wide permissions
# defined in gravity-job-agent-role below.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: gravity-job-agent-rolebinding
subjects:
  - kind: ServiceAccount
    name: gravity-job-agent-sa
    namespace: ${NAMESPACE}
roleRef:
  kind: ClusterRole
  name: gravity-job-agent-role
  apiGroup: rbac.authorization.k8s.io
---
# Permissions the agent needs to create and manage deployed workloads.
# NOTE(review): this grant is broad (secrets + delete verbs, cluster-wide);
# if the agent only ever manages resources in ${NAMESPACE}, a namespaced
# Role/RoleBinding would be a smaller blast radius — confirm with callers.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: gravity-job-agent-role
rules:
  # Core API resources created/updated during a deployment run.
  - apiGroups: [""]
    resources: ["pods", "pods/log", "pods/exec", "services", "secrets", "configmaps", "persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
  - apiGroups: ["apps"]
    resources: ["deployments", "statefulsets", "replicasets"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
  - apiGroups: ["batch"]
    resources: ["jobs"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
  - apiGroups: ["networking.k8s.io"]
    resources: ["ingresses"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
  - apiGroups: ["rbac.authorization.k8s.io"]
    resources: ["roles", "rolebindings"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
  # NOTE(review): PodSecurityPolicy was removed in Kubernetes 1.25; this rule
  # is a no-op on newer clusters — confirm the target cluster version.
  - apiGroups: ["policy"]
    resources: ["podsecuritypolicies"]
    verbs: ["use"]
1 change: 1 addition & 0 deletions example.env
Original file line number Diff line number Diff line change
Expand Up @@ -19,3 +19,4 @@ ARGOCD_TOKEN=XXXX
REDIS_HOST=redis-gravity-service
REDIS_PORT=6379
REDIS_PASSWORD=
NAMESPACE=gravity
53 changes: 37 additions & 16 deletions src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -557,7 +557,13 @@ if (!process.env.PROCESS_JOB) {
setInterval(syncGitRepo, 30000)
}

syncGitRepo()
if (process.env.ENV === "development") {
redisClient.on('error', (err: any) => console.error(err));
redisClient.on('ready', () => console.info(`[APP] Connected to Redis`));
redisClient.connect();
syncGitRepo()

}

// ##########################################################
// Below is the agent job code that runs the CI/CD pipeline, this gets deployed with PROCESS_JOB ENV to indicate that the agent job should be run
Expand Down Expand Up @@ -624,26 +630,41 @@ const processJob = async () => {
const serviceContext = path.join(gitRepoPath, service.servicePath)
const dockerfilePath = path.join(serviceContext, 'Dockerfile')

let dockerBuildCommand = ""

const cacheExists = await customExec(deploymentRunId, "DOCKER_IMAGE_BUILD", serviceName, `ls /image-cache/${owner}-${serviceName}-latest.tar`)
if (cacheExists) {
syncLogsToGravityViaWebsocket(deploymentRunId, "DOCKER_IMAGE_BUILD", serviceName, `Cache found for ${owner}/${serviceName}:latest, using it for build`)
await customExec(deploymentRunId, "DOCKER_CACHE_LOAD", serviceName, `${dockerBuildCli} load -i /image-cache/${owner}-${serviceName}-latest.tar`)

dockerBuildCommand = `${dockerBuildCli} ${process.env.ENV === "production" ? "bud --isolation chroot" : "build"} --platform=linux/amd64 -t ${owner}/${serviceName}:latest -f ${dockerfilePath} ${serviceContext} --cache-from ${owner}/${serviceName}:latest`
} else {
dockerBuildCommand = `${dockerBuildCli} ${process.env.ENV === "production" ? "bud --isolation chroot" : "build"} --platform=linux/amd64 -t ${owner}/${serviceName}:latest -f ${dockerfilePath} ${serviceContext}`

await customExec(deploymentRunId, "DOCKER_IMAGE_CACHE", serviceName, `${dockerBuildCli} save -o /image-cache/${owner}-${serviceName}-latest.tar ${owner}/${serviceName}:latest`)
}

// const dockerBuildCommand = `${dockerBuildCli} ${process.env.ENV === "production" ? "bud --isolation chroot" : "build"} --platform=linux/amd64 -t ${owner}/${serviceName}:latest -f ${dockerfilePath} ${serviceContext}`
// let dockerBuildCommand = ""

// let cacheExists = false
// const imageCacheBasePath = process.env.ENV === "production" ? "/image-cache" : "./image-cache"
// try {
// await customExec(deploymentRunId, "DOCKER_IMAGE_BUILD", serviceName, `ls ${imageCacheBasePath}/${owner}-${serviceName}-latest.tar`)
// cacheExists = true
// } catch (error) {
// syncLogsToGravityViaWebsocket(deploymentRunId, "DOCKER_IMAGE_BUILD", serviceName, `No cache found for ${owner}/${serviceName}:latest, proceeding with fresh build`)
// }

// if (cacheExists) {
// syncLogsToGravityViaWebsocket(deploymentRunId, "DOCKER_IMAGE_BUILD", serviceName, `Cache found for ${owner}/${serviceName}:latest, using it for build`)
// if (process.env.ENV === "development") {
// await customExec(deploymentRunId, "DOCKER_CACHE_LOAD", serviceName, `${dockerBuildCli} load -i ${imageCacheBasePath}/${owner}-${serviceName}-latest.tar`)
// } else {
// dockerBuildCommand = `${dockerBuildCli} ${process.env.ENV === "production" ? "bud --isolation chroot" : "build"} --platform=linux/amd64 -t ${owner}/${serviceName}:latest -f ${dockerfilePath} ${serviceContext} ${process.env.ENV === "production" ? `--cache-from=type=tar,dest=${imageCacheBasePath}/${owner}-${serviceName}-latest.tar` : `--cache-from ${owner}/${serviceName}:latest`}`
// }
// } else {
// if (process.env.ENV === "development") {
// dockerBuildCommand = `${dockerBuildCli} build --platform=linux/amd64 -t ${owner}/${serviceName}:latest -f ${dockerfilePath} ${serviceContext}`
// } else {
// dockerBuildCommand = `${dockerBuildCli} ${process.env.ENV === "production" ? "bud --isolation chroot" : "build"} --platform=linux/amd64 -t ${owner}/${serviceName}:latest -f ${dockerfilePath} ${serviceContext} --cache-to=type=tar,dest=${imageCacheBasePath}/${owner}-${serviceName}-latest.tar`
// }
// }

const dockerBuildCommand = `${dockerBuildCli} ${process.env.ENV === "production" ? "bud --isolation chroot" : "build"} --platform=linux/amd64 -t ${owner}/${serviceName}:latest -f ${dockerfilePath} ${serviceContext}`

await customExec(deploymentRunId, "DOCKER_IMAGE_BUILD", serviceName, dockerBuildCommand)

sendSlackNotification("Docker Build Completed", `Docker build completed for ${serviceName} / ${lastRunBranch} in ${repository}`)

// if (process.env.ENV === "development") {
// await customExec(deploymentRunId, "DOCKER_IMAGE_CACHE", serviceName, `${dockerBuildCli} save -o ${imageCacheBasePath}/${owner}-${serviceName}-latest.tar ${owner}/${serviceName}:latest`)
// }

// Continue with existing AWS deployment logic
const newValuesFiles: string[] = []
Expand Down
Loading