diff --git a/.config.sample.env b/.config.sample.env new file mode 100644 index 000000000..6d233c185 --- /dev/null +++ b/.config.sample.env @@ -0,0 +1,94 @@ +# +# Cluster related variables +# + +# The repo you created from this template +# e.g. https://github.com/onedr0p/home-cluster +export BOOTSTRAP_GIT_REPOSITORY="" + +# To enable Flux to update your cluster on `git push` set the following to one of: +# `generated` - this will generate a token and print it in the logs +# Set this to any other string and it will be used for the secret +export BOOTSTRAP_FLUX_GITHUB_WEBHOOK_SECRET="generated" # NOTE: Must only contain alphanumeric characters and dashes + +# The Weave GitOps dashboard admin password +# `generated` - this will generate a token and print it in the logs +# Set this to any other string and it will be used for the secret +export BOOTSTRAP_WEAVE_GITOPS_ADMIN_PASSWORD="generated" # NOTE: Must only contain alphanumeric characters and dashes + +# Choose one of your cloudflare domains +# e.g. onedr0p.com +export BOOTSTRAP_CLOUDFLARE_DOMAIN="" +# The email you use to sign into Cloudflare with +export BOOTSTRAP_CLOUDFLARE_EMAIL="" +# Your global Cloudflare API Key +export BOOTSTRAP_CLOUDFLARE_APIKEY="" + +# Pick a range of unused IPs that are on the same network as your nodes +# You don't need many IPs, just choose 10 IPs to start with +# e.g. 192.168.1.220-192.168.1.230 +export BOOTSTRAP_METALLB_LB_RANGE="" +# The load balancer IP for k8s_gateway, choose from one of the available IPs above +# e.g. 192.168.1.220 +export BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR="" +# The load balancer IP for the ingress controller, choose from one of the available IPs above +# that doesn't conflict with any other IP addresses here +# e.g. 192.168.1.221 +export BOOTSTRAP_METALLB_INGRESS_ADDR="" + +# Age Public Key - string should start with age +# e.g. age15uzrw396e67z9wdzsxzdk7ka0g2gr3l460e0slaea563zll3hdfqwqxdta +export BOOTSTRAP_AGE_PUBLIC_KEY="" + +# The IP Address to use with kube-vip +# Pick a unused IP that is on the same network as your nodes +# and outside the ${BOOTSTRAP_METALLB_LB_RANGE} range +# and doesn't conflict with any other IP addresses here +# e.g. 192.168.1.254 +export BOOTSTRAP_KUBE_VIP_ADDR="" + +# Choose your timezone +# e.g. America/New_York +export BOOTSTRAP_TIMEZONE="Etc/UTC" + +# +# Ansible related variables +# + +# +# Default prefixes for hostnames assigned by Ansible +# These are unused on nodes where BOOTSTRAP_ANSIBLE_HOSTNAME_ is provided +# + +export BOOTSTRAP_ANSIBLE_DEFAULT_CONTROL_NODE_HOSTNAME_PREFIX="k8s-" # NOTE: Must only contain alphanumeric characters and dashes +export BOOTSTRAP_ANSIBLE_DEFAULT_NODE_HOSTNAME_PREFIX="k8s-" # NOTE: Must only contain alphanumeric characters and dashes + +# +# Ansible hosts - repeat this block as many times as you need, +# incrementing the last digit on the variable name for each node +# + +# Host IP Address to the control plane node +# That doesn't conflict with any other IP addresses here +# e.g. 
192.168.1.200 +export BOOTSTRAP_ANSIBLE_HOST_ADDR_0="" +# User Ansible will log into the nodes +export BOOTSTRAP_ANSIBLE_SSH_USERNAME_0="" # NOTE: Must only contain alphanumeric characters and dashes +# Password Ansible will use to escalate to sudo +export BOOTSTRAP_ANSIBLE_SUDO_PASSWORD_0="" # NOTE: Must only contain alphanumeric characters and dashes +# Set this node as a control node (true/false) +export BOOTSTRAP_ANSIBLE_CONTROL_NODE_0="" +# Optional: Set the hostname of the node, if set this will override the *_HOSTNAME_PREFIX vars above +export BOOTSTRAP_ANSIBLE_HOSTNAME_0="" + +# export BOOTSTRAP_ANSIBLE_HOST_ADDR_1="" +# export BOOTSTRAP_ANSIBLE_SSH_USERNAME_1="" # NOTE: Must only contain alphanumeric characters and dashes +# export BOOTSTRAP_ANSIBLE_SUDO_PASSWORD_1="" # NOTE: Must only contain alphanumeric characters and dashes +# export BOOTSTRAP_ANSIBLE_CONTROL_NODE_1="" +# export BOOTSTRAP_ANSIBLE_HOSTNAME_1="" + +# export BOOTSTRAP_ANSIBLE_HOST_ADDR_2="" +# export BOOTSTRAP_ANSIBLE_SSH_USERNAME_2="" # NOTE: Must only contain alphanumeric characters and dashes +# export BOOTSTRAP_ANSIBLE_SUDO_PASSWORD_2="" # NOTE: Must only contain alphanumeric characters and dashes +# export BOOTSTRAP_ANSIBLE_CONTROL_NODE_2="" +# export BOOTSTRAP_ANSIBLE_HOSTNAME_2="" diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 000000000..547304ee3 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,18 @@ +# editorconfig.org +root = true + +[*] +indent_style = space +indent_size = 2 +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true + +[Makefile] +indent_style = space +indent_size = 4 + +[*.{bash,sh}] +indent_style = space +indent_size = 4 diff --git a/.envrc b/.envrc new file mode 100644 index 000000000..2b0891962 --- /dev/null +++ b/.envrc @@ -0,0 +1,5 @@ +#shellcheck disable=SC2148,SC2155 +export KUBECONFIG=$(expand_path ./kubeconfig) +export ANSIBLE_CONFIG=$(expand_path ./ansible.cfg) +export ANSIBLE_HOST_KEY_CHECKING="False" +export SOPS_AGE_KEY_FILE=$(expand_path ~/.config/sops/age/keys.txt) diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 000000000..17e945311 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,3 @@ +* text=auto eol=lf +*.sops.* diff=sopsdiffer +*.sops.toml linguist-language=JSON diff --git a/.github/labeler.yaml b/.github/labeler.yaml new file mode 100644 index 000000000..4ce4fcbab --- /dev/null +++ b/.github/labeler.yaml @@ -0,0 +1,11 @@ +--- +area/ansible: + - "ansible/**/*" +area/github: + - ".github/**/*" +area/kubernetes: + - "kubernetes/**/*" +area/terraform: + - "terraform/**/*" +area/templates: + - "tmpl/**/*" diff --git a/.github/labels.yaml b/.github/labels.yaml new file mode 100644 index 000000000..def5eb19d --- /dev/null +++ b/.github/labels.yaml @@ -0,0 +1,59 @@ +--- +# Area +- name: area/ansible + color: "72ccf3" + description: >- + Changes made in the ansible directory +- name: area/github + color: "72ccf3" + description: >- + Changes made in the github directory +- name: area/kubernetes + color: "72ccf3" + description: >- + Changes made in the kubernetes directory +- name: area/template + color: "72ccf3" + description: >- + Changes made in the tmpl directory +- name: area/terraform + color: "72ccf3" + description: >- + Changes made in the terraform directory +# Renovate +- name: renovate/ansible + color: "ffc300" +- name: renovate/container + color: "ffc300" +- name: renovate/github-action + color: "ffc300" +- name: renovate/github-release + color: "ffc300" +- name: renovate/helm + color: 
"ffc300" +- name: renovate/terraform + color: "ffc300" +# Semantic Type +- name: type/patch + color: "FFEC19" +- name: type/minor + color: "FF9800" +- name: type/major + color: "F6412D" +- name: type/break + color: "F6412D" +# Uncategorized +- name: bug + color: "ee0701" +- name: do-not-merge + color: "ee0701" +- name: docs + color: "F4D1B7" +- name: enhancement + color: "84b6eb" +- name: broken-links + color: "7B55D7" +- name: question + color: "cc317c" +- name: community + color: "0e8a16" diff --git a/.github/release-drafter.yaml b/.github/release-drafter.yaml new file mode 100644 index 000000000..3741c28b6 --- /dev/null +++ b/.github/release-drafter.yaml @@ -0,0 +1,32 @@ +--- +name-template: "Release v$RESOLVED_VERSION" +tag-template: "v$RESOLVED_VERSION" +change-template: "- $TITLE @$AUTHOR (#$NUMBER)" +change-title-escapes: '\<*_&' +categories: + - title: "Community Contributions" + labels: ["community"] + - title: "Kubernetes" + labels: ["area/kubernetes"] + - title: "Github" + labels: ["area/github"] + - title: "Ansible" + labels: ["area/ansible"] + - title: "Terraform" + labels: ["area/terraform"] + - title: "Maintenance" + labels: ["docs"] +version-resolver: + major: + labels: ["type/break"] + minor: + labels: ["type/major", "type/minor"] + patch: + labels: ["type/patch"] + default: patch +template: | + ## What's Changed + + $CHANGES + + **Full Changelog**: https://github.com/$OWNER/$REPOSITORY/compare/$PREVIOUS_TAG...v$RESOLVED_VERSION diff --git a/.github/renovate.json5 b/.github/renovate.json5 new file mode 100644 index 000000000..01465322d --- /dev/null +++ b/.github/renovate.json5 @@ -0,0 +1,48 @@ +{ + "extends": [ + "config:base", + "docker:enableMajor", + ":disableRateLimiting", + ":dependencyDashboard", + ":semanticCommits", + ":enablePreCommit", + ":automergeDigest", + ":automergeBranch", + "github>onedr0p/flux-cluster-template//.github/renovate/autoMerge.json5", + "github>onedr0p/flux-cluster-template//.github/renovate/commitMessage.json5", + "github>onedr0p/flux-cluster-template//.github/renovate/labels.json5", + "github>onedr0p/flux-cluster-template//.github/renovate/semanticCommits.json5", + "helpers:pinGitHubActionDigests" + ], + "dependencyDashboard": true, + "dependencyDashboardTitle": "Renovate Dashboard 🤖", + "suppressNotifications": ["prIgnoreNotification"], + "rebaseWhen": "conflicted", + "schedule": ["every saturday"], + "pre-commit": { + "enabled": true + }, + "flux": { + "fileMatch": ["kubernetes/.+\\.ya?ml$"] + }, + "helm-values": { + "fileMatch": ["kubernetes/.+\\.ya?ml$"] + }, + "kubernetes": { + "fileMatch": ["kubernetes/.+\\.ya?ml$"] + }, + "regexManagers": [ + { + "description": "Process various other dependencies", + "fileMatch": [ + "ansible/.+\\.ya?ml$", + "kubernetes/.+\\.ya?ml$" + ], + "matchStrings": [ + "datasource=(?\\S+) depName=(?\\S+)( versioning=(?\\S+))?\n.*?\"(?.*)\"\n" + ], + "datasourceTemplate": "{{#if datasource}}{{{datasource}}}{{else}}github-releases{{/if}}", + "versioningTemplate": "{{#if versioning}}{{{versioning}}}{{else}}semver{{/if}}" + } + ] +} diff --git a/.github/renovate/autoMerge.json5 b/.github/renovate/autoMerge.json5 new file mode 100644 index 000000000..c6ab4bd50 --- /dev/null +++ b/.github/renovate/autoMerge.json5 @@ -0,0 +1,13 @@ +{ + "packageRules": [ + { + "description": "Auto merge GitHub Actions", + "matchManagers": ["github-actions"], + "matchDatasources": ["github-tags"], + "automerge": true, + "automergeType": "branch", + "ignoreTests": true, + "matchUpdateTypes": ["minor", "patch"] + } + ] +} diff --git 
a/.github/renovate/commitMessage.json5 b/.github/renovate/commitMessage.json5 new file mode 100644 index 000000000..a69173fec --- /dev/null +++ b/.github/renovate/commitMessage.json5 @@ -0,0 +1,15 @@ +{ + "commitMessageTopic": "{{depName}}", + "commitMessageExtra": "to {{newVersion}}", + "commitMessageSuffix": "", + "packageRules": [ + { + "matchDatasources": ["helm"], + "commitMessageTopic": "chart {{depName}}" + }, + { + "matchDatasources": ["docker"], + "commitMessageTopic": "image {{depName}}" + } + ] +} diff --git a/.github/renovate/labels.json5 b/.github/renovate/labels.json5 new file mode 100644 index 000000000..48a59a557 --- /dev/null +++ b/.github/renovate/labels.json5 @@ -0,0 +1,40 @@ +{ + "packageRules": [ + { + "matchUpdateTypes": ["major"], + "labels": ["type/major"] + }, + { + "matchUpdateTypes": ["minor"], + "labels": ["type/minor"] + }, + { + "matchUpdateTypes": ["patch"], + "labels": ["type/patch"] + }, + { + "matchDatasources": ["docker"], + "addLabels": ["renovate/container"] + }, + { + "matchDatasources": ["helm"], + "addLabels": ["renovate/helm"] + }, + { + "matchDatasources": ["galaxy", "galaxy-collection"], + "addLabels": ["renovate/ansible"] + }, + { + "matchDatasources": ["terraform-provider"], + "addLabels": ["renovate/terraform"] + }, + { + "matchDatasources": ["github-releases", "github-tags"], + "addLabels": ["renovate/github-release"] + }, + { + "matchManagers": ["github-actions"], + "addLabels": ["renovate/github-action"] + } + ] +} diff --git a/.github/renovate/semanticCommits.json5 b/.github/renovate/semanticCommits.json5 new file mode 100644 index 000000000..867c29aa7 --- /dev/null +++ b/.github/renovate/semanticCommits.json5 @@ -0,0 +1,114 @@ +{ + "packageRules": [ + { + "matchDatasources": ["docker"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": "feat(container)!: " + }, + { + "matchDatasources": ["docker"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": "feat", + "semanticCommitScope": "container" + }, + { + "matchDatasources": ["docker"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": "fix", + "semanticCommitScope": "container" + }, + { + "matchDatasources": ["docker"], + "matchUpdateTypes": ["digest"], + "semanticCommitType": "chore", + "semanticCommitScope": "container" + }, + { + "matchDatasources": ["helm"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": "feat(helm)!: " + }, + { + "matchDatasources": ["helm"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": "feat", + "semanticCommitScope": "helm" + }, + + { + "matchDatasources": ["helm"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": "fix", + "semanticCommitScope": "helm" + }, + { + "matchDatasources": ["galaxy", "galaxy-collection"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": "feat(ansible)!: " + }, + { + "matchDatasources": ["galaxy", "galaxy-collection"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": "feat", + "semanticCommitScope": "ansible" + }, + + { + "matchDatasources": ["galaxy", "galaxy-collection"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": "fix", + "semanticCommitScope": "ansible" + }, + { + "matchDatasources": ["terraform-provider"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": "feat(terraform)!: " + }, + { + "matchDatasources": ["terraform-provider"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": "feat", + "semanticCommitScope": "terraform" + }, + { + "matchDatasources": ["terraform-provider"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": "fix", 
+ "semanticCommitScope": "terraform" + }, + { + "matchDatasources": ["github-releases", "github-tags"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": "feat(github-release)!: " + }, + { + "matchDatasources": ["github-releases", "github-tags"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": "feat", + "semanticCommitScope": "github-release" + }, + { + "matchDatasources": ["github-releases", "github-tags"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": "fix", + "semanticCommitScope": "github-release" + }, + { + "matchManagers": ["github-actions"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": "feat(github-action)!: " + }, + { + "matchManagers": ["github-actions"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": "feat", + "semanticCommitScope": "github-action" + }, + { + "matchManagers": ["github-actions"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": "fix", + "semanticCommitScope": "github-action" + } + ] +} diff --git a/.github/workflows/link-check.yaml b/.github/workflows/link-check.yaml new file mode 100644 index 000000000..d9893fe59 --- /dev/null +++ b/.github/workflows/link-check.yaml @@ -0,0 +1,54 @@ +--- +name: "Link Check" + +on: + workflow_dispatch: + schedule: + - cron: "0 * * * *" + +jobs: + link-check: + name: Link Check + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0 + + - name: Restore lychee cache + uses: actions/cache@4723a57e26efda3a62cbde1812113b730952852d # v3.2.2 + with: + path: .lycheecache + key: cache-lychee-${{ github.sha }} + restore-keys: cache-lychee- + + - name: Link Checker + uses: lycheeverse/lychee-action@4dcb8bee2a0a4531cba1a1f392c54e8375d6dd81 # renovate: tag=v1.5.4 + id: lychee + env: + GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + with: + args: > + --cache + --max-cache-age 1d + --verbose + --no-progress + --exclude-mail + './**/*.md' + + - name: Find Link Checker Issue + id: link-checker-issue + uses: micalevisk/last-issue-action@044e1cb7e9a4dde20e22969cb67818bfca0797be # renovate: tag=2.0.0 + with: + state: open + labels: | + broken-links + + - name: Update Issue + uses: peter-evans/create-issue-from-file@433e51abf769039ee20ba1293a088ca19d573b7f # renovate: tag=v4.0.1 + with: + title: Broken links detected 🔗 + issue-number: "${{ steps.link-checker-issue.outputs.issue-number }}" + content-filepath: ./lychee/out.md + token: "${{ secrets.GITHUB_TOKEN }}" + labels: | + broken-links diff --git a/.github/workflows/meta-labeler.yaml b/.github/workflows/meta-labeler.yaml new file mode 100644 index 000000000..1beb08d61 --- /dev/null +++ b/.github/workflows/meta-labeler.yaml @@ -0,0 +1,18 @@ +--- +name: "Meta Labeler" + +on: + workflow_dispatch: + pull_request: + branches: ["main"] + +jobs: + labeler: + name: Labeler + runs-on: ubuntu-latest + steps: + - name: Labeler + uses: actions/labeler@5c7539237e04b714afd8ad9b4aed733815b9fab4 # renovate: tag=v4.0.2 + with: + configuration-path: .github/labeler.yaml + repo-token: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.github/workflows/meta-sync-labels.yaml b/.github/workflows/meta-sync-labels.yaml new file mode 100644 index 000000000..9069e6c61 --- /dev/null +++ b/.github/workflows/meta-sync-labels.yaml @@ -0,0 +1,23 @@ +--- +name: "Meta Sync labels" + +on: + workflow_dispatch: + push: + branches: ["main"] + paths: [".github/labels.yaml"] + +jobs: + labels: + name: Sync Labels + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: 
actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0 + + - name: Sync Labels + uses: EndBug/label-sync@da00f2c11fdb78e4fae44adac2fdd713778ea3e8 # renovate: tag=v2.3.2 + with: + config-file: .github/labels.yaml + token: "${{ secrets.GITHUB_TOKEN }}" + delete-other-labels: true diff --git a/.github/workflows/release-drafter.yaml b/.github/workflows/release-drafter.yaml new file mode 100644 index 000000000..796d7eb04 --- /dev/null +++ b/.github/workflows/release-drafter.yaml @@ -0,0 +1,17 @@ +--- +name: "Draft Release" + +on: + workflow_dispatch: + push: + branches: ["main"] + +jobs: + update: + runs-on: ubuntu-latest + steps: + - uses: release-drafter/release-drafter@6df64e4ba4842c203c604c1f45246c5863410adb # v5.21.1 + with: + config-name: release-drafter.yaml + env: + GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 000000000..b11059102 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,16 @@ +--- +name: "Release" + +on: + workflow_dispatch: + schedule: + - cron: "0 0 * * 0" + +jobs: + release: + runs-on: ubuntu-latest + steps: + - name: Publish Latest Release + uses: ivangabriele/publish-latest-release@df1a4afd8aea9d1f0ba5ebeb89452aeac7bca0a9 # renovate: tag=v3 + env: + GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..28309de38 --- /dev/null +++ b/.gitignore @@ -0,0 +1,17 @@ +# Trash +.DS_Store +Thumbs.db +# k8s +kubeconfig +# vscode-sops +.decrypted~*.yaml +.config.env +*.agekey +*.pub +*.key +# Ansible +xanmanning.k3s* +# Terraform +.terraform +.terraform.tfstate* +terraform.tfstate* diff --git a/.lycheeignore b/.lycheeignore new file mode 100644 index 000000000..84425234b --- /dev/null +++ b/.lycheeignore @@ -0,0 +1 @@ +https://dash.cloudflare.com/profile/api-tokens diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 000000000..7f27c5ecb --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,14 @@ +--- +fail_fast: false +repos: + - repo: https://github.com/adrienverge/yamllint + rev: v1.28.0 + hooks: + - args: + - --config-file + - .yamllint.yaml + id: yamllint + - repo: https://github.com/gruntwork-io/pre-commit + rev: v0.1.17 + hooks: + - id: terraform-fmt diff --git a/.taskfiles/AnsibleTasks.yml b/.taskfiles/AnsibleTasks.yml new file mode 100644 index 000000000..e07ece621 --- /dev/null +++ b/.taskfiles/AnsibleTasks.yml @@ -0,0 +1,68 @@ +--- +version: "3" + +env: + ANSIBLE_CONFIG: "{{.PROJECT_DIR}}/ansible.cfg" + +vars: + ANSIBLE_PLAYBOOK_DIR: "{{.ANSIBLE_DIR}}/playbooks" + ANSIBLE_INVENTORY_DIR: "{{.ANSIBLE_DIR}}/inventory" + +tasks: + + init: + desc: Install / Upgrade Ansible galaxy deps + dir: "{{.ANSIBLE_DIR}}" + cmds: + - ansible-galaxy install -r requirements.yml --roles-path ~/.ansible/roles --force + - ansible-galaxy collection install -r requirements.yml --collections-path ~/.ansible/collections --force + + list: + desc: List all the hosts + dir: "{{.ANSIBLE_DIR}}" + cmds: + - ansible all -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml --list-hosts + + prepare: + desc: Prepare all the k8s nodes for running k3s + dir: "{{.ANSIBLE_DIR}}" + cmds: + - ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-prepare.yml + + install: + desc: Install Kubernetes on the nodes + dir: "{{.ANSIBLE_DIR}}" + cmds: + - ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-installation.yml + + 
nuke: + desc: Uninstall Kubernetes on the nodes + dir: "{{.ANSIBLE_DIR}}" + interactive: true + cmds: + - ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-nuke.yml + - task: reboot + + ping: + desc: Ping all the hosts + dir: "{{.ANSIBLE_DIR}}" + cmds: + - ansible all -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml --one-line -m 'ping' + + uptime: + desc: Uptime of all the hosts + dir: "{{.ANSIBLE_DIR}}" + cmds: + - ansible all -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml --one-line -a 'uptime' + + reboot: + desc: Reboot all the k8s nodes + dir: "{{.ANSIBLE_DIR}}" + cmds: + - ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-reboot.yml + + poweroff: + desc: Shutdown all the k8s nodes + dir: "{{.ANSIBLE_DIR}}" + cmds: + - ansible kubernetes -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml -a '/usr/bin/systemctl poweroff' --become diff --git a/.taskfiles/ClusterTasks.yml b/.taskfiles/ClusterTasks.yml new file mode 100644 index 000000000..1f79debcf --- /dev/null +++ b/.taskfiles/ClusterTasks.yml @@ -0,0 +1,90 @@ +--- +version: "3" + +tasks: + + verify: + desc: Verify flux meets the prerequisites + cmds: + - flux check --pre + + install: + desc: Install Flux into your cluster + cmds: + - kubectl apply --kustomize {{.KUBERNETES_DIR}}/bootstrap + - cat {{.SOPS_AGE_KEY_FILE}} | kubectl -n flux-system create secret generic sops-age --from-file=age.agekey=/dev/stdin + - sops --decrypt {{.KUBERNETES_DIR}}/flux/vars/cluster-secrets.sops.yaml | kubectl apply -f - + - kubectl apply -f {{.KUBERNETES_DIR}}/flux/vars/cluster-settings.yaml + - kubectl apply --kustomize {{.KUBERNETES_DIR}}/flux/config + - task: reconcile + preconditions: + - sh: test -f {{.SOPS_AGE_KEY_FILE}} + msg: | + Age key file is not found. Did you forget to create it? 
+ vars: + SOPS_AGE_KEY_FILE: ~/.config/sops/age/keys.txt + + reconcile: + desc: Force update Flux to pull in changes from your Git repository + cmds: + - flux reconcile -n flux-system source git home-kubernetes + - flux reconcile -n flux-system kustomization cluster + + hr-restart: + desc: Restart all failed Helm Releases + cmds: + - kubectl get hr --all-namespaces | grep False | awk '{print $2, $1}' | xargs -l bash -c 'flux suspend hr $0 -n $1' + - kubectl get hr --all-namespaces | grep False | awk '{print $2, $1}' | xargs -l bash -c 'flux resume hr $0 -n $1' + + nodes: + desc: List all the nodes in your cluster + cmds: + - kubectl get nodes {{.CLI_ARGS | default "-o wide"}} + + pods: + desc: List all the pods in your cluster + cmds: + - kubectl get pods {{.CLI_ARGS | default "-A"}} + + kustomizations: + desc: List all the kustomizations in your cluster + cmds: + - kubectl get kustomizations {{.CLI_ARGS | default "-A"}} + + helmreleases: + desc: List all the helmreleases in your cluster + cmds: + - kubectl get helmreleases {{.CLI_ARGS | default "-A"}} + + helmrepositories: + desc: List all the helmrepositories in your cluster + cmds: + - kubectl get helmrepositories {{.CLI_ARGS | default "-A"}} + + gitrepositories: + desc: List all the gitrepositories in your cluster + cmds: + - kubectl get gitrepositories {{.CLI_ARGS | default "-A"}} + + certificates: + desc: List all the certificates in your cluster + cmds: + - kubectl get certificates {{.CLI_ARGS | default "-A"}} + - kubectl get certificaterequests {{.CLI_ARGS | default "-A"}} + + ingresses: + desc: List all the ingresses in your cluster + cmds: + - kubectl get ingress {{.CLI_ARGS | default "-A"}} + + resources: + desc: Gather common resources in your cluster, useful when asking for support + cmds: + - task: nodes + - task: kustomizations + - task: helmreleases + - task: helmrepositories + - task: gitrepositories + - task: certificates + - task: ingresses + - task: pods diff --git a/.taskfiles/PrecommitTasks.yml b/.taskfiles/PrecommitTasks.yml new file mode 100644 index 000000000..52c27a950 --- /dev/null +++ b/.taskfiles/PrecommitTasks.yml @@ -0,0 +1,19 @@ +--- +version: "3" + +tasks: + + init: + desc: Initialize pre-commit hooks + cmds: + - pre-commit install --install-hooks + + run: + desc: Run pre-commit + cmds: + - pre-commit run --all-files + + update: + desc: Update pre-commit hooks + cmds: + - pre-commit autoupdate diff --git a/.taskfiles/SopsTasks.yml b/.taskfiles/SopsTasks.yml new file mode 100644 index 000000000..1b2ac2f04 --- /dev/null +++ b/.taskfiles/SopsTasks.yml @@ -0,0 +1,14 @@ +--- +version: "3" + +tasks: + + e: + desc: Encrypt sops file 'to use must include -- before path to file.' eg "task sops:e -- file.yaml" + cmds: + - sops --encrypt --in-place {{.CLI_ARGS}} + + d: + desc: Decrypt sops file 'to use must include -- before path to file.' 
eg "task sops:d -- file.yaml" + cmds: + - sops --decrypt --in-place {{.CLI_ARGS}} diff --git a/.taskfiles/TerraformTasks.yml b/.taskfiles/TerraformTasks.yml new file mode 100644 index 000000000..f11a1981d --- /dev/null +++ b/.taskfiles/TerraformTasks.yml @@ -0,0 +1,22 @@ +--- +version: "3" + +tasks: + + init: + desc: Initialize terraform dependencies + dir: "{{.TERRAFORM_DIR}}/cloudflare" + cmds: + - terraform init {{.CLI_ARGS}} + + plan: + desc: Show the changes terraform will make + dir: "{{.TERRAFORM_DIR}}/cloudflare" + cmds: + - terraform plan {{.CLI_ARGS}} + + apply: + desc: Apply the changes to Cloudflare + dir: "{{.TERRAFORM_DIR}}/cloudflare" + cmds: + - terraform apply {{.CLI_ARGS}} diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 000000000..a91c63d49 --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,17 @@ +{ + "recommendations": [ + "albert.TabOut", + "britesnow.vscode-toggle-quotes", + "fcrespo82.markdown-table-formatter", + "mitchdenny.ecdc", + "ms-kubernetes-tools.vscode-kubernetes-tools", + "redhat.ansible", + "signageos.signageos-vscode-sops", + "will-stone.in-any-case", + "BriteSnow.vscode-toggle-quotes", + "EditorConfig.editorconfig", + "HashiCorp.terraform", + "PKief.material-icon-theme", + "Weaveworks.vscode-gitops-tools" + ] +} diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 000000000..abc72ddff --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,20 @@ +{ + "files.associations": { + "*.json5": "jsonc", + "**/ansible/**/*.yml": "ansible", + "**/ansible/**/*.sops.yml": "yaml", + "**/ansible/**/inventory/**/*.yml": "yaml", + "**/terraform/**/*.tf": "terraform", + "**/kubernetes/**/*.sops.toml": "plaintext" + }, + "yaml.schemas": { + "ansible": "ansible/*.yml", + "Kubernetes": "kubernetes/*.yaml" + }, + "editor.bracketPairColorization.enabled": true, + "editor.guides.bracketPairs": true, + "editor.guides.bracketPairsHorizontal": true, + "editor.guides.highlightActiveBracketPair": true, + "editor.hover.delay": 1500, + "files.trimTrailingWhitespace": true, +} diff --git a/.yamllint.yaml b/.yamllint.yaml new file mode 100644 index 000000000..76f1e8f9b --- /dev/null +++ b/.yamllint.yaml @@ -0,0 +1,17 @@ +--- +ignore: | + *.sops.* +extends: default +rules: + truthy: + allowed-values: ["true", "false", "on"] + comments: + min-spaces-from-content: 1 + line-length: disable + braces: + min-spaces-inside: 0 + max-spaces-inside: 1 + brackets: + min-spaces-inside: 0 + max-spaces-inside: 0 + indentation: enable diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..15182e975 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 k8s@home + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 000000000..3f0c1d143 --- /dev/null +++ b/README.md @@ -0,0 +1,539 @@ +# Template for deploying k3s backed by Flux + +Highly opinionated template for deploying a single [k3s](https://k3s.io) cluster with [Ansible](https://www.ansible.com) and [Terraform](https://www.terraform.io) backed by [Flux](https://toolkit.fluxcd.io/) and [SOPS](https://toolkit.fluxcd.io/guides/mozilla-sops/). + +The purpose here is to showcase how you can deploy an entire Kubernetes cluster and show it off to the world using the [GitOps](https://www.weave.works/blog/what-is-gitops-really) tool [Flux](https://toolkit.fluxcd.io/). When completed, your Git repository will be driving the state of your Kubernetes cluster. In addition with the help of the [Ansible](https://github.com/ansible-collections/community.sops), [Terraform](https://github.com/carlpett/terraform-provider-sops) and [Flux](https://toolkit.fluxcd.io/guides/mozilla-sops/) SOPS integrations you'll be able to commit [Age](https://github.com/FiloSottile/age) encrypted secrets to your public repo. + +## Overview + +- [Introduction](https://github.com/onedr0p/flux-cluster-template#-introduction) +- [Prerequisites](https://github.com/onedr0p/flux-cluster-template#-prerequisites) +- [Repository structure](https://github.com/onedr0p/flux-cluster-template#-repository-structure) +- [Lets go!](https://github.com/onedr0p/flux-cluster-template#-lets-go) +- [Post installation](https://github.com/onedr0p/flux-cluster-template#-post-installation) +- [Troubleshooting](https://github.com/onedr0p/flux-cluster-template#-troubleshooting) +- [What's next](https://github.com/onedr0p/flux-cluster-template#-whats-next) +- [Thanks](https://github.com/onedr0p/flux-cluster-template#-thanks) + +## 👋 Introduction + +The following components will be installed in your [k3s](https://k3s.io/) cluster by default. Most are only included to get a minimum viable cluster up and running. 
+ +- [flux](https://toolkit.fluxcd.io/) - GitOps operator for managing Kubernetes clusters from a Git repository +- [kube-vip](https://kube-vip.io/) - Load balancer for the Kubernetes control plane nodes +- [metallb](https://metallb.universe.tf/) - Load balancer for Kubernetes services +- [cert-manager](https://cert-manager.io/) - Operator to request SSL certificates and store them as Kubernetes resources +- [calico](https://www.tigera.io/project-calico/) - Container networking interface for inter pod and service networking +- [external-dns](https://github.com/kubernetes-sigs/external-dns) - Operator to publish DNS records to Cloudflare (and other providers) based on Kubernetes ingresses +- [k8s_gateway](https://github.com/ori-edge/k8s_gateway) - DNS resolver that provides local DNS to your Kubernetes ingresses +- [ingress-nginx](https://kubernetes.github.io/ingress-nginx/) - Kubernetes ingress controller used as an HTTP reverse proxy for Kubernetes ingresses +- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) - Provision persistent local storage with Kubernetes + +_Additional applications include [hajimari](https://github.com/toboshii/hajimari), [error-pages](https://github.com/tarampampam/error-pages), [echo-server](https://github.com/Ealenn/Echo-Server), [system-upgrade-controller](https://github.com/rancher/system-upgrade-controller), [reloader](https://github.com/stakater/Reloader), and [kured](https://github.com/weaveworks/kured)_ + +For provisioning, the following tools will be used: + +- [Fedora 36 Server](https://getfedora.org/en/server/download/) - Universal operating system that supports running all kinds of home related workloads in Kubernetes and has a faster release cycle +- [Ubuntu 22.04 Server](https://ubuntu.com/download/server) - Alternative operating system, limited community support +- [Ansible](https://www.ansible.com) - Provision Fedora Server and install k3s +- [Terraform](https://www.terraform.io) - Provision an already existing Cloudflare domain and certain DNS records to be used with your k3s cluster + +## 📝 Prerequisites + +**Note:** _This template has not been tested on cloud providers like AWS EC2, Hetzner, Scaleway etc... Those cloud offerings probably have a better way of provisioning a Kubernetes cluster and it's advisable to use those instead of the Ansible playbooks included here. This repository can still be tweaked for the GitOps/Flux portion if there's a cluster working in one of those environments._ + +### 📚 Reading material + +- [Organizing Cluster Access Using kubeconfig Files](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) + +### 💻 Systems + +- One or more nodes with a fresh install of [Fedora Server 36](https://getfedora.org/en/server/download/). + - These nodes can be ARM64/AMD64 bare metal or VMs. + - An odd number of control plane nodes, greater than or equal to 3, is required if deploying more than one control plane node. +- A [Cloudflare](https://www.cloudflare.com/) account with a domain; this will be managed by Terraform and external-dns. You can [register new domains](https://www.cloudflare.com/products/registrar/) directly through Cloudflare. +- Some experience in debugging problems and a positive attitude ;) + +📍 It is recommended to have 3 master nodes for a highly available control plane. + +### 🔧 Workstation Tools + +📍 Install the **most recent version** of the CLI tools below. 
If you are **having trouble with future steps**, it is very likely you don't have the most recent version of these CLI tools, **!especially sops AND yq!**. + +1. Install the following CLI tools on your workstation. If you are using [Homebrew](https://brew.sh/) on MacOS or Linux, skip to steps 3 and 4. + + * Required: [age](https://github.com/FiloSottile/age), [ansible](https://www.ansible.com), [flux](https://toolkit.fluxcd.io/), [weave-gitops](https://docs.gitops.weave.works/docs/installation/weave-gitops/), [go-task](https://github.com/go-task/task), [ipcalc](http://jodies.de/ipcalc), [jq](https://stedolan.github.io/jq/), [kubectl](https://kubernetes.io/docs/tasks/tools/), [pre-commit](https://github.com/pre-commit/pre-commit), [sops](https://github.com/mozilla/sops), [terraform](https://www.terraform.io), [yq v4](https://github.com/mikefarah/yq) + + * Recommended: [direnv](https://github.com/direnv/direnv), [helm](https://helm.sh/), [kustomize](https://github.com/kubernetes-sigs/kustomize), [prettier](https://github.com/prettier/prettier), [stern](https://github.com/stern/stern), [yamllint](https://github.com/adrienverge/yamllint) + +2. This guide heavily relies on [go-task](https://github.com/go-task/task) as a framework for setting things up. It is advised to learn and understand the commands it is running under the hood. + +3. Install [go-task](https://github.com/go-task/task) via Brew + + ```sh + brew install go-task/tap/go-task + ``` + +4. Install workstation dependencies via Brew + + ```sh + task init + ``` + +### ⚠️ pre-commit + +It is advisable to install [pre-commit](https://pre-commit.com/) and the pre-commit hooks that come with this repository. + +1. Enable Pre-Commit + + ```sh + task precommit:init + ``` + +2. Update Pre-Commit, though it will occasionally make mistakes, so verify its results. + + ```sh + task precommit:update + ``` + +## 📂 Repository structure + +The Git repository contains the following directories under `kubernetes`, ordered below by how Flux will apply them. + +```sh +📁 kubernetes # Kubernetes cluster defined as code +├─📁 bootstrap # Flux installation +├─📁 flux # Main Flux configuration of repository +└─📁 apps # Apps deployed into the cluster grouped by namespace +``` + +## 🚀 Lets go + +The very first step will be to create a new repository by clicking the **Use this template** button on this page. + +Clone the repo to your local workstation and `cd` into it. + +📍 **All of the below commands** are run on your **local** workstation, **not** on any of your cluster nodes. + +### 🔐 Setting up Age + +📍 Here we will create an Age private and public key. Using [SOPS](https://github.com/mozilla/sops) with [Age](https://github.com/FiloSottile/age) allows us to encrypt secrets and use them in Ansible, Terraform and Flux. + +1. Create an Age private / public key pair + + ```sh + age-keygen -o age.agekey + ``` + +2. Set up the directory for the Age key and move the Age file to it + + ```sh + mkdir -p ~/.config/sops/age + mv age.agekey ~/.config/sops/age/keys.txt + ``` + +3. Export the `SOPS_AGE_KEY_FILE` variable in your `bashrc`, `zshrc` or `config.fish` and source it, e.g. + + ```sh + export SOPS_AGE_KEY_FILE=~/.config/sops/age/keys.txt + source ~/.bashrc + ``` + +4. Fill out the Age public key in the `.config.env` under `BOOTSTRAP_AGE_PUBLIC_KEY`, **note** the public key should start with `age`... 
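+ + For example, using the placeholder key shown in `.config.sample.env` (yours will be different), the finished line in `.config.env` would look like: + + ```sh + export BOOTSTRAP_AGE_PUBLIC_KEY="age15uzrw396e67z9wdzsxzdk7ka0g2gr3l460e0slaea563zll3hdfqwqxdta" + ```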
+ +### ☁️ Global Cloudflare API Key + +In order to use Terraform and `cert-manager` with the Cloudflare DNS challenge you will need to create an API key. + +1. Head over to Cloudflare and create an API key by going [here](https://dash.cloudflare.com/profile/api-tokens). + +2. Under the `API Keys` section, create a global API Key. + +3. Use the API Key in the configuration section below. + +📍 You may wish to update this later on to a Cloudflare **API Token** which can be scoped to certain resources. I do not recommend using a Cloudflare **API Key**; however, for the purposes of this template it is easier to get started without having to define which scopes and resources are needed. For more information see the [Cloudflare docs on API Keys and Tokens](https://developers.cloudflare.com/api/). + +### 📄 Configuration + +📍 The `.config.env` file contains necessary configuration that is needed by Ansible, Terraform and Flux. + +1. Copy the `.config.sample.env` to `.config.env` and start filling out all the environment variables. + + **All are required** unless otherwise noted in the comments. + + ```sh + cp .config.sample.env .config.env + ``` + +2. Once that is done, verify the configuration is correct by running: + + ```sh + task verify + ``` + +3. If you do not encounter any errors, run the following to have the script wire up the templated files and place them where they need to be. + + ```sh + task configure + ``` + +### ⚡ Preparing Fedora Server with Ansible + +📍 Here we will be running an Ansible playbook to prepare Fedora Server for running a Kubernetes cluster. + +📍 Nodes are not security hardened by default; you can do this with [dev-sec/ansible-collection-hardening](https://github.com/dev-sec/ansible-collection-hardening) or similar if it supports Fedora Server. + +1. Ensure you are able to SSH into your nodes from your workstation using a private SSH key **without a passphrase**. This is how Ansible is able to connect to your remote nodes. + + [How to configure SSH key-based authentication](https://www.digitalocean.com/community/tutorials/how-to-configure-ssh-key-based-authentication-on-a-linux-server) + +2. Install the Ansible deps + + ```sh + task ansible:init + ``` + +3. Verify Ansible can view your config + + ```sh + task ansible:list + ``` + +4. Verify Ansible can ping your nodes + + ```sh + task ansible:ping + ``` + +5. Run the Fedora Server Ansible prepare playbook + + ```sh + task ansible:prepare + ``` + +6. Reboot the nodes + + ```sh + task ansible:reboot + ``` + +### ⛵ Installing k3s with Ansible + +📍 Here we will be running an Ansible playbook to install [k3s](https://k3s.io/) with [this](https://galaxy.ansible.com/xanmanning/k3s) wonderful k3s Ansible galaxy role. After completion, Ansible will drop a `kubeconfig` in `./kubeconfig` for interacting with your cluster via `kubectl`. + +☢️ If you run into problems, you can run `task ansible:nuke` to destroy the k3s cluster and start over. + +1. Verify Ansible can view your config + + ```sh + task ansible:list + ``` + +2. Verify Ansible can ping your nodes + + ```sh + task ansible:ping + ``` + +3. Install k3s with Ansible + + ```sh + task ansible:install + ``` + +4. 
Verify the nodes are online + + ```sh + task cluster:nodes + # NAME STATUS ROLES AGE VERSION + # k8s-0 Ready control-plane,master 4d20h v1.21.5+k3s1 + # k8s-1 Ready worker 4d20h v1.21.5+k3s1 + ``` + +### ☁️ Configuring Cloudflare DNS with Terraform + +📍 Review the Terraform scripts under `./terraform/cloudflare/` and make sure you understand what they're doing (no, really, review them). + +If your domain already has existing DNS records, **be sure to export those DNS settings before you continue**. + +1. Pull in the Terraform deps + + ```sh + task terraform:init + ``` + +2. Review the changes Terraform will make to your Cloudflare domain + + ```sh + task terraform:plan + ``` + +3. Have Terraform apply your Cloudflare settings + + ```sh + task terraform:apply + ``` + +If Terraform ran successfully you can log into Cloudflare and validate the DNS records are present. + +The cluster application [external-dns](https://github.com/kubernetes-sigs/external-dns) will be managing the rest of the DNS records you will need. + +### 🔹 GitOps with Flux + +📍 Here we will be installing [flux](https://toolkit.fluxcd.io/) after some quick bootstrap steps. + +1. Verify Flux can be installed + + ```sh + task cluster:verify + # ► checking prerequisites + # ✔ kubectl 1.21.5 >=1.18.0-0 + # ✔ Kubernetes 1.21.5+k3s1 >=1.16.0-0 + # ✔ prerequisites checks passed + ``` + +2. Push your changes to git + + 📍 **Verify** all the `*.sops.yaml` and `*.sops.yml` files under the `./ansible`, `./kubernetes`, and `./terraform` folders are **encrypted** with SOPS + + ```sh + git add -A + git commit -m "Initial commit :rocket:" + git push + ``` + +3. Install Flux and sync the cluster to the Git repository + + ```sh + task cluster:install + # namespace/flux-system configured + # customresourcedefinition.apiextensions.k8s.io/alerts.notification.toolkit.fluxcd.io created + ``` + +4. Verify Flux components are running in the cluster + + ```sh + task cluster:pods -- -n flux-system + # NAME READY STATUS RESTARTS AGE + # helm-controller-5bbd94c75-89sb4 1/1 Running 0 1h + # kustomize-controller-7b67b6b77d-nqc67 1/1 Running 0 1h + # notification-controller-7c46575844-k4bvr 1/1 Running 0 1h + # source-controller-7d6875bcb4-zqw9f 1/1 Running 0 1h + ``` + +### 🎤 Verification Steps + +_Mic check, 1, 2_ - In a few moments applications should be lighting up like a Christmas tree 🎄 + +You can run all the commands below with one task + +```sh +task cluster:resources +``` + +1. View the Flux Git Repositories + + ```sh + task cluster:gitrepositories + ``` + +2. View the Flux kustomizations + + ```sh + task cluster:kustomizations + ``` + +3. View all the Flux Helm Releases + + ```sh + task cluster:helmreleases + ``` + +4. View all the Flux Helm Repositories + + ```sh + task cluster:helmrepositories + ``` + +5. View all the Pods + + ```sh + task cluster:pods + ``` + +6. View all the certificates and certificate requests + + ```sh + task cluster:certificates + ``` + +7. View all the ingresses + + ```sh + task cluster:ingresses + ``` + +🏆 **Congratulations**, if all went smoothly you'll have a Kubernetes cluster managed by Flux, and your Git repository will be driving the state of your cluster. + +☢️ If you run into problems, you can run `task ansible:nuke` to destroy the k3s cluster and start over. + +🧠 Now it's time to pause and go get some coffee ☕ because next up is a description of how DNS is handled. 
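+ +If any of the resources above are stuck or show as not ready, a few generic Flux and kubectl commands usually point at the cause. This is only a sketch and not part of the template's task definitions: + +```sh +# Show the status of all Flux kustomizations and where they are stuck +flux get kustomizations -A +# Show recent error-level logs from the Flux controllers +flux logs --level=error -A +# Describe the main cluster kustomization to see its events +kubectl -n flux-system describe kustomization cluster +```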
+ +## 📣 Post installation + +### 🌐 DNS + +📍 The [external-dns](https://github.com/kubernetes-sigs/external-dns) application created in the `kube-system` namespace will handle creating public DNS records. By default, `echo-server` is the only public subdomain exposed on your Cloudflare domain. In order to make additional applications public you must set an ingress annotation like in the `HelmRelease` for `echo-server`. You do not need to use Terraform to create additional DNS records unless you need a record outside the purposes of your Kubernetes cluster (e.g. setting up MX records). + +[k8s_gateway](https://github.com/ori-edge/k8s_gateway) is deployed on the IP chosen for `${BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR}`. In order to test DNS you can point your client's DNS to the `${BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR}` IP address and load `https://hajimari.${BOOTSTRAP_CLOUDFLARE_DOMAIN}` in your browser. + +You can also try debugging with the command `dig`, e.g. `dig @${BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR} hajimari.${BOOTSTRAP_CLOUDFLARE_DOMAIN}` and you should get a valid answer containing your `${BOOTSTRAP_METALLB_INGRESS_ADDR}` IP address. + +If your router (or Pi-Hole, Adguard Home or whatever) supports conditional DNS forwarding (also known as split-horizon DNS) you may have DNS requests for `${SECRET_DOMAIN}` only point to the `${BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR}` IP address. This will ensure DNS requests for `${SECRET_DOMAIN}` only get routed to your [k8s_gateway](https://github.com/ori-edge/k8s_gateway) service, thus providing DNS resolution to your cluster applications/ingresses. + +To access services from the outside world, port forward `80` and `443` in your router to the `${BOOTSTRAP_METALLB_INGRESS_ADDR}` IP. In a few moments, head over to your browser and you _should_ be able to access `https://echo-server.${BOOTSTRAP_CLOUDFLARE_DOMAIN}` from a device outside your LAN. + +Now if nothing is working, that is expected. This is DNS after all! + +### 🔐 SSL + +By default in this template Kubernetes ingresses are set to use the [Let's Encrypt Staging Environment](https://letsencrypt.org/docs/staging-environment/). This will hopefully reduce issues from ACME when requesting certificates until you are ready to use this in "Production". + +Once you have confirmed there are no issues requesting your certificates, replace `letsencrypt-staging` with `letsencrypt-production` in your ingress annotations for `cert-manager.io/cluster-issuer`. + +### 🤖 Renovatebot + +[Renovatebot](https://www.mend.io/free-developer-tools/renovate/) will scan your repository and offer PRs when it finds dependencies out of date. Common dependencies it will discover and update are Flux, Ansible Galaxy Roles, Terraform Providers, Kubernetes Helm Charts, Kubernetes Container Images, Pre-commit hook updates, and more! + +The base Renovate configuration provided in your repository can be viewed at [.github/renovate.json5](https://github.com/onedr0p/flux-cluster-template/blob/main/.github/renovate.json5). Note that it only runs on weekends by default; you can [change the schedule to anything you want](https://docs.renovatebot.com/presets-schedule/) or simply remove it. + +To enable Renovate on your repository, click the 'Configure' button over at their [Github app page](https://github.com/apps/renovate) and choose your repository. Over time Renovate will create PRs for out-of-date dependencies it finds. Any merged PRs that touch the `kubernetes` directory will be deployed by Flux. 
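+ +If you want to sanity check the Renovate configuration before enabling the app, Renovate ships a config validator you can run locally. This assumes you have Node.js and `npx` available, which are not part of this template's tooling: + +```sh +# Checks the repository's Renovate config files, including .github/renovate.json5 +npx --yes --package renovate -- renovate-config-validator +```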
+ +### 🪝 Github Webhook + +Flux is pull-based by design, meaning it will periodically check your git repository for changes; using a webhook you can enable Flux to update your cluster on `git push`. In order to configure Github to send `push` events from your repository to the Flux webhook receiver you will need two things: + +1. Webhook URL - Your webhook receiver will be deployed on `https://flux-receiver.${BOOTSTRAP_CLOUDFLARE_DOMAIN}/hook/:hookId`. In order to find out your hook id you can run the following command: + + ```sh + kubectl -n flux-system get receiver/github-receiver --kubeconfig=./kubeconfig + # NAME AGE READY STATUS + # github-receiver 6h8m True Receiver initialized with URL: /hook/12ebd1e363c641dc3c2e430ecf3cee2b3c7a5ac9e1234506f6f5f3ce1230e123 + ``` + + So if your domain was `onedr0p.com` the full url would look like this: + + ```text + https://flux-receiver.onedr0p.com/hook/12ebd1e363c641dc3c2e430ecf3cee2b3c7a5ac9e1234506f6f5f3ce1230e123 + ``` + +2. Webhook secret - Your webhook secret can be found by decrypting the `secret.sops.yaml` using the following command: + + ```sh + sops -d ./kubernetes/flux/config/webhooks/github/secret.sops.yaml | yq .stringData.token + ``` + + **Note:** Don't forget to update the `BOOTSTRAP_FLUX_GITHUB_WEBHOOK_SECRET` variable in your `.config.env` file so it matches the generated secret, if applicable. + +Now that you have the webhook url and secret, it's time to set everything up on the Github repository side. Navigate to the settings of your repository on Github, and under "Settings/Webhooks" press the "Add webhook" button. Fill in the webhook url and your secret. + +### 💾 Storage + +Rancher's `local-path-provisioner` is a great start for storage, but soon you might find you need more features like replicated block storage, or to connect to an NFS/SMB/iSCSI server. Check out the projects below to read up more on some storage solutions that might work for you. + +- [rook-ceph](https://github.com/rook/rook) +- [longhorn](https://github.com/longhorn/longhorn) +- [openebs](https://github.com/openebs/openebs) +- [nfs-subdir-external-provisioner](https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner) +- [democratic-csi](https://github.com/democratic-csi/democratic-csi) +- [csi-driver-nfs](https://github.com/kubernetes-csi/csi-driver-nfs) +- [synology-csi](https://github.com/SynologyOpenSource/synology-csi) + +### 🔏 Authenticate Flux over SSH + +Authenticating Flux to your git repository has a couple of benefits, like using a private git repository and/or using the Flux [Image Automation Controllers](https://fluxcd.io/docs/components/image/). + +By default this template only works on a public GitHub repository; it is advised to keep your repository public. + +The benefits of a public repository include: + +* Debugging or asking for help; you can provide a link to a resource you are having issues with. +* Adding a topic to your repository of `k8s-at-home` to be included in the [k8s-at-home-search](https://whazor.github.io/k8s-at-home-search/). This search helps people discover different configurations of Helm charts across other Flux-based repositories. + +
+ Expand to read guide on adding Flux SSH authentication + + 1. Generate new SSH key: + ```sh + ssh-keygen -t ecdsa -b 521 -C "github-deploy-key" -f ./kubernetes/bootstrap/github-deploy.key -q -P "" + ``` + 2. Paste public key in the deploy keys section of your repository settings + 3. Create sops secret in `./kubernetes/bootstrap/github-deploy-key.sops.yaml` with the contents of: + ```yaml + apiVersion: v1 + kind: Secret + metadata: + name: github-deploy-key + namespace: flux-system + stringData: + # 3a. Contents of github-deploy-key + identity: | + -----BEGIN OPENSSH PRIVATE KEY----- + ... + -----END OPENSSH PRIVATE KEY----- + # 3b. Output of curl --silent https://api.github.com/meta | jq --raw-output '"github.com "+.ssh_keys[]' + known_hosts: | + github.com ssh-ed25519 ... + github.com ecdsa-sha2-nistp256 ... + github.com ssh-rsa ... + ``` + 4. Encrypt secret: + ```sh + sops --encrypt --in-place ./kubernetes/bootstrap/github-deploy-key.sops.yaml + ``` + 5. Apply secret to cluster: + ```sh + sops --decrypt ./kubernetes/bootstrap/github-deploy-key.sops.yaml | kubectl apply -f - + ``` + 6. Update `./kubernetes/flux/config/cluster.yaml`: + ```yaml + apiVersion: source.toolkit.fluxcd.io/v1beta2 + kind: GitRepository + metadata: + name: home-kubernetes + namespace: flux-system + spec: + interval: 10m + # 6a: Change this to your user and repo names + url: ssh://git@github.com/$user/$repo + ref: + branch: main + secretRef: + name: github-deploy-key + ``` + 7. Commit and push changes + 8. Force flux to reconcile your changes + ```sh + task cluster:reconcile + ``` + 9. Verify git repository is now using SSH: + ```sh + task cluster:gitrepositories + ``` + 10. Optionally set your repository to Private in your repository settings. +
+ +## 👉 Help + +- [Discussions](https://github.com/onedr0p/flux-cluster-template/discussions) +- [Discord](https://discord.gg/k8s-at-home) + +## ❔ What's next + +The world is your cluster, have at it! + +## 🤝 Thanks + +Big shout out to all the authors and contributors to the projects that we are using in this repository. + +[@whazor](https://github.com/whazor) created [this website](https://nanne.dev/k8s-at-home-search/) as a creative way to search Helm Releases across GitHub. You may use it as a means to get ideas on how to configure an applications' Helm values. diff --git a/Taskfile.yml b/Taskfile.yml new file mode 100644 index 000000000..2b0a84764 --- /dev/null +++ b/Taskfile.yml @@ -0,0 +1,64 @@ +--- +version: "3" + +vars: + PROJECT_DIR: + sh: "git rev-parse --show-toplevel" + KUBERNETES_DIR: "{{.PROJECT_DIR}}/kubernetes" + ANSIBLE_DIR: "{{.PROJECT_DIR}}/ansible" + TERRAFORM_DIR: "{{.PROJECT_DIR}}/terraform" + +dotenv: [".config.env"] + +env: + KUBECONFIG: "{{.PROJECT_DIR}}/kubeconfig" + +includes: + ansible: .taskfiles/AnsibleTasks.yml + cluster: .taskfiles/ClusterTasks.yml + precommit: .taskfiles/PrecommitTasks.yml + terraform: .taskfiles/TerraformTasks.yml + sops: .taskfiles/SopsTasks.yml + +tasks: + + init: + desc: Initialize workstation dependencies with Brew + cmds: + - brew install {{.DEPS}} {{.CLI_ARGS}} + preconditions: + - sh: command -v brew + msg: | + Homebrew is not installed. Using MacOS, Linux or WSL? + Head over to https://brew.sh to get up and running. + vars: + DEPS: >- + age + ansible + direnv + fluxcd/tap/flux + go-task/tap/go-task + helm + ipcalc + jq + kubernetes-cli + kustomize + pre-commit + prettier + sops + stern + terraform + tflint + weaveworks/tap/gitops + yamllint + yq + + verify: + desc: Verify env settings + cmds: + - ./configure --verify + + configure: + desc: Configure repository from env settings + cmds: + - ./configure diff --git a/ansible.cfg b/ansible.cfg new file mode 100644 index 000000000..d7f3ef6d1 --- /dev/null +++ b/ansible.cfg @@ -0,0 +1,35 @@ +[defaults] +# General settings +nocows = True +executable = /bin/bash +stdout_callback = yaml +force_valid_group_names = ignore +# File/Directory settings +log_path = ~/.ansible/ansible.log +inventory = ./ansible/inventory +roles_path = ~/.ansible/roles:./ansible/roles +collections_path = ~/.ansible/collections +remote_tmp = /tmp +local_tmp = ~/.ansible/tmp +# Fact Caching settings +fact_caching = jsonfile +fact_caching_connection = ~/.ansible/facts_cache +# SSH settings +remote_port = 22 +timeout = 60 +host_key_checking = False +# Plugin settings +vars_plugins_enabled = host_group_vars,community.sops.sops + +[inventory] +unparsed_is_failed = true + +[privilege_escalation] +become = True + +[ssh_connection] +scp_if_ssh = smart +retries = 3 +ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o Compression=yes -o ServerAliveInterval=15s +pipelining = True +control_path = %(directory)s/%%h-%%r diff --git a/ansible/inventory/group_vars/kubernetes/k3s.yml b/ansible/inventory/group_vars/kubernetes/k3s.yml new file mode 100644 index 000000000..87a4f47f4 --- /dev/null +++ b/ansible/inventory/group_vars/kubernetes/k3s.yml @@ -0,0 +1,60 @@ +--- +# +# Below vars are for the xanmanning.k3s role +# ...see https://github.com/PyratLabs/ansible-role-k3s +# + +# (string) Use a specific version of k3s +# renovate: datasource=github-releases depName=k3s-io/k3s +k3s_release_version: "v1.24.8+k3s1" + +# (bool) Install using hard links rather than symbolic links. 
+k3s_install_hard_links: true + +# (bool) Escalate user privileges for all tasks +k3s_become: true + +# (bool) Enable debug logging on the k3s service +k3s_debug: false + +# (bool) Enable etcd embedded datastore +k3s_etcd_datastore: true + +# (bool) Allow the use of unsupported configurations in k3s +k3s_use_unsupported_config: true + +# (string) Control Plane registration address +k3s_registration_address: "{{ kubevip_address }}" + +# (list) A list of URLs to deploy on the primary control plane. Read notes below. +k3s_server_manifests_urls: + # Kube-vip + - url: https://kube-vip.io/manifests/rbac.yaml + filename: kube-vip-rbac.yaml + # Tigera Operator + # https://github.com/projectcalico/calico/issues/7003 + - url: https://raw.githubusercontent.com/projectcalico/calico/v3.24.2/manifests/tigera-operator.yaml + filename: tigera-operator.yaml + # Prometheus Operator + - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml + filename: alertmanagerconfigs.yaml + - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml + filename: alertmanagers.yaml + - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml + filename: podmonitors.yaml + - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml + filename: probes.yaml + - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml + filename: prometheuses.yaml + - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml + filename: prometheusrules.yaml + - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml + filename: servicemonitors.yaml + - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml + filename: thanosrulers.yaml + +# (list) A flat list of templates to deploy on the primary control plane +# /var/lib/rancher/k3s/server/manifests +k3s_server_manifests_templates: + - calico-installation.yaml.j2 + - kube-vip-daemonset.yaml.j2 diff --git a/ansible/inventory/group_vars/kubernetes/os.yml b/ansible/inventory/group_vars/kubernetes/os.yml new file mode 100644 index 000000000..7d5fd602a --- /dev/null +++ b/ansible/inventory/group_vars/kubernetes/os.yml @@ -0,0 +1,35 @@ +--- +# (string) Timezone for the servers +# timezone: "America/New_York" + +# (list) Additional ssh public keys to add to the nodes +# ssh_authorized_keys: + +fedora: + packages: + - dnf-plugin-system-upgrade + - dnf-utils + - hdparm + - htop + - intel-gpu-tools + - ipvsadm + - lm_sensors + - nano + - nvme-cli + - socat + - python3-kubernetes + - python3-libselinux + - python3-pyyaml + +ubuntu: + packages: + - hdparm + - htop + - intel-gpu-tools + - ipvsadm + - lm-sensors + - nano + - nvme-cli + - socat + - python3-kubernetes + - python3-yaml diff --git a/ansible/inventory/group_vars/master/k3s.yml b/ansible/inventory/group_vars/master/k3s.yml new file 
mode 100644 index 000000000..320350353 --- /dev/null +++ b/ansible/inventory/group_vars/master/k3s.yml @@ -0,0 +1,45 @@ +--- +# https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/ +# https://github.com/PyratLabs/ansible-role-k3s + +# (bool) Specify if a host (or host group) are part of the control plane +k3s_control_node: true + +# (dict) k3s settings for all control-plane nodes +k3s_server: + node-ip: "{{ ansible_host }}" + tls-san: + - "{{ kubevip_address }}" + # Disable Docker - this will use the default containerd CRI + docker: false + flannel-backend: "none" # This needs to be in quotes + disable: + # Disable flannel - replaced with Calico + - flannel + # Disable traefik - replaced with ingress-nginx + - traefik + # Disable servicelb - replaced with metallb and install with Flux + - servicelb + # Disable metrics-server - installed with Flux + - metrics-server + disable-network-policy: true + disable-cloud-controller: true + write-kubeconfig-mode: "644" + # Network CIDR to use for pod IPs + cluster-cidr: "10.42.0.0/16" + # Network CIDR to use for service IPs + service-cidr: "10.43.0.0/16" + kube-controller-manager-arg: + # Required to monitor kube-controller-manager with kube-prometheus-stack + - "bind-address=0.0.0.0" + kube-proxy-arg: + # Required to monitor kube-proxy with kube-prometheus-stack + - "metrics-bind-address=0.0.0.0" + kube-scheduler-arg: + # Required to monitor kube-scheduler with kube-prometheus-stack + - "bind-address=0.0.0.0" + # Required to monitor etcd with kube-prometheus-stack + etcd-expose-metrics: true + kube-apiserver-arg: + # Required for HAProxy health-checks + - "anonymous-auth=true" diff --git a/ansible/inventory/group_vars/worker/k3s.yml b/ansible/inventory/group_vars/worker/k3s.yml new file mode 100644 index 000000000..d2a44e054 --- /dev/null +++ b/ansible/inventory/group_vars/worker/k3s.yml @@ -0,0 +1,10 @@ +--- +# https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/ +# https://github.com/PyratLabs/ansible-role-k3s + +# (bool) Specify if a host (or host group) are part of the control plane +k3s_control_node: false + +# (dict) k3s settings for all worker nodes +k3s_agent: + node-ip: "{{ ansible_host }}" diff --git a/ansible/inventory/host_vars/.gitkeep b/ansible/inventory/host_vars/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/ansible/playbooks/cluster-installation.yml b/ansible/playbooks/cluster-installation.yml new file mode 100644 index 000000000..7f93264a0 --- /dev/null +++ b/ansible/playbooks/cluster-installation.yml @@ -0,0 +1,119 @@ +--- +- hosts: + - master + - worker + become: true + gather_facts: true + any_errors_fatal: true + pre_tasks: + - name: Pausing for 5 seconds... 
+ ansible.builtin.pause: + seconds: 5 + tasks: + - name: Check if cluster is installed + check_mode: false + ansible.builtin.stat: + path: "/etc/rancher/k3s/config.yaml" + register: k3s_check_installed + + - name: Set manifest facts + ansible.builtin.set_fact: + k3s_server_manifests_templates: [] + k3s_server_manifests_urls: [] + when: k3s_check_installed.stat.exists + + - name: Install Kubernetes + ansible.builtin.include_role: + name: xanmanning.k3s + public: true + vars: + k3s_state: installed + + - name: Get absolute path to this Git repository + delegate_to: localhost + become: false + run_once: true + check_mode: false + ansible.builtin.command: |- + git rev-parse --show-toplevel + register: repo_abs_path + + - name: Copy kubeconfig project directory + run_once: true + ansible.builtin.fetch: + src: "/etc/rancher/k3s/k3s.yaml" + dest: "{{ repo_abs_path.stdout }}/kubeconfig" + flat: true + when: + - k3s_control_node is defined + - k3s_control_node + + - name: Update kubeconfig with the correct IPv4 address + delegate_to: localhost + become: false + run_once: true + ansible.builtin.replace: + path: "{{ repo_abs_path.stdout }}/kubeconfig" + regexp: "https://127.0.0.1:6443" + replace: "https://{{ k3s_registration_address }}:6443" + + - name: Resource Readiness Check + run_once: true + kubernetes.core.k8s_info: + kubeconfig: /etc/rancher/k3s/k3s.yaml + kind: "{{ item.kind }}" + name: "{{ item.name }}" + namespace: "{{ item.namespace | default('') }}" + wait: true + wait_sleep: 10 + wait_timeout: 360 + loop: + - kind: Deployment + name: tigera-operator + namespace: tigera-operator + - kind: DaemonSet + name: kube-vip + namespace: kube-system + - kind: Installation + name: default + - kind: CustomResourceDefinition + name: alertmanagerconfigs.monitoring.coreos.com + - kind: CustomResourceDefinition + name: alertmanagers.monitoring.coreos.com + - kind: CustomResourceDefinition + name: podmonitors.monitoring.coreos.com + - kind: CustomResourceDefinition + name: probes.monitoring.coreos.com + - kind: CustomResourceDefinition + name: prometheuses.monitoring.coreos.com + - kind: CustomResourceDefinition + name: prometheusrules.monitoring.coreos.com + - kind: CustomResourceDefinition + name: servicemonitors.monitoring.coreos.com + - kind: CustomResourceDefinition + name: thanosrulers.monitoring.coreos.com + when: + - k3s_server_manifests_templates | length > 0 + or k3s_server_manifests_urls | length > 0 + - k3s_control_node is defined + - k3s_control_node + + # Cleaning up the manifests from the /var/lib/rancher/k3s/server/manifests + # directory is needed because k3s has an awesome "feature" to always deploy + # these on restarting the k3s systemd service. Removing them does NOT + # uninstall the manifests. 
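  # (Hedged aside, not part of the original playbook.) Once the kubeconfig has
  # been fetched to the repo root and its server address rewritten to the
  # kube-vip registration address, a quick smoke test from the workstation
  # might look like this, assuming direnv has exported KUBECONFIG as set up in .envrc:
  #   kubectl get nodes -o wide
  #   kubectl -n tigera-operator rollout status deployment/tigera-operator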
+ + # Removing them means we can manage the lifecycle of these components + # outside of the /var/lib/rancher/k3s/server/manifests directory + + - name: Remove deployed manifest templates + ansible.builtin.file: + path: "{{ k3s_server_manifests_dir }}/{{ item | basename | regex_replace('\\.j2$', '') }}" + state: absent + loop: "{{ k3s_server_manifests_templates | default([]) }}" + + - name: Remove deployed manifest urls + ansible.builtin.file: + path: "{{ k3s_server_manifests_dir }}/{{ item.filename }}" + state: absent + loop: "{{ k3s_server_manifests_urls | default([]) }}" diff --git a/ansible/playbooks/cluster-nuke.yml b/ansible/playbooks/cluster-nuke.yml new file mode 100644 index 000000000..eada19707 --- /dev/null +++ b/ansible/playbooks/cluster-nuke.yml @@ -0,0 +1,41 @@ +--- +- hosts: + - master + - worker + become: true + gather_facts: true + any_errors_fatal: true + vars_prompt: + - name: nuke + prompt: |- + Are you sure you want to nuke this cluster? + Type YES I WANT TO DESTROY THIS CLUSTER to proceed + default: "n" + private: false + pre_tasks: + - name: Check for confirmation + ansible.builtin.fail: + msg: Aborted nuking the cluster + when: nuke != 'YES I WANT TO DESTROY THIS CLUSTER' + + - name: Pausing for 5 seconds... + ansible.builtin.pause: + seconds: 5 + tasks: + - name: Uninstall k3s + ansible.builtin.include_role: + name: xanmanning.k3s + public: true + vars: + k3s_state: uninstalled + - name: Gather list of CNI files + ansible.builtin.find: + paths: /etc/cni/net.d + patterns: "*" + hidden: true + register: directory_contents + - name: Delete CNI files + ansible.builtin.file: + path: "{{ item.path }}" + state: absent + loop: "{{ directory_contents.files }}" diff --git a/ansible/playbooks/cluster-prepare.yml b/ansible/playbooks/cluster-prepare.yml new file mode 100644 index 000000000..2d822638e --- /dev/null +++ b/ansible/playbooks/cluster-prepare.yml @@ -0,0 +1,135 @@ +--- +- hosts: + - master + - worker + become: true + gather_facts: true + any_errors_fatal: true + pre_tasks: + - name: Pausing for 5 seconds... 
+ ansible.builtin.pause: + seconds: 5 + tasks: + - name: Locale + block: + - name: Locale | Set timezone + community.general.timezone: + name: "{{ timezone | default('Etc/UTC') }}" + - name: Networking + block: + - name: Networking | Set hostname to inventory hostname + ansible.builtin.hostname: + name: "{{ inventory_hostname }}" + - name: Networking | Update /etc/hosts to include inventory hostname + ansible.builtin.blockinfile: + path: /etc/hosts + block: | + 127.0.1.1 {{ inventory_hostname }} + - name: Packages | Fedora + block: + - name: Packages | Install required packages + ansible.builtin.dnf: + name: "{{ fedora.packages | default([]) }}" + state: present + update_cache: true + - name: Packages | Remove leaf packages + ansible.builtin.dnf: + autoremove: true + when: ansible_facts['distribution'] == 'Fedora' + - name: Packages | Ubuntu + block: + - name: Packages | Install required packages + ansible.builtin.apt: + name: "{{ ubuntu.packages | default([]) }}" + state: present + update_cache: true + - name: Packages | Remove leaf packages + ansible.builtin.apt: + autoremove: true + when: ansible_facts['distribution'] == 'Ubuntu' + - name: User Configuration + block: + - name: User Configuration | Add additional SSH public keys + ansible.posix.authorized_key: + user: "{{ ansible_user }}" + key: "{{ item }}" + loop: "{{ public_ssh_keys | default([]) }}" + - name: System Configuration (1) + block: + - name: System Configuration (1) | Disable firewalld | Fedora + ansible.builtin.systemd: + service: firewalld.service + enabled: false + masked: true + state: stopped + when: ansible_facts['distribution'] == 'Fedora' + - name: System Configuration (1) | Disable ufw | Ubuntu + ansible.builtin.systemd: + service: ufw.service + enabled: false + masked: true + state: stopped + when: ansible_facts['distribution'] == 'Ubuntu' + - name: System Configuration (1) | Enable fstrim + ansible.builtin.systemd: + service: fstrim.timer + enabled: true + - name: System Configuration (2) + block: + - name: System Configuration (2) | Enable kernel modules now + community.general.modprobe: + name: "{{ item }}" + state: present + loop: [br_netfilter, overlay, rbd] + - name: System Configuration (2) | Enable kernel modules on boot + ansible.builtin.copy: + mode: 0644 + content: "{{ item }}" + dest: "/etc/modules-load.d/{{ item }}.conf" + loop: [br_netfilter, overlay, rbd] + - name: System Configuration (2) | Set sysctls + ansible.posix.sysctl: + name: "{{ item.key }}" + value: "{{ item.value }}" + sysctl_file: /etc/sysctl.d/99-kubernetes.conf + reload: true + with_dict: "{{ sysctl_config }}" + vars: + sysctl_config: + net.ipv4.ip_forward: 1 + net.ipv4.conf.all.forwarding: 1 + net.ipv4.conf.all.rp_filter: 0 + net.ipv4.conf.default.rp_filter: 0 + net.ipv6.conf.all.forwarding: 1 + net.bridge.bridge-nf-call-iptables: 1 + net.bridge.bridge-nf-call-ip6tables: 1 + fs.inotify.max_user_watches: 524288 + fs.inotify.max_user_instances: 512 + - name: System Configuration (2) | Disable swap | Fedora + ansible.builtin.dnf: + name: zram-generator-defaults + state: absent + when: ansible_facts['distribution'] == 'Fedora' + - name: System Configuration (2) | Disable swap at runtime | Ubuntu + ansible.builtin.command: swapoff -a + when: + - ansible_facts['distribution'] == 'Ubuntu' + - ansible_swaptotal_mb > 0 + - name: System Configuration (2) | Disable swap at boot | Ubuntu + ansible.posix.mount: + name: "{{ item }}" + fstype: swap + state: absent + loop: ["none", "swap"] + when: ansible_facts['distribution'] == 'Ubuntu' + - name: 
System Configuration (2) | Permissive SELinux | Fedora + ansible.posix.selinux: + state: permissive + policy: targeted + when: ansible_facts['distribution'] == 'Fedora' + notify: Reboot + + handlers: + - name: Reboot + ansible.builtin.reboot: + msg: Rebooting nodes diff --git a/ansible/playbooks/cluster-reboot.yml b/ansible/playbooks/cluster-reboot.yml new file mode 100644 index 000000000..4f109d3ac --- /dev/null +++ b/ansible/playbooks/cluster-reboot.yml @@ -0,0 +1,15 @@ +--- +- hosts: + - master + - worker + become: true + gather_facts: true + any_errors_fatal: true + pre_tasks: + - name: Pausing for 5 seconds... + ansible.builtin.pause: + seconds: 5 + tasks: + - name: Reboot + ansible.builtin.reboot: + msg: Rebooting nodes diff --git a/ansible/playbooks/templates/calico-installation.yaml.j2 b/ansible/playbooks/templates/calico-installation.yaml.j2 new file mode 100644 index 000000000..8e28a9225 --- /dev/null +++ b/ansible/playbooks/templates/calico-installation.yaml.j2 @@ -0,0 +1,22 @@ +--- +apiVersion: operator.tigera.io/v1 +kind: Installation +metadata: + name: default +spec: + registry: quay.io + imagePath: calico + calicoNetwork: + # https://projectcalico.docs.tigera.io/networking/ip-autodetection + nodeAddressAutodetectionV4: + cidrs: + - "{{ (ansible_default_ipv4.network + '/' + ansible_default_ipv4.netmask) | ansible.utils.ipaddr('network/prefix') }}" + # Note: The ipPools section cannot be modified post-install. + ipPools: + - blockSize: 26 + cidr: "{{ k3s_server['cluster-cidr'] }}" + encapsulation: "VXLANCrossSubnet" + natOutgoing: Enabled + nodeSelector: all() + nodeMetricsPort: 9091 + typhaMetricsPort: 9093 diff --git a/ansible/playbooks/templates/kube-vip-daemonset.yaml.j2 b/ansible/playbooks/templates/kube-vip-daemonset.yaml.j2 new file mode 100644 index 000000000..374add90a --- /dev/null +++ b/ansible/playbooks/templates/kube-vip-daemonset.yaml.j2 @@ -0,0 +1,72 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-vip + namespace: kube-system + labels: + app.kubernetes.io/instance: kube-vip + app.kubernetes.io/name: kube-vip +spec: + selector: + matchLabels: + app.kubernetes.io/instance: kube-vip + app.kubernetes.io/name: kube-vip + template: + metadata: + labels: + app.kubernetes.io/instance: kube-vip + app.kubernetes.io/name: kube-vip + spec: + containers: + - name: kube-vip + image: ghcr.io/kube-vip/kube-vip:v0.5.7 + imagePullPolicy: IfNotPresent + args: ["manager"] + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: svc_enable + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: "{{ k3s_registration_address }}" + securityContext: + capabilities: + add: ["NET_ADMIN", "NET_RAW"] + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + hostNetwork: true + serviceAccountName: kube-vip + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists diff --git a/ansible/requirements.yml b/ansible/requirements.yml new file mode 100644 index 000000000..49d1b82ea 
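# (Hedged usage note, not part of the diff.) The collections and the
# xanmanning.k3s role pinned in ansible/requirements.yml below would normally
# be installed before running any of the playbooks, for example:
#   ansible-galaxy collection install -r ansible/requirements.yml
#   ansible-galaxy role install -r ansible/requirements.yml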
--- /dev/null +++ b/ansible/requirements.yml @@ -0,0 +1,16 @@ +--- +collections: + - name: community.general + version: 6.1.0 + - name: community.sops + version: 1.5.0 + - name: ansible.posix + version: 1.4.0 + - name: ansible.utils + version: 2.8.0 + - name: kubernetes.core + version: 2.3.2 +roles: + - name: xanmanning.k3s + src: https://github.com/PyratLabs/ansible-role-k3s.git + version: v3.3.1 diff --git a/configure b/configure new file mode 100755 index 000000000..69f6cec27 --- /dev/null +++ b/configure @@ -0,0 +1,518 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail + +# shellcheck disable=SC2155 +export PROJECT_DIR=$(git rev-parse --show-toplevel) + +# shellcheck disable=SC2155 +export SOPS_AGE_KEY_FILE=~/.config/sops/age/keys.txt + +# shellcheck disable=SC1091 +source "${PROJECT_DIR}/.config.env" + +show_help() { +cat << EOF +Usage: $(basename "$0") + -h, --help Display help + --verify Verify .config.env settings +EOF +} + +main() { + local verify= + + parse_command_line "$@" + + verify_binaries + + if [[ "${verify}" == 1 ]]; then + verify_ansible_hosts + verify_metallb + verify_kubevip + verify_addressing + verify_age + verify_git_repository + verify_cloudflare + verify_success + else + # generate sops configuration file + envsubst < "${PROJECT_DIR}/tmpl/.sops.yaml" \ + > "${PROJECT_DIR}/.sops.yaml" + + # generate cluster settings + envsubst < "${PROJECT_DIR}/tmpl/kubernetes/flux/cluster-settings.yaml" \ + > "${PROJECT_DIR}/kubernetes/flux/vars/cluster-settings.yaml" + + envsubst < "${PROJECT_DIR}/tmpl/kubernetes/flux/cluster.yaml" \ + > "${PROJECT_DIR}/kubernetes/flux/config/cluster.yaml" + + # generate secrets + envsubst < "${PROJECT_DIR}/tmpl/kubernetes/cluster-secrets.sops.yaml" \ + > "${PROJECT_DIR}/kubernetes/flux/vars/cluster-secrets.sops.yaml" + sops --encrypt --in-place "${PROJECT_DIR}/kubernetes/flux/vars/cluster-secrets.sops.yaml" + + envsubst < "${PROJECT_DIR}/tmpl/kubernetes/cert-manager-secret.sops.yaml" \ + > "${PROJECT_DIR}/kubernetes/apps/cert-manager/cert-manager/issuers/secret.sops.yaml" + sops --encrypt --in-place "${PROJECT_DIR}/kubernetes/apps/cert-manager/cert-manager/issuers/secret.sops.yaml" + + envsubst < "${PROJECT_DIR}/tmpl/kubernetes/cloudflare-ddns-secret.sops.yaml" \ + > "${PROJECT_DIR}/kubernetes/apps/networking/cloudflare-ddns/app/secret.sops.yaml" + sops --encrypt --in-place "${PROJECT_DIR}/kubernetes/apps/networking/cloudflare-ddns/app/secret.sops.yaml" + + envsubst < "${PROJECT_DIR}/tmpl/kubernetes/external-dns-secret.sops.yaml" \ + > "${PROJECT_DIR}/kubernetes/apps/networking/external-dns/app/secret.sops.yaml" + sops --encrypt --in-place "${PROJECT_DIR}/kubernetes/apps/networking/external-dns/app/secret.sops.yaml" + + envsubst < "${PROJECT_DIR}/tmpl/terraform/secret.sops.yaml" \ + > "${PROJECT_DIR}/terraform/cloudflare/secret.sops.yaml" + sops --encrypt --in-place "${PROJECT_DIR}/terraform/cloudflare/secret.sops.yaml" + + # generate ansible settings + envsubst < "${PROJECT_DIR}/tmpl/ansible/kube-vip.yml" \ + > "${PROJECT_DIR}/ansible/inventory/group_vars/kubernetes/kube-vip.yml" + + # generate ansible hosts file and secrets + generate_ansible_hosts + generate_ansible_host_secrets + setup_github_webhook + setup_weave_gitops + success + fi +} + +parse_command_line() { + while :; do + case "${1:-}" in + -h|--help) + show_help + exit + ;; + --verify) + verify=1 + ;; + *) + break + ;; + esac + + shift + done + + if [[ -z "$verify" ]]; then + verify=0 + fi +} + +_has_binary() { + command -v "${1}" >/dev/null 2>&1 || { + _log "ERROR" "${1} 
is not installed or not found in \$PATH" + exit 1 + } +} + +_has_optional_envar() { + local option="${1}" + # shellcheck disable=SC2015 + [[ "${!option}" == "" ]] && { + _log "WARN" "Unset optional variable ${option}" + } || { + _log "INFO" "Found variable '${option}' with value '${!option}'" + } +} + +_has_envar() { + local option="${1}" + # shellcheck disable=SC2015 + [[ "${!option}" == "" ]] && { + _log "ERROR" "Unset variable ${option}" + exit 1 + } || { + _log "INFO" "Found variable '${option}' with value '${!option}'" + } +} + +_has_valid_ip() { + local ip="${1}" + local variable_name="${2}" + + if ! ipcalc "${ip}" | awk 'BEGIN{FS=":"; is_invalid=0} /^INVALID/ {is_invalid=1; print $1} END{exit is_invalid}' >/dev/null 2>&1; then + _log "ERROR" "Variable '${variable_name}' has an invalid IP address '${ip}'" + exit 1 + else + _log "INFO" "Variable '${variable_name}' has a valid IP address '${ip}'" + fi +} + +verify_addressing() { + local found_kube_vip="false" + local found_k8s_gateway="false" + local found_ingress="false" + + # Verify the metallb min and metallb ceiling are in the same network + metallb_subnet_min=$(echo "${BOOTSTRAP_METALLB_LB_RANGE}" | cut -d- -f1 | cut -d. -f1,2,3) + metallb_subnet_ceil=$(echo "${BOOTSTRAP_METALLB_LB_RANGE}" | cut -d- -f2 | cut -d. -f1,2,3) + if [[ "${metallb_subnet_min}" != "${metallb_subnet_ceil}" ]]; then + _log "ERROR" "The provided MetalLB IP range '${BOOTSTRAP_METALLB_LB_RANGE}' is not in the same subnet" + exit 1 + fi + + # Verify the node IP addresses are on the same network as the metallb range + for var in "${!BOOTSTRAP_ANSIBLE_HOST_ADDR_@}"; do + node_subnet=$(echo "${!var}" | cut -d. -f1,2,3) + if [[ "${node_subnet}" != "${metallb_subnet_min}" ]]; then + _log "ERROR" "The subnet for node '${!var}' is not in the same subnet as the provided metallb range '${BOOTSTRAP_METALLB_LB_RANGE}'" + exit 1 + fi + done + + # Verify the kube-vip IP is in the same network as the metallb range + kubevip_subnet=$(echo "${BOOTSTRAP_KUBE_VIP_ADDR}" | cut -d. -f1,2,3) + if [[ "${kubevip_subnet}" != "${metallb_subnet_min}" ]]; then + _log "ERROR" "The subnet for kube-vip '${BOOTSTRAP_KUBE_VIP_ADDR}' is not in the same subnet as the provided metallb range '${BOOTSTRAP_METALLB_LB_RANGE}'" + exit 1 + fi + + # Depending on the IP address, verify if it should be in the metallb range or not + metallb_octet_min=$(echo "${BOOTSTRAP_METALLB_LB_RANGE}" | cut -d- -f1 | cut -d. -f4) + metallb_octet_ceil=$(echo "${BOOTSTRAP_METALLB_LB_RANGE}" | cut -d- -f2 | cut -d.
-f4) + for (( octet=metallb_octet_min; octet<=metallb_octet_ceil; octet++ )); do + addr="${metallb_subnet_min}.${octet}" + if [[ "${addr}" == "${BOOTSTRAP_KUBE_VIP_ADDR}" ]]; then + found_kube_vip="true" + fi + if [[ "${addr}" == "${BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR}" ]]; then + found_k8s_gateway="true" + fi + if [[ "${addr}" == "${BOOTSTRAP_METALLB_INGRESS_ADDR}" ]]; then + found_ingress="true" + fi + + for var in "${!BOOTSTRAP_ANSIBLE_HOST_ADDR_@}"; do + if [[ "${!var}" == "${addr}" ]]; then + _log "ERROR" "The IP for node '${!var}' should NOT be in the provided metallb range '${BOOTSTRAP_METALLB_LB_RANGE}'" + exit 1 + fi + done + done + + if [[ "${found_kube_vip}" == "true" ]]; then + _log "ERROR" "The IP for kube-vip '${BOOTSTRAP_KUBE_VIP_ADDR}' should NOT be in the provided metallb range '${BOOTSTRAP_METALLB_LB_RANGE}'" + exit 1 + fi + + if [[ "${found_k8s_gateway}" == "false" ]]; then + _log "ERROR" "The IP for k8s_gateway '${BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR}' should be in the provided metallb range '${BOOTSTRAP_METALLB_LB_RANGE}'" + exit 1 + fi + + if [[ "${found_ingress}" == "false" ]]; then + _log "ERROR" "The IP for ingress '${BOOTSTRAP_METALLB_INGRESS_ADDR}' should be in the provided metallb range '${BOOTSTRAP_METALLB_LB_RANGE}'" + exit 1 + fi +} + +verify_age() { + _has_envar "BOOTSTRAP_AGE_PUBLIC_KEY" + _has_envar "SOPS_AGE_KEY_FILE" + + if [[ ! "$BOOTSTRAP_AGE_PUBLIC_KEY" =~ ^age.* ]]; then + _log "ERROR" "BOOTSTRAP_AGE_PUBLIC_KEY does not start with age" + exit 1 + else + _log "INFO" "Age public key is in the correct format" + fi + + if [[ ! -f ~/.config/sops/age/keys.txt ]]; then + _log "ERROR" "Unable to find Age file keys.txt in ~/.config/sops/age" + exit 1 + else + _log "INFO" "Found Age public key '${BOOTSTRAP_AGE_PUBLIC_KEY}'" + fi +} + +verify_binaries() { + _has_binary "ansible" + _has_binary "envsubst" + _has_binary "flux" + _has_binary "git" + _has_binary "age" + _has_binary "helm" + _has_binary "ipcalc" + _has_binary "jq" + _has_binary "gitops" + _has_binary "yq" + _has_binary "sops" + _has_binary "ssh" + _has_binary "task" + _has_binary "terraform" + _has_binary "tflint" + + if ! [[ "$(sops --version)" =~ 3\.[0-9]+\.[0-9]+ ]]; then + _log "ERROR" "Incompatible sops version, make sure you are using the latest release of github.com/mozilla/sops" + exit 1 + fi + + if ! 
[[ "$(yq --version)" =~ 4\.[0-9]+\.[0-9]+ ]]; then + _log "ERROR" "Incompatible yq version, make sure you are using the latest release of github.com/mikefarah/yq" + exit 1 + fi +} + +verify_kubevip() { + _has_envar "BOOTSTRAP_KUBE_VIP_ADDR" + _has_valid_ip "${BOOTSTRAP_KUBE_VIP_ADDR}" "BOOTSTRAP_KUBE_VIP_ADDR" +} + +verify_metallb() { + local ip_floor= + local ip_ceil= + _has_envar "BOOTSTRAP_METALLB_LB_RANGE" + _has_envar "BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR" + _has_envar "BOOTSTRAP_METALLB_INGRESS_ADDR" + + ip_floor=$(echo "${BOOTSTRAP_METALLB_LB_RANGE}" | cut -d- -f1) + ip_ceil=$(echo "${BOOTSTRAP_METALLB_LB_RANGE}" | cut -d- -f2) + + _has_valid_ip "${ip_floor}" "BOOTSTRAP_METALLB_LB_RANGE" + _has_valid_ip "${ip_ceil}" "BOOTSTRAP_METALLB_LB_RANGE" + _has_valid_ip "${BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR}" "BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR" + _has_valid_ip "${BOOTSTRAP_METALLB_INGRESS_ADDR}" "BOOTSTRAP_METALLB_INGRESS_ADDR" +} + +verify_git_repository() { + _has_envar "BOOTSTRAP_GIT_REPOSITORY" + + export GIT_TERMINAL_PROMPT=0 + pushd "$(mktemp -d)" >/dev/null 2>&1 + [ "$(git ls-remote "${BOOTSTRAP_GIT_REPOSITORY}" 2> /dev/null)" ] || { + _log "ERROR" "Unable to find the remote Git repository '${BOOTSTRAP_GIT_REPOSITORY}'" + exit 1 + } + popd >/dev/null 2>&1 + export GIT_TERMINAL_PROMPT=1 +} + +setup_github_webhook() { + _has_envar "BOOTSTRAP_FLUX_GITHUB_WEBHOOK_SECRET" + + WEBHOOK_SECRET="${BOOTSTRAP_FLUX_GITHUB_WEBHOOK_SECRET}" + + if [[ "${WEBHOOK_SECRET}" == "generated" ]]; then + WEBHOOK_SECRET="$(openssl rand -base64 30)" + fi + + export BOOTSTRAP_FLUX_GITHUB_WEBHOOK_SECRET="${WEBHOOK_SECRET}" + _log "INFO" "Using GitHub Token '${WEBHOOK_SECRET}' for Flux" + + envsubst < "${PROJECT_DIR}/tmpl/kubernetes/github-webhook-token-secret.sops.yaml" \ + > "${PROJECT_DIR}/kubernetes/apps/flux-system/addons/webhooks/github/secret.sops.yaml" + sops --encrypt --in-place "${PROJECT_DIR}/kubernetes/apps/flux-system/addons/webhooks/github/secret.sops.yaml" +} + +setup_weave_gitops() { + _has_envar "BOOTSTRAP_WEAVE_GITOPS_ADMIN_PASSWORD" + + WEAVE_GITOPS_ADMIN_PASSWORD="${BOOTSTRAP_WEAVE_GITOPS_ADMIN_PASSWORD}" + + if [[ "${WEAVE_GITOPS_ADMIN_PASSWORD}" == "generated" ]]; then + WEAVE_GITOPS_ADMIN_PASSWORD="$(openssl rand -base64 30)" + fi + + export BOOTSTRAP_WEAVE_GITOPS_ADMIN_PASSWORD="${WEAVE_GITOPS_ADMIN_PASSWORD}" + _log "INFO" "Using admin password '${WEAVE_GITOPS_ADMIN_PASSWORD}' for Weave Gitops" + + # Convert password to bcrypt hash + export BOOTSTRAP_WEAVE_GITOPS_ADMIN_PASSWORD="$(echo -n "${BOOTSTRAP_WEAVE_GITOPS_ADMIN_PASSWORD}" | gitops get bcrypt-hash)" + + envsubst < "${PROJECT_DIR}/tmpl/kubernetes/weave-gitops-secret.sops.yaml" \ + > "${PROJECT_DIR}/kubernetes/apps/flux-system/weave-gitops/app/secret.sops.yaml" + sops --encrypt --in-place "${PROJECT_DIR}/kubernetes/apps/flux-system/weave-gitops/app/secret.sops.yaml" +} + +verify_cloudflare() { + local account_zone= + local errors= + + _has_envar "BOOTSTRAP_CLOUDFLARE_APIKEY" + _has_envar "BOOTSTRAP_CLOUDFLARE_DOMAIN" + _has_envar "BOOTSTRAP_CLOUDFLARE_EMAIL" + + # Try to retrieve zone information from Cloudflare's API + account_zone=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=${BOOTSTRAP_CLOUDFLARE_DOMAIN}&status=active" \ + -H "X-Auth-Email: ${BOOTSTRAP_CLOUDFLARE_EMAIL}" \ + -H "X-Auth-Key: ${BOOTSTRAP_CLOUDFLARE_APIKEY}" \ + -H "Content-Type: application/json" + ) + + if [[ "$(echo "${account_zone}" | jq ".success")" == "true" ]]; then + _log "INFO" "Verified Cloudflare Account and Zone information" + else 
+ errors=$(echo "${account_zone}" | jq -c ".errors") + _log "ERROR" "Unable to get Cloudflare Account and Zone information ${errors}" + exit 1 + fi +} + +verify_ansible_hosts() { + local node_id= + local node_addr= + local node_username= + local node_password= + local node_control= + local node_hostname= + local default_control_node_prefix= + local default_worker_node_prefix= + + default_control_node_prefix="BOOTSTRAP_ANSIBLE_DEFAULT_CONTROL_NODE_HOSTNAME_PREFIX" + default_worker_node_prefix="BOOTSTRAP_ANSIBLE_DEFAULT_NODE_HOSTNAME_PREFIX" + _has_optional_envar "${default_control_node_prefix}" + _has_optional_envar "${default_worker_node_prefix}" + + for var in "${!BOOTSTRAP_ANSIBLE_HOST_ADDR_@}"; do + node_id=$(echo "${var}" | awk -F"_" '{print $5}') + node_addr="BOOTSTRAP_ANSIBLE_HOST_ADDR_${node_id}" + node_username="BOOTSTRAP_ANSIBLE_SSH_USERNAME_${node_id}" + node_password="BOOTSTRAP_ANSIBLE_SUDO_PASSWORD_${node_id}" + node_control="BOOTSTRAP_ANSIBLE_CONTROL_NODE_${node_id}" + node_hostname="BOOTSTRAP_ANSIBLE_HOSTNAME_${node_id}" + _has_envar "${node_addr}" + _has_envar "${node_username}" + _has_envar "${node_password}" + _has_envar "${node_control}" + _has_optional_envar "${node_hostname}" + + if [[ "${!node_addr}" == "${BOOTSTRAP_KUBE_VIP_ADDR}" && "${BOOTSTRAP_KUBE_VIP_ENABLED}" == "true" ]]; then + _log "ERROR" "The kube-vip IP '${BOOTSTRAP_KUBE_VIP_ADDR}' should not be the same as the IP for node '${!node_addr}'" + exit 1 + fi + + if [[ "${!node_addr}" == "${BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR}" ]]; then + _log "ERROR" "The k8s-gateway load balancer IP '${BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR}' should not be the same as the IP for node '${!node_addr}'" + exit 1 + fi + + if [[ "${!node_addr}" == "${BOOTSTRAP_METALLB_INGRESS_ADDR}" ]]; then + _log "ERROR" "The ingress load balancer IP '${BOOTSTRAP_METALLB_INGRESS_ADDR}' should not be the same as the IP for node '${!node_addr}'" + exit 1 + fi + + if ssh -q -o BatchMode=yes -o ConnectTimeout=5 "${!node_username}"@"${!var}" "true"; then + _log "INFO" "SSH into host '${!var}' with username '${!node_username}' was successful" + else + _log "ERROR" "SSH into host '${!var}' with username '${!node_username}' was NOT successful, did you copy over your SSH key?" + exit 1 + fi + done +} + +verify_success() { + _log "INFO" "All checks passed!"
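  # (Hedged aside, not in the original script.) The per-node SSH reachability
  # test above can be reproduced for a single host; the username and address
  # are placeholders:
  #   ssh -o BatchMode=yes -o ConnectTimeout=5 ubuntu@192.168.1.200 true && echo reachable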
+ _log "INFO" "Run the script without --verify to template all the files out" + exit 0 +} + +generate_ansible_host_secrets() { + local node_id= + local node_username= + local node_password= + local node_hostname= + local node_control= + default_control_node_prefix=${BOOTSTRAP_ANSIBLE_DEFAULT_CONTROL_NODE_HOSTNAME_PREFIX:-k8s-} + default_worker_node_prefix=${BOOTSTRAP_ANSIBLE_DEFAULT_NODE_HOSTNAME_PREFIX:-k8s-} + for var in "${!BOOTSTRAP_ANSIBLE_HOST_ADDR_@}"; do + node_id=$(echo "${var}" | awk -F"_" '{print $5}') + node_control="BOOTSTRAP_ANSIBLE_CONTROL_NODE_${node_id}" + if [[ "${!node_control}" == "true" ]]; then + node_hostname="BOOTSTRAP_ANSIBLE_HOSTNAME_${node_id}" + host_key="${!node_hostname:-${default_control_node_prefix}}" + if [ "${host_key}" == "${default_control_node_prefix}" ]; then + node_hostname=${default_control_node_prefix}${node_id} + else + node_hostname=${!node_hostname} + fi + else + node_hostname="BOOTSTRAP_ANSIBLE_HOSTNAME_${node_id}" + host_key="${!node_hostname:-${default_worker_node_prefix}}" + if [ "${host_key}" == "${default_worker_node_prefix}" ]; then + node_hostname=${default_worker_node_prefix}${node_id} + else + node_hostname=${!node_hostname} + fi + fi + { + node_username="BOOTSTRAP_ANSIBLE_SSH_USERNAME_${node_id}" + node_password="BOOTSTRAP_ANSIBLE_SUDO_PASSWORD_${node_id}" + printf "kind: Secret\n" + printf "ansible_user: %s\n" "${!node_username}" + printf "ansible_become_pass: %s\n" "${!node_password}" + } > "${PROJECT_DIR}/ansible/inventory/host_vars/${node_hostname}.sops.yml" + sops --encrypt --in-place "${PROJECT_DIR}/ansible/inventory/host_vars/${node_hostname}.sops.yml" + done +} + +generate_ansible_hosts() { + local worker_node_count= + default_control_node_prefix=${BOOTSTRAP_ANSIBLE_DEFAULT_CONTROL_NODE_HOSTNAME_PREFIX:-k8s-} + default_worker_node_prefix=${BOOTSTRAP_ANSIBLE_DEFAULT_NODE_HOSTNAME_PREFIX:-k8s-} + { + printf -- "---\n" + printf "kubernetes:\n" + printf " children:\n" + printf " master:\n" + printf " hosts:\n" + master_node_count=0 + worker_node_count=0 + for var in "${!BOOTSTRAP_ANSIBLE_HOST_ADDR_@}"; do + node_id=$(echo "${var}" | awk -F"_" '{print $5}') + node_control="BOOTSTRAP_ANSIBLE_CONTROL_NODE_${node_id}" + if [[ "${!node_control}" == "true" ]]; then + master_node_count=$((master_node_count+1)) + node_hostname="BOOTSTRAP_ANSIBLE_HOSTNAME_${node_id}" + host_key="${!node_hostname:-${default_control_node_prefix}}" + if [ "${host_key}" == "${default_control_node_prefix}" ]; then + node_hostname=${default_control_node_prefix}${node_id} + else + node_hostname=${!node_hostname} + fi + printf " %s:\n" "${node_hostname}" + printf " ansible_host: %s\n" "${!var}" + else + worker_node_count=$((worker_node_count+1)) + fi + done + if [[ ${worker_node_count} -gt 0 ]]; then + printf " worker:\n" + printf " hosts:\n" + for var in "${!BOOTSTRAP_ANSIBLE_HOST_ADDR_@}"; do + node_id=$(echo "${var}" | awk -F"_" '{print $5}') + node_control="BOOTSTRAP_ANSIBLE_CONTROL_NODE_${node_id}" + if [[ "${!node_control}" == "false" ]]; then + node_hostname="BOOTSTRAP_ANSIBLE_HOSTNAME_${node_id}" + host_key="${!node_hostname:-${default_worker_node_prefix}}" + if [ "${host_key}" == "${default_worker_node_prefix}" ]; then + node_hostname=${default_worker_node_prefix}${node_id} + else + node_hostname=${!node_hostname} + fi + printf " %s:\n" "${node_hostname}" + printf " ansible_host: %s\n" "${!var}" + fi + done + fi + } > "${PROJECT_DIR}/ansible/inventory/hosts.yml" +} + +success() { + _log "INFO" "All files have been templated, proceed to the next steps 
outlined in the README" + exit 0 +} + +_log() { + local type="${1}" + local msg="${2}" + printf "[%s] [%s] %s\n" "$(date -u)" "${type}" "${msg}" +} + +main "$@" diff --git a/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml b/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml new file mode 100644 index 000000000..47e32b331 --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml @@ -0,0 +1,43 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: cert-manager + namespace: cert-manager +spec: + interval: 15m + chart: + spec: + chart: cert-manager + version: v1.10.1 + sourceRef: + kind: HelmRepository + name: jetstack + namespace: flux-system + install: + createNamespace: true + remediation: + retries: 3 + upgrade: + remediation: + retries: 3 + values: + installCRDs: true + replicaCount: 3 + webhook: + replicaCount: 3 + cainjector: + replicaCount: 3 + extraArgs: + - --dns01-recursive-nameservers=1.1.1.1:53,9.9.9.9:53 + - --dns01-recursive-nameservers-only + podDnsPolicy: None + podDnsConfig: + nameservers: + - "1.1.1.1" + - "9.9.9.9" + prometheus: + enabled: true + servicemonitor: + enabled: true + prometheusInstance: monitoring diff --git a/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml b/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml new file mode 100644 index 000000000..8cd0c1a37 --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: cert-manager +resources: + - ./helmrelease.yaml + - ./prometheusrule.yaml +configMapGenerator: + - name: cert-manager-dashboard + files: + - cert-manager-dashboard.json=https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/cert-manager/dashboards/cert-manager.json +generatorOptions: + disableNameSuffixHash: true + annotations: + kustomize.toolkit.fluxcd.io/substitute: disabled + labels: + grafana_dashboard: "true" diff --git a/kubernetes/apps/cert-manager/cert-manager/app/prometheusrule.yaml b/kubernetes/apps/cert-manager/cert-manager/app/prometheusrule.yaml new file mode 100644 index 000000000..563147967 --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/app/prometheusrule.yaml @@ -0,0 +1,68 @@ +--- +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: cert-manager.rules + namespace: cert-manager +spec: + groups: + - name: cert-manager + rules: + - alert: CertManagerAbsent + expr: | + absent(up{job="cert-manager"}) + for: 15m + labels: + severity: critical + annotations: + description: + "New certificates will not be able to be minted, and existing + ones can't be renewed until cert-manager is back." + runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerabsent + summary: "Cert Manager has disappeared from Prometheus service discovery." + - name: certificates + rules: + - alert: CertManagerCertExpirySoon + expr: | + avg by (exported_namespace, namespace, name) ( + certmanager_certificate_expiration_timestamp_seconds - time()) + < (21 * 24 * 3600) + for: 15m + labels: + severity: warning + annotations: + description: + "The domain that this cert covers will be unavailable after + {{ $value | humanizeDuration }}. Clients using endpoints that this cert + protects will start to fail in {{ $value | humanizeDuration }}."
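              # (Hedged aside, not part of the manifest.) The threshold above is
              # 21 * 24 * 3600 = 1814400 seconds, i.e. the alert fires once a
              # certificate is within 21 days of its expiration timestamp.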
+ runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertexpirysoon + summary: + "The cert {{ $labels.name }} is {{ $value | humanizeDuration }} + from expiry, it should have renewed over a week ago." + - alert: CertManagerCertNotReady + expr: | + max by (name, exported_namespace, namespace, condition) ( + certmanager_certificate_ready_status{condition!="True"} == 1) + for: 15m + labels: + severity: critical + annotations: + description: + "This certificate has not been ready to serve traffic for at least + 10m. If the cert is being renewed or there is another valid cert, the ingress + controller _may_ be able to serve that instead." + runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertnotready + summary: "The cert {{ $labels.name }} is not ready to serve traffic." + - alert: CertManagerHittingRateLimits + expr: | + sum by (host) (rate(certmanager_http_acme_client_request_count{status="429"}[5m])) + > 0 + for: 15m + labels: + severity: critical + annotations: + description: + "Depending on the rate limit, cert-manager may be unable to generate + certificates for up to a week." + runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerhittingratelimits + summary: "Cert manager hitting LetsEncrypt rate limits." diff --git a/kubernetes/apps/cert-manager/cert-manager/issuers/issuers.yaml b/kubernetes/apps/cert-manager/cert-manager/issuers/issuers.yaml new file mode 100644 index 000000000..473fb7b4b --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/issuers/issuers.yaml @@ -0,0 +1,41 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-production +spec: + acme: + server: https://acme-v02.api.letsencrypt.org/directory + email: "${SECRET_CLOUDFLARE_EMAIL}" + privateKeySecretRef: + name: letsencrypt-production + solvers: + - dns01: + cloudflare: + email: "${SECRET_CLOUDFLARE_EMAIL}" + apiKeySecretRef: + name: cert-manager-secret + key: api-key + selector: + dnsZones: + - "${SECRET_DOMAIN}" +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-staging +spec: + acme: + server: https://acme-staging-v02.api.letsencrypt.org/directory + email: "${SECRET_CLOUDFLARE_EMAIL}" + privateKeySecretRef: + name: letsencrypt-staging + solvers: + - dns01: + cloudflare: + email: "${SECRET_CLOUDFLARE_EMAIL}" + apiKeySecretRef: + name: cert-manager-secret + key: api-key + selector: + dnsZones: + - "${SECRET_DOMAIN}" diff --git a/kubernetes/apps/cert-manager/cert-manager/issuers/kustomization.yaml b/kubernetes/apps/cert-manager/cert-manager/issuers/kustomization.yaml new file mode 100644 index 000000000..17754be63 --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/issuers/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./secret.sops.yaml + - ./issuers.yaml diff --git a/kubernetes/apps/cert-manager/cert-manager/ks.yaml b/kubernetes/apps/cert-manager/cert-manager/ks.yaml new file mode 100644 index 000000000..e248e2e61 --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/ks.yaml @@ -0,0 +1,41 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: cluster-apps-cert-manager + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + path: ./kubernetes/apps/cert-manager/cert-manager/app + prune: true + sourceRef: + kind: GitRepository + name: 
home-kubernetes + healthChecks: + - apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + name: cert-manager + namespace: cert-manager + interval: 30m + retryInterval: 1m + timeout: 3m +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: cluster-apps-cert-manager-issuers + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + dependsOn: + - name: cluster-apps-cert-manager + path: ./kubernetes/apps/cert-manager/cert-manager/issuers + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + interval: 30m + retryInterval: 1m + timeout: 3m diff --git a/kubernetes/apps/cert-manager/kustomization.yaml b/kubernetes/apps/cert-manager/kustomization.yaml new file mode 100644 index 000000000..a0a3e5edf --- /dev/null +++ b/kubernetes/apps/cert-manager/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./cert-manager/ks.yaml diff --git a/kubernetes/apps/cert-manager/namespace.yaml b/kubernetes/apps/cert-manager/namespace.yaml new file mode 100644 index 000000000..ed788350f --- /dev/null +++ b/kubernetes/apps/cert-manager/namespace.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: cert-manager + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/default/echo-server/app/helmrelease.yaml b/kubernetes/apps/default/echo-server/app/helmrelease.yaml new file mode 100644 index 000000000..6d417c0b7 --- /dev/null +++ b/kubernetes/apps/default/echo-server/app/helmrelease.yaml @@ -0,0 +1,81 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: &app echo-server + namespace: default +spec: + interval: 15m + chart: + spec: + chart: app-template + version: 1.2.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + createNamespace: true + remediation: + retries: 3 + upgrade: + remediation: + retries: 3 + values: + controller: + replicas: 1 + strategy: RollingUpdate + image: + repository: docker.io/jmalloc/echo-server + tag: 0.3.4 + service: + main: + ports: + http: + port: &port 8080 + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /health + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + startup: + enabled: false + ingress: + main: + enabled: true + ingressClassName: nginx + annotations: + cert-manager.io/cluster-issuer: letsencrypt-staging + external-dns.alpha.kubernetes.io/target: "ipv4.${SECRET_DOMAIN}" + external-dns.home.arpa/enabled: "true" + hajimari.io/icon: video-input-antenna + hosts: + - host: &host "{{ .Release.Name }}.${SECRET_DOMAIN}" + paths: + - path: / + pathType: Prefix + tls: + - hosts: + - *host + secretName: echo-server-tls + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + app.kubernetes.io/name: *app + resources: + requests: + cpu: 5m + memory: 10Mi + limits: + memory: 50Mi diff --git a/kubernetes/apps/default/echo-server/app/kustomization.yaml b/kubernetes/apps/default/echo-server/app/kustomization.yaml new file mode 100644 index 000000000..c0cd21834 --- /dev/null +++ b/kubernetes/apps/default/echo-server/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: default +resources: + - ./helmrelease.yaml 
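# (Hedged usage sketch, not part of the diff.) Once Flux reconciles the
# echo-server app, the /health probe wired into the HelmRelease above can be
# exercised directly; the Deployment name "echo-server" is an assumption based
# on the app-template release name:
#   kubectl -n default port-forward deploy/echo-server 8080:8080 &
#   curl -s http://localhost:8080/health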
diff --git a/kubernetes/apps/default/echo-server/ks.yaml b/kubernetes/apps/default/echo-server/ks.yaml new file mode 100644 index 000000000..30baeb948 --- /dev/null +++ b/kubernetes/apps/default/echo-server/ks.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: cluster-apps-echo-server + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + path: ./kubernetes/apps/default/echo-server/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + healthChecks: + - apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + name: echo-server + namespace: default + interval: 30m + retryInterval: 1m + timeout: 3m diff --git a/kubernetes/apps/default/hajimari/app/helmrelease.yaml b/kubernetes/apps/default/hajimari/app/helmrelease.yaml new file mode 100644 index 000000000..aef6b8e84 --- /dev/null +++ b/kubernetes/apps/default/hajimari/app/helmrelease.yaml @@ -0,0 +1,66 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: hajimari + namespace: default +spec: + interval: 15m + chart: + spec: + chart: hajimari + version: 2.0.2 + sourceRef: + kind: HelmRepository + name: hajimari + namespace: flux-system + install: + createNamespace: true + remediation: + retries: 3 + upgrade: + remediation: + retries: 3 + values: + hajimari: + title: Apps + darkTheme: espresso + alwaysTargetBlank: true + showGreeting: false + showAppGroups: false + showAppStatus: false + showBookmarkGroups: false + showGlobalBookmarks: false + showAppUrls: false + defaultEnable: true + namespaceSelector: + matchNames: + - default + ingress: + main: + enabled: true + ingressClassName: nginx + annotations: + cert-manager.io/cluster-issuer: letsencrypt-staging + nginx.ingress.kubernetes.io/whitelist-source-range: | + 10.0.0.0/8,172.16.0.0/12,192.168.0.0/16 + hajimari.io/enable: "false" + hosts: + - host: &host "hajimari.${SECRET_DOMAIN}" + paths: + - path: / + pathType: Prefix + tls: + - hosts: + - *host + secretName: hajimari-tls + podAnnotations: + configmap.reloader.stakater.com/reload: hajimari-settings + persistence: + data: + enabled: true + type: emptyDir + resources: + requests: + cpu: 100m + memory: 128M diff --git a/kubernetes/apps/default/hajimari/app/kustomization.yaml b/kubernetes/apps/default/hajimari/app/kustomization.yaml new file mode 100644 index 000000000..c0cd21834 --- /dev/null +++ b/kubernetes/apps/default/hajimari/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: default +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/default/hajimari/ks.yaml b/kubernetes/apps/default/hajimari/ks.yaml new file mode 100644 index 000000000..0e1dba454 --- /dev/null +++ b/kubernetes/apps/default/hajimari/ks.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: cluster-apps-hajimari + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + path: ./kubernetes/apps/default/hajimari/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + healthChecks: + - apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + name: hajimari + namespace: default + interval: 30m + retryInterval: 1m + timeout: 3m diff --git a/kubernetes/apps/default/kustomization.yaml b/kubernetes/apps/default/kustomization.yaml new file mode 100644 index 000000000..1e28c1695 --- /dev/null +++ 
b/kubernetes/apps/default/kustomization.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./echo-server/ks.yaml + - ./hajimari/ks.yaml diff --git a/kubernetes/apps/default/namespace.yaml b/kubernetes/apps/default/namespace.yaml new file mode 100644 index 000000000..f659b055d --- /dev/null +++ b/kubernetes/apps/default/namespace.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: default + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/flux-system/addons/ks.yaml b/kubernetes/apps/flux-system/addons/ks.yaml new file mode 100644 index 000000000..f412434b6 --- /dev/null +++ b/kubernetes/apps/flux-system/addons/ks.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: cluster-apps-flux-webhooks + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + path: ./kubernetes/apps/flux-system/addons/webhooks + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 3m diff --git a/kubernetes/apps/flux-system/addons/webhooks/github/ingress.yaml b/kubernetes/apps/flux-system/addons/webhooks/github/ingress.yaml new file mode 100644 index 000000000..d999baab8 --- /dev/null +++ b/kubernetes/apps/flux-system/addons/webhooks/github/ingress.yaml @@ -0,0 +1,28 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: webhook-receiver + namespace: flux-system + annotations: + cert-manager.io/cluster-issuer: letsencrypt-staging + external-dns.alpha.kubernetes.io/target: "ipv4.${SECRET_DOMAIN}" + external-dns.home.arpa/enabled: "true" + hajimari.io/enable: "false" +spec: + ingressClassName: nginx + rules: + - host: &host "flux-receiver.${SECRET_DOMAIN}" + http: + paths: + - path: /hook/ + pathType: Prefix + backend: + service: + name: webhook-receiver + port: + number: 80 + tls: + - hosts: + - *host + secretName: flux-receiver-tls diff --git a/kubernetes/apps/flux-system/addons/webhooks/github/kustomization.yaml b/kubernetes/apps/flux-system/addons/webhooks/github/kustomization.yaml new file mode 100644 index 000000000..786e654a5 --- /dev/null +++ b/kubernetes/apps/flux-system/addons/webhooks/github/kustomization.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./secret.sops.yaml + - ./ingress.yaml + - ./receiver.yaml diff --git a/kubernetes/apps/flux-system/addons/webhooks/github/receiver.yaml b/kubernetes/apps/flux-system/addons/webhooks/github/receiver.yaml new file mode 100644 index 000000000..9a8428407 --- /dev/null +++ b/kubernetes/apps/flux-system/addons/webhooks/github/receiver.yaml @@ -0,0 +1,26 @@ +--- +apiVersion: notification.toolkit.fluxcd.io/v1beta1 +kind: Receiver +metadata: + name: github-receiver + namespace: flux-system +spec: + type: github + events: + - ping + - push + secretRef: + name: github-webhook-token-secret + resources: + - apiVersion: source.toolkit.fluxcd.io/v1beta2 + kind: GitRepository + name: home-kubernetes + namespace: flux-system + - apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 + kind: Kustomization + name: cluster + namespace: flux-system + - apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 + kind: Kustomization + name: cluster-apps + namespace: flux-system diff --git a/kubernetes/apps/flux-system/addons/webhooks/kustomization.yaml b/kubernetes/apps/flux-system/addons/webhooks/kustomization.yaml 
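# (Hedged usage sketch, not part of the diff.) After the github-receiver above
# is reconciled, Flux generates a unique webhook path and records it in the
# Receiver status; the GitHub webhook payload URL is that path appended to the
# flux-receiver ingress host:
#   kubectl -n flux-system describe receiver github-receiver
#   # -> https://flux-receiver.<SECRET_DOMAIN>/hook/<generated-token>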
new file mode 100644 index 000000000..ccd8b3eb8 --- /dev/null +++ b/kubernetes/apps/flux-system/addons/webhooks/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./github diff --git a/kubernetes/apps/flux-system/kustomization.yaml b/kubernetes/apps/flux-system/kustomization.yaml new file mode 100644 index 000000000..e3bc84149 --- /dev/null +++ b/kubernetes/apps/flux-system/kustomization.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./addons/ks.yaml + - ./weave-gitops/ks.yaml diff --git a/kubernetes/apps/flux-system/namespace.yaml b/kubernetes/apps/flux-system/namespace.yaml new file mode 100644 index 000000000..b48db4521 --- /dev/null +++ b/kubernetes/apps/flux-system/namespace.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: flux-system + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/flux-system/weave-gitops/app/helmrelease.yaml b/kubernetes/apps/flux-system/weave-gitops/app/helmrelease.yaml new file mode 100644 index 000000000..378808355 --- /dev/null +++ b/kubernetes/apps/flux-system/weave-gitops/app/helmrelease.yaml @@ -0,0 +1,58 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: weave-gitops + namespace: flux-system +spec: + interval: 15m + chart: + spec: + chart: weave-gitops + version: 4.0.10 + sourceRef: + kind: HelmRepository + name: weave-gitops + namespace: flux-system + install: + createNamespace: true + remediation: + retries: 3 + upgrade: + remediation: + retries: 3 + values: + adminUser: + create: true + username: admin + ingress: + enabled: true + className: nginx + annotations: + cert-manager.io/cluster-issuer: letsencrypt-staging + nginx.ingress.kubernetes.io/whitelist-source-range: | + 10.0.0.0/8,172.16.0.0/12,192.168.0.0/16 + hajimari.io/icon: sawtooth-wave + hosts: + - host: &host "gitops.${SECRET_DOMAIN}" + paths: + - path: / + pathType: Prefix + tls: + - hosts: + - *host + secretName: weave-gitops-tls + networkPolicy: + create: false + metrics: + enabled: true + rbac: + create: true + impersonationResourceNames: ["admin"] + podAnnotations: + secret.reloader.stakater.com/reload: weave-gitops-secret + valuesFrom: + - kind: Secret + name: weave-gitops-secret + valuesKey: adminPassword + targetPath: adminUser.passwordHash diff --git a/kubernetes/apps/flux-system/weave-gitops/app/kustomization.yaml b/kubernetes/apps/flux-system/weave-gitops/app/kustomization.yaml new file mode 100644 index 000000000..3805ad867 --- /dev/null +++ b/kubernetes/apps/flux-system/weave-gitops/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: flux-system +resources: + - ./secret.sops.yaml + - ./helmrelease.yaml diff --git a/kubernetes/apps/flux-system/weave-gitops/ks.yaml b/kubernetes/apps/flux-system/weave-gitops/ks.yaml new file mode 100644 index 000000000..f5b97d889 --- /dev/null +++ b/kubernetes/apps/flux-system/weave-gitops/ks.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: cluster-apps-weave-gitops + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + path: ./kubernetes/apps/flux-system/weave-gitops/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + healthChecks: + - apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + 
name: weave-gitops + namespace: flux-system + interval: 30m + retryInterval: 1m + timeout: 3m diff --git a/kubernetes/apps/kube-system/kube-vip/app/daemonset.yaml b/kubernetes/apps/kube-system/kube-vip/app/daemonset.yaml new file mode 100644 index 000000000..8a7f835bd --- /dev/null +++ b/kubernetes/apps/kube-system/kube-vip/app/daemonset.yaml @@ -0,0 +1,72 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-vip + namespace: kube-system + labels: + app.kubernetes.io/instance: kube-vip + app.kubernetes.io/name: kube-vip +spec: + selector: + matchLabels: + app.kubernetes.io/instance: kube-vip + app.kubernetes.io/name: kube-vip + template: + metadata: + labels: + app.kubernetes.io/instance: kube-vip + app.kubernetes.io/name: kube-vip + spec: + containers: + - name: kube-vip + image: ghcr.io/kube-vip/kube-vip:v0.5.7 + imagePullPolicy: IfNotPresent + args: ["manager"] + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: svc_enable + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: "${KUBE_VIP_ADDR}" + securityContext: + capabilities: + add: ["NET_ADMIN", "NET_RAW"] + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + hostNetwork: true + serviceAccountName: kube-vip + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists diff --git a/kubernetes/apps/kube-system/kube-vip/app/kustomization.yaml b/kubernetes/apps/kube-system/kube-vip/app/kustomization.yaml new file mode 100644 index 000000000..1d0fbe56f --- /dev/null +++ b/kubernetes/apps/kube-system/kube-vip/app/kustomization.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: kube-system +resources: + - ./rbac.yaml + - ./daemonset.yaml +labels: + - pairs: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/kube-system/kube-vip/app/rbac.yaml b/kubernetes/apps/kube-system/kube-vip/app/rbac.yaml new file mode 100644 index 000000000..3cd709682 --- /dev/null +++ b/kubernetes/apps/kube-system/kube-vip/app/rbac.yaml @@ -0,0 +1,44 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-vip + namespace: kube-system +secrets: + - name: kube-vip +--- +apiVersion: v1 +kind: Secret +type: kubernetes.io/service-account-token +metadata: + name: kube-vip + namespace: kube-system + annotations: + kubernetes.io/service-account.name: kube-vip +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: system:kube-vip-role +rules: + - apiGroups: [""] + resources: ["services", "services/status", "nodes"] + verbs: ["list", "get", "watch", "update"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["list", "get", "watch", "update", "create"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:kube-vip-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 
system:kube-vip-role +subjects: + - kind: ServiceAccount + name: kube-vip + namespace: kube-system diff --git a/kubernetes/apps/kube-system/kube-vip/ks.yaml b/kubernetes/apps/kube-system/kube-vip/ks.yaml new file mode 100644 index 000000000..6d3716841 --- /dev/null +++ b/kubernetes/apps/kube-system/kube-vip/ks.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: cluster-apps-kube-vip + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + path: ./kubernetes/apps/kube-system/kube-vip/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + healthChecks: + - apiVersion: apps/v1 + kind: DaemonSet + name: kube-vip + namespace: kube-system + interval: 30m + retryInterval: 1m + timeout: 3m diff --git a/kubernetes/apps/kube-system/kustomization.yaml b/kubernetes/apps/kube-system/kustomization.yaml new file mode 100644 index 000000000..b9817047d --- /dev/null +++ b/kubernetes/apps/kube-system/kustomization.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./kube-vip/ks.yaml + - ./metrics-server/ks.yaml + - ./reloader/ks.yaml diff --git a/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml b/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml new file mode 100644 index 000000000..3af2a2a37 --- /dev/null +++ b/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml @@ -0,0 +1,33 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: metrics-server + namespace: kube-system +spec: + interval: 15m + chart: + spec: + chart: metrics-server + version: 3.8.3 + sourceRef: + kind: HelmRepository + name: metrics-server + namespace: flux-system + install: + createNamespace: true + remediation: + retries: 3 + upgrade: + remediation: + retries: 3 + values: + args: + - --kubelet-insecure-tls + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s + metrics: + enabled: true + serviceMonitor: + enabled: true diff --git a/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml b/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml new file mode 100644 index 000000000..1c3fdb04d --- /dev/null +++ b/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: kube-system +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/kube-system/metrics-server/ks.yaml b/kubernetes/apps/kube-system/metrics-server/ks.yaml new file mode 100644 index 000000000..cb8b337a8 --- /dev/null +++ b/kubernetes/apps/kube-system/metrics-server/ks.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: cluster-apps-metrics-server + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + path: ./kubernetes/apps/kube-system/metrics-server/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + healthChecks: + - apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + name: metrics-server + namespace: kube-system + interval: 30m + retryInterval: 1m + timeout: 3m diff --git a/kubernetes/apps/kube-system/namespace.yaml b/kubernetes/apps/kube-system/namespace.yaml new file mode 100644 index 000000000..5eeb2c918 --- /dev/null +++ 
b/kubernetes/apps/kube-system/namespace.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: kube-system + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/kube-system/reloader/app/helmrelease.yaml b/kubernetes/apps/kube-system/reloader/app/helmrelease.yaml new file mode 100644 index 000000000..b4f125a67 --- /dev/null +++ b/kubernetes/apps/kube-system/reloader/app/helmrelease.yaml @@ -0,0 +1,34 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: reloader + namespace: &namespace kube-system +spec: + interval: 15m + chart: + spec: + chart: reloader + version: v0.0.126 + sourceRef: + kind: HelmRepository + name: stakater + namespace: flux-system + install: + createNamespace: true + remediation: + retries: 3 + upgrade: + remediation: + retries: 3 + values: + fullnameOverride: reloader + reloader: + enableHA: true + deployment: + replicas: 3 + reloadStrategy: annotations + reloadOnCreate: false + podMonitor: + enabled: true + namespace: *namespace diff --git a/kubernetes/apps/kube-system/reloader/app/kustomization.yaml b/kubernetes/apps/kube-system/reloader/app/kustomization.yaml new file mode 100644 index 000000000..1c3fdb04d --- /dev/null +++ b/kubernetes/apps/kube-system/reloader/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: kube-system +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/kube-system/reloader/ks.yaml b/kubernetes/apps/kube-system/reloader/ks.yaml new file mode 100644 index 000000000..bfcd9b852 --- /dev/null +++ b/kubernetes/apps/kube-system/reloader/ks.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: cluster-apps-reloader + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + path: ./kubernetes/apps/kube-system/reloader/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + healthChecks: + - apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + name: reloader + namespace: kube-system + interval: 30m + retryInterval: 1m + timeout: 3m diff --git a/kubernetes/apps/networking/cloudflare-ddns/app/cloudflare-ddns.sh b/kubernetes/apps/networking/cloudflare-ddns/app/cloudflare-ddns.sh new file mode 100755 index 000000000..0ba5b7039 --- /dev/null +++ b/kubernetes/apps/networking/cloudflare-ddns/app/cloudflare-ddns.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +set -o nounset +set -o errexit + +current_ipv4="$(curl -s https://ipv4.icanhazip.com/)" +zone_id=$(curl -s -X GET \ + "https://api.cloudflare.com/client/v4/zones?name=${CLOUDFLARE_RECORD_NAME#*.}&status=active" \ + -H "X-Auth-Email: ${CLOUDFLARE_EMAIL}" \ + -H "X-Auth-Key: ${CLOUDFLARE_APIKEY}" \ + -H "Content-Type: application/json" \ + | jq --raw-output ".result[0] | .id" +) +record_ipv4=$(curl -s -X GET \ + "https://api.cloudflare.com/client/v4/zones/${zone_id}/dns_records?name=${CLOUDFLARE_RECORD_NAME}&type=A" \ + -H "X-Auth-Email: ${CLOUDFLARE_EMAIL}" \ + -H "X-Auth-Key: ${CLOUDFLARE_APIKEY}" \ + -H "Content-Type: application/json" \ +) +old_ip4=$(echo "$record_ipv4" | jq --raw-output '.result[0] | .content') +if [[ "${current_ipv4}" == "${old_ip4}" ]]; then + printf "%s - IP Address '%s' has not changed" "$(date -u)" "${current_ipv4}" + exit 0 +fi +record_ipv4_identifier="$(echo "$record_ipv4" | jq --raw-output '.result[0] | .id')" +update_ipv4=$(curl -s -X PUT \ + 
"https://api.cloudflare.com/client/v4/zones/${zone_id}/dns_records/${record_ipv4_identifier}" \ + -H "X-Auth-Email: ${CLOUDFLARE_EMAIL}" \ + -H "X-Auth-Key: ${CLOUDFLARE_APIKEY}" \ + -H "Content-Type: application/json" \ + --data "{\"id\":\"${zone_id}\",\"type\":\"A\",\"proxied\":true,\"name\":\"${CLOUDFLARE_RECORD_NAME}\",\"content\":\"${current_ipv4}\"}" \ +) +if [[ "$(echo "$update_ipv4" | jq --raw-output '.success')" == "true" ]]; then + printf "%s - Success - IP Address '%s' has been updated" "$(date -u)" "${current_ipv4}" + exit 0 +else + printf "%s - Yikes - Updating IP Address '%s' has failed" "$(date -u)" "${current_ipv4}" + exit 1 +fi diff --git a/kubernetes/apps/networking/cloudflare-ddns/app/helmrelease.yaml b/kubernetes/apps/networking/cloudflare-ddns/app/helmrelease.yaml new file mode 100644 index 000000000..0be8d7cc9 --- /dev/null +++ b/kubernetes/apps/networking/cloudflare-ddns/app/helmrelease.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: cloudflare-ddns + namespace: networking +spec: + interval: 15m + chart: + spec: + chart: app-template + version: 1.2.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + createNamespace: true + remediation: + retries: 3 + upgrade: + remediation: + retries: 3 + values: + controller: + type: cronjob + cronjob: + concurrencyPolicy: Forbid + schedule: "@hourly" + restartPolicy: OnFailure + image: + repository: ghcr.io/onedr0p/kubernetes-kubectl + tag: 1.26.0@sha256:6ee3353da3495bc99d73af29a4495bb95570e31e71579cfce719c4123882723d + command: ["/bin/bash", "/app/cloudflare-ddns.sh"] + envFrom: + - secretRef: + name: cloudflare-ddns-secret + service: + main: + enabled: false + persistence: + config: + enabled: true + type: configMap + name: cloudflare-ddns-configmap + subPath: cloudflare-ddns.sh + mountPath: /app/cloudflare-ddns.sh + defaultMode: 0775 + readOnly: true diff --git a/kubernetes/apps/networking/cloudflare-ddns/app/kustomization.yaml b/kubernetes/apps/networking/cloudflare-ddns/app/kustomization.yaml new file mode 100644 index 000000000..b726e78c2 --- /dev/null +++ b/kubernetes/apps/networking/cloudflare-ddns/app/kustomization.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: networking +resources: + - ./secret.sops.yaml + - ./helmrelease.yaml +configMapGenerator: + - name: cloudflare-ddns-configmap + files: + - ./cloudflare-ddns.sh +generatorOptions: + disableNameSuffixHash: true + annotations: + kustomize.toolkit.fluxcd.io/substitute: disabled diff --git a/kubernetes/apps/networking/cloudflare-ddns/ks.yaml b/kubernetes/apps/networking/cloudflare-ddns/ks.yaml new file mode 100644 index 000000000..b3577fd61 --- /dev/null +++ b/kubernetes/apps/networking/cloudflare-ddns/ks.yaml @@ -0,0 +1,21 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: cluster-apps-cloudflare-ddns + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + path: ./kubernetes/apps/networking/cloudflare-ddns/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + healthChecks: + - apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + name: cloudflare-ddns + namespace: networking + interval: 30m + timeout: 5m diff --git a/kubernetes/apps/networking/external-dns/app/helmrelease.yaml b/kubernetes/apps/networking/external-dns/app/helmrelease.yaml new file mode 100644 index 000000000..4de524f67 --- /dev/null 
+++ b/kubernetes/apps/networking/external-dns/app/helmrelease.yaml @@ -0,0 +1,51 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: external-dns + namespace: networking +spec: + interval: 15m + chart: + spec: + chart: external-dns + version: 1.12.0 + sourceRef: + kind: HelmRepository + name: external-dns + namespace: flux-system + install: + createNamespace: true + remediation: + retries: 3 + upgrade: + remediation: + retries: 3 + values: + interval: 2m + # logLevel: debug + provider: cloudflare + env: + - name: CF_API_EMAIL + valueFrom: + secretKeyRef: + name: external-dns-secret + key: email + - name: CF_API_KEY + valueFrom: + secretKeyRef: + name: external-dns-secret + key: api-key + extraArgs: + - --cloudflare-proxied + - --annotation-filter=external-dns.home.arpa/enabled in (true) + policy: sync + sources: ["ingress"] + txtPrefix: k8s. + txtOwnerId: default + domainFilters: + - "${SECRET_DOMAIN}" + serviceMonitor: + enabled: true + podAnnotations: + secret.reloader.stakater.com/reload: external-dns-secret diff --git a/kubernetes/apps/networking/external-dns/app/kustomization.yaml b/kubernetes/apps/networking/external-dns/app/kustomization.yaml new file mode 100644 index 000000000..a07ca991d --- /dev/null +++ b/kubernetes/apps/networking/external-dns/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: networking +resources: + - ./secret.sops.yaml + - ./helmrelease.yaml diff --git a/kubernetes/apps/networking/external-dns/ks.yaml b/kubernetes/apps/networking/external-dns/ks.yaml new file mode 100644 index 000000000..45af753f9 --- /dev/null +++ b/kubernetes/apps/networking/external-dns/ks.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: cluster-apps-external-dns + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + path: ./kubernetes/apps/networking/external-dns/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + healthChecks: + - apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + name: external-dns + namespace: networking + interval: 30m + retryInterval: 1m + timeout: 3m diff --git a/kubernetes/apps/networking/ingress-nginx/app/cloudflare-networks.txt b/kubernetes/apps/networking/ingress-nginx/app/cloudflare-networks.txt new file mode 100644 index 000000000..d6e3abd1a --- /dev/null +++ b/kubernetes/apps/networking/ingress-nginx/app/cloudflare-networks.txt @@ -0,0 +1 @@ +173.245.48.0/20\,103.21.244.0/22\,103.22.200.0/22\,103.31.4.0/22\,141.101.64.0/18\,108.162.192.0/18\,190.93.240.0/20\,188.114.96.0/20\,197.234.240.0/22\,198.41.128.0/17\,162.158.0.0/15\,104.16.0.0/13\,104.24.0.0/14\,172.64.0.0/13\,131.0.72.0/22\,2400:cb00::/32\,2606:4700::/32\,2803:f800::/32\,2405:b500::/32\,2405:8100::/32\,2a06:98c0::/29\,2c0f:f248::/32 diff --git a/kubernetes/apps/networking/ingress-nginx/app/helmrelease.yaml b/kubernetes/apps/networking/ingress-nginx/app/helmrelease.yaml new file mode 100644 index 000000000..999f7b74e --- /dev/null +++ b/kubernetes/apps/networking/ingress-nginx/app/helmrelease.yaml @@ -0,0 +1,84 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: ingress-nginx + namespace: networking +spec: + interval: 15m + chart: + spec: + chart: ingress-nginx + version: 4.4.0 + sourceRef: + kind: HelmRepository + name: ingress-nginx + namespace: flux-system + interval: 15m + install: + createNamespace: true + 
remediation: + retries: 3 + upgrade: + remediation: + retries: 3 + dependsOn: + - name: cert-manager + namespace: cert-manager + values: + controller: + replicaCount: 2 + extraEnvs: + - name: TZ + value: "${TIMEZONE}" + service: + annotations: + metallb.universe.tf/loadBalancerIPs: |- + ${METALLB_INGRESS_ADDR} + externalTrafficPolicy: Local + publishService: + enabled: true + ingressClassResource: + default: true + config: + client-header-timeout: 120 + client-body-buffer-size: "100M" + client-body-timeout: 120 + custom-http-errors: |- + 401,403,404,500,501,502,503 + enable-brotli: "true" + forwarded-for-header: "CF-Connecting-IP" + hsts-max-age: "31449600" + keep-alive: 120 + keep-alive-requests: 10000 + proxy-body-size: "100M" + ssl-protocols: "TLSv1.3 TLSv1.2" + use-forwarded-headers: "true" + metrics: + enabled: false + podAnnotations: + configmap.reloader.stakater.com/reload: "cloudflare-networks" + resources: + requests: + cpu: 10m + memory: 250Mi + limits: + memory: 500Mi + defaultBackend: + enabled: true + image: + repository: ghcr.io/tarampampam/error-pages + tag: 2.19.0 + replicaCount: 1 + extraEnvs: + - name: TEMPLATE_NAME + value: lost-in-space + - name: SHOW_DETAILS + value: "false" + valuesFrom: + # Cloudflare Networks + # https://www.cloudflare.com/ips/ + - targetPath: controller.config.proxy-real-ip-cidr + kind: ConfigMap + name: cloudflare-networks + valuesKey: cloudflare-networks.txt diff --git a/kubernetes/apps/networking/ingress-nginx/app/kustomization.yaml b/kubernetes/apps/networking/ingress-nginx/app/kustomization.yaml new file mode 100644 index 000000000..bbb7ea265 --- /dev/null +++ b/kubernetes/apps/networking/ingress-nginx/app/kustomization.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: networking +resources: + - ./helmrelease.yaml +configMapGenerator: + - name: cloudflare-networks + files: + - ./cloudflare-networks.txt +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/apps/networking/ingress-nginx/ks.yaml b/kubernetes/apps/networking/ingress-nginx/ks.yaml new file mode 100644 index 000000000..fc7e8d721 --- /dev/null +++ b/kubernetes/apps/networking/ingress-nginx/ks.yaml @@ -0,0 +1,27 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: cluster-apps-ingress-nginx + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + dependsOn: + - name: cluster-apps-cert-manager + - name: cluster-apps-cert-manager-issuers + - name: cluster-apps-metallb + - name: cluster-apps-metallb-config + path: ./kubernetes/apps/networking/ingress-nginx/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + healthChecks: + - apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + name: ingress-nginx + namespace: networking + interval: 30m + retryInterval: 1m + timeout: 3m diff --git a/kubernetes/apps/networking/k8s-gateway/app/helmrelease.yaml b/kubernetes/apps/networking/k8s-gateway/app/helmrelease.yaml new file mode 100644 index 000000000..a0116e9b5 --- /dev/null +++ b/kubernetes/apps/networking/k8s-gateway/app/helmrelease.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: k8s-gateway + namespace: networking +spec: + interval: 15m + chart: + spec: + chart: k8s-gateway + version: 2.0.0 + sourceRef: + kind: HelmRepository + name: k8s-gateway + namespace: flux-system + interval: 15m + install: + createNamespace: true + remediation: + retries: 5 + 
upgrade: + remediation: + retries: 5 + dependsOn: + - name: metallb + values: + fullnameOverride: k8s-gateway + domain: "${SECRET_DOMAIN}" + ttl: 1 + service: + type: LoadBalancer + port: 53 + annotations: + metallb.universe.tf/loadBalancerIPs: "${METALLB_K8S_GATEWAY_ADDR}" + externalTrafficPolicy: Local diff --git a/kubernetes/apps/networking/k8s-gateway/app/kustomization.yaml b/kubernetes/apps/networking/k8s-gateway/app/kustomization.yaml new file mode 100644 index 000000000..c83d92a87 --- /dev/null +++ b/kubernetes/apps/networking/k8s-gateway/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: networking +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/networking/k8s-gateway/ks.yaml b/kubernetes/apps/networking/k8s-gateway/ks.yaml new file mode 100644 index 000000000..dfecac51e --- /dev/null +++ b/kubernetes/apps/networking/k8s-gateway/ks.yaml @@ -0,0 +1,25 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: cluster-apps-k8s-gateway + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + dependsOn: + - name: cluster-apps-metallb + - name: cluster-apps-metallb-config + path: ./kubernetes/apps/networking/k8s-gateway/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + healthChecks: + - apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + name: k8s-gateway + namespace: networking + interval: 30m + retryInterval: 1m + timeout: 3m diff --git a/kubernetes/apps/networking/kustomization.yaml b/kubernetes/apps/networking/kustomization.yaml new file mode 100644 index 000000000..4a86c4fdc --- /dev/null +++ b/kubernetes/apps/networking/kustomization.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./cloudflare-ddns/ks.yaml + - ./external-dns/ks.yaml + - ./ingress-nginx/ks.yaml + - ./k8s-gateway/ks.yaml + - ./metallb/ks.yaml diff --git a/kubernetes/apps/networking/metallb/app/helmrelease.yaml b/kubernetes/apps/networking/metallb/app/helmrelease.yaml new file mode 100644 index 000000000..2db0214ef --- /dev/null +++ b/kubernetes/apps/networking/metallb/app/helmrelease.yaml @@ -0,0 +1,28 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: metallb + namespace: networking +spec: + interval: 15m + chart: + spec: + chart: metallb + version: 0.13.7 + sourceRef: + kind: HelmRepository + name: metallb + namespace: flux-system + install: + createNamespace: true + remediation: + retries: 3 + upgrade: + remediation: + retries: 3 + values: + crds: + enabled: true + psp: + create: false diff --git a/kubernetes/apps/networking/metallb/app/kustomization.yaml b/kubernetes/apps/networking/metallb/app/kustomization.yaml new file mode 100644 index 000000000..c83d92a87 --- /dev/null +++ b/kubernetes/apps/networking/metallb/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: networking +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/networking/metallb/config/kustomization.yaml b/kubernetes/apps/networking/metallb/config/kustomization.yaml new file mode 100644 index 000000000..b739ea95a --- /dev/null +++ b/kubernetes/apps/networking/metallb/config/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: networking +resources: + - resources.yaml diff --git 
a/kubernetes/apps/networking/metallb/config/resources.yaml b/kubernetes/apps/networking/metallb/config/resources.yaml new file mode 100644 index 000000000..7c646df4e --- /dev/null +++ b/kubernetes/apps/networking/metallb/config/resources.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: l2-ip + namespace: networking +spec: + ipAddressPools: + - default-pool +--- +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: default-pool + namespace: networking +spec: + addresses: + - "${METALLB_LB_RANGE}" diff --git a/kubernetes/apps/networking/metallb/ks.yaml b/kubernetes/apps/networking/metallb/ks.yaml new file mode 100644 index 000000000..53f482f4b --- /dev/null +++ b/kubernetes/apps/networking/metallb/ks.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: cluster-apps-metallb + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + path: ./kubernetes/apps/networking/metallb/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + healthChecks: + - apiVersion: helm.toolkit.fluxcd.io/v2beta1 + kind: HelmRelease + name: metallb + namespace: networking + interval: 30m + retryInterval: 1m + timeout: 3m +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: cluster-apps-metallb-config + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + dependsOn: + - name: cluster-apps-metallb + path: ./kubernetes/apps/networking/metallb/config + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 3m diff --git a/kubernetes/apps/networking/namespace.yaml b/kubernetes/apps/networking/namespace.yaml new file mode 100644 index 000000000..b9e4a4161 --- /dev/null +++ b/kubernetes/apps/networking/namespace.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: networking + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/system-upgrade/kustomization.yaml b/kubernetes/apps/system-upgrade/kustomization.yaml new file mode 100644 index 000000000..dbe5bdb5a --- /dev/null +++ b/kubernetes/apps/system-upgrade/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + # - ./system-upgrade-controller/ks.yaml diff --git a/kubernetes/apps/system-upgrade/namespace.yaml b/kubernetes/apps/system-upgrade/namespace.yaml new file mode 100644 index 000000000..5ea024dde --- /dev/null +++ b/kubernetes/apps/system-upgrade/namespace.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: system-upgrade + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml b/kubernetes/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml new file mode 100644 index 000000000..0e540a25a --- /dev/null +++ b/kubernetes/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # renovate: datasource=docker image=rancher/system-upgrade-controller + - https://github.com/rancher/system-upgrade-controller/releases/download/v0.10.0/crd.yaml + - https://github.com/rancher/system-upgrade-controller?ref=v0.10.0 +images: + - name: 
rancher/system-upgrade-controller + newTag: v0.10.0 +commonLabels: + app.kubernetes.io/name: system-upgrade-controller + app.kubernetes.io/instance: system-upgrade-controller +patchesStrategicMerge: + - | + $patch: delete + apiVersion: v1 + kind: Namespace + metadata: + name: system-upgrade diff --git a/kubernetes/apps/system-upgrade/system-upgrade-controller/ks.yaml b/kubernetes/apps/system-upgrade/system-upgrade-controller/ks.yaml new file mode 100644 index 000000000..6d10ca90e --- /dev/null +++ b/kubernetes/apps/system-upgrade/system-upgrade-controller/ks.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: cluster-apps-system-upgrade-controller + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + path: ./kubernetes/apps/system-upgrade/system-upgrade-controller/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 3m +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: cluster-apps-system-upgrade-controller-plans + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + dependsOn: + - name: cluster-apps-system-upgrade-controller + path: ./kubernetes/apps/system-upgrade/system-upgrade-controller/plans + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/system-upgrade/system-upgrade-controller/plans/agent.yaml b/kubernetes/apps/system-upgrade/system-upgrade-controller/plans/agent.yaml new file mode 100644 index 000000000..92444d7cc --- /dev/null +++ b/kubernetes/apps/system-upgrade/system-upgrade-controller/plans/agent.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: upgrade.cattle.io/v1 +kind: Plan +metadata: + name: agent + namespace: system-upgrade +spec: + # renovate: datasource=github-releases depName=k3s-io/k3s + version: "v1.24.8+k3s1" + serviceAccountName: system-upgrade + concurrency: 1 + nodeSelector: + matchExpressions: + - {key: node-role.kubernetes.io/control-plane, operator: DoesNotExist} + prepare: + image: rancher/k3s-upgrade + args: ["prepare", "server"] + upgrade: + image: rancher/k3s-upgrade diff --git a/kubernetes/apps/system-upgrade/system-upgrade-controller/plans/kustomization.yaml b/kubernetes/apps/system-upgrade/system-upgrade-controller/plans/kustomization.yaml new file mode 100644 index 000000000..2a658c35b --- /dev/null +++ b/kubernetes/apps/system-upgrade/system-upgrade-controller/plans/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./server.yaml + - ./agent.yaml diff --git a/kubernetes/apps/system-upgrade/system-upgrade-controller/plans/server.yaml b/kubernetes/apps/system-upgrade/system-upgrade-controller/plans/server.yaml new file mode 100644 index 000000000..5be645bf2 --- /dev/null +++ b/kubernetes/apps/system-upgrade/system-upgrade-controller/plans/server.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: upgrade.cattle.io/v1 +kind: Plan +metadata: + name: server + namespace: system-upgrade +spec: + # renovate: datasource=github-releases depName=k3s-io/k3s + version: "v1.24.8+k3s1" + serviceAccountName: system-upgrade + concurrency: 1 + cordon: true + nodeSelector: + matchExpressions: + - {key: node-role.kubernetes.io/control-plane, operator: Exists} + upgrade: + image: rancher/k3s-upgrade diff --git 
a/kubernetes/bootstrap/kustomization.yaml b/kubernetes/bootstrap/kustomization.yaml new file mode 100644 index 000000000..f83faf11d --- /dev/null +++ b/kubernetes/bootstrap/kustomization.yaml @@ -0,0 +1,26 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - github.com/fluxcd/flux2/manifests/install?ref=v0.38.2 +patches: + - patch: |- + $patch: delete + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: not-used + target: + group: networking.k8s.io + version: v1 + kind: NetworkPolicy + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --concurrent=20 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --requeue-dependency=5s + target: + kind: Deployment + name: "(kustomize-controller|helm-controller|source-controller)" diff --git a/kubernetes/flux/apps.yaml b/kubernetes/flux/apps.yaml new file mode 100644 index 000000000..6d1491546 --- /dev/null +++ b/kubernetes/flux/apps.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: cluster-apps + namespace: flux-system +spec: + interval: 30m + path: ./kubernetes/apps + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substituteFrom: + - kind: ConfigMap + name: cluster-settings + - kind: Secret + name: cluster-secrets + patches: + - patch: |- + apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 + kind: Kustomization + metadata: + name: not-used + spec: + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substituteFrom: + - kind: ConfigMap + name: cluster-settings + - kind: Secret + name: cluster-secrets + target: + labelSelector: substitution.flux.home.arpa/enabled=true diff --git a/kubernetes/flux/config/flux.yaml b/kubernetes/flux/config/flux.yaml new file mode 100644 index 000000000..26ffa5c11 --- /dev/null +++ b/kubernetes/flux/config/flux.yaml @@ -0,0 +1,46 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: flux-manifests + namespace: flux-system +spec: + interval: 10m + url: oci://ghcr.io/fluxcd/flux-manifests + ref: + tag: v0.38.2 +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: flux + namespace: flux-system +spec: + interval: 10m + path: ./ + prune: true + wait: true + sourceRef: + kind: OCIRepository + name: flux-manifests + patches: + - patch: | + $patch: delete + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: not-used + target: + group: networking.k8s.io + version: v1 + kind: NetworkPolicy + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --concurrent=20 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --requeue-dependency=5s + target: + kind: Deployment + name: "(kustomize-controller|helm-controller|source-controller)" diff --git a/kubernetes/flux/config/kustomization.yaml b/kubernetes/flux/config/kustomization.yaml new file mode 100644 index 000000000..ef231746a --- /dev/null +++ b/kubernetes/flux/config/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./flux.yaml + - ./cluster.yaml diff --git a/kubernetes/flux/repositories/git/.gitkeep b/kubernetes/flux/repositories/git/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/kubernetes/flux/repositories/helm/bitnami.yaml 
b/kubernetes/flux/repositories/helm/bitnami.yaml new file mode 100644 index 000000000..deac3b981 --- /dev/null +++ b/kubernetes/flux/repositories/helm/bitnami.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: bitnami + namespace: flux-system +spec: + interval: 1h + url: https://charts.bitnami.com/bitnami diff --git a/kubernetes/flux/repositories/helm/bjw-s.yaml b/kubernetes/flux/repositories/helm/bjw-s.yaml new file mode 100644 index 000000000..c9e6c2d48 --- /dev/null +++ b/kubernetes/flux/repositories/helm/bjw-s.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: bjw-s + namespace: flux-system +spec: + interval: 1h + url: https://bjw-s.github.io/helm-charts/ diff --git a/kubernetes/flux/repositories/helm/external-dns.yaml b/kubernetes/flux/repositories/helm/external-dns.yaml new file mode 100644 index 000000000..b76b9662c --- /dev/null +++ b/kubernetes/flux/repositories/helm/external-dns.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: external-dns + namespace: flux-system +spec: + interval: 1h + url: https://kubernetes-sigs.github.io/external-dns diff --git a/kubernetes/flux/repositories/helm/hajimari.yaml b/kubernetes/flux/repositories/helm/hajimari.yaml new file mode 100644 index 000000000..e246f09be --- /dev/null +++ b/kubernetes/flux/repositories/helm/hajimari.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: hajimari + namespace: flux-system +spec: + interval: 1h + url: https://hajimari.io diff --git a/kubernetes/flux/repositories/helm/ingress-nginx.yaml b/kubernetes/flux/repositories/helm/ingress-nginx.yaml new file mode 100644 index 000000000..4dcf5eeac --- /dev/null +++ b/kubernetes/flux/repositories/helm/ingress-nginx.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: ingress-nginx + namespace: flux-system +spec: + interval: 1h + url: https://kubernetes.github.io/ingress-nginx diff --git a/kubernetes/flux/repositories/helm/jetstack.yaml b/kubernetes/flux/repositories/helm/jetstack.yaml new file mode 100644 index 000000000..d7e64ffc7 --- /dev/null +++ b/kubernetes/flux/repositories/helm/jetstack.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: jetstack + namespace: flux-system +spec: + interval: 1h + url: https://charts.jetstack.io/ diff --git a/kubernetes/flux/repositories/helm/k8s-gateway.yaml b/kubernetes/flux/repositories/helm/k8s-gateway.yaml new file mode 100644 index 000000000..a18177eb4 --- /dev/null +++ b/kubernetes/flux/repositories/helm/k8s-gateway.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: k8s-gateway + namespace: flux-system +spec: + interval: 1h + url: https://ori-edge.github.io/k8s_gateway/ diff --git a/kubernetes/flux/repositories/helm/kustomization.yaml b/kubernetes/flux/repositories/helm/kustomization.yaml new file mode 100644 index 000000000..7bd24cd8f --- /dev/null +++ b/kubernetes/flux/repositories/helm/kustomization.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./bitnami.yaml + - ./bjw-s.yaml + - ./external-dns.yaml + - ./hajimari.yaml + - ./ingress-nginx.yaml + - ./jetstack.yaml + - ./k8s-gateway.yaml + - ./metallb.yaml + - ./metrics-server.yaml + - 
./stakater.yaml + - ./weave-gitops.yaml diff --git a/kubernetes/flux/repositories/helm/metallb.yaml b/kubernetes/flux/repositories/helm/metallb.yaml new file mode 100644 index 000000000..61bf8079c --- /dev/null +++ b/kubernetes/flux/repositories/helm/metallb.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: metallb + namespace: flux-system +spec: + interval: 1h + url: https://metallb.github.io/metallb diff --git a/kubernetes/flux/repositories/helm/metrics-server.yaml b/kubernetes/flux/repositories/helm/metrics-server.yaml new file mode 100644 index 000000000..57e7aa0c5 --- /dev/null +++ b/kubernetes/flux/repositories/helm/metrics-server.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: metrics-server + namespace: flux-system +spec: + interval: 1h + url: https://kubernetes-sigs.github.io/metrics-server diff --git a/kubernetes/flux/repositories/helm/stakater.yaml b/kubernetes/flux/repositories/helm/stakater.yaml new file mode 100644 index 000000000..1846e8ae4 --- /dev/null +++ b/kubernetes/flux/repositories/helm/stakater.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: stakater + namespace: flux-system +spec: + interval: 1h + url: https://stakater.github.io/stakater-charts diff --git a/kubernetes/flux/repositories/helm/weave-gitops.yaml b/kubernetes/flux/repositories/helm/weave-gitops.yaml new file mode 100644 index 000000000..81d4cea20 --- /dev/null +++ b/kubernetes/flux/repositories/helm/weave-gitops.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: weave-gitops + namespace: flux-system +spec: + interval: 1h + url: https://helm.gitops.weave.works diff --git a/kubernetes/flux/repositories/kustomization.yaml b/kubernetes/flux/repositories/kustomization.yaml new file mode 100644 index 000000000..4ff69becd --- /dev/null +++ b/kubernetes/flux/repositories/kustomization.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # - ./git + - ./helm + # - ./oci diff --git a/kubernetes/flux/repositories/oci/.gitkeep b/kubernetes/flux/repositories/oci/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/kubernetes/flux/vars/kustomization.yaml b/kubernetes/flux/vars/kustomization.yaml new file mode 100644 index 000000000..8db2fe911 --- /dev/null +++ b/kubernetes/flux/vars/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./cluster-settings.yaml + - ./cluster-secrets.sops.yaml diff --git a/terraform/cloudflare/main.tf b/terraform/cloudflare/main.tf new file mode 100644 index 000000000..7b684737a --- /dev/null +++ b/terraform/cloudflare/main.tf @@ -0,0 +1,93 @@ +terraform { + + required_providers { + cloudflare = { + source = "cloudflare/cloudflare" + version = "3.30.0" + } + http = { + source = "hashicorp/http" + version = "3.2.1" + } + sops = { + source = "carlpett/sops" + version = "0.7.1" + } + } +} + +data "sops_file" "cloudflare_secrets" { + source_file = "secret.sops.yaml" +} + +provider "cloudflare" { + email = data.sops_file.cloudflare_secrets.data["cloudflare_email"] + api_key = data.sops_file.cloudflare_secrets.data["cloudflare_apikey"] +} + +data "cloudflare_zones" "domain" { + filter { + name = data.sops_file.cloudflare_secrets.data["cloudflare_domain"] + } +} + +resource "cloudflare_zone_settings_override" 
"cloudflare_settings" { + zone_id = lookup(data.cloudflare_zones.domain.zones[0], "id") + settings { + ssl = "strict" + always_use_https = "on" + min_tls_version = "1.2" + opportunistic_encryption = "on" + tls_1_3 = "zrt" + automatic_https_rewrites = "on" + universal_ssl = "on" + browser_check = "on" + challenge_ttl = 1800 + privacy_pass = "on" + security_level = "medium" + brotli = "on" + minify { + css = "on" + js = "on" + html = "on" + } + rocket_loader = "on" + always_online = "off" + development_mode = "off" + http3 = "on" + zero_rtt = "on" + ipv6 = "on" + websockets = "on" + opportunistic_onion = "on" + pseudo_ipv4 = "off" + ip_geolocation = "on" + email_obfuscation = "on" + server_side_exclude = "on" + hotlink_protection = "off" + security_header { + enabled = false + } + } +} + +data "http" "ipv4" { + url = "http://ipv4.icanhazip.com" +} + +resource "cloudflare_record" "ipv4" { + name = "ipv4" + zone_id = lookup(data.cloudflare_zones.domain.zones[0], "id") + value = chomp(data.http.ipv4.response_body) + proxied = true + type = "A" + ttl = 1 +} + +resource "cloudflare_record" "root" { + name = data.sops_file.cloudflare_secrets.data["cloudflare_domain"] + zone_id = lookup(data.cloudflare_zones.domain.zones[0], "id") + value = "ipv4.${data.sops_file.cloudflare_secrets.data["cloudflare_domain"]}" + proxied = true + type = "CNAME" + ttl = 1 +} diff --git a/tmpl/.sops.yaml b/tmpl/.sops.yaml new file mode 100644 index 000000000..727731e39 --- /dev/null +++ b/tmpl/.sops.yaml @@ -0,0 +1,15 @@ +--- +creation_rules: + - path_regex: kubernetes/.*\.sops\.ya?ml + encrypted_regex: "^(data|stringData)$" + key_groups: + - age: + - ${BOOTSTRAP_AGE_PUBLIC_KEY} + - path_regex: ansible/.*\.sops\.ya?ml + key_groups: + - age: + - ${BOOTSTRAP_AGE_PUBLIC_KEY} + - path_regex: terraform/.*\.sops\.ya?ml + key_groups: + - age: + - ${BOOTSTRAP_AGE_PUBLIC_KEY} diff --git a/tmpl/ansible/kube-vip.yml b/tmpl/ansible/kube-vip.yml new file mode 100644 index 000000000..0720e05e1 --- /dev/null +++ b/tmpl/ansible/kube-vip.yml @@ -0,0 +1,3 @@ +--- +# (string) The ARP address kube-vip broadcasts +kubevip_address: "${BOOTSTRAP_KUBE_VIP_ADDR}" diff --git a/tmpl/kubernetes/cert-manager-secret.sops.yaml b/tmpl/kubernetes/cert-manager-secret.sops.yaml new file mode 100644 index 000000000..490000275 --- /dev/null +++ b/tmpl/kubernetes/cert-manager-secret.sops.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: cert-manager-secret + namespace: cert-manager +stringData: + api-key: "${BOOTSTRAP_CLOUDFLARE_APIKEY}" diff --git a/tmpl/kubernetes/cloudflare-ddns-secret.sops.yaml b/tmpl/kubernetes/cloudflare-ddns-secret.sops.yaml new file mode 100644 index 000000000..d9af99bff --- /dev/null +++ b/tmpl/kubernetes/cloudflare-ddns-secret.sops.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloudflare-ddns-secret + namespace: networking +type: Opaque +stringData: + CLOUDFLARE_EMAIL: "${BOOTSTRAP_CLOUDFLARE_EMAIL}" + CLOUDFLARE_APIKEY: "${BOOTSTRAP_CLOUDFLARE_APIKEY}" + CLOUDFLARE_RECORD_NAME: "ipv4.${BOOTSTRAP_CLOUDFLARE_DOMAIN}" diff --git a/tmpl/kubernetes/cluster-secrets.sops.yaml b/tmpl/kubernetes/cluster-secrets.sops.yaml new file mode 100644 index 000000000..22cc746dd --- /dev/null +++ b/tmpl/kubernetes/cluster-secrets.sops.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: cluster-secrets + namespace: flux-system +stringData: + SECRET_DOMAIN: "${BOOTSTRAP_CLOUDFLARE_DOMAIN}" + SECRET_CLOUDFLARE_EMAIL: "${BOOTSTRAP_CLOUDFLARE_EMAIL}" diff --git 
a/tmpl/kubernetes/external-dns-secret.sops.yaml b/tmpl/kubernetes/external-dns-secret.sops.yaml new file mode 100644 index 000000000..5047e45c7 --- /dev/null +++ b/tmpl/kubernetes/external-dns-secret.sops.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: external-dns-secret + namespace: networking +stringData: + email: "${BOOTSTRAP_CLOUDFLARE_EMAIL}" + api-key: "${BOOTSTRAP_CLOUDFLARE_APIKEY}" diff --git a/tmpl/kubernetes/flux/cluster-settings.yaml b/tmpl/kubernetes/flux/cluster-settings.yaml new file mode 100644 index 000000000..5e126097b --- /dev/null +++ b/tmpl/kubernetes/flux/cluster-settings.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cluster-settings + namespace: flux-system +data: + TIMEZONE: "${BOOTSTRAP_TIMEZONE}" + KUBE_VIP_ADDR: "${BOOTSTRAP_KUBE_VIP_ADDR}" + METALLB_LB_RANGE: "${BOOTSTRAP_METALLB_LB_RANGE}" + METALLB_K8S_GATEWAY_ADDR: "${BOOTSTRAP_METALLB_K8S_GATEWAY_ADDR}" + METALLB_INGRESS_ADDR: "${BOOTSTRAP_METALLB_INGRESS_ADDR}" diff --git a/tmpl/kubernetes/flux/cluster.yaml b/tmpl/kubernetes/flux/cluster.yaml new file mode 100644 index 000000000..bfe7a5dea --- /dev/null +++ b/tmpl/kubernetes/flux/cluster.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: GitRepository +metadata: + name: home-kubernetes + namespace: flux-system +spec: + interval: 30m + ref: + branch: main + url: "${BOOTSTRAP_GIT_REPOSITORY}" + ignore: | + # exclude all + /* + # include kubernetes directory + !/kubernetes +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: cluster + namespace: flux-system +spec: + interval: 30m + path: ./kubernetes/flux + prune: true + wait: false + sourceRef: + kind: GitRepository + name: home-kubernetes + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substituteFrom: + - kind: ConfigMap + name: cluster-settings + - kind: Secret + name: cluster-secrets diff --git a/tmpl/kubernetes/github-webhook-token-secret.sops.yaml b/tmpl/kubernetes/github-webhook-token-secret.sops.yaml new file mode 100644 index 000000000..58b3f9160 --- /dev/null +++ b/tmpl/kubernetes/github-webhook-token-secret.sops.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: github-webhook-token-secret + namespace: flux-system +stringData: + token: "${BOOTSTRAP_FLUX_GITHUB_WEBHOOK_SECRET}" diff --git a/tmpl/kubernetes/weave-gitops-secret.sops.yaml b/tmpl/kubernetes/weave-gitops-secret.sops.yaml new file mode 100644 index 000000000..bb3d837fb --- /dev/null +++ b/tmpl/kubernetes/weave-gitops-secret.sops.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: weave-gitops-secret + namespace: flux-system +type: Opaque +stringData: + adminPassword: "${BOOTSTRAP_WEAVE_GITOPS_ADMIN_PASSWORD}" diff --git a/tmpl/terraform/secret.sops.yaml b/tmpl/terraform/secret.sops.yaml new file mode 100644 index 000000000..117a997ed --- /dev/null +++ b/tmpl/terraform/secret.sops.yaml @@ -0,0 +1,3 @@ +cloudflare_email: "${BOOTSTRAP_CLOUDFLARE_EMAIL}" +cloudflare_apikey: "${BOOTSTRAP_CLOUDFLARE_APIKEY}" +cloudflare_domain: "${BOOTSTRAP_CLOUDFLARE_DOMAIN}"
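
Note on the tmpl/ files above: they only contain ${BOOTSTRAP_*} placeholders, so before anything is committed each variable has to be substituted and every *.sops.yaml file encrypted against the Age key referenced in tmpl/.sops.yaml. A minimal sketch of that flow, assuming the variables from .config.sample.env are exported and envsubst, sops and age are installed (the repository's own bootstrap tooling may wrap these steps differently):

    # render the repo-level .sops.yaml so creation_rules point at your Age public key
    envsubst < ./tmpl/.sops.yaml > ./.sops.yaml
    # render a secret template into the kubernetes tree, substituting the BOOTSTRAP_* variables
    envsubst < ./tmpl/kubernetes/cluster-secrets.sops.yaml > ./kubernetes/flux/vars/cluster-secrets.sops.yaml
    # encrypt in place; the matching creation_rule in .sops.yaml selects the Age recipient
    # and limits encryption to the data/stringData fields
    sops --encrypt --in-place ./kubernetes/flux/vars/cluster-secrets.sops.yaml

Only the encrypted result should be committed; Flux decrypts it at reconcile time through the sops-age secret referenced by the decryption block in kubernetes/flux/apps.yaml and tmpl/kubernetes/flux/cluster.yaml.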