From 645ed81c88bfed703ea4b12d7a5cf192ede28051 Mon Sep 17 00:00:00 2001 From: Joseph Hanson Date: Thu, 11 Jan 2024 15:03:54 -0600 Subject: [PATCH] Homelab --- .ansible-lint | 9 + .editorconfig | 23 ++ .envrc | 4 + .gitattributes | 4 + .gitignore | 13 + .markdownlint.yaml | 23 ++ .pre-commit-config.yaml | 53 ++++ .renovate/customManagers.json5 | 37 +++ .sops.yaml | 15 ++ .task/checksum/ansible--venv | 1 + .taskfiles/Ansible/Taskfile.yaml | 52 ++++ .taskfiles/PreCommit/Tasks.yaml | 16 ++ .taskfiles/VolSync/ListJob.tmpl.yaml | 19 ++ .../VolSync/ReplicationDestination.tmpl.yaml | 25 ++ .taskfiles/VolSync/Tasks.yaml | 158 ++++++++++++ .taskfiles/VolSync/UnockJob.tmpl.yaml | 19 ++ .taskfiles/VolSync/WipeJob.tmpl.yaml | 25 ++ .taskfiles/VolSync/wait-for-job.sh | 14 ++ .taskfiles/_scripts/wait-for-k8s-job.sh | 14 ++ .taskfiles/flux/Taskfile.yaml | 47 ++++ .taskfiles/k8s/Taskfile.yaml | 12 + .taskfiles/rook/Taskfile.yaml | 104 ++++++++ .taskfiles/rook/WipeDiskJob.tmpl.yaml | 26 ++ .taskfiles/rook/WipeRookDataJob.tmpl.yaml | 29 +++ .taskfiles/rook/pod.yaml | 19 ++ .vscode/extensions.json | 10 + .vscode/settings.json | 46 ++++ .yamllint.yaml | 29 +++ README.md | 1 + Taskfile.yaml | 158 ++++++++++++ ansible/cilium-install.sh | 10 + ansible/main/.envrc | 8 + .../main/inventory/group_vars/all/main.yaml | 28 +++ .../group_vars/all/supplemental.yaml | 3 + .../inventory/group_vars/master/main.yaml | 25 ++ .../inventory/group_vars/worker/main.yaml | 5 + ansible/main/inventory/hosts.yaml | 18 ++ ansible/main/playbooks/cluster-add-user.yaml | 44 ++++ .../main/playbooks/cluster-ceph-reset.yaml | 40 +++ .../main/playbooks/cluster-installation.yaml | 95 ++++++++ ansible/main/playbooks/cluster-nuke.yaml | 61 +++++ ansible/main/playbooks/cluster-prepare.yaml | 130 ++++++++++ .../playbooks/cluster-rollout-update.yaml | 71 ++++++ ansible/main/playbooks/files/.vimrc | 2 + ansible/main/playbooks/files/config.toml.tmpl | 25 ++ .../playbooks/files/stale-containers.service | 6 + .../playbooks/files/stale-containers.timer | 11 + ansible/main/playbooks/tasks/cilium.yaml | 56 +++++ ansible/main/playbooks/tasks/coredns.yaml | 56 +++++ ansible/main/playbooks/tasks/cruft.yaml | 32 +++ .../main/playbooks/tasks/helm_controller.yaml | 16 ++ .../playbooks/tasks/stale_containers.yaml | 36 +++ .../templates/custom-cilium-helmchart.yaml.j2 | 46 ++++ .../custom-coredns-helmchart.yaml.j2 | 77 ++++++ ansible/requirements.txt | 8 + ansible/requirements.yaml | 18 ++ .../cert-manager/app/helmrelease.yaml | 47 ++++ .../cert-manager/app/kustomization.yaml | 7 + .../issuers/cloudflare/externalsecret.yaml | 19 ++ .../cloudflare/issuer-letsencrypt-prod.yaml | 22 ++ .../issuer-letsencrypt-staging.yaml | 22 ++ .../dnsimple/dnsimple-issuer-rbac.yaml | 22 ++ .../issuers/dnsimple/externalsecret.yaml | 23 ++ .../issuers/dnsimple/helmrelease.yaml | 36 +++ .../dnsimple/issuer-letsencrypt-prod.yaml | 22 ++ .../dnsimple/issuer-letsencrypt-staging.yaml | 21 ++ .../cert-manager/issuers/kustomization.yaml | 14 ++ .../apps/cert-manager/cert-manager/ks.yaml | 32 +++ .../apps/cert-manager/kustomization.yaml | 9 + kubernetes/apps/cert-manager/namespace.yaml | 7 + .../default/jellyfin/app/helmrelease.yaml | 117 +++++++++ .../default/jellyfin/app/kustomization.yaml | 8 + kubernetes/apps/default/jellyfin/app/pvc.yaml | 15 ++ kubernetes/apps/default/jellyfin/ks.yaml | 19 ++ kubernetes/apps/default/kustomization.yaml | 7 + kubernetes/apps/default/namespace.yaml | 7 + kubernetes/apps/default/rocky-nessa.yaml | 20 ++ kubernetes/apps/default/rocky-nienna.yaml | 20 ++ 
kubernetes/apps/default/ubuntu.yaml | 19 ++ kubernetes/apps/flux-system/add-ons/ks.yaml | 34 +++ .../add-ons/monitoring/kustomization.yaml | 8 + .../add-ons/monitoring/podmonitor.yaml | 32 +++ .../add-ons/monitoring/prometheusrule.yaml | 32 +++ .../add-ons/webhooks/git/externalsecret.yaml | 19 ++ .../add-ons/webhooks/git/ingress.yaml | 24 ++ .../add-ons/webhooks/git/kustomization.yaml | 8 + .../add-ons/webhooks/git/receiver.yaml | 29 +++ .../add-ons/webhooks/kustomization.yaml | 6 + .../apps/flux-system/kustomization.yaml | 9 + kubernetes/apps/flux-system/namespace.yaml | 7 + .../kube-system/cilium/app/helmrelease.yaml | 75 ++++++ .../cilium/app/netpols/allow-same-ns.yaml | 9 + .../cilium/app/netpols/allow-ssh.yaml | 23 ++ .../cilium/app/netpols/apiserver.yaml | 27 +++ .../cilium/app/netpols/cilium-health.yaml | 41 ++++ .../cilium/app/netpols/cilium-vxlan.yaml | 26 ++ .../cilium/app/netpols/core-dns.yaml | 65 +++++ .../kube-system/cilium/app/netpols/etcd.yaml | 27 +++ .../cilium/app/netpols/fix-apiserver.yml | 15 ++ .../cilium/app/netpols/hubble-relay.yaml | 50 ++++ .../cilium/app/netpols/hubble-ui.yaml | 75 ++++++ .../cilium/app/netpols/kubelet.yaml | 28 +++ .../cilium/app/netpols/kustomization.yaml | 16 ++ kubernetes/apps/kube-system/cilium/ks.yaml | 17 ++ .../apps/kube-system/kustomization.yaml | 11 + .../metrics-server/app/helmrelease.yaml | 26 ++ .../metrics-server/app/kustomization.yaml | 7 + .../apps/kube-system/metrics-server/ks.yaml | 17 ++ kubernetes/apps/kube-system/namespace.yaml | 7 + kubernetes/apps/kyverno/kustomization.yaml | 9 + .../apps/kyverno/kyverno/app/helmrelease.yaml | 80 ++++++ .../kyverno/kyverno/app/kustomization.yaml | 7 + kubernetes/apps/kyverno/kyverno/ks.yaml | 36 +++ .../kyverno/policies/kustomization.yaml | 6 + .../kyverno/policies/remove-cpu-limits.yaml | 44 ++++ kubernetes/apps/kyverno/namespace.yaml | 7 + .../network/echo-server/app/helmrelease.yaml | 61 +++++ .../echo-server/app/kustomization.yaml | 10 + kubernetes/apps/network/echo-server/ks.yaml | 17 ++ .../app/hsn-dev/externalsecret.yaml | 19 ++ .../external-dns/app/hsn-dev/helmrelease.yaml | 69 ++++++ .../app/hsn-dev/kustomization.yaml | 8 + .../app/shared/dns_endpoint-crd.yaml | 93 +++++++ .../app/shared/kustomization.yaml | 7 + .../app/valinor-social/externalsecret.yaml | 19 ++ .../app/valinor-social/helmrelease.yaml | 70 ++++++ .../app/valinor-social/kustomization.yaml | 8 + kubernetes/apps/network/external-dns/ks.yaml | 57 +++++ .../ingress-nginx/app/certificate.yaml | 16 ++ .../ingress-nginx/app/externalsecret.yaml | 19 ++ .../ingress-nginx/app/helmrelease.yaml | 99 ++++++++ .../ingress-nginx/app/kustomization.yaml | 9 + kubernetes/apps/network/ingress-nginx/ks.yaml | 38 +++ .../ingress-nginx/mastodon/certificate.yaml | 16 ++ .../ingress-nginx/peertube/certificate.yaml | 16 ++ .../ingress-nginx/peertube/helmrelease.yaml | 108 +++++++++ .../ingress-nginx/peertube/kustomization.yaml | 8 + kubernetes/apps/network/kustomization.yaml | 11 + kubernetes/apps/network/namespace.yaml | 7 + .../external-secrets/app/helmrelease.yaml | 25 ++ .../external-secrets/app/kustomization.yaml | 7 + .../cluster-secrets/kustomization.yaml | 6 + .../cluster-secrets/pgo-s3-creds.yaml | 41 ++++ .../apps/security/external-secrets/ks.yaml | 50 ++++ .../stores/kustomization.yaml | 6 + .../onepassword/clustersecretstore.yaml | 19 ++ .../stores/onepassword/helmrelease.yaml | 142 +++++++++++ .../stores/onepassword/kustomization.yaml | 15 ++ .../stores/onepassword/secret.sops.yaml | 55 +++++ 
kubernetes/apps/security/kustomization.yaml | 9 + kubernetes/apps/security/namespace.yaml | 8 + kubernetes/apps/system/kustomization.yaml | 12 + kubernetes/apps/system/namespace.yaml | 7 + .../app/helmrelease.yaml | 37 +++ .../app/kustomization.yaml | 6 + .../system/node-feature-discovery/ks.yaml | 19 ++ .../apps/system/reloader/app/helmrelease.yaml | 30 +++ .../system/reloader/app/kustomization.yaml | 7 + kubernetes/apps/system/reloader/ks.yaml | 17 ++ .../snapshot-controller/app/helmrelease.yaml | 34 +++ .../app/kustomization.yaml | 6 + .../system/snapshot-controller/app/pki.yaml | 17 ++ .../apps/system/snapshot-controller/ks.yaml | 22 ++ kubernetes/bootstrap/flux/age-key.sops.yaml | 28 +++ .../bootstrap/flux/git-deploy-key.sops.yaml | 31 +++ kubernetes/bootstrap/flux/kustomization.yaml | 17 ++ kubernetes/bootstrap/hcloud.sops.yaml | 30 +++ kubernetes/bootstrap/readme.md | 28 +++ kubernetes/flux/cluster-apps.yaml | 45 ++++ kubernetes/flux/config/cluster.yaml | 46 ++++ kubernetes/flux/config/flux.yaml | 125 ++++++++++ kubernetes/flux/config/kustomization.yaml | 7 + .../flux/repositories/helm/authentik.yaml | 10 + .../flux/repositories/helm/bitnami.yaml | 11 + kubernetes/flux/repositories/helm/bjw-s.yaml | 11 + kubernetes/flux/repositories/helm/cilium.yaml | 11 + .../repositories/helm/cloudnative-pg.yaml | 10 + .../flux/repositories/helm/crowdsec.yaml | 11 + .../flux/repositories/helm/crunchydata.yaml | 12 + .../repositories/helm/democratic-csi.yaml | 11 + .../flux/repositories/helm/dragonflydb.yaml | 11 + .../flux/repositories/helm/elastic.yaml | 11 + .../repositories/helm/external-secrets.yaml | 10 + .../flux/repositories/helm/fairwinds.yaml | 11 + .../flux/repositories/helm/grafana.yaml | 11 + .../flux/repositories/helm/hetzner.yaml | 11 + .../flux/repositories/helm/ingress-nginx.yaml | 11 + kubernetes/flux/repositories/helm/intel.yaml | 11 + .../flux/repositories/helm/jahanson.yaml | 11 + .../flux/repositories/helm/jetstack.yaml | 11 + .../helm/kubernetes-sigs-external-dns.yaml | 11 + .../helm/kubernetes-sigs-metrics-server.yaml | 11 + .../helm/kubernetes-sigs-nfd.yaml | 11 + .../flux/repositories/helm/kustomization.yaml | 34 +++ .../flux/repositories/helm/kyverno.yaml | 11 + .../flux/repositories/helm/piraeus.yaml | 11 + .../flux/repositories/helm/postfinance.yaml | 11 + .../helm/prometheus-community.yaml | 11 + .../flux/repositories/helm/rook-ceph.yaml | 11 + .../flux/repositories/helm/stakater.yaml | 11 + .../flux/repositories/kustomization.yaml | 7 + .../flux/vars/cluster-secrets.sops.yaml | 31 +++ kubernetes/flux/vars/cluster-settings.yaml | 8 + kubernetes/tools/kbench.yaml | 48 ++++ kubernetes/tools/wipe-rook-fast.yaml | 108 +++++++++ kubernetes/tools/wipe-rook-slow.yaml | 105 ++++++++ renovate.json5 | 229 ++++++++++++++++++ 207 files changed, 6030 insertions(+) create mode 100644 .ansible-lint create mode 100644 .editorconfig create mode 100644 .envrc create mode 100644 .gitattributes create mode 100644 .gitignore create mode 100644 .markdownlint.yaml create mode 100644 .pre-commit-config.yaml create mode 100644 .renovate/customManagers.json5 create mode 100644 .sops.yaml create mode 100644 .task/checksum/ansible--venv create mode 100644 .taskfiles/Ansible/Taskfile.yaml create mode 100644 .taskfiles/PreCommit/Tasks.yaml create mode 100644 .taskfiles/VolSync/ListJob.tmpl.yaml create mode 100644 .taskfiles/VolSync/ReplicationDestination.tmpl.yaml create mode 100644 .taskfiles/VolSync/Tasks.yaml create mode 100644 .taskfiles/VolSync/UnockJob.tmpl.yaml create mode 100644 
.taskfiles/VolSync/WipeJob.tmpl.yaml create mode 100644 .taskfiles/VolSync/wait-for-job.sh create mode 100644 .taskfiles/_scripts/wait-for-k8s-job.sh create mode 100644 .taskfiles/flux/Taskfile.yaml create mode 100644 .taskfiles/k8s/Taskfile.yaml create mode 100644 .taskfiles/rook/Taskfile.yaml create mode 100644 .taskfiles/rook/WipeDiskJob.tmpl.yaml create mode 100644 .taskfiles/rook/WipeRookDataJob.tmpl.yaml create mode 100644 .taskfiles/rook/pod.yaml create mode 100644 .vscode/extensions.json create mode 100644 .vscode/settings.json create mode 100644 .yamllint.yaml create mode 100644 README.md create mode 100644 Taskfile.yaml create mode 100644 ansible/cilium-install.sh create mode 100644 ansible/main/.envrc create mode 100644 ansible/main/inventory/group_vars/all/main.yaml create mode 100644 ansible/main/inventory/group_vars/all/supplemental.yaml create mode 100644 ansible/main/inventory/group_vars/master/main.yaml create mode 100644 ansible/main/inventory/group_vars/worker/main.yaml create mode 100644 ansible/main/inventory/hosts.yaml create mode 100644 ansible/main/playbooks/cluster-add-user.yaml create mode 100644 ansible/main/playbooks/cluster-ceph-reset.yaml create mode 100644 ansible/main/playbooks/cluster-installation.yaml create mode 100644 ansible/main/playbooks/cluster-nuke.yaml create mode 100644 ansible/main/playbooks/cluster-prepare.yaml create mode 100644 ansible/main/playbooks/cluster-rollout-update.yaml create mode 100644 ansible/main/playbooks/files/.vimrc create mode 100644 ansible/main/playbooks/files/config.toml.tmpl create mode 100644 ansible/main/playbooks/files/stale-containers.service create mode 100644 ansible/main/playbooks/files/stale-containers.timer create mode 100644 ansible/main/playbooks/tasks/cilium.yaml create mode 100644 ansible/main/playbooks/tasks/coredns.yaml create mode 100644 ansible/main/playbooks/tasks/cruft.yaml create mode 100644 ansible/main/playbooks/tasks/helm_controller.yaml create mode 100644 ansible/main/playbooks/tasks/stale_containers.yaml create mode 100644 ansible/main/playbooks/templates/custom-cilium-helmchart.yaml.j2 create mode 100644 ansible/main/playbooks/templates/custom-coredns-helmchart.yaml.j2 create mode 100644 ansible/requirements.txt create mode 100644 ansible/requirements.yaml create mode 100644 kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml create mode 100644 kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml create mode 100644 kubernetes/apps/cert-manager/cert-manager/issuers/cloudflare/externalsecret.yaml create mode 100644 kubernetes/apps/cert-manager/cert-manager/issuers/cloudflare/issuer-letsencrypt-prod.yaml create mode 100644 kubernetes/apps/cert-manager/cert-manager/issuers/cloudflare/issuer-letsencrypt-staging.yaml create mode 100644 kubernetes/apps/cert-manager/cert-manager/issuers/dnsimple/dnsimple-issuer-rbac.yaml create mode 100644 kubernetes/apps/cert-manager/cert-manager/issuers/dnsimple/externalsecret.yaml create mode 100644 kubernetes/apps/cert-manager/cert-manager/issuers/dnsimple/helmrelease.yaml create mode 100644 kubernetes/apps/cert-manager/cert-manager/issuers/dnsimple/issuer-letsencrypt-prod.yaml create mode 100644 kubernetes/apps/cert-manager/cert-manager/issuers/dnsimple/issuer-letsencrypt-staging.yaml create mode 100644 kubernetes/apps/cert-manager/cert-manager/issuers/kustomization.yaml create mode 100644 kubernetes/apps/cert-manager/cert-manager/ks.yaml create mode 100644 kubernetes/apps/cert-manager/kustomization.yaml create mode 100644 
kubernetes/apps/cert-manager/namespace.yaml create mode 100644 kubernetes/apps/default/jellyfin/app/helmrelease.yaml create mode 100644 kubernetes/apps/default/jellyfin/app/kustomization.yaml create mode 100644 kubernetes/apps/default/jellyfin/app/pvc.yaml create mode 100644 kubernetes/apps/default/jellyfin/ks.yaml create mode 100644 kubernetes/apps/default/kustomization.yaml create mode 100644 kubernetes/apps/default/namespace.yaml create mode 100644 kubernetes/apps/default/rocky-nessa.yaml create mode 100644 kubernetes/apps/default/rocky-nienna.yaml create mode 100644 kubernetes/apps/default/ubuntu.yaml create mode 100644 kubernetes/apps/flux-system/add-ons/ks.yaml create mode 100644 kubernetes/apps/flux-system/add-ons/monitoring/kustomization.yaml create mode 100644 kubernetes/apps/flux-system/add-ons/monitoring/podmonitor.yaml create mode 100644 kubernetes/apps/flux-system/add-ons/monitoring/prometheusrule.yaml create mode 100644 kubernetes/apps/flux-system/add-ons/webhooks/git/externalsecret.yaml create mode 100644 kubernetes/apps/flux-system/add-ons/webhooks/git/ingress.yaml create mode 100644 kubernetes/apps/flux-system/add-ons/webhooks/git/kustomization.yaml create mode 100644 kubernetes/apps/flux-system/add-ons/webhooks/git/receiver.yaml create mode 100644 kubernetes/apps/flux-system/add-ons/webhooks/kustomization.yaml create mode 100644 kubernetes/apps/flux-system/kustomization.yaml create mode 100644 kubernetes/apps/flux-system/namespace.yaml create mode 100644 kubernetes/apps/kube-system/cilium/app/helmrelease.yaml create mode 100644 kubernetes/apps/kube-system/cilium/app/netpols/allow-same-ns.yaml create mode 100644 kubernetes/apps/kube-system/cilium/app/netpols/allow-ssh.yaml create mode 100644 kubernetes/apps/kube-system/cilium/app/netpols/apiserver.yaml create mode 100644 kubernetes/apps/kube-system/cilium/app/netpols/cilium-health.yaml create mode 100644 kubernetes/apps/kube-system/cilium/app/netpols/cilium-vxlan.yaml create mode 100644 kubernetes/apps/kube-system/cilium/app/netpols/core-dns.yaml create mode 100644 kubernetes/apps/kube-system/cilium/app/netpols/etcd.yaml create mode 100644 kubernetes/apps/kube-system/cilium/app/netpols/fix-apiserver.yml create mode 100644 kubernetes/apps/kube-system/cilium/app/netpols/hubble-relay.yaml create mode 100644 kubernetes/apps/kube-system/cilium/app/netpols/hubble-ui.yaml create mode 100644 kubernetes/apps/kube-system/cilium/app/netpols/kubelet.yaml create mode 100644 kubernetes/apps/kube-system/cilium/app/netpols/kustomization.yaml create mode 100644 kubernetes/apps/kube-system/cilium/ks.yaml create mode 100644 kubernetes/apps/kube-system/kustomization.yaml create mode 100644 kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml create mode 100644 kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml create mode 100644 kubernetes/apps/kube-system/metrics-server/ks.yaml create mode 100644 kubernetes/apps/kube-system/namespace.yaml create mode 100644 kubernetes/apps/kyverno/kustomization.yaml create mode 100644 kubernetes/apps/kyverno/kyverno/app/helmrelease.yaml create mode 100644 kubernetes/apps/kyverno/kyverno/app/kustomization.yaml create mode 100644 kubernetes/apps/kyverno/kyverno/ks.yaml create mode 100644 kubernetes/apps/kyverno/kyverno/policies/kustomization.yaml create mode 100644 kubernetes/apps/kyverno/kyverno/policies/remove-cpu-limits.yaml create mode 100644 kubernetes/apps/kyverno/namespace.yaml create mode 100644 kubernetes/apps/network/echo-server/app/helmrelease.yaml create mode 100644 
kubernetes/apps/network/echo-server/app/kustomization.yaml create mode 100644 kubernetes/apps/network/echo-server/ks.yaml create mode 100644 kubernetes/apps/network/external-dns/app/hsn-dev/externalsecret.yaml create mode 100644 kubernetes/apps/network/external-dns/app/hsn-dev/helmrelease.yaml create mode 100644 kubernetes/apps/network/external-dns/app/hsn-dev/kustomization.yaml create mode 100644 kubernetes/apps/network/external-dns/app/shared/dns_endpoint-crd.yaml create mode 100644 kubernetes/apps/network/external-dns/app/shared/kustomization.yaml create mode 100644 kubernetes/apps/network/external-dns/app/valinor-social/externalsecret.yaml create mode 100644 kubernetes/apps/network/external-dns/app/valinor-social/helmrelease.yaml create mode 100644 kubernetes/apps/network/external-dns/app/valinor-social/kustomization.yaml create mode 100644 kubernetes/apps/network/external-dns/ks.yaml create mode 100644 kubernetes/apps/network/ingress-nginx/app/certificate.yaml create mode 100644 kubernetes/apps/network/ingress-nginx/app/externalsecret.yaml create mode 100644 kubernetes/apps/network/ingress-nginx/app/helmrelease.yaml create mode 100644 kubernetes/apps/network/ingress-nginx/app/kustomization.yaml create mode 100644 kubernetes/apps/network/ingress-nginx/ks.yaml create mode 100644 kubernetes/apps/network/ingress-nginx/mastodon/certificate.yaml create mode 100644 kubernetes/apps/network/ingress-nginx/peertube/certificate.yaml create mode 100644 kubernetes/apps/network/ingress-nginx/peertube/helmrelease.yaml create mode 100644 kubernetes/apps/network/ingress-nginx/peertube/kustomization.yaml create mode 100644 kubernetes/apps/network/kustomization.yaml create mode 100644 kubernetes/apps/network/namespace.yaml create mode 100644 kubernetes/apps/security/external-secrets/app/helmrelease.yaml create mode 100644 kubernetes/apps/security/external-secrets/app/kustomization.yaml create mode 100644 kubernetes/apps/security/external-secrets/cluster-secrets/kustomization.yaml create mode 100644 kubernetes/apps/security/external-secrets/cluster-secrets/pgo-s3-creds.yaml create mode 100644 kubernetes/apps/security/external-secrets/ks.yaml create mode 100644 kubernetes/apps/security/external-secrets/stores/kustomization.yaml create mode 100644 kubernetes/apps/security/external-secrets/stores/onepassword/clustersecretstore.yaml create mode 100644 kubernetes/apps/security/external-secrets/stores/onepassword/helmrelease.yaml create mode 100644 kubernetes/apps/security/external-secrets/stores/onepassword/kustomization.yaml create mode 100644 kubernetes/apps/security/external-secrets/stores/onepassword/secret.sops.yaml create mode 100644 kubernetes/apps/security/kustomization.yaml create mode 100644 kubernetes/apps/security/namespace.yaml create mode 100644 kubernetes/apps/system/kustomization.yaml create mode 100644 kubernetes/apps/system/namespace.yaml create mode 100644 kubernetes/apps/system/node-feature-discovery/app/helmrelease.yaml create mode 100644 kubernetes/apps/system/node-feature-discovery/app/kustomization.yaml create mode 100644 kubernetes/apps/system/node-feature-discovery/ks.yaml create mode 100644 kubernetes/apps/system/reloader/app/helmrelease.yaml create mode 100644 kubernetes/apps/system/reloader/app/kustomization.yaml create mode 100644 kubernetes/apps/system/reloader/ks.yaml create mode 100644 kubernetes/apps/system/snapshot-controller/app/helmrelease.yaml create mode 100644 kubernetes/apps/system/snapshot-controller/app/kustomization.yaml create mode 100644 
kubernetes/apps/system/snapshot-controller/app/pki.yaml create mode 100644 kubernetes/apps/system/snapshot-controller/ks.yaml create mode 100644 kubernetes/bootstrap/flux/age-key.sops.yaml create mode 100644 kubernetes/bootstrap/flux/git-deploy-key.sops.yaml create mode 100644 kubernetes/bootstrap/flux/kustomization.yaml create mode 100644 kubernetes/bootstrap/hcloud.sops.yaml create mode 100644 kubernetes/bootstrap/readme.md create mode 100644 kubernetes/flux/cluster-apps.yaml create mode 100644 kubernetes/flux/config/cluster.yaml create mode 100644 kubernetes/flux/config/flux.yaml create mode 100644 kubernetes/flux/config/kustomization.yaml create mode 100644 kubernetes/flux/repositories/helm/authentik.yaml create mode 100644 kubernetes/flux/repositories/helm/bitnami.yaml create mode 100644 kubernetes/flux/repositories/helm/bjw-s.yaml create mode 100644 kubernetes/flux/repositories/helm/cilium.yaml create mode 100644 kubernetes/flux/repositories/helm/cloudnative-pg.yaml create mode 100644 kubernetes/flux/repositories/helm/crowdsec.yaml create mode 100644 kubernetes/flux/repositories/helm/crunchydata.yaml create mode 100644 kubernetes/flux/repositories/helm/democratic-csi.yaml create mode 100644 kubernetes/flux/repositories/helm/dragonflydb.yaml create mode 100644 kubernetes/flux/repositories/helm/elastic.yaml create mode 100644 kubernetes/flux/repositories/helm/external-secrets.yaml create mode 100644 kubernetes/flux/repositories/helm/fairwinds.yaml create mode 100644 kubernetes/flux/repositories/helm/grafana.yaml create mode 100644 kubernetes/flux/repositories/helm/hetzner.yaml create mode 100644 kubernetes/flux/repositories/helm/ingress-nginx.yaml create mode 100644 kubernetes/flux/repositories/helm/intel.yaml create mode 100644 kubernetes/flux/repositories/helm/jahanson.yaml create mode 100644 kubernetes/flux/repositories/helm/jetstack.yaml create mode 100644 kubernetes/flux/repositories/helm/kubernetes-sigs-external-dns.yaml create mode 100644 kubernetes/flux/repositories/helm/kubernetes-sigs-metrics-server.yaml create mode 100644 kubernetes/flux/repositories/helm/kubernetes-sigs-nfd.yaml create mode 100644 kubernetes/flux/repositories/helm/kustomization.yaml create mode 100644 kubernetes/flux/repositories/helm/kyverno.yaml create mode 100644 kubernetes/flux/repositories/helm/piraeus.yaml create mode 100644 kubernetes/flux/repositories/helm/postfinance.yaml create mode 100644 kubernetes/flux/repositories/helm/prometheus-community.yaml create mode 100644 kubernetes/flux/repositories/helm/rook-ceph.yaml create mode 100644 kubernetes/flux/repositories/helm/stakater.yaml create mode 100644 kubernetes/flux/repositories/kustomization.yaml create mode 100644 kubernetes/flux/vars/cluster-secrets.sops.yaml create mode 100644 kubernetes/flux/vars/cluster-settings.yaml create mode 100644 kubernetes/tools/kbench.yaml create mode 100644 kubernetes/tools/wipe-rook-fast.yaml create mode 100644 kubernetes/tools/wipe-rook-slow.yaml create mode 100644 renovate.json5 diff --git a/.ansible-lint b/.ansible-lint new file mode 100644 index 0000000..8f92c9a --- /dev/null +++ b/.ansible-lint @@ -0,0 +1,9 @@ +--- +skip_list: + - yaml[line-length] + - var-naming +warn_list: + - command-instead-of-shell + - deprecated-command-syntax + - experimental + - no-changed-when diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..6e40cb6 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,23 @@ +; https://editorconfig.org/ + +root = true + +[*] +indent_style = space +indent_size = 2 +end_of_line = lf 
+charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true + +[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +indent_style = tab +indent_size = 4 + +[*.md] +indent_size = 4 +trim_trailing_whitespace = false + +[{Dockerfile,*.bash,*.sh}] +indent_style = space +indent_size = 4 diff --git a/.envrc b/.envrc new file mode 100644 index 0000000..1ec308d --- /dev/null +++ b/.envrc @@ -0,0 +1,4 @@ +#shellcheck disable=SC2148,SC2155 +export KUBECONFIG="$(expand_path ./kubeconfig)" +export SOPS_AGE_KEY_FILE="$(expand_path ./age.key)" +export TALOSCONFIG="$(expand_path ./talos/clusterconfig/talosconfig)" \ No newline at end of file diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..8d1628f --- /dev/null +++ b/.gitattributes @@ -0,0 +1,4 @@ +* text=auto eol=lf +*.sops.* diff=sopsdiffer +*.sops.toml linguist-language=JSON +*.yaml.j2 linguist-language=YAML diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..b5d766a --- /dev/null +++ b/.gitignore @@ -0,0 +1,13 @@ +.DS_Store +Thumbs.db +.private/ +.venv/ +.terraform +*.tfvars +.decrypted~* +*.agekey +*.pub +*.key +*.pem +kubeconfig* +config.xml diff --git a/.markdownlint.yaml b/.markdownlint.yaml new file mode 100644 index 0000000..3443fa7 --- /dev/null +++ b/.markdownlint.yaml @@ -0,0 +1,23 @@ +--- +default: true + +# MD013/line-length - Line length +MD013: + # Number of characters + line_length: 240 + # Number of characters for headings + heading_line_length: 80 + # Number of characters for code blocks + code_block_line_length: 80 + # Include code blocks + code_blocks: true + # Include tables + tables: true + # Include headings + headings: true + # Include headings + headers: true + # Strict length checking + strict: false + # Stern length checking + stern: false diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..dd72080 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,53 @@ +--- +fail_fast: false + +exclude: | + (?x)^( + docs/_assets/.* + | .*\.sops\.toml + )$ + +repos: + - repo: https://github.com/adrienverge/yamllint + rev: v1.32.0 + hooks: + - id: yamllint + args: + - -c + - ".yamllint.yaml" + + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.5.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: fix-byte-order-marker + - id: mixed-line-ending + - id: check-added-large-files + args: [--maxkb=2048] + - id: check-merge-conflict + - id: check-executables-have-shebangs + + - repo: https://github.com/Lucas-C/pre-commit-hooks + rev: v1.5.4 + hooks: + - id: forbid-crlf + - id: forbid-tabs + + - repo: https://github.com/sirosen/fix-smartquotes + rev: 0.2.0 + hooks: + - id: fix-smartquotes + + - repo: https://github.com/jumanjihouse/pre-commit-hooks + rev: 3.0.0 + hooks: + - id: shellcheck + language: script + args: [--severity=error] + additional_dependencies: [] + + - repo: https://github.com/k8s-at-home/sops-pre-commit + rev: v2.1.1 + hooks: + - id: forbid-secrets diff --git a/.renovate/customManagers.json5 b/.renovate/customManagers.json5 new file mode 100644 index 0000000..1840864 --- /dev/null +++ b/.renovate/customManagers.json5 @@ -0,0 +1,37 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "customDatasources": { + "grafana-dashboards": { + "defaultRegistryUrlTemplate": "https://grafana.com/api/dashboards/{{packageName}}", + "format": "json", + "transformTemplates": [ + "{\"releases\":[{\"version\": $string(revision)}]}" + ] + } + }, + "customManagers": [ + { + 
"customType": "regex", + "description": "Process Grafana dashboards", + "fileMatch": [ + "(^|/)kubernetes/.+\\.ya?ml(\\.j2)?$" + ], + "matchStrings": [ + "depName=\"(?\\S+)\"\\n.*?gnetId: (?\\d+)\\n.*?revision: (?\\d+)" + ], + "datasourceTemplate": "custom.grafana-dashboards", + "versioningTemplate": "regex:^(?\\d+)$" + } + ], + "packageRules": [ + { + "addLabels": ["renovate/grafana-dashboard"], + "commitMessageExtra": "to revision {{newVersion}}", + "commitMessageTopic": "dashboard {{depName}}", + "matchDatasources": ["grafana-dashboards", "custom.grafana-dashboards"], + "matchUpdateTypes": ["major"], + "semanticCommitScope": "grafana-dashboards", + "semanticCommitType": "chore" + } + ] + } diff --git a/.sops.yaml b/.sops.yaml new file mode 100644 index 0000000..363adda --- /dev/null +++ b/.sops.yaml @@ -0,0 +1,15 @@ +--- +creation_rules: + - path_regex: kubernetes/.*\.sops\.ya?ml + encrypted_regex: "^(data|stringData)$" + # Valinor + age: >- + age1g786w8t40g9y29l33rfd4jqlwhrgsxsc7ped6uju60k54j0q3enql3kfve + - path_regex: .*\.sops\.(env|ini|json|toml) + # Valinor + age: >- + age1g786w8t40g9y29l33rfd4jqlwhrgsxsc7ped6uju60k54j0q3enql3kfve + - path_regex: (ansible|terraform|talos)/.*\.sops\.ya?ml + # Valinor + age: >- + age1g786w8t40g9y29l33rfd4jqlwhrgsxsc7ped6uju60k54j0q3enql3kfve diff --git a/.task/checksum/ansible--venv b/.task/checksum/ansible--venv new file mode 100644 index 0000000..aa1be3f --- /dev/null +++ b/.task/checksum/ansible--venv @@ -0,0 +1 @@ +7cc8dd1959207470e1da885dcb6fda02 diff --git a/.taskfiles/Ansible/Taskfile.yaml b/.taskfiles/Ansible/Taskfile.yaml new file mode 100644 index 0000000..ecacebf --- /dev/null +++ b/.taskfiles/Ansible/Taskfile.yaml @@ -0,0 +1,52 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +vars: + PYTHON_BIN: python3 + +env: + PATH: "{{.ROOT_DIR}}/.venv/bin:$PATH" + VIRTUAL_ENV: "{{.ROOT_DIR}}/.venv" + ANSIBLE_COLLECTIONS_PATH: "{{.ROOT_DIR}}/.venv/galaxy" + ANSIBLE_ROLES_PATH: "{{.ROOT_DIR}}/.venv/galaxy/ansible_roles" + ANSIBLE_VARS_ENABLED: "host_group_vars,community.sops.sops" + +tasks: + + deps: + desc: Set up Ansible dependencies for the environment + cmds: + - task: .venv + + run: + desc: Run an Ansible playbook for configuring a cluster + summary: | + Args: + cluster: Cluster to run command against (required) + playbook: Playbook to run (required) + prompt: Run Ansible playbook '{{.playbook}}' against the '{{.cluster}}' cluster... continue? 
+ deps: ["deps"] + cmd: | + .venv/bin/ansible-playbook \ + --inventory {{.ANSIBLE_DIR}}/{{.cluster}}/inventory/hosts.yaml \ + {{.ANSIBLE_DIR}}/{{.cluster}}/playbooks/{{.playbook}}.yaml {{.CLI_ARGS}} + preconditions: + - { msg: "Argument (cluster) is required", sh: "test -n {{.cluster}}" } + - { msg: "Argument (playbook) is required", sh: "test -n {{.playbook}}" } + - { msg: "Venv not found", sh: "test -d {{.ROOT_DIR}}/.venv" } + - { msg: "Inventory not found", sh: "test -f {{.ANSIBLE_DIR}}/{{.cluster}}/inventory/hosts.yaml" } + - { msg: "Playbook not found", sh: "test -f {{.ANSIBLE_DIR}}/{{.cluster}}/playbooks/{{.playbook}}.yaml" } + + .venv: + internal: true + cmds: + - true && {{.PYTHON_BIN}} -m venv {{.ROOT_DIR}}/.venv + - .venv/bin/python3 -m pip install --upgrade pip setuptools wheel + - .venv/bin/python3 -m pip install --upgrade --requirement {{.ANSIBLE_DIR}}/requirements.txt + - .venv/bin/ansible-galaxy install --role-file "{{.ANSIBLE_DIR}}/requirements.yaml" --force + sources: + - "{{.ANSIBLE_DIR}}/requirements.txt" + - "{{.ANSIBLE_DIR}}/requirements.yaml" + generates: + - "{{.ROOT_DIR}}/.venv/pyvenv.cfg" diff --git a/.taskfiles/PreCommit/Tasks.yaml b/.taskfiles/PreCommit/Tasks.yaml new file mode 100644 index 0000000..e708a01 --- /dev/null +++ b/.taskfiles/PreCommit/Tasks.yaml @@ -0,0 +1,16 @@ +--- +version: "3" + +tasks: + init: + desc: Initialize pre-commit hooks + cmds: + - pre-commit install --install-hooks + run: + desc: Run pre-commit + cmds: + - pre-commit run --all-files + update: + desc: Update pre-commit hooks + cmds: + - pre-commit autoupdate diff --git a/.taskfiles/VolSync/ListJob.tmpl.yaml b/.taskfiles/VolSync/ListJob.tmpl.yaml new file mode 100644 index 0000000..0d63998 --- /dev/null +++ b/.taskfiles/VolSync/ListJob.tmpl.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: "list-${rsrc}-${ts}" + namespace: "${namespace}" +spec: + ttlSecondsAfterFinished: 3600 + template: + spec: + automountServiceAccountToken: false + restartPolicy: OnFailure + containers: + - name: list + image: docker.io/restic/restic:0.16.0 + args: ["snapshots"] + envFrom: + - secretRef: + name: "${rsrc}-restic-secret" diff --git a/.taskfiles/VolSync/ReplicationDestination.tmpl.yaml b/.taskfiles/VolSync/ReplicationDestination.tmpl.yaml new file mode 100644 index 0000000..46be699 --- /dev/null +++ b/.taskfiles/VolSync/ReplicationDestination.tmpl.yaml @@ -0,0 +1,25 @@ +--- +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationDestination +metadata: + name: "${rsrc}-${claim}-${ts}" + namespace: "${namespace}" +spec: + trigger: + manual: restore-once + restic: + repository: "${rsrc}-restic-secret" + destinationPVC: "${claim}" + copyMethod: Direct + storageClassName: ceph-block + # IMPORTANT NOTE: + # Set to the last X number of snapshots to restore from + previous: ${previous} + # OR; + # IMPORTANT NOTE: + # On bootstrap set `restoreAsOf` to the time the old cluster was destroyed. + # This will essentially prevent volsync from trying to restore a backup + # from a application that started with default data in the PVC. + # Do not restore snapshots made after the following RFC3339 Timestamp. 
+ # date --rfc-3339=seconds (--utc) + # restoreAsOf: "2022-12-10T16:00:00-05:00" diff --git a/.taskfiles/VolSync/Tasks.yaml b/.taskfiles/VolSync/Tasks.yaml new file mode 100644 index 0000000..a27cc6d --- /dev/null +++ b/.taskfiles/VolSync/Tasks.yaml @@ -0,0 +1,158 @@ +--- +version: "3" + +x-task-vars: &task-vars + rsrc: '{{.rsrc}}' + controller: '{{.controller}}' + namespace: '{{.namespace}}' + claim: '{{.claim}}' + ts: '{{.ts}}' + kustomization: '{{.kustomization}}' + previous: '{{.previous}}' + +vars: + destinationTemplate: "{{.ROOT_DIR}}/.taskfiles/VolSync/ReplicationDestination.tmpl.yaml" + wipeJobTemplate: "{{.ROOT_DIR}}/.taskfiles/VolSync/WipeJob.tmpl.yaml" + waitForJobScript: "{{.ROOT_DIR}}/.taskfiles/VolSync/wait-for-job.sh" + listJobTemplate: "{{.ROOT_DIR}}/.taskfiles/VolSync/ListJob.tmpl.yaml" + unlockJobTemplate: "{{.ROOT_DIR}}/.taskfiles/VolSync/UnlockJob.tmpl.yaml" + ts: '{{now | date "150405"}}' + +tasks: + + list: + desc: List all snapshots taken by restic for a given ReplicationSource (ex. task volsync:list rsrc=plex [namespace=default]) + silent: true + cmds: + - envsubst < <(cat {{.listJobTemplate}}) | kubectl apply -f - + - bash {{.waitForJobScript}} list-{{.rsrc}}-{{.ts}} {{.namespace}} + - kubectl -n {{.namespace}} wait job/list-{{.rsrc}}-{{.ts}} --for condition=complete --timeout=1m + - kubectl -n {{.namespace}} logs job/list-{{.rsrc}}-{{.ts}} --container list + - kubectl -n {{.namespace}} delete job list-{{.rsrc}}-{{.ts}} + vars: + rsrc: '{{ or .rsrc (fail "ReplicationSource `rsrc` is required") }}' + namespace: '{{.namespace | default "default"}}' + env: *task-vars + preconditions: + - sh: test -f {{.waitForJobScript}} + - sh: test -f {{.listJobTemplate}} + + unlock: + desc: Unlocks restic repository for a given ReplicationSource (ex. task volsync:unlock rsrc=plex [namespace=default]) + silent: true + cmds: + - envsubst < <(cat {{.unlockJobTemplate}}) | kubectl apply -f - + - bash {{.waitForJobScript}} unlock-{{.rsrc}}-{{.ts}} {{.namespace}} + - kubectl -n {{.namespace}} wait job/unlock-{{.rsrc}}-{{.ts}} --for condition=complete --timeout=1m + - kubectl -n {{.namespace}} logs job/unlock-{{.rsrc}}-{{.ts}} --container unlock + - kubectl -n {{.namespace}} delete job unlock-{{.rsrc}}-{{.ts}} + vars: + rsrc: '{{ or .rsrc (fail "ReplicationSource `rsrc` is required") }}' + namespace: '{{.namespace | default "default"}}' + env: *task-vars + preconditions: + - sh: test -f {{.waitForJobScript}} + - sh: test -f {{.unlockJobTemplate}} + + # To run backup jobs in parallel for all replicationsources: + # - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:snapshot rsrc=$0 namespace=$1' + # + snapshot: + desc: Trigger a Restic ReplicationSource snapshot (ex. 
task volsync:snapshot rsrc=plex [namespace=default]) + cmds: + - kubectl -n {{.namespace}} patch replicationsources {{.rsrc}} --type merge -p '{"spec":{"trigger":{"manual":"{{.ts}}"}}}' + - bash {{.waitForJobScript}} volsync-src-{{.rsrc}} {{.namespace}} + - kubectl -n {{.namespace}} wait job/volsync-src-{{.rsrc}} --for condition=complete --timeout=120m + # TODO: Find a way to output logs + # Error from server (NotFound): jobs.batch "volsync-src-zzztest" not found + # - kubectl -n {{.namespace}} logs job/volsync-src-{{.rsrc}} + vars: + rsrc: '{{ or .rsrc (fail "ReplicationSource `rsrc` is required") }}' + namespace: '{{.namespace | default "default"}}' + env: *task-vars + preconditions: + - sh: test -f {{.waitForJobScript}} + - sh: kubectl -n {{.namespace}} get replicationsources {{.rsrc}} + msg: "ReplicationSource '{{.rsrc}}' not found in namespace '{{.namespace}}'" + + # To run restore jobs in parallel for all replicationdestinations: + # - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=2 -l bash -c 'task volsync:restore rsrc=$0 namespace=$1' + # + restore: + desc: Trigger a Restic ReplicationSource restore (ex. task volsync:restore rsrc=plex [namespace=default]) + cmds: + - task: restore-suspend-app + vars: *task-vars + - task: restore-wipe-job + vars: *task-vars + - task: restore-volsync-job + vars: *task-vars + - task: restore-resume-app + vars: *task-vars + vars: + rsrc: '{{ or .rsrc (fail "Variable `rsrc` is required") }}' + namespace: '{{.namespace | default "default"}}' + # 1) Query to find the Flux Kustomization associated with the ReplicationSource (rsrc) + kustomization: + sh: | + kubectl -n {{.namespace}} get replicationsource {{.rsrc}} \ + -o jsonpath="{.metadata.labels.kustomize\.toolkit\.fluxcd\.io/name}" + # 2) Query to find the Claim associated with the ReplicationSource (rsrc) + claim: + sh: | + kubectl -n {{.namespace}} get replicationsource {{.rsrc}} \ + -o jsonpath="{.spec.sourcePVC}" + # 3) Query to find the controller associated with the PersistentVolumeClaim (claim) + controller: + sh: | + app=$(kubectl -n {{.namespace}} get persistentvolumeclaim {{.claim}} -o jsonpath="{.metadata.labels.app\.kubernetes\.io/name}") + if kubectl -n {{ .namespace }} get deployment.apps/$app >/dev/null 2>&1 ; then + echo "deployment.apps/$app" + else + echo "statefulset.apps/$app" + fi + previous: "{{.previous | default 2}}" + env: *task-vars + preconditions: + - sh: test -f {{.wipeJobTemplate}} + - sh: test -f {{.destinationTemplate}} + - sh: test -f {{.waitForJobScript}} + + # Suspend the Flux ks and hr + restore-suspend-app: + internal: true + cmds: + - flux -n flux-system suspend kustomization {{.kustomization}} + - flux -n {{.namespace}} suspend helmrelease {{.rsrc}} + - kubectl -n {{.namespace}} scale {{.controller}} --replicas 0 + - kubectl -n {{.namespace}} wait pod --for delete --selector="app.kubernetes.io/name={{.rsrc}}" --timeout=2m + env: *task-vars + + # Wipe the PVC of all data + restore-wipe-job: + internal: true + cmds: + - envsubst < <(cat {{.wipeJobTemplate}}) | kubectl apply -f - + - bash {{.waitForJobScript}} wipe-{{.rsrc}}-{{.claim}}-{{.ts}} {{.namespace}} + - kubectl -n {{.namespace}} wait job/wipe-{{.rsrc}}-{{.claim}}-{{.ts}} --for condition=complete --timeout=120m + - kubectl -n {{.namespace}} logs job/wipe-{{.rsrc}}-{{.claim}}-{{.ts}} --container wipe + - kubectl -n {{.namespace}} delete job wipe-{{.rsrc}}-{{.claim}}-{{.ts}} + env: *task-vars + + # Create VolSync replicationdestination CR to restore data 
+ restore-volsync-job: + internal: true + cmds: + - envsubst < <(cat {{.destinationTemplate}}) | kubectl apply -f - + - bash {{.waitForJobScript}} volsync-dst-{{.rsrc}}-{{.claim}}-{{.ts}} {{.namespace}} + - kubectl -n {{.namespace}} wait job/volsync-dst-{{.rsrc}}-{{.claim}}-{{.ts}} --for condition=complete --timeout=120m + - kubectl -n {{.namespace}} delete replicationdestination {{.rsrc}}-{{.claim}}-{{.ts}} + env: *task-vars + + # Resume Flux ks and hr + restore-resume-app: + internal: true + cmds: + - flux -n {{.namespace}} resume helmrelease {{.rsrc}} + - flux -n flux-system resume kustomization {{.kustomization}} + env: *task-vars diff --git a/.taskfiles/VolSync/UnockJob.tmpl.yaml b/.taskfiles/VolSync/UnockJob.tmpl.yaml new file mode 100644 index 0000000..310f1b7 --- /dev/null +++ b/.taskfiles/VolSync/UnockJob.tmpl.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: "unlock-${rsrc}-${ts}" + namespace: "${namespace}" +spec: + ttlSecondsAfterFinished: 3600 + template: + spec: + automountServiceAccountToken: false + restartPolicy: OnFailure + containers: + - name: unlock + image: docker.io/restic/restic:0.16.0 + args: ["unlock", "--remove-all"] + envFrom: + - secretRef: + name: "${rsrc}-restic-secret" diff --git a/.taskfiles/VolSync/WipeJob.tmpl.yaml b/.taskfiles/VolSync/WipeJob.tmpl.yaml new file mode 100644 index 0000000..eb878b0 --- /dev/null +++ b/.taskfiles/VolSync/WipeJob.tmpl.yaml @@ -0,0 +1,25 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: "wipe-${rsrc}-${claim}-${ts}" + namespace: "${namespace}" +spec: + ttlSecondsAfterFinished: 3600 + template: + spec: + automountServiceAccountToken: false + restartPolicy: OnFailure + containers: + - name: wipe + image: public.ecr.aws/docker/library/busybox:latest + command: ["/bin/sh", "-c", "cd /config; find . 
-delete"] + volumeMounts: + - name: config + mountPath: /config + securityContext: + privileged: true + volumes: + - name: config + persistentVolumeClaim: + claimName: "${claim}" diff --git a/.taskfiles/VolSync/wait-for-job.sh b/.taskfiles/VolSync/wait-for-job.sh new file mode 100644 index 0000000..32feadd --- /dev/null +++ b/.taskfiles/VolSync/wait-for-job.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +JOB_NAME=$1 +NAMESPACE="${2:-default}" + +[[ -z "${JOB_NAME}" ]] && echo "Job name not specified" && exit 1 + +while true; do + STATUS="$(kubectl -n "${NAMESPACE}" get pod -l job-name="${JOB_NAME}" -o jsonpath='{.items[*].status.phase}')" + if [ "${STATUS}" == "Pending" ]; then + break + fi + sleep 1 +done diff --git a/.taskfiles/_scripts/wait-for-k8s-job.sh b/.taskfiles/_scripts/wait-for-k8s-job.sh new file mode 100644 index 0000000..32feadd --- /dev/null +++ b/.taskfiles/_scripts/wait-for-k8s-job.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +JOB_NAME=$1 +NAMESPACE="${2:-default}" + +[[ -z "${JOB_NAME}" ]] && echo "Job name not specified" && exit 1 + +while true; do + STATUS="$(kubectl -n "${NAMESPACE}" get pod -l job-name="${JOB_NAME}" -o jsonpath='{.items[*].status.phase}')" + if [ "${STATUS}" == "Pending" ]; then + break + fi + sleep 1 +done diff --git a/.taskfiles/flux/Taskfile.yaml b/.taskfiles/flux/Taskfile.yaml new file mode 100644 index 0000000..2f3768a --- /dev/null +++ b/.taskfiles/flux/Taskfile.yaml @@ -0,0 +1,47 @@ +--- +version: "3" + +tasks: + gr-sync: + desc: Sync all Flux GitRepositories + cmds: + - | + kubectl get gitrepositories --all-namespaces --no-headers | awk '{print $1, $2}' \ + | xargs -P 4 -L 1 bash -c \ + 'kubectl -n $0 annotate gitrepository/$1 reconcile.fluxcd.io/requestedAt=$(date +%s) --field-manager=flux-client-side-apply --overwrite' + + ks-sync: + desc: Sync all Flux Kustomizations + cmds: + - | + kubectl get kustomization --all-namespaces --no-headers | awk '{print $1, $2}' \ + | xargs -P 4 -L 1 bash -c \ + 'kubectl -n $0 annotate kustomization/$1 reconcile.fluxcd.io/requestedAt="$(date +%s)" --field-manager=flux-client-side-apply --overwrite' + + hr-sync: + desc: Sync all Flux HelmReleases + cmds: + - | + kubectl get helmreleases --all-namespaces --no-headers | awk '{print $1, $2}' \ + | xargs -P 4 -L 1 bash -c \ + 'kubectl -n $0 annotate helmrelease/$1 reconcile.fluxcd.io/requestedAt="$(date +%s)" --overwrite' + + tf-sync: + desc: Sync Flux Terraforms + cmds: + - | + kubectl get terraforms --all-namespaces --no-headers | awk '{print $1, $2}' \ + | xargs -P 4 -L 1 bash -c \ + 'kubectl -n $0 annotate terraform/$1 reconcile.fluxcd.io/requestedAt="$(date +%s)" --overwrite' + hr-suspend: + desc: Suspend all Flux HelmReleases + cmds: + - | + flux get helmrelease --all-namespaces --no-header | awk '{print $1, $2}' \ + | xargs -L 1 bash -c 'flux -n $0 suspend helmrelease $1' + hr-resume: + desc: Resume all Flux HelmReleases + cmds: + - | + flux get helmrelease --all-namespaces --no-header | awk '{print $1, $2}' \ + | xargs -L 1 bash -c 'flux -n $0 resume helmrelease $1' diff --git a/.taskfiles/k8s/Taskfile.yaml b/.taskfiles/k8s/Taskfile.yaml new file mode 100644 index 0000000..541afc1 --- /dev/null +++ b/.taskfiles/k8s/Taskfile.yaml @@ -0,0 +1,12 @@ +--- +version: "3" + +tasks: + hubble: + desc: forward the hubble relay + cmds: + - cilium hubble port-forward & + hubble-ui: + desc: port-forward hubble to 8888 + cmds: + - kubectl port-forward -n kube-system svc/hubble-ui 8888:80 \ No newline at end of file diff --git a/.taskfiles/rook/Taskfile.yaml 
b/.taskfiles/rook/Taskfile.yaml new file mode 100644 index 0000000..df004a1 --- /dev/null +++ b/.taskfiles/rook/Taskfile.yaml @@ -0,0 +1,104 @@ +--- +version: "3" + +x-task-vars: &task-vars + node: "{{.node}}" + ceph_disk: "{{.ceph_disk}}" + ts: "{{.ts}}" + jobName: "{{.jobName}}" + +vars: + waitForJobScript: "../_scripts/wait-for-k8s-job.sh" + ts: '{{now | date "150405"}}' + +tasks: + wipe-node-aule: + desc: Trigger a wipe of Rook-Ceph data on node "aule" + cmds: + - task: wipe-disk + vars: + node: "{{.node}}" + ceph_disk: "/dev/disk/by-id/scsi-0HC_Volume_37460833" + - task: wipe-data + vars: + node: "{{.node}}" + vars: + node: aule + + wipe-node-orome: + desc: Trigger a wipe of Rook-Ceph data on node "orome" + cmds: + - task: wipe-disk + vars: + node: "{{.node}}" + ceph_disk: "/dev/disk/by-id/scsi-0HC_Volume_37645333" + - task: wipe-data + vars: + node: "{{.node}}" + vars: + node: orome + + wipe-node-eonwe: + desc: Trigger a wipe of Rook-Ceph data on node "eonwe" + cmds: + - task: wipe-disk + vars: + node: "{{.node}}" + ceph_disk: "/dev/disk/by-id/scsi-0HC_Volume_37460887" + - task: wipe-data + vars: + node: "{{.node}}" + vars: + node: eonwe + + wipe-node-arlen: + desc: Trigger a wipe of Rook-Ceph data on node "arlen" + cmds: + - task: wipe-disk + vars: + node: "{{.node}}" + ceph_disk: "/dev/disk/by-id/scsi-0HC_Volume_37460897" + - task: wipe-data + vars: + node: "{{.node}}" + vars: + node: arlen + + wipe-disk: + desc: Wipe all remnants of rook-ceph from a given disk (ex. task rook:wipe-disk node=aule ceph_disk="/dev/nvme0n1") + silent: true + internal: true + cmds: + - envsubst < <(cat {{.wipeRookDiskJobTemplate}}) | kubectl apply -f - + - bash {{.waitForJobScript}} {{.jobName}} default + - kubectl -n default wait job/{{.jobName}} --for condition=complete --timeout=1m + - kubectl -n default logs job/{{.jobName}} --container disk-wipe + - kubectl -n default delete job {{.jobName}} + vars: + node: '{{ or .node (fail "`node` is required") }}' + ceph_disk: '{{ or .ceph_disk (fail "`ceph_disk` is required") }}' + jobName: 'wipe-disk-{{- .node -}}-{{- .ceph_disk | replace "/" "-" -}}-{{- .ts -}}' + wipeRookDiskJobTemplate: "WipeDiskJob.tmpl.yaml" + env: *task-vars + preconditions: + - sh: test -f {{.waitForJobScript}} + - sh: test -f {{.wipeRookDiskJobTemplate}} + + wipe-data: + desc: Wipe all remnants of rook-ceph from a given node (ex.
task rook:wipe-data node=aule) + silent: true + internal: true + cmds: + - envsubst < <(cat {{.wipeRookDataJobTemplate}}) | kubectl apply -f - + - bash {{.waitForJobScript}} {{.jobName}} default + - kubectl -n default wait job/{{.jobName}} --for condition=complete --timeout=1m + - kubectl -n default logs job/{{.jobName}} --container disk-wipe + - kubectl -n default delete job {{.jobName}} + vars: + node: '{{ or .node (fail "`node` is required") }}' + jobName: "wipe-rook-data-{{- .node -}}-{{- .ts -}}" + wipeRookDataJobTemplate: "WipeRookDataJob.tmpl.yaml" + env: *task-vars + preconditions: + - sh: test -f {{.waitForJobScript}} + - sh: test -f {{.wipeRookDataJobTemplate}} diff --git a/.taskfiles/rook/WipeDiskJob.tmpl.yaml b/.taskfiles/rook/WipeDiskJob.tmpl.yaml new file mode 100644 index 0000000..13fa4f7 --- /dev/null +++ b/.taskfiles/rook/WipeDiskJob.tmpl.yaml @@ -0,0 +1,26 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: "${jobName}" + namespace: "default" +spec: + ttlSecondsAfterFinished: 3600 + template: + spec: + automountServiceAccountToken: false + restartPolicy: Never + nodeName: ${node} + containers: + - name: disk-wipe + image: ghcr.io/onedr0p/alpine:3.17.3@sha256:999384960b6114496a5e4036e945141c205d064ce23b87326bd3f8d878c5a9d4 + securityContext: + privileged: true + resources: {} + command: ["/bin/sh", "-c"] + args: + - apk add --no-cache sgdisk util-linux parted; + sgdisk --zap-all ${ceph_disk}; + blkdiscard ${ceph_disk}; + dd if=/dev/zero bs=1M count=10000 oflag=direct of=${ceph_disk}; + partprobe ${ceph_disk}; diff --git a/.taskfiles/rook/WipeRookDataJob.tmpl.yaml b/.taskfiles/rook/WipeRookDataJob.tmpl.yaml new file mode 100644 index 0000000..e5e5eef --- /dev/null +++ b/.taskfiles/rook/WipeRookDataJob.tmpl.yaml @@ -0,0 +1,29 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: "${jobName}" + namespace: "default" +spec: + ttlSecondsAfterFinished: 3600 + template: + spec: + automountServiceAccountToken: false + restartPolicy: Never + nodeName: ${node} + containers: + - name: disk-wipe + image: ghcr.io/onedr0p/alpine:3.17.3@sha256:999384960b6114496a5e4036e945141c205d064ce23b87326bd3f8d878c5a9d4 + securityContext: + privileged: true + resources: {} + command: ["/bin/sh", "-c"] + args: + - rm -rf /mnt/host_var/lib/rook + volumeMounts: + - mountPath: /mnt/host_var + name: host-var + volumes: + - name: host-var + hostPath: + path: /var diff --git a/.taskfiles/rook/pod.yaml b/.taskfiles/rook/pod.yaml new file mode 100644 index 0000000..8dddcad --- /dev/null +++ b/.taskfiles/rook/pod.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Pod +metadata: + name: my-pod +spec: + containers: + - name: disk-wipe + image: ghcr.io/onedr0p/alpine:3.17.3@sha256:999384960b6114496a5e4036e945141c205d064ce23b87326bd3f8d878c5a9d4 + securityContext: + privileged: true + resources: {} + command: ["/bin/sh", "-c"] + args: + - apk add --no-cache sgdisk util-linux parted e2fsprogs; + sgdisk --zap-all /dev/nvme1n1; + blkdiscard /dev/nvme1n1; + dd if=/dev/zero bs=1M count=10000 oflag=direct of=/dev/nvme1n1; + sgdisk /dev/nvme1n1; + partprobe /dev/nvme1n1; diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 0000000..3814880 --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,10 @@ +{ + "recommendations": [ + "mikestead.dotenv", + "redhat.ansible", + "redhat.vscode-yaml", + "signageos.signageos-vscode-sops", + "pkief.material-icon-theme", + "ms-vscode-remote.remote-ssh" + ] +} diff --git a/.vscode/settings.json
b/.vscode/settings.json new file mode 100644 index 0000000..67d6e32 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,46 @@ +{ + "ansible.validation.lint.arguments": "-c .ansible-lint", + "files.associations": { + "*.json5": "jsonc", + "**/ansible/**/*.yaml": "ansible", + "**/ansible/**/*.sops.yaml": "yaml", + "**/ansible/**/inventory/**/*.yaml": "yaml", + "**/kubernetes/**/*.sops.toml": "plaintext" + }, + "material-icon-theme.folders.associations": { + ".taskfiles": "utils", + "bootstrap": "import", + "charts": "kubernetes", + "hack": "scripts", + "repositories": "database", + "vars": "other", + // namespaces + "cert-manager": "guard", + "external-secrets": "keys", + "kube-system": "kubernetes", + "monitoring": "event", + "networking": "connection", + "rook-ceph": "dump", + }, + "yaml.schemaStore.enable": true, + "yaml.schemas": { + "ansible": "ansible/**/*.yaml", + "kubernetes": "kubernetes/**/*.yaml" + }, + "editor.fontFamily": "FiraCode Nerd Font", + "editor.fontLigatures": true, + "editor.bracketPairColorization.enabled": true, + "editor.guides.bracketPairs": true, + "editor.guides.bracketPairsHorizontal": true, + "editor.guides.highlightActiveBracketPair": true, + "editor.hover.delay": 1500, + "editor.stickyScroll.enabled": false, + "editor.rulers": [ + 100 + ], + "explorer.autoReveal": false, + "files.trimTrailingWhitespace": true, + "ansible.python.interpreterPath": "/usr/bin/python3", + "sops.defaults.ageKeyFile": "age.key", + "ansible.validation.lint.path": "~/projects/valinor/.venv/bin/ansible-lint" +} diff --git a/.yamllint.yaml b/.yamllint.yaml new file mode 100644 index 0000000..bb7b058 --- /dev/null +++ b/.yamllint.yaml @@ -0,0 +1,29 @@ +--- +ignore: | + .ansible/ + .direnv/ + .private/ + .vscode/ + *.sops.* + ansible/roles/xanmanning.k3s/ + +extends: default + +rules: + truthy: + allowed-values: ["true", "false", "on"] + + comments: + min-spaces-from-content: 1 + + line-length: disable + + braces: + min-spaces-inside: 0 + max-spaces-inside: 1 + + brackets: + min-spaces-inside: 0 + max-spaces-inside: 0 + + indentation: enable diff --git a/README.md b/README.md new file mode 100644 index 0000000..f68a50d --- /dev/null +++ b/README.md @@ -0,0 +1 @@ +Kubernetes with Talos @ Hetzner \ No newline at end of file diff --git a/Taskfile.yaml b/Taskfile.yaml new file mode 100644 index 0000000..59290ce --- /dev/null +++ b/Taskfile.yaml @@ -0,0 +1,158 @@ +--- +version: "3" + +vars: + PYTHON_BIN: python3 + ANSIBLE_DIR: "{{.ROOT_DIR}}/ansible" + KUBERNETES_DIR: "{{.ROOT_DIR}}/kubernetes" + TERRAFORM_DIR: "{{.ROOT_DIR}}/terraform" + CLUSTER_SECRETS_FILE: "{{.CLUSTER_DIR}}/flux/vars/cluster-secrets.sops.env" + CLUSTER_SETTINGS_FILE: "{{.CLUSTER_DIR}}/flux/vars/cluster-settings.env" + +env: + KUBECONFIG: "{{.ROOT_DIR}}/kubeconfig" + SOPS_AGE_KEY_FILE: "{{.ROOT_DIR}}/age.key" + PATH: "{{.ROOT_DIR}}/.venv/bin:$PATH" + VIRTUAL_ENV: "{{.ROOT_DIR}}/.venv" + ANSIBLE_COLLECTIONS_PATH: "{{.ROOT_DIR}}/.venv/galaxy" + ANSIBLE_ROLES_PATH: "{{.ROOT_DIR}}/.venv/galaxy/ansible_roles" + ANSIBLE_VARS_ENABLED: "host_group_vars,community.sops.sops" + K8S_AUTH_KUBECONFIG: "{{.ROOT_DIR}}/kubeconfig" + +includes: + ansible: .taskfiles/Ansible/Taskfile.yaml + volsync: .taskfiles/VolSync/Tasks.yaml + precommit: .taskfiles/PreCommit/Tasks.yaml + k8s: .taskfiles/k8s/Taskfile.yaml + rook: + taskfile: ".taskfiles/rook" + dir: .taskfiles/rook + flux: + dir: .taskfiles/flux + taskfile: .taskfiles/flux + +tasks: + + default: + silent: true + cmds: ["task -l"] + + init: + desc: Initialize workstation dependencies with 
Brew + cmds: + - brew install {{.DEPS}} {{.CLI_ARGS}} + preconditions: + - sh: command -v brew + msg: | + Homebrew is not installed. Using MacOS, Linux or WSL? + Head over to https://brew.sh to get up and running. + vars: + DEPS: >- + age + ansible + direnv + derailed/k9s/k9s + fluxcd/tap/flux + go-task/tap/go-task + helm + ipcalc + jq + kubernetes-cli + kustomize + pre-commit + prettier + sops + stern + terraform + tflint + weaveworks/tap/gitops + yamllint + yq + + configure-venv: + desc: Install or upgrade the Python virtual env + cmds: + - "{{.PYTHON_BIN}} -m venv {{.ROOT_DIR}}/.venv" + - .venv/bin/python3 -m pip install --upgrade pip setuptools wheel + - .venv/bin/python3 -m pip install --upgrade --requirement "{{.ROOT_DIR}}/requirements.txt" + - .venv/bin/ansible-galaxy install --role-file "{{.ROOT_DIR}}/requirements.yaml" --force + + flux-apply: + desc: Apply a resource path that contains Flux substitution variables + dotenv: ['{{.CLUSTER_SETTINGS_FILE}}'] + vars: + ks: '{{ or .ks (fail "Missing path (`ks` var)") }}' + cmd: | + sops exec-env {{.CLUSTER_SECRETS_FILE}} \ + "kustomize build --load-restrictor=LoadRestrictionsNone {{.ks}} | \ + envsubst | kubectl apply --server-side --field-manager=kustomize-controller -f -" + preconditions: + - sh: test -f {{.CLUSTER_SECRETS_FILE}} + - sh: test -f {{.CLUSTER_SETTINGS_FILE}} + + sync-secrets: + desc: Sync ExternalSecret resources + vars: + secret: '{{ .secret | default ""}}' + namespace: '{{.namespace | default "default"}}' + cmd: | + {{if eq .secret ""}} + kubectl get externalsecret.external-secrets.io --all-namespaces --no-headers -A | awk '{print $1, $2}' \ + | xargs --max-procs=4 -l bash -c 'kubectl -n $0 annotate externalsecret.external-secrets.io $1 force-sync=$(date +%s) --overwrite' + {{else}} + kubectl -n {{.namespace}} annotate externalsecret.external-secrets.io {{.secret}} force-sync=$(date +%s) --overwrite + {{end}} + preconditions: + - kubectl -n {{.namespace}} get externalsecret {{.secret}} + + mount-volume: + desc: Mount a PersistentVolumeClaim to a temporary pod + interactive: true + vars: + claim: '{{ or .claim (fail "PersistentVolumeClaim `claim` is required") }}' + namespace: '{{.namespace | default "default"}}' + cmd: | + kubectl run -n {{.namespace}} debug-{{.claim}} -i --tty --rm --image=null --privileged --overrides=' + { + "apiVersion": "v1", + "spec": { + "containers": [ + { + "name": "debug", + "image": "ghcr.io/onedr0p/alpine:rolling", + "command": ["/bin/bash"], + "stdin": true, + "stdinOnce": true, + "tty": true, + "volumeMounts": [ + { + "name": "config", + "mountPath": "/config" + } + ] + } + ], + "volumes": [ + { + "name": "config", + "persistentVolumeClaim": { + "claimName": "{{.claim}}" + } + } + ], + "restartPolicy": "Never" + } + }' + preconditions: + - kubectl -n {{.namespace}} get pvc {{.claim}} + + # https://github.com/fluxcd/helm-controller/issues/644 + "644": + cmds: + - kubectl -n {{.namespace}} delete secret -l owner=helm,name={{.release}},status=pending-upgrade + - flux -n {{.namespace}} reconcile hr {{.release}} + vars: + release: '{{ or .release (fail "HelmRelease `release` is required") }}' + namespace: '{{.namespace | default "default"}}' + preconditions: + - flux -n {{.namespace}} get hr {{.release}} diff --git a/ansible/cilium-install.sh b/ansible/cilium-install.sh new file mode 100644 index 0000000..0411a6e --- /dev/null +++ b/ansible/cilium-install.sh @@ -0,0 +1,10 @@ +#!/bin/bash +cilium install \ +--helm-set=ipam.mode=kubernetes \ +--helm-set=kubeProxyReplacement=true \ 
+--helm-set=k8sServiceHost=167.235.217.82 \ +--helm-set=policyAuditMode=true \ +--helm-set=hostFirewall.enabled=true \ +--helm-set=extraConfig.allow-localhost=policy \ +--helm-set=hubble.relay.enabled=true \ +--helm-set=hubble.ui.enabled=true diff --git a/ansible/main/.envrc b/ansible/main/.envrc new file mode 100644 index 0000000..a3eca56 --- /dev/null +++ b/ansible/main/.envrc @@ -0,0 +1,8 @@ +#shellcheck disable=SC2148,SC2155 +export SOPS_AGE_KEY_FILE="$(expand_path ../../age.key)" +export VIRTUAL_ENV="$(expand_path ../../.venv)" +export ANSIBLE_COLLECTIONS_PATH=$(expand_path ../../.venv/galaxy) +export ANSIBLE_ROLES_PATH=$(expand_path ../../.venv/galaxy/ansible_roles) +export ANSIBLE_VARS_ENABLED="host_group_vars,community.sops.sops" +export ANSIBLE_INVENTORY=$(expand_path ./inventory/hosts.yaml) +PATH_add "$(expand_path ../../.venv/bin)" diff --git a/ansible/main/inventory/group_vars/all/main.yaml b/ansible/main/inventory/group_vars/all/main.yaml new file mode 100644 index 0000000..a778361 --- /dev/null +++ b/ansible/main/inventory/group_vars/all/main.yaml @@ -0,0 +1,28 @@ +--- +# renovate: datasource=github-releases depName=k3s-io/k3s +k3s_release_version: "v1.29.0+k3s1" +k3s_install_hard_links: true +k3s_become: true +k3s_etcd_datastore: true +k3s_registration_address: 10.5.0.2 +# /var/lib/rancher/k3s/server/manifests +k3s_server_manifests_urls: + # Essential Prometheus Operator CRDs (the rest are installed with the kube-prometheus-stack helm release) + - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml + filename: custom-prometheus-podmonitors.yaml + - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml + filename: custom-prometheus-prometheusrules.yaml + - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml + filename: custom-prometheus-scrapeconfigs.yaml + - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml + filename: custom-prometheus-servicemonitors.yaml +# /var/lib/rancher/k3s/server/manifests +k3s_server_manifests_templates: + - custom-coredns-helmchart.yaml.j2 + - custom-cilium-helmchart.yaml.j2 +# k3s_registries: +# mirrors: +# docker.io: +# endpoint: ["http://harbor.hsn.dev/v2/docker.io"] +# ghcr.io: +# endpoint: ["http://harbor.hsn.dev/v2/ghcr.io"] \ No newline at end of file diff --git a/ansible/main/inventory/group_vars/all/supplemental.yaml b/ansible/main/inventory/group_vars/all/supplemental.yaml new file mode 100644 index 0000000..6aba05e --- /dev/null +++ b/ansible/main/inventory/group_vars/all/supplemental.yaml @@ -0,0 +1,3 @@ +--- +github_username: jahanson +timezone: America/Chicago diff --git a/ansible/main/inventory/group_vars/master/main.yaml b/ansible/main/inventory/group_vars/master/main.yaml new file mode 100644 index 0000000..f6003c9 --- /dev/null +++ b/ansible/main/inventory/group_vars/master/main.yaml @@ -0,0 +1,25 @@ +--- +k3s_control_node: true +k3s_server: + cluster-cidr: 10.32.0.0/16 + disable: ["coredns", "flannel", "local-storage", "metrics-server", "servicelb", "traefik"] + disable-cloud-controller: true + disable-helm-controller: false + disable-kube-proxy: true + disable-network-policy: true + docker: false + 
etcd-disable-snapshots: true + etcd-expose-metrics: true + flannel-backend: "none" # quote + https-listen-port: 6443 + # kube-apiserver-arg: ["anonymous-auth=true"] + # kubelet-arg: ["feature-gates=ImageMaximumGCAge=true","imageMaximumGCAge=30m"] + kubelet-arg: ["image-gc-high-threshold=85","image-gc-low-threshold=80"] + kube-controller-manager-arg: ["bind-address=0.0.0.0"] + kube-scheduler-arg: ["bind-address=0.0.0.0"] + node-ip: "{{ ansible_host }}" + pause-image: registry.k8s.io/pause:3.9 + secrets-encryption: true + service-cidr: 10.33.0.0/16 + tls-san: ["{{ k3s_registration_address }}"] + write-kubeconfig-mode: "0644" diff --git a/ansible/main/inventory/group_vars/worker/main.yaml b/ansible/main/inventory/group_vars/worker/main.yaml new file mode 100644 index 0000000..dfce51b --- /dev/null +++ b/ansible/main/inventory/group_vars/worker/main.yaml @@ -0,0 +1,5 @@ +--- +k3s_control_node: false +k3s_agent: + node-ip: "{{ ansible_host }}" + pause-image: registry.k8s.io/pause:3.9 diff --git a/ansible/main/inventory/hosts.yaml b/ansible/main/inventory/hosts.yaml new file mode 100644 index 0000000..4ca5f7a --- /dev/null +++ b/ansible/main/inventory/hosts.yaml @@ -0,0 +1,18 @@ +--- +kubernetes: + vars: + ansible_user: jahanson + ansible_ssh_port: 22 + children: + master: + hosts: + galadriel: + ansible_host: 10.1.1.61 + thrain: + ansible_host: 10.1.1.62 + cirdan: + ansible_host: 10.1.1.63 + workers: + hosts: + qbee: + ansible_host: 10.1.1.41 diff --git a/ansible/main/playbooks/cluster-add-user.yaml b/ansible/main/playbooks/cluster-add-user.yaml new file mode 100644 index 0000000..477691d --- /dev/null +++ b/ansible/main/playbooks/cluster-add-user.yaml @@ -0,0 +1,44 @@ +--- +- name: Add user 'jahanson' and add to sudo group + hosts: all + become: true + + tasks: + - name: Create user 'jahanson' + ansible.builtin.user: + name: jahanson + state: present + - name: Add user 'jahanson' to sudo group + when: ansible_user == 'root' + ansible.builtin.user: + name: jahanson + groups: sudo + append: true + - name: User Configuration | SSH keys + ansible.posix.authorized_key: + user: "jahanson" + key: "https://github.com/jahanson.keys" + - name: User Configuration | Silence login + ansible.builtin.file: + dest: "{{ '/home/' + ansible_user if ansible_user != 'root' else '/root' }}/.hushlogin" + state: touch + owner: "{{ ansible_user }}" + group: "{{ ansible_user }}" + mode: "0644" + modification_time: preserve + access_time: preserve + - name: Copy .vimrc file + ansible.builtin.copy: + src: "files/.vimrc" + dest: "/home/jahanson/.vimrc" + owner: "{{ ansible_user }}" + group: "{{ ansible_user }}" + mode: "0644" + + - name: User Configuration | Add user to sudoers + ansible.builtin.copy: + content: "jahanson ALL=(ALL:ALL) NOPASSWD:ALL" + dest: "/etc/sudoers.d/jahanson" + owner: root + group: root + mode: "0440" diff --git a/ansible/main/playbooks/cluster-ceph-reset.yaml b/ansible/main/playbooks/cluster-ceph-reset.yaml new file mode 100644 index 0000000..d39c72a --- /dev/null +++ b/ansible/main/playbooks/cluster-ceph-reset.yaml @@ -0,0 +1,40 @@ +--- +- name: Reset Ceph Drives + hosts: kubernetes + become: true + gather_facts: true + any_errors_fatal: true + pre_tasks: + - name: Pausing for 2 seconds... 
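+ # Short grace period to Ctrl-C before the destructive drive-wipe tasks below run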
+ ansible.builtin.pause: + seconds: 2 + tasks: + - name: Reset Ceph Drives # noqa: ignore-errors + ignore_errors: true + when: ceph_drives | default([]) | length > 0 + block: + - name: Delete (/var/lib/rook) + ansible.builtin.file: + state: absent + path: /var/lib/rook + - name: Delete (/dev/mapper/ceph-*) # noqa: no-changed-when + ansible.builtin.shell: | + set -o pipefail + ls /dev/mapper/ceph-* | xargs -I% -- dmsetup remove_all --force % || true + - name: Delete (/dev/ceph-*) # noqa: no-changed-when + ansible.builtin.command: rm -rf /dev/ceph-* + - name: Delete (/dev/mapper/ceph--*) # noqa: no-changed-when + ansible.builtin.command: rm -rf /dev/mapper/ceph--* + - name: Wipe (sgdisk) # noqa: no-changed-when + ansible.builtin.command: "sgdisk --zap-all {{ item }}" + loop: "{{ ceph_drives }}" + - name: Wipe (dd) # noqa: no-changed-when + ansible.builtin.command: "dd if=/dev/zero of={{ item }} bs=1M count=100 oflag=direct,dsync" + loop: "{{ ceph_drives }}" + - name: Wipe (blkdiscard) # noqa: no-changed-when + ansible.builtin.command: "blkdiscard {{ item }}" + loop: "{{ ceph_drives }}" + when: "'nvme' in item" + - name: Wipe (partprobe) # noqa: no-changed-when + ansible.builtin.command: "partprobe {{ item }}" + loop: "{{ ceph_drives }}" diff --git a/ansible/main/playbooks/cluster-installation.yaml b/ansible/main/playbooks/cluster-installation.yaml new file mode 100644 index 0000000..8876216 --- /dev/null +++ b/ansible/main/playbooks/cluster-installation.yaml @@ -0,0 +1,95 @@ +--- +- name: Cluster Installation + hosts: kubernetes + become: true + gather_facts: true + any_errors_fatal: true + pre_tasks: + - name: Pausing for 2 seconds... + ansible.builtin.pause: + seconds: 2 + tasks: + - name: Check if cluster is installed + check_mode: false + ansible.builtin.stat: + path: /etc/rancher/k3s/config.yaml + register: k3s_installed + + - name: Ignore manifests templates and urls if the cluster is already installed + when: k3s_installed.stat.exists + ansible.builtin.set_fact: + k3s_server_manifests_templates: [] + k3s_server_manifests_urls: [] + + - name: Install Kubernetes + ansible.builtin.include_role: + name: xanmanning.k3s + public: true + vars: + k3s_state: installed + + - name: Kubeconfig + ansible.builtin.include_tasks: tasks/kubeconfig.yaml + vars: + repository_base: "{{ lookup('ansible.builtin.pipe', 'git rev-parse --show-toplevel') }}" + + - name: Wait for custom manifests to rollout + when: + - k3s_primary_control_node + - (k3s_server_manifests_templates | length > 0 + or k3s_server_manifests_urls | length > 0) + kubernetes.core.k8s_info: + kubeconfig: /etc/rancher/k3s/k3s.yaml + kind: "{{ item.kind }}" + name: "{{ item.name }}" + namespace: "{{ item.namespace | default('') }}" + wait: true + wait_sleep: 10 + wait_timeout: 360 + loop: + - { name: cilium, kind: HelmChart, namespace: kube-system } + - { name: coredns, kind: HelmChart, namespace: kube-system } + - { name: policy, kind: CiliumL2AnnouncementPolicy } + - { name: pool, kind: CiliumLoadBalancerIPPool } + - { name: podmonitors.monitoring.coreos.com, kind: CustomResourceDefinition } + - { name: prometheusrules.monitoring.coreos.com, kind: CustomResourceDefinition } + - { name: scrapeconfigs.monitoring.coreos.com, kind: CustomResourceDefinition } + - { name: servicemonitors.monitoring.coreos.com, kind: CustomResourceDefinition } + + - name: Coredns + when: k3s_primary_control_node + ansible.builtin.include_tasks: tasks/coredns.yaml + + - name: Cilium + when: k3s_primary_control_node + ansible.builtin.include_tasks: 
tasks/cilium.yaml + + - name: Cruft + when: k3s_primary_control_node + ansible.builtin.include_tasks: tasks/cruft.yaml + + - name: Stale Containers + ansible.builtin.include_tasks: tasks/stale_containers.yaml + vars: + stale_containers_state: disabled + + # - name: Helm controller + # notify: Restart Kubernetes + # when: k3s_control_node + # ansible.builtin.include_tasks: tasks/helm_controller.yaml + + # TODO: Replace this with embedded spegel in the future + - name: Copy custom containerd configuration + notify: Restart Kubernetes + ansible.builtin.copy: + src: files/config.toml.tmpl + dest: /var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl + owner: root + group: root + mode: "0644" + + handlers: + - name: Restart Kubernetes + ansible.builtin.systemd: + name: k3s + state: restarted diff --git a/ansible/main/playbooks/cluster-nuke.yaml b/ansible/main/playbooks/cluster-nuke.yaml new file mode 100644 index 0000000..1d99039 --- /dev/null +++ b/ansible/main/playbooks/cluster-nuke.yaml @@ -0,0 +1,61 @@ +--- +- name: Cluster Nuke + hosts: kubernetes + become: true + gather_facts: true + any_errors_fatal: true + pre_tasks: + - name: Pausing for 2 seconds... + ansible.builtin.pause: + seconds: 2 + tasks: + - name: Stop Kubernetes # noqa: ignore-errors + ignore_errors: true + block: + - name: Stop Kubernetes + ansible.builtin.include_role: + name: xanmanning.k3s + public: true + vars: + k3s_state: stopped + + # https://github.com/k3s-io/docs/blob/main/docs/installation/network-options.md + - name: Networking + block: + - name: Networking | Delete Cilium links + ansible.builtin.command: + cmd: "ip link delete {{ item }}" + removes: "/sys/class/net/{{ item }}" + loop: ["cilium_host", "cilium_net", "cilium_vxlan"] + - name: Networking | Flush iptables + ansible.builtin.iptables: + table: "{{ item }}" + flush: true + loop: ["filter", "nat", "mangle", "raw"] + - name: Networking | Flush ip6tables + ansible.builtin.iptables: + table: "{{ item }}" + flush: true + ip_version: ipv6 + loop: ["filter", "nat", "mangle", "raw"] + - name: Networking | Delete CNI directory + ansible.builtin.file: + path: /etc/cni/net.d + state: absent + + - name: Uninstall Kubernetes + ansible.builtin.include_role: + name: xanmanning.k3s + public: true + vars: + k3s_state: uninstalled + + - name: Stale Containers + ansible.builtin.include_tasks: tasks/stale_containers.yaml + vars: + stale_containers_state: disabled + + - name: Reboot + ansible.builtin.reboot: + msg: Rebooting nodes + reboot_timeout: 3600 diff --git a/ansible/main/playbooks/cluster-prepare.yaml b/ansible/main/playbooks/cluster-prepare.yaml new file mode 100644 index 0000000..b993446 --- /dev/null +++ b/ansible/main/playbooks/cluster-prepare.yaml @@ -0,0 +1,130 @@ +--- +- name: Prepare System + hosts: kubernetes + become: true + gather_facts: true + any_errors_fatal: true + pre_tasks: + - name: Pausing for 2 seconds... 
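+ # Brief window to abort before packages, shells, and kernel settings are changed on the hosts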
+ ansible.builtin.pause: + seconds: 2 + tasks: + - name: Locale + block: + - name: Locale | Set timezone + community.general.timezone: + name: "{{ timezone | default('Etc/UTC') }}" + + - name: Packages + block: + - name: Packages | Add non-free repository + ansible.builtin.apt_repository: + repo: deb http://deb.debian.org/debian/ stable main contrib non-free + filename: non-free + update_cache: true + - name: Packages | Install Intel common packages + when: inventory_hostname == 'orome' + ansible.builtin.apt: + name: vim,i965-va-driver-shaders,apt-transport-https,ca-certificates,conntrack,curl,dirmngr,gdisk, + gnupg,hdparm,htop,btop,intel-gpu-tools,intel-media-va-driver-non-free,iperf3,iptables,iputils-ping,ipvsadm, + libseccomp2,lm-sensors,neofetch,net-tools,nfs-common,nvme-cli,open-iscsi,parted,psmisc,python3, + python3-apt,python3-openshift,python3-kubernetes,python3-yaml,smartmontools,socat,software-properties-common, + unzip,util-linux + install_recommends: false + - name: Packages | Install AMD common packages + when: inventory_hostname != 'orome' + ansible.builtin.apt: + name: vim,apt-transport-https,ca-certificates,conntrack,curl,dirmngr,gdisk, + gnupg,hdparm,htop,btop,iperf3,iptables,iputils-ping,ipvsadm, + libseccomp2,lm-sensors,neofetch,net-tools,nfs-common,nvme-cli,open-iscsi,parted,psmisc,python3, + python3-apt,python3-openshift,python3-kubernetes,python3-yaml,smartmontools,socat,software-properties-common, + unzip,util-linux + install_recommends: false + + + - name: Fish + block: + - name: Fish | Add fish apt key + ansible.builtin.get_url: + url: https://download.opensuse.org/repositories/shells:fish:release:3/Debian_12/Release.key + dest: /etc/apt/trusted.gpg.d/fish.asc + owner: root + group: root + mode: "0644" + - name: Fish | Add fish repository + ansible.builtin.apt_repository: + repo: deb [signed-by=/etc/apt/trusted.gpg.d/fish.asc] http://download.opensuse.org/repositories/shells:/fish:/release:/3/Debian_12/ / + filename: fish + update_cache: true + - name: Fish | Install fish + ansible.builtin.apt: + name: fish + install_recommends: false + - name: Fish | Set as default shell + ansible.builtin.user: + name: "{{ ansible_user }}" + shell: /usr/bin/fish + - name: Fish | Create configuration directory + ansible.builtin.file: + path: "{{ '/home/' + ansible_user if ansible_user != 'root' else '/root' }}/.config/fish/functions" + state: directory + owner: "{{ ansible_user }}" + group: "{{ ansible_user }}" + recurse: true + - name: Fish | Create neofetch greeting + ansible.builtin.copy: + dest: "{{ '/home/' + ansible_user if ansible_user != 'root' else '/root' }}/.config/fish/functions/fish_greeting.fish" + owner: "{{ ansible_user }}" + group: "{{ ansible_user }}" + mode: "0755" + content: neofetch --config none + - name: Fish | Create kubectl shorthand + ansible.builtin.copy: + dest: "{{ '/home/' + ansible_user if ansible_user != 'root' else '/root' }}/.config/fish/functions/k.fish" + owner: "{{ ansible_user }}" + group: "{{ ansible_user }}" + mode: "0755" + content: | + function k --wraps=kubectl --description 'kubectl shorthand' + kubectl $argv + end + + - name: System Configuration + notify: Reboot + block: + - name: System Configuration | Disable swap + ansible.posix.mount: + name: "{{ item }}" + fstype: swap + state: absent + loop: ["none", "swap"] + - name: System Configuration | Create Kernel modules + ansible.builtin.copy: + dest: "/etc/modules-load.d/{{ item }}.conf" + mode: "0644" + content: "{{ item }}" + loop: ["br_netfilter", "ceph", "ip_vs", "ip_vs_rr", "nbd", 
"overlay", "rbd", "tcp_bbr"] + register: modules_status + - name: System Configuration | Reload Kernel modules # noqa: no-changed-when no-handler + when: modules_status.changed + ansible.builtin.systemd: + name: systemd-modules-load + state: restarted + - name: System Configuration | Sysctl + ansible.posix.sysctl: + name: "{{ item.key }}" + value: "{{ item.value }}" + sysctl_file: /etc/sysctl.d/99-kubernetes.conf + reload: true + with_dict: "{{ sysctl_config }}" + vars: + sysctl_config: + fs.inotify.max_queued_events: 65536 + fs.inotify.max_user_watches: 524288 + fs.inotify.max_user_instances: 8192 + + handlers: + - name: Reboot + ansible.builtin.reboot: + msg: Rebooting nodes + reboot_timeout: 3600 diff --git a/ansible/main/playbooks/cluster-rollout-update.yaml b/ansible/main/playbooks/cluster-rollout-update.yaml new file mode 100644 index 0000000..1dc4f7b --- /dev/null +++ b/ansible/main/playbooks/cluster-rollout-update.yaml @@ -0,0 +1,71 @@ +--- +# https://github.com/kevincoakley/ansible-role-k8s-rolling-update +- name: Cluster update rollout + hosts: kubernetes + become: true + gather_facts: true + any_errors_fatal: true + serial: 1 + pre_tasks: + - name: Pausing for 2 seconds... + ansible.builtin.pause: + seconds: 2 + tasks: + - name: Details + ansible.builtin.command: "kubectl get node {{ inventory_hostname }} -o json" + register: kubectl_get_node + delegate_to: "{{ groups['master'][0] }}" + failed_when: false + changed_when: false + + - name: Update + when: + # When status.conditions[x].type == Ready then check stats.conditions[x].status for True|False + - kubectl_get_node['stdout'] | from_json | json_query("status.conditions[?type == 'Ready'].status") + # If spec.unschedulable is defined then the node is cordoned + - not (kubectl_get_node['stdout'] | from_json).spec.unschedulable is defined + block: + - name: Cordon + kubernetes.core.k8s_drain: + name: "{{ inventory_hostname }}" + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: cordon + delegate_to: "{{ groups['master'][0] }}" + + - name: Drain + kubernetes.core.k8s_drain: + name: "{{ inventory_hostname }}" + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: drain + delete_options: + delete_emptydir_data: true + ignore_daemonsets: true + terminate_grace_period: 600 + wait_timeout: 900 + pod_selectors: + - app!=rook-ceph-osd + delegate_to: "{{ groups['master'][0] }}" + + - name: Update + ansible.builtin.apt: + upgrade: dist + update_cache: true + + - name: Check if reboot is required + ansible.builtin.stat: + path: /var/run/reboot-required + register: reboot_required + + - name: Reboot + when: reboot_required.stat.exists + ansible.builtin.reboot: + msg: Rebooting node + post_reboot_delay: 120 + reboot_timeout: 3600 + + - name: Uncordon + kubernetes.core.k8s_drain: + name: "{{ inventory_hostname }}" + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: uncordon + delegate_to: "{{ groups['master'][0] }}" diff --git a/ansible/main/playbooks/files/.vimrc b/ansible/main/playbooks/files/.vimrc new file mode 100644 index 0000000..e841c6d --- /dev/null +++ b/ansible/main/playbooks/files/.vimrc @@ -0,0 +1,2 @@ +source $VIMRUNTIME/defaults.vim +set mouse-=a \ No newline at end of file diff --git a/ansible/main/playbooks/files/config.toml.tmpl b/ansible/main/playbooks/files/config.toml.tmpl new file mode 100644 index 0000000..d252c88 --- /dev/null +++ b/ansible/main/playbooks/files/config.toml.tmpl @@ -0,0 +1,25 @@ +version = 2 + +[plugins."io.containerd.internal.v1.opt"] + path = "/var/lib/rancher/k3s/agent/containerd" + 
+[plugins."io.containerd.grpc.v1.cri"] + stream_server_address = "127.0.0.1" + stream_server_port = "10010" + enable_selinux = false + enable_unprivileged_ports = true + enable_unprivileged_icmp = true + sandbox_image = "registry.k8s.io/pause:3.9" + +[plugins."io.containerd.grpc.v1.cri".containerd] + snapshotter = "overlayfs" + disable_snapshot_annotations = true + +[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + +[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + +[plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/var/lib/rancher/k3s/agent/etc/containerd/certs.d" diff --git a/ansible/main/playbooks/files/stale-containers.service b/ansible/main/playbooks/files/stale-containers.service new file mode 100644 index 0000000..5136df2 --- /dev/null +++ b/ansible/main/playbooks/files/stale-containers.service @@ -0,0 +1,6 @@ +[Unit] +Description=Stale containers + +[Service] +Type=oneshot +ExecStart=/usr/local/bin/k3s crictl rmi --prune diff --git a/ansible/main/playbooks/files/stale-containers.timer b/ansible/main/playbooks/files/stale-containers.timer new file mode 100644 index 0000000..731885a --- /dev/null +++ b/ansible/main/playbooks/files/stale-containers.timer @@ -0,0 +1,11 @@ +[Unit] +Description=Stale containers + +[Timer] +OnCalendar=weekly +AccuracySec=1h +Persistent=true +RandomizedDelaySec=6000 + +[Install] +WantedBy=timers.target diff --git a/ansible/main/playbooks/tasks/cilium.yaml b/ansible/main/playbooks/tasks/cilium.yaml new file mode 100644 index 0000000..ca242bb --- /dev/null +++ b/ansible/main/playbooks/tasks/cilium.yaml @@ -0,0 +1,56 @@ +--- +- name: Cilium + block: + - name: Cilium | Check if Cilium HelmChart exists + kubernetes.core.k8s_info: + kubeconfig: /etc/rancher/k3s/k3s.yaml + name: cilium + kind: HelmChart + namespace: kube-system + register: cilium_helmchart + + - name: Cilium | Wait for Cilium to rollout + when: cilium_helmchart.resources | count > 0 + kubernetes.core.k8s_info: + kubeconfig: /etc/rancher/k3s/k3s.yaml + name: helm-install-cilium + kind: Job + namespace: kube-system + wait: true + wait_condition: + type: Complete + status: true + wait_timeout: 360 + + - name: Cilium | Patch the Cilium HelmChart to unmanage it + when: cilium_helmchart.resources | count > 0 + kubernetes.core.k8s_json_patch: + kubeconfig: /etc/rancher/k3s/k3s.yaml + name: cilium + kind: HelmChart + namespace: kube-system + patch: + - op: add + path: /metadata/annotations/helmcharts.helm.cattle.io~1unmanaged + value: "true" + + - name: Cilium | Delete the Cilium HelmChart CR + when: cilium_helmchart.resources | count > 0 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + name: cilium + kind: HelmChart + namespace: kube-system + state: absent + + - name: Cilium | Force delete the Cilium HelmChart + when: cilium_helmchart.resources | count > 0 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + name: cilium + kind: HelmChart + namespace: kube-system + state: patched + definition: + metadata: + finalizers: [] diff --git a/ansible/main/playbooks/tasks/coredns.yaml b/ansible/main/playbooks/tasks/coredns.yaml new file mode 100644 index 0000000..d18383a --- /dev/null +++ b/ansible/main/playbooks/tasks/coredns.yaml @@ -0,0 +1,56 @@ +--- +- name: Coredns + block: + - name: Coredns | Check if Coredns HelmChart exists + kubernetes.core.k8s_info: + kubeconfig: /etc/rancher/k3s/k3s.yaml + name: coredns + kind: HelmChart + namespace: kube-system + register: 
coredns_helmchart + + - name: Coredns | Wait for Coredns to rollout + when: coredns_helmchart.resources | count > 0 + kubernetes.core.k8s_info: + kubeconfig: /etc/rancher/k3s/k3s.yaml + name: helm-install-coredns + kind: Job + namespace: kube-system + wait: true + wait_condition: + type: Complete + status: true + wait_timeout: 360 + + - name: Coredns | Patch the Coredns HelmChart to unmanage it + when: coredns_helmchart.resources | count > 0 + kubernetes.core.k8s_json_patch: + kubeconfig: /etc/rancher/k3s/k3s.yaml + name: coredns + kind: HelmChart + namespace: kube-system + patch: + - op: add + path: /metadata/annotations/helmcharts.helm.cattle.io~1unmanaged + value: "true" + + - name: Coredns | Delete the Coredns HelmChart CR + when: coredns_helmchart.resources | count > 0 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + name: coredns + kind: HelmChart + namespace: kube-system + state: absent + + - name: Coredns | Force delete the Coredns HelmChart + when: coredns_helmchart.resources | count > 0 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + name: coredns + kind: HelmChart + namespace: kube-system + state: patched + definition: + metadata: + finalizers: [] diff --git a/ansible/main/playbooks/tasks/cruft.yaml b/ansible/main/playbooks/tasks/cruft.yaml new file mode 100644 index 0000000..6714d8b --- /dev/null +++ b/ansible/main/playbooks/tasks/cruft.yaml @@ -0,0 +1,32 @@ +--- +# https://github.com/k3s-io/k3s/issues/1971 +- name: Cruft + block: + - name: Cruft | Get list of custom manifests + ansible.builtin.find: + paths: "{{ k3s_server_manifests_dir }}" + file_type: file + use_regex: true + patterns: ["^custom-.*"] + register: custom_manifest + + - name: Cruft | Delete custom manifests + ansible.builtin.file: + path: "{{ item.path }}" + state: absent + loop: "{{ custom_manifest.files }}" + + - name: Cruft | Get list of custom addons + kubernetes.core.k8s_info: + kubeconfig: /etc/rancher/k3s/k3s.yaml + kind: Addon + register: addons_list + + - name: Cruft | Delete addons + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + name: "{{ item.metadata.name }}" + kind: Addon + namespace: kube-system + state: absent + loop: "{{ addons_list.resources | selectattr('metadata.name', 'match', '^custom-.*') | list }}" diff --git a/ansible/main/playbooks/tasks/helm_controller.yaml b/ansible/main/playbooks/tasks/helm_controller.yaml new file mode 100644 index 0000000..01ea6f1 --- /dev/null +++ b/ansible/main/playbooks/tasks/helm_controller.yaml @@ -0,0 +1,16 @@ +--- +- name: Helm Controller + block: + - name: Helm Controller | Disable Helm controller + ansible.builtin.replace: + path: /etc/rancher/k3s/config.yaml + regexp: '^disable-helm-controller: false$' + replace: 'disable-helm-controller: true' + + - name: Helm Controller | Delete Helm controller CRDs + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + name: "{{ item }}" + kind: CustomResourceDefinition + state: absent + loop: ["helmcharts.helm.cattle.io", "helmchartconfigs.helm.cattle.io"] diff --git a/ansible/main/playbooks/tasks/stale_containers.yaml b/ansible/main/playbooks/tasks/stale_containers.yaml new file mode 100644 index 0000000..9857d6b --- /dev/null +++ b/ansible/main/playbooks/tasks/stale_containers.yaml @@ -0,0 +1,36 @@ +--- +# https://github.com/k3s-io/k3s/issues/1900 +- name: Enabled Stale containers + when: stale_containers_state == "enabled" + block: + - name: Stale containers | Create systemd unit + ansible.builtin.copy: + src: files/stale-containers.service + dest: 
/etc/systemd/system/stale-containers.service + owner: root + group: root + mode: "0644" + + - name: Stale containers | Create systemd timer + ansible.builtin.copy: + src: files/stale-containers.timer + dest: /etc/systemd/system/stale-containers.timer + owner: root + group: root + mode: "0644" + + - name: Stale containers | Start the systemd timer + ansible.builtin.systemd: + name: stale-containers.timer + enabled: true + daemon_reload: true + masked: false + state: started + +- name: Disable Stale containers + when: stale_containers_state == "disabled" + block: + - name: Stale containers | Mask the systemd timer + ansible.builtin.systemd: + name: stale-containers.timer + masked: true diff --git a/ansible/main/playbooks/templates/custom-cilium-helmchart.yaml.j2 b/ansible/main/playbooks/templates/custom-cilium-helmchart.yaml.j2 new file mode 100644 index 0000000..8688131 --- /dev/null +++ b/ansible/main/playbooks/templates/custom-cilium-helmchart.yaml.j2 @@ -0,0 +1,46 @@ +--- +# https://docs.k3s.io/helm +apiVersion: helm.cattle.io/v1 +kind: HelmChart +metadata: + name: cilium + namespace: kube-system +spec: + # renovate: datasource=helm + repo: https://helm.cilium.io/ + chart: cilium + version: 1.14.5 + targetNamespace: kube-system + bootstrap: true + valuesContent: |- + cluster: + name: homelab + id: 1 + containerRuntime: + integration: containerd + socketPath: /var/run/k3s/containerd/containerd.sock + hubble: + enabled: true + relay: + enabled: true + ui: + enabled: true + ipam: + mode: kubernetes + ipv4NativeRoutingCIDR: "{{ k3s_server['cluster-cidr'] }}" + k8sServiceHost: "{{ k3s_registration_address }}" + k8sServicePort: 6443 + kubeProxyReplacement: true + localRedirectPolicy: true + operator: + rollOutPods: true + rollOutCiliumPods: true + securityContext: + privileged: true + policyAuditMode: true + hostFirewall: + enabled: true + extraConfig: + allow-localhost: policy + + diff --git a/ansible/main/playbooks/templates/custom-coredns-helmchart.yaml.j2 b/ansible/main/playbooks/templates/custom-coredns-helmchart.yaml.j2 new file mode 100644 index 0000000..3b038d7 --- /dev/null +++ b/ansible/main/playbooks/templates/custom-coredns-helmchart.yaml.j2 @@ -0,0 +1,77 @@ +--- +# https://docs.k3s.io/helm +apiVersion: helm.cattle.io/v1 +kind: HelmChart +metadata: + name: coredns + namespace: kube-system +spec: + # renovate: datasource=helm + repo: https://coredns.github.io/helm + chart: coredns + version: 1.29.0 + targetNamespace: kube-system + bootstrap: true + valuesContent: |- + fullnameOverride: coredns + replicaCount: 2 + k8sAppLabelOverride: kube-dns + service: + name: kube-dns + clusterIP: {{ k3s_server['service-cidr'] | ansible.utils.nthhost(10) }} + serviceAccount: + create: true + deployment: + annotations: + reloader.stakater.com/auto: "true" + servers: + - zones: + - zone: . + scheme: dns:// + use_tcp: true + port: 53 + plugins: + - name: log + - name: errors + - name: health + configBlock: |- + lameduck 5s + - name: ready + - name: kubernetes + parameters: cluster.local in-addr.arpa ip6.arpa + configBlock: |- + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + - name: prometheus + parameters: 0.0.0.0:9153 + - name: forward + parameters: . 
/etc/resolv.conf + - name: cache + parameters: 30 + - name: loop + - name: reload + - name: loadbalance + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + app.kubernetes.io/instance: coredns diff --git a/ansible/requirements.txt b/ansible/requirements.txt new file mode 100644 index 0000000..6078f86 --- /dev/null +++ b/ansible/requirements.txt @@ -0,0 +1,8 @@ +ansible==9.1.0 +ansible-lint==6.22.1 +# https://github.com/pyca/bcrypt/issues/684 +bcrypt==4.1.2 +jmespath==1.0.1 +netaddr==0.10.0 +openshift==0.13.2 +passlib==1.7.4 diff --git a/ansible/requirements.yaml b/ansible/requirements.yaml new file mode 100644 index 0000000..5b72928 --- /dev/null +++ b/ansible/requirements.yaml @@ -0,0 +1,18 @@ +--- +collections: + - name: ansible.posix + version: 1.5.4 + - name: ansible.utils + version: 3.0.0 + - name: community.general + version: 8.1.0 + - name: community.sops + version: 1.6.7 + - name: kubernetes.core + version: 3.0.0 + - name: onepassword.connect + version: 2.2.4 +roles: + - name: xanmanning.k3s + src: https://github.com/PyratLabs/ansible-role-k3s + version: v3.4.3 diff --git a/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml b/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml new file mode 100644 index 0000000..b66980a --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml @@ -0,0 +1,47 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: cert-manager + namespace: cert-manager +spec: + interval: 30m + chart: + spec: + chart: cert-manager + version: v1.13.3 + sourceRef: + kind: HelmRepository + name: jetstack + namespace: flux-system + interval: 30m + + install: + crds: CreateReplace + upgrade: + crds: CreateReplace + + values: + installCRDs: true + + webhook: + enabled: true + + extraArgs: + - --dns01-recursive-nameservers=1.1.1.1:53,9.9.9.9:53 + - --dns01-recursive-nameservers-only + - --enable-certificate-owner-ref + + replicaCount: 1 + + podDnsPolicy: "None" + podDnsConfig: + nameservers: + - "1.1.1.1" + - "9.9.9.9" + prometheus: + enabled: true + servicemonitor: + enabled: true + prometheusInstance: monitoring diff --git a/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml b/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml new file mode 100644 index 0000000..b0a63bf --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: cert-manager +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/cert-manager/cert-manager/issuers/cloudflare/externalsecret.yaml b/kubernetes/apps/cert-manager/cert-manager/issuers/cloudflare/externalsecret.yaml new file mode 100644 index 0000000..fbeba34 --- /dev/null +++ 
b/kubernetes/apps/cert-manager/cert-manager/issuers/cloudflare/externalsecret.yaml @@ -0,0 +1,19 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: cloudflare-api-token + namespace: cert-manager +spec: + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + target: + name: cloudflare-api-token + creationPolicy: Owner + data: + - secretKey: api-token + remoteRef: + key: Cloudflare + property: hsn_api_token diff --git a/kubernetes/apps/cert-manager/cert-manager/issuers/cloudflare/issuer-letsencrypt-prod.yaml b/kubernetes/apps/cert-manager/cert-manager/issuers/cloudflare/issuer-letsencrypt-prod.yaml new file mode 100644 index 0000000..e94b0ea --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/issuers/cloudflare/issuer-letsencrypt-prod.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/cert-manager.io/clusterissuer_v1.json +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-cloudflare-production +spec: + acme: + email: "joe@veri.dev" + preferredChain: "" + privateKeySecretRef: + name: letsencrypt-cloudflare-production + server: https://acme-v02.api.letsencrypt.org/directory + solvers: + - dns01: + cloudflare: + apiTokenSecretRef: + name: cloudflare-api-token + key: api-token + selector: + dnsZones: + - hsn.dev diff --git a/kubernetes/apps/cert-manager/cert-manager/issuers/cloudflare/issuer-letsencrypt-staging.yaml b/kubernetes/apps/cert-manager/cert-manager/issuers/cloudflare/issuer-letsencrypt-staging.yaml new file mode 100644 index 0000000..09429f9 --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/issuers/cloudflare/issuer-letsencrypt-staging.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/cert-manager.io/clusterissuer_v1.json +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-cloudflare-staging +spec: + acme: + email: "joe@veri.dev" + preferredChain: "" + privateKeySecretRef: + name: letsencrypt-cloudflare-staging + server: https://acme-staging-v02.api.letsencrypt.org/directory + solvers: + - dns01: + cloudflare: + apiTokenSecretRef: + name: cloudflare-api-token + key: api-token + selector: + dnsZones: + - hsn.dev diff --git a/kubernetes/apps/cert-manager/cert-manager/issuers/dnsimple/dnsimple-issuer-rbac.yaml b/kubernetes/apps/cert-manager/cert-manager/issuers/dnsimple/dnsimple-issuer-rbac.yaml new file mode 100644 index 0000000..241ba25 --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/issuers/dnsimple/dnsimple-issuer-rbac.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: flow-schema-reader +rules: + - apiGroups: ["flowcontrol.apiserver.k8s.io"] + resources: ["flowschemas", "prioritylevelconfigurations"] + verbs: ["list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: grant-flow-schema-permission +subjects: + - kind: ServiceAccount + name: dnsimple-issuer-cert-manager-webhook-dnsimple + namespace: cert-manager +roleRef: + kind: ClusterRole + name: flow-schema-reader + apiGroup: rbac.authorization.k8s.io diff --git a/kubernetes/apps/cert-manager/cert-manager/issuers/dnsimple/externalsecret.yaml b/kubernetes/apps/cert-manager/cert-manager/issuers/dnsimple/externalsecret.yaml new file mode 100644 index 0000000..d5d62de --- /dev/null +++ 
b/kubernetes/apps/cert-manager/cert-manager/issuers/dnsimple/externalsecret.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: dnsimple-api-token + namespace: cert-manager +spec: + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + target: + name: dnsimple-api-token + creationPolicy: Owner + data: + - secretKey: api-token + remoteRef: + key: DNSimple + property: cert-manager + - secretKey: letsencrypt-email + remoteRef: + key: DNSimple + property: letsencrypt-email diff --git a/kubernetes/apps/cert-manager/cert-manager/issuers/dnsimple/helmrelease.yaml b/kubernetes/apps/cert-manager/cert-manager/issuers/dnsimple/helmrelease.yaml new file mode 100644 index 0000000..a2de653 --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/issuers/dnsimple/helmrelease.yaml @@ -0,0 +1,36 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: dnsimple-issuer + namespace: cert-manager +spec: + interval: 30m + chart: + spec: + chart: cert-manager-webhook-dnsimple + version: 0.0.11 + interval: 30m + sourceRef: + kind: HelmRepository + name: jahanson + namespace: flux-system + + values: + controller: + annotations: + reloader.stakater.com/auto: "true" + dnsimple: + token: + valueFrom: + secretKeyRef: + name: dnsimple-api-token + key: api-token + clusterIssuer: + email: + valueFrom: + secretKeyRef: + name: dnsimple-api-token + key: letsencrypt-email + containerport: 8443 diff --git a/kubernetes/apps/cert-manager/cert-manager/issuers/dnsimple/issuer-letsencrypt-prod.yaml b/kubernetes/apps/cert-manager/cert-manager/issuers/dnsimple/issuer-letsencrypt-prod.yaml new file mode 100644 index 0000000..16d5003 --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/issuers/dnsimple/issuer-letsencrypt-prod.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/cert-manager.io/clusterissuer_v1.json +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-dnsimple-production +spec: + acme: + email: "joe@veri.dev" + preferredChain: "" + privateKeySecretRef: + name: letsencrypt-dnsimple-production + server: https://acme-v02.api.letsencrypt.org/directory + solvers: + - dns01: + webhook: + config: + tokenSecretRef: + key: api-token + name: dnsimple-api-token + solverName: dnsimple + groupName: acme.jahanson.com diff --git a/kubernetes/apps/cert-manager/cert-manager/issuers/dnsimple/issuer-letsencrypt-staging.yaml b/kubernetes/apps/cert-manager/cert-manager/issuers/dnsimple/issuer-letsencrypt-staging.yaml new file mode 100644 index 0000000..da67735 --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/issuers/dnsimple/issuer-letsencrypt-staging.yaml @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/cert-manager.io/clusterissuer_v1.json +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-staging +spec: + acme: + preferredChain: "" + privateKeySecretRef: + name: letsencrypt-staging + server: https://acme-staging-v02.api.letsencrypt.org/directory + solvers: + - dns01: + webhook: + config: + tokenSecretRef: + key: api-token + name: dnsimple-api-token + solverName: dnsimple + groupName: acme.jahanson.com diff --git 
a/kubernetes/apps/cert-manager/cert-manager/issuers/kustomization.yaml b/kubernetes/apps/cert-manager/cert-manager/issuers/kustomization.yaml new file mode 100644 index 0000000..1e33035 --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/issuers/kustomization.yaml @@ -0,0 +1,14 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: cert-manager +resources: + - ./dnsimple/externalsecret.yaml + - ./dnsimple/issuer-letsencrypt-prod.yaml + - ./dnsimple/issuer-letsencrypt-staging.yaml + - ./dnsimple/dnsimple-issuer-rbac.yaml + - ./dnsimple/helmrelease.yaml + - ./cloudflare/externalsecret.yaml + - ./cloudflare/issuer-letsencrypt-prod.yaml + - ./cloudflare/issuer-letsencrypt-staging.yaml diff --git a/kubernetes/apps/cert-manager/cert-manager/ks.yaml b/kubernetes/apps/cert-manager/cert-manager/ks.yaml new file mode 100644 index 0000000..0597f29 --- /dev/null +++ b/kubernetes/apps/cert-manager/cert-manager/ks.yaml @@ -0,0 +1,32 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps-cert-manager + namespace: flux-system +spec: + interval: 10m + path: "./kubernetes/apps/cert-manager/cert-manager/app" + prune: true + sourceRef: + kind: GitRepository + name: valinor + wait: true +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps-cert-manager-issuers + namespace: flux-system +spec: + interval: 10m + path: "./kubernetes/apps/cert-manager/cert-manager/issuers" + prune: true + sourceRef: + kind: GitRepository + name: valinor + wait: false + dependsOn: + - name: cluster-apps-cert-manager diff --git a/kubernetes/apps/cert-manager/kustomization.yaml b/kubernetes/apps/cert-manager/kustomization.yaml new file mode 100644 index 0000000..a298983 --- /dev/null +++ b/kubernetes/apps/cert-manager/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./cert-manager/ks.yaml diff --git a/kubernetes/apps/cert-manager/namespace.yaml b/kubernetes/apps/cert-manager/namespace.yaml new file mode 100644 index 0000000..ed78835 --- /dev/null +++ b/kubernetes/apps/cert-manager/namespace.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: cert-manager + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/default/jellyfin/app/helmrelease.yaml b/kubernetes/apps/default/jellyfin/app/helmrelease.yaml new file mode 100644 index 0000000..73ae2dd --- /dev/null +++ b/kubernetes/apps/default/jellyfin/app/helmrelease.yaml @@ -0,0 +1,117 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: jellyfin + namespace: default +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 2.4.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + maxHistory: 2 + install: + remediation: + 
retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + uninstall: + keepHistory: false + dependsOn: + - name: intel-device-plugins-gpu + namespace: system + values: + controllers: + main: + type: statefulset + annotations: + reloader.stakater.com/auto: "true" + containers: + main: + image: + repository: jellyfin/jellyfin + tag: 10.8.13 + env: + DOTNET_SYSTEM_IO_DISABLEFILELOCKING: "true" + JELLYFIN_FFmpeg__probesize: 50000000 + JELLYFIN_FFmpeg__analyzeduration: 50000000 + JELLYFIN_PublishedServerUrl: jelly.hsn.dev + TZ: America/Chicago + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /health + port: &port 8096 + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + startup: + enabled: false + resources: + requests: + gpu.intel.com/i915: 1 + cpu: 100m + memory: 512Mi + limits: + gpu.intel.com/i915: 1 + memory: 4Gi + pod: + enableServiceLinks: false + nodeSelector: + intel.feature.node.kubernetes.io/gpu: "true" + securityContext: + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + fsGroupChangePolicy: OnRootMismatch + supplementalGroups: [44, 105, 10000] + statefulset: + volumeClaimTemplates: + - name: config + accessMode: ReadWriteOnce + size: 50Gi + storageClass: ceph-block + globalMounts: + - path: /config + service: + main: + ports: + http: + port: *port + ingress: + main: + enabled: true + className: hsn-nginx + annotations: + hosts: + - host: &host "jelly.hsn.dev" + paths: + - path: / + service: + name: main + port: http + tls: + - hosts: + - *host + persistence: + transcode: + type: emptyDir + globalMounts: + - path: /transcode + media: + existingClaim: media + globalMounts: + - path: /media diff --git a/kubernetes/apps/default/jellyfin/app/kustomization.yaml b/kubernetes/apps/default/jellyfin/app/kustomization.yaml new file mode 100644 index 0000000..4891eb4 --- /dev/null +++ b/kubernetes/apps/default/jellyfin/app/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: default +resources: + - ./helmrelease.yaml + - ./pvc.yaml diff --git a/kubernetes/apps/default/jellyfin/app/pvc.yaml b/kubernetes/apps/default/jellyfin/app/pvc.yaml new file mode 100644 index 0000000..e7f77be --- /dev/null +++ b/kubernetes/apps/default/jellyfin/app/pvc.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: media + namespace: default +spec: + accessModes: + - ReadWriteOnce + + resources: + requests: + storage: 20Gi + + storageClassName: ceph-block diff --git a/kubernetes/apps/default/jellyfin/ks.yaml b/kubernetes/apps/default/jellyfin/ks.yaml new file mode 100644 index 0000000..d8a2a27 --- /dev/null +++ b/kubernetes/apps/default/jellyfin/ks.yaml @@ -0,0 +1,19 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps-jellyfin + namespace: flux-system +spec: + dependsOn: + - name: cluster-apps-external-secrets-stores + path: ./kubernetes/apps/default/jellyfin/app + prune: true + sourceRef: + kind: GitRepository + name: valinor + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/default/kustomization.yaml b/kubernetes/apps/default/kustomization.yaml new file mode 100644 index 0000000..7f75aaf --- /dev/null +++ 
b/kubernetes/apps/default/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Flux-Kustomizations + - ./jellyfin/ks.yaml diff --git a/kubernetes/apps/default/namespace.yaml b/kubernetes/apps/default/namespace.yaml new file mode 100644 index 0000000..f659b05 --- /dev/null +++ b/kubernetes/apps/default/namespace.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: default + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/default/rocky-nessa.yaml b/kubernetes/apps/default/rocky-nessa.yaml new file mode 100644 index 0000000..d48a8a3 --- /dev/null +++ b/kubernetes/apps/default/rocky-nessa.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Pod +metadata: + name: rocky-nessa + namespace: default +spec: + nodeName: nessa + containers: + - name: rocky + image: rockylinux:9 + securityContext: + privileged: true + command: ["/bin/bash", "-c", "while true; do sleep 10; done"] + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 4000m + memory: 4000Mi diff --git a/kubernetes/apps/default/rocky-nienna.yaml b/kubernetes/apps/default/rocky-nienna.yaml new file mode 100644 index 0000000..d9ab416 --- /dev/null +++ b/kubernetes/apps/default/rocky-nienna.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Pod +metadata: + name: rocky-nienna + namespace: default +spec: + nodeName: nienna + containers: + - name: rocky + image: rockylinux:9 + securityContext: + privileged: true + command: ["/bin/bash", "-c", "while true; do sleep 10; done"] + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 4000m + memory: 4000Mi diff --git a/kubernetes/apps/default/ubuntu.yaml b/kubernetes/apps/default/ubuntu.yaml new file mode 100644 index 0000000..b91eefb --- /dev/null +++ b/kubernetes/apps/default/ubuntu.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Pod +metadata: + name: ubuntu + namespace: default +spec: + containers: + - name: ubuntu + image: ubuntu:latest + securityContext: + privileged: true + command: ["/bin/bash", "-c", "while true; do sleep 10; done"] + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 4000m + memory: 4000Mi diff --git a/kubernetes/apps/flux-system/add-ons/ks.yaml b/kubernetes/apps/flux-system/add-ons/ks.yaml new file mode 100644 index 0000000..ad9d786 --- /dev/null +++ b/kubernetes/apps/flux-system/add-ons/ks.yaml @@ -0,0 +1,34 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps-flux-webhooks + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + interval: 10m + path: ./kubernetes/apps/flux-system/add-ons/webhooks + prune: true + sourceRef: + kind: GitRepository + name: valinor + wait: true +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps-flux-monitoring + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + interval: 10m + path: ./kubernetes/apps/flux-system/add-ons/monitoring + prune: true + sourceRef: + kind: GitRepository + name: valinor + wait: true diff --git 
a/kubernetes/apps/flux-system/add-ons/monitoring/kustomization.yaml b/kubernetes/apps/flux-system/add-ons/monitoring/kustomization.yaml new file mode 100644 index 0000000..7183475 --- /dev/null +++ b/kubernetes/apps/flux-system/add-ons/monitoring/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: flux-system +resources: + - ./podmonitor.yaml + - ./prometheusrule.yaml diff --git a/kubernetes/apps/flux-system/add-ons/monitoring/podmonitor.yaml b/kubernetes/apps/flux-system/add-ons/monitoring/podmonitor.yaml new file mode 100644 index 0000000..b3f1a05 --- /dev/null +++ b/kubernetes/apps/flux-system/add-ons/monitoring/podmonitor.yaml @@ -0,0 +1,32 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/monitoring.coreos.com/podmonitor_v1.json +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: flux-system + namespace: flux-system + labels: + app.kubernetes.io/part-of: flux + app.kubernetes.io/component: monitoring +spec: + namespaceSelector: + matchNames: + - flux-system + selector: + matchExpressions: + - key: app + operator: In + values: + - helm-controller + - source-controller + - kustomize-controller + - notification-controller + - image-automation-controller + - image-reflector-controller + podMetricsEndpoints: + - port: http-prom + relabelings: + # https://github.com/prometheus-operator/prometheus-operator/issues/4816 + - sourceLabels: [__meta_kubernetes_pod_phase] + action: keep + regex: Running diff --git a/kubernetes/apps/flux-system/add-ons/monitoring/prometheusrule.yaml b/kubernetes/apps/flux-system/add-ons/monitoring/prometheusrule.yaml new file mode 100644 index 0000000..addd64e --- /dev/null +++ b/kubernetes/apps/flux-system/add-ons/monitoring/prometheusrule.yaml @@ -0,0 +1,32 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/monitoring.coreos.com/prometheusrule_v1.json +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: flux-rules + namespace: flux-system +spec: + groups: + - name: flux.rules + rules: + - alert: FluxComponentAbsent + annotations: + summary: Flux component has disappeared from Prometheus target discovery. + expr: | + absent(up{job=~".*flux-system.*"} == 1) + for: 15m + labels: + severity: critical + - alert: FluxReconciliationFailure + annotations: + summary: >- + {{ $labels.kind }} {{ $labels.namespace }}/{{ $labels.name }} reconciliation + has been failing for more than 15 minutes. 
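+ # Evaluates to 1 only while a Ready condition is False and the resource has not been deleted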
+ expr: | + max(gotk_reconcile_condition{status="False",type="Ready"}) by (namespace, name, kind) + + + on(namespace, name, kind) (max(gotk_reconcile_condition{status="Deleted"}) + by (namespace, name, kind)) * 2 == 1 + for: 15m + labels: + severity: critical diff --git a/kubernetes/apps/flux-system/add-ons/webhooks/git/externalsecret.yaml b/kubernetes/apps/flux-system/add-ons/webhooks/git/externalsecret.yaml new file mode 100644 index 0000000..06aaadc --- /dev/null +++ b/kubernetes/apps/flux-system/add-ons/webhooks/git/externalsecret.yaml @@ -0,0 +1,19 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: git-webhook-token + namespace: flux-system +spec: + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + target: + name: git-webhook-token + creationPolicy: Owner + data: + - secretKey: token + remoteRef: + key: flux + property: git_webhook_token diff --git a/kubernetes/apps/flux-system/add-ons/webhooks/git/ingress.yaml b/kubernetes/apps/flux-system/add-ons/webhooks/git/ingress.yaml new file mode 100644 index 0000000..6a035bd --- /dev/null +++ b/kubernetes/apps/flux-system/add-ons/webhooks/git/ingress.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: webhook-receiver + namespace: flux-system + annotations: + external-dns.alpha.kubernetes.io/cloudflare-proxied: "true" +spec: + ingressClassName: "hsn-nginx" + rules: + - host: &host "flux-receiver-valinor.hsn.dev" + http: + paths: + - path: /hook/ + pathType: Prefix + backend: + service: + name: webhook-receiver + port: + number: 80 + tls: + - hosts: + - *host diff --git a/kubernetes/apps/flux-system/add-ons/webhooks/git/kustomization.yaml b/kubernetes/apps/flux-system/add-ons/webhooks/git/kustomization.yaml new file mode 100644 index 0000000..d4db8ce --- /dev/null +++ b/kubernetes/apps/flux-system/add-ons/webhooks/git/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1beta1.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./ingress.yaml + - ./receiver.yaml diff --git a/kubernetes/apps/flux-system/add-ons/webhooks/git/receiver.yaml b/kubernetes/apps/flux-system/add-ons/webhooks/git/receiver.yaml new file mode 100644 index 0000000..6b79854 --- /dev/null +++ b/kubernetes/apps/flux-system/add-ons/webhooks/git/receiver.yaml @@ -0,0 +1,29 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/receiver-notification-v1.json +apiVersion: notification.toolkit.fluxcd.io/v1 +kind: Receiver +metadata: + name: git-receiver + namespace: flux-system +spec: + type: github + events: + - "ping" + - "push" + secretRef: + name: git-webhook-token + resources: + - apiVersion: source.toolkit.fluxcd.io/v1 + kind: GitRepository + name: "valinor" + namespace: "flux-system" + + - apiVersion: kustomize.toolkit.fluxcd.io/v1 + kind: Kustomization + name: "cluster" + namespace: "flux-system" + + - apiVersion: kustomize.toolkit.fluxcd.io/v1 + kind: Kustomization + name: "cluster-apps" + namespace: "flux-system" diff --git a/kubernetes/apps/flux-system/add-ons/webhooks/kustomization.yaml b/kubernetes/apps/flux-system/add-ons/webhooks/kustomization.yaml new file mode 100644 index 0000000..c0a6cd3 --- /dev/null +++ 
b/kubernetes/apps/flux-system/add-ons/webhooks/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./git diff --git a/kubernetes/apps/flux-system/kustomization.yaml b/kubernetes/apps/flux-system/kustomization.yaml new file mode 100644 index 0000000..937a503 --- /dev/null +++ b/kubernetes/apps/flux-system/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1beta1.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./add-ons/ks.yaml diff --git a/kubernetes/apps/flux-system/namespace.yaml b/kubernetes/apps/flux-system/namespace.yaml new file mode 100644 index 0000000..b48db45 --- /dev/null +++ b/kubernetes/apps/flux-system/namespace.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: flux-system + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml b/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml new file mode 100644 index 0000000..5e78f50 --- /dev/null +++ b/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml @@ -0,0 +1,75 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: cilium + namespace: kube-system +spec: + interval: 30m + chart: + spec: + chart: cilium + version: 1.14.5 + sourceRef: + kind: HelmRepository + name: cilium + namespace: flux-system + maxHistory: 2 + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + uninstall: + keepHistory: false + values: + cluster: + name: valinor + id: 1 + hubble: + relay: + enabled: true + ui: + enabled: true + metrics: + # enabled: "{dns,drop,tcp,flow,port-distribution,icmp,httpV2:exemplars=true;labelsContext=source_ip,source_namespace,source_workload,destination_ip,destination_namespace,destination_workload,traffic_direction}" + enableOpenMetrics: true + prometheus: + enabled: true + operator: + prometheus: + enabled: true + ipam: + mode: kubernetes + policyEnforcementMode: always # enforce network policies + policyAuditMode: true # do not block traffic + hostFirewall: + enabled: true # enable host policies + extraConfig: + allow-localhost: policy # enable policies for localhost + + kubeProxyReplacement: true + securityContext: + capabilities: + ciliumAgent: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + cleanCiliumState: + - NET_ADMIN + - SYS_ADMIN + - SYS_RESOURCE + k8sServiceHost: ${K8S_SERVICE_ENDPOINT} + k8sServicePort: 6443 + rollOutCiliumPods: true diff --git a/kubernetes/apps/kube-system/cilium/app/netpols/allow-same-ns.yaml b/kubernetes/apps/kube-system/cilium/app/netpols/allow-same-ns.yaml new file mode 100644 index 0000000..d91ced5 --- /dev/null +++ b/kubernetes/apps/kube-system/cilium/app/netpols/allow-same-ns.yaml @@ -0,0 +1,9 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: allow-ns-ingress +spec: + podSelector: {} + ingress: + - from: + - podSelector: {} diff --git 
a/kubernetes/apps/kube-system/cilium/app/netpols/allow-ssh.yaml b/kubernetes/apps/kube-system/cilium/app/netpols/allow-ssh.yaml new file mode 100644 index 0000000..0a295ed --- /dev/null +++ b/kubernetes/apps/kube-system/cilium/app/netpols/allow-ssh.yaml @@ -0,0 +1,23 @@ +# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumclusterwidenetworkpolicy_v2.json +--- +apiVersion: cilium.io/v2 +kind: CiliumClusterwideNetworkPolicy +metadata: + name: allow-ssh +spec: + description: "" + nodeSelector: + matchLabels: + # node-access: ssh + node-role.kubernetes.io/control-plane: "true" + ingress: + - fromEntities: + - cluster + - toPorts: + - ports: + - port: "22" + protocol: TCP + - icmps: + - fields: + - type: 8 + family: IPv4 diff --git a/kubernetes/apps/kube-system/cilium/app/netpols/apiserver.yaml b/kubernetes/apps/kube-system/cilium/app/netpols/apiserver.yaml new file mode 100644 index 0000000..7956dc9 --- /dev/null +++ b/kubernetes/apps/kube-system/cilium/app/netpols/apiserver.yaml @@ -0,0 +1,27 @@ +# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumclusterwidenetworkpolicy_v2.json +--- +apiVersion: cilium.io/v2 +kind: CiliumClusterwideNetworkPolicy +metadata: + name: api-server +spec: + nodeSelector: + # apply to master nodes + matchLabels: + node-role.kubernetes.io/control-plane: 'true' + ingress: + # load balancer -> api server + - fromCIDR: + - 167.235.217.82/32 + toPorts: + - ports: + - port: '6443' + protocol: TCP + egress: + # api server -> kubelet + - toEntities: + - remote-node + toPorts: + - ports: + - port: '10250' + protocol: TCP diff --git a/kubernetes/apps/kube-system/cilium/app/netpols/cilium-health.yaml b/kubernetes/apps/kube-system/cilium/app/netpols/cilium-health.yaml new file mode 100644 index 0000000..e4c56f8 --- /dev/null +++ b/kubernetes/apps/kube-system/cilium/app/netpols/cilium-health.yaml @@ -0,0 +1,41 @@ +# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumclusterwidenetworkpolicy_v2.json +--- +apiVersion: cilium.io/v2 +kind: CiliumClusterwideNetworkPolicy +metadata: + name: cilium-health +specs: + - endpointSelector: + # apply to health endpoints + matchLabels: + reserved:health: '' + ingress: + # cilium agent -> cilium agent + - fromEntities: + - host + - remote-node + toPorts: + - ports: + - port: '4240' + protocol: TCP + - nodeSelector: + # apply to all nodes + matchLabels: {} + ingress: + # cilium agent -> cilium agent + - fromEntities: + - health + - remote-node + toPorts: + - ports: + - port: '4240' + protocol: TCP + egress: + # cilium agent -> cilium agent + - toEntities: + - health + - remote-node + toPorts: + - ports: + - port: '4240' + protocol: TCP diff --git a/kubernetes/apps/kube-system/cilium/app/netpols/cilium-vxlan.yaml b/kubernetes/apps/kube-system/cilium/app/netpols/cilium-vxlan.yaml new file mode 100644 index 0000000..98f0929 --- /dev/null +++ b/kubernetes/apps/kube-system/cilium/app/netpols/cilium-vxlan.yaml @@ -0,0 +1,26 @@ +# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumclusterwidenetworkpolicy_v2.json +--- +apiVersion: cilium.io/v2 +kind: CiliumClusterwideNetworkPolicy +metadata: + name: cilium-vxlan +spec: + nodeSelector: + # apply to all nodes + matchLabels: {} + ingress: + # node -> vxlan + - fromEntities: + - remote-node + toPorts: + - ports: + - port: '8472' + protocol: UDP + egress: + # node -> vxlan + - toEntities: + - remote-node + toPorts: + - ports: + - port: '8472' + protocol: UDP diff --git a/kubernetes/apps/kube-system/cilium/app/netpols/core-dns.yaml 
b/kubernetes/apps/kube-system/cilium/app/netpols/core-dns.yaml new file mode 100644 index 0000000..f31c8b7 --- /dev/null +++ b/kubernetes/apps/kube-system/cilium/app/netpols/core-dns.yaml @@ -0,0 +1,65 @@ +# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumnetworkpolicy_v2.json +--- +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: core-dns + namespace: kube-system +specs: + - nodeSelector: + # apply to master nodes + matchLabels: + node-role.kubernetes.io/control-plane: 'true' + ingress: + # core dns -> api server + - fromEndpoints: + - matchLabels: + io.cilium.k8s.policy.serviceaccount: coredns + toPorts: + - ports: + - port: '6443' + protocol: TCP + - nodeSelector: + # apply to all nodes + matchLabels: {} + egress: + # kubelet -> core dns probes + - toEndpoints: + - matchLabels: + io.cilium.k8s.policy.serviceaccount: coredns + toPorts: + - ports: + - port: '8080' + protocol: TCP + - port: '8181' + protocol: TCP + - endpointSelector: + # apply to core dns pods + matchLabels: + io.cilium.k8s.policy.serviceaccount: coredns + ingress: + # kubelet -> core dns probes + - fromEntities: + - host + toPorts: + - ports: + - port: '8080' + protocol: TCP + - port: '8181' + protocol: TCP + egress: + # core dns -> api server + - toEntities: + - kube-apiserver + toPorts: + - ports: + - port: '6443' + protocol: TCP + # core dns -> upstream DNS + - toCIDR: + - 185.12.64.1/32 + - 185.12.64.2/32 + toPorts: + - ports: + - port: '53' + protocol: UDP diff --git a/kubernetes/apps/kube-system/cilium/app/netpols/etcd.yaml b/kubernetes/apps/kube-system/cilium/app/netpols/etcd.yaml new file mode 100644 index 0000000..e239332 --- /dev/null +++ b/kubernetes/apps/kube-system/cilium/app/netpols/etcd.yaml @@ -0,0 +1,27 @@ +# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumclusterwidenetworkpolicy_v2.json +--- +apiVersion: cilium.io/v2 +kind: CiliumClusterwideNetworkPolicy +metadata: + name: etcd +spec: + nodeSelector: + # apply to master nodes + matchLabels: + node-role.kubernetes.io/control-plane: 'true' + ingress: + # etcd peer -> etcd peer + - fromEntities: + - remote-node + toPorts: + - ports: + - port: '2380' + protocol: TCP + egress: + # etcd peer -> etcd peer + - toEntities: + - remote-node + toPorts: + - ports: + - port: '2380' + protocol: TCP diff --git a/kubernetes/apps/kube-system/cilium/app/netpols/fix-apiserver.yml b/kubernetes/apps/kube-system/cilium/app/netpols/fix-apiserver.yml new file mode 100644 index 0000000..798ae74 --- /dev/null +++ b/kubernetes/apps/kube-system/cilium/app/netpols/fix-apiserver.yml @@ -0,0 +1,15 @@ +# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumclusterwidenetworkpolicy_v2.json +--- +apiVersion: "cilium.io/v2" +kind: CiliumClusterwideNetworkPolicy +metadata: + name: allow-specific-traffic +spec: + endpointSelector: {} + ingress: + - fromEntities: + - host + toPorts: + - ports: + - port: '6443' + protocol: TCP \ No newline at end of file diff --git a/kubernetes/apps/kube-system/cilium/app/netpols/hubble-relay.yaml b/kubernetes/apps/kube-system/cilium/app/netpols/hubble-relay.yaml new file mode 100644 index 0000000..0473f98 --- /dev/null +++ b/kubernetes/apps/kube-system/cilium/app/netpols/hubble-relay.yaml @@ -0,0 +1,50 @@ +# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumnetworkpolicy_v2.json +--- +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: hubble-relay + namespace: kube-system +specs: + - nodeSelector: + # apply to all nodes + matchLabels: {} + ingress: + # 
hubble relay -> hubble agent + - fromEndpoints: + - matchLabels: + io.cilium.k8s.policy.serviceaccount: hubble-relay + toPorts: + - ports: + - port: '4244' + protocol: TCP + egress: + # kubelet -> hubble relay probes + - toEndpoints: + - matchLabels: + io.cilium.k8s.policy.serviceaccount: hubble-relay + toPorts: + - ports: + - port: '4245' + protocol: TCP + - endpointSelector: + # apply to hubble relay pods + matchLabels: + io.cilium.k8s.policy.serviceaccount: hubble-relay + ingress: + # kubelet -> hubble relay probes + - fromEntities: + - host + toPorts: + - ports: + - port: '4245' + protocol: TCP + egress: + # hubble relay -> hubble agent + - toEntities: + - host + - remote-node + toPorts: + - ports: + - port: '4244' + protocol: TCP diff --git a/kubernetes/apps/kube-system/cilium/app/netpols/hubble-ui.yaml b/kubernetes/apps/kube-system/cilium/app/netpols/hubble-ui.yaml new file mode 100644 index 0000000..c4914d0 --- /dev/null +++ b/kubernetes/apps/kube-system/cilium/app/netpols/hubble-ui.yaml @@ -0,0 +1,75 @@ +# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumnetworkpolicy_v2.json +--- +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: hubble-ui + namespace: kube-system +specs: + - nodeSelector: + # apply to master nodes + matchLabels: + node-role.kubernetes.io/control-plane: '' + ingress: + # hubble ui -> api server + - fromEndpoints: + - matchLabels: + io.cilium.k8s.policy.serviceaccount: hubble-ui + toPorts: + - ports: + - port: '6443' + protocol: TCP + - endpointSelector: + # apply to core dns endpoints + matchLabels: + io.cilium.k8s.policy.serviceaccount: coredns + ingress: + # hubble ui -> core dns + - fromEndpoints: + - matchLabels: + io.cilium.k8s.policy.serviceaccount: hubble-ui + toPorts: + - ports: + - port: '53' + protocol: UDP + - endpointSelector: + # apply to hubble relay endpoints + matchLabels: + io.cilium.k8s.policy.serviceaccount: hubble-relay + ingress: + # hubble ui -> hubble relay + - fromEndpoints: + - matchLabels: + io.cilium.k8s.policy.serviceaccount: hubble-ui + toPorts: + - ports: + - port: '4245' + protocol: TCP + - endpointSelector: + # apply to hubble ui endpoints + matchLabels: + io.cilium.k8s.policy.serviceaccount: hubble-ui + egress: + # hubble ui -> api server + - toEntities: + - kube-apiserver + toPorts: + - ports: + - port: '6443' + protocol: TCP + # hubble ui -> hubble relay + - toEndpoints: + - matchLabels: + io.cilium.k8s.policy.serviceaccount: hubble-relay + toPorts: + - ports: + - port: '4245' + protocol: TCP + # hubble ui -> core dns + - toEndpoints: + - matchLabels: + io.cilium.k8s.policy.serviceaccount: coredns + toPorts: + - ports: + - port: '53' + protocol: UDP diff --git a/kubernetes/apps/kube-system/cilium/app/netpols/kubelet.yaml b/kubernetes/apps/kube-system/cilium/app/netpols/kubelet.yaml new file mode 100644 index 0000000..23d5060 --- /dev/null +++ b/kubernetes/apps/kube-system/cilium/app/netpols/kubelet.yaml @@ -0,0 +1,28 @@ +# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumclusterwidenetworkpolicy_v2.json +--- +apiVersion: cilium.io/v2 +kind: CiliumClusterwideNetworkPolicy +metadata: + name: kubelet +spec: + nodeSelector: + # apply to all nodes + matchLabels: {} + ingress: + # api server -> kubelet + - fromEntities: + - kube-apiserver + toPorts: + - ports: + - port: '10250' + protocol: TCP + egress: + # kubelet -> load balancer + - toCIDR: + - 167.235.217.82/32 + toEntities: + - host + toPorts: + - ports: + - port: '6443' + protocol: TCP diff --git 
a/kubernetes/apps/kube-system/cilium/app/netpols/kustomization.yaml b/kubernetes/apps/kube-system/cilium/app/netpols/kustomization.yaml new file mode 100644 index 0000000..ceec6c3 --- /dev/null +++ b/kubernetes/apps/kube-system/cilium/app/netpols/kustomization.yaml @@ -0,0 +1,16 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: kube-system +resources: + - ./allow-ssh.yaml + - ./apiserver.yaml + - ./cilium-health.yaml + - ./cilium-vxlan.yaml + - ./core-dns.yaml + - ./etcd.yaml + - ./hubble-relay.yaml + - ./hubble-ui.yaml + - ./kubelet.yaml + diff --git a/kubernetes/apps/kube-system/cilium/ks.yaml b/kubernetes/apps/kube-system/cilium/ks.yaml new file mode 100644 index 0000000..e2eadc5 --- /dev/null +++ b/kubernetes/apps/kube-system/cilium/ks.yaml @@ -0,0 +1,17 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps-cilium + namespace: flux-system +spec: + interval: 30m + retryInterval: 1m + timeout: 5m + path: "./kubernetes/apps/kube-system/cilium/app" + prune: true + sourceRef: + kind: GitRepository + name: valinor + wait: false diff --git a/kubernetes/apps/kube-system/kustomization.yaml b/kubernetes/apps/kube-system/kustomization.yaml new file mode 100644 index 0000000..6d5d7fc --- /dev/null +++ b/kubernetes/apps/kube-system/kustomization.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./cilium/ks.yaml + - ./hccm/ks.yaml + - ./metrics-server/ks.yaml diff --git a/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml b/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml new file mode 100644 index 0000000..5f4472c --- /dev/null +++ b/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml @@ -0,0 +1,26 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: metrics-server + namespace: kube-system +spec: + interval: 30m + chart: + spec: + chart: metrics-server + version: 3.11.0 + sourceRef: + kind: HelmRepository + name: kubernetes-sigs-metrics-server + namespace: flux-system + interval: 30m + values: + metrics: + enabled: true + args: + - --kubelet-insecure-tls + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s diff --git a/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml b/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml new file mode 100644 index 0000000..d8365e6 --- /dev/null +++ b/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: kube-system +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/kube-system/metrics-server/ks.yaml b/kubernetes/apps/kube-system/metrics-server/ks.yaml new file mode 100644 index 0000000..6c4f7f1 --- /dev/null +++ 
b/kubernetes/apps/kube-system/metrics-server/ks.yaml @@ -0,0 +1,17 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps-metrics-server + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + interval: 10m + path: "./kubernetes/apps/kube-system/metrics-server/app" + prune: true + sourceRef: + kind: GitRepository + name: valinor + wait: true diff --git a/kubernetes/apps/kube-system/namespace.yaml b/kubernetes/apps/kube-system/namespace.yaml new file mode 100644 index 0000000..5eeb2c9 --- /dev/null +++ b/kubernetes/apps/kube-system/namespace.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: kube-system + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/kyverno/kustomization.yaml b/kubernetes/apps/kyverno/kustomization.yaml new file mode 100644 index 0000000..10b5d06 --- /dev/null +++ b/kubernetes/apps/kyverno/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./kyverno/ks.yaml diff --git a/kubernetes/apps/kyverno/kyverno/app/helmrelease.yaml b/kubernetes/apps/kyverno/kyverno/app/helmrelease.yaml new file mode 100644 index 0000000..970196d --- /dev/null +++ b/kubernetes/apps/kyverno/kyverno/app/helmrelease.yaml @@ -0,0 +1,80 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: kyverno + namespace: kyverno +spec: + interval: 30m + chart: + spec: + chart: kyverno + version: 3.1.3 + sourceRef: + kind: HelmRepository + name: kyverno + namespace: flux-system + maxHistory: 2 + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + uninstall: + keepHistory: false + values: + crds: + install: true + grafana: + enabled: true + annotations: + grafana_folder: System + backgroundController: + serviceMonitor: + enabled: true + rbac: + clusterRole: + extraResources: + - apiGroups: + - "" + resources: + - pods + verbs: + - create + - update + - patch + - delete + - get + - list + cleanupController: + serviceMonitor: + enabled: true + reportsController: + serviceMonitor: + enabled: true + admissionController: + replicas: 3 + serviceMonitor: + enabled: true + rbac: + clusterRole: + extraResources: + - apiGroups: + - "" + resources: + - pods + verbs: + - create + - update + - delete + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + app.kubernetes.io/instance: kyverno + app.kubernetes.io/component: kyverno diff --git a/kubernetes/apps/kyverno/kyverno/app/kustomization.yaml b/kubernetes/apps/kyverno/kyverno/app/kustomization.yaml new file mode 100644 index 0000000..82dc325 --- /dev/null +++ b/kubernetes/apps/kyverno/kyverno/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: kyverno +resources: + - ./helmrelease.yaml diff --git 
a/kubernetes/apps/kyverno/kyverno/ks.yaml b/kubernetes/apps/kyverno/kyverno/ks.yaml new file mode 100644 index 0000000..471f56b --- /dev/null +++ b/kubernetes/apps/kyverno/kyverno/ks.yaml @@ -0,0 +1,36 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps-kyverno + namespace: flux-system +spec: + path: ./kubernetes/apps/kyverno/kyverno/app + prune: true + sourceRef: + kind: GitRepository + name: valinor + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps-cluster-policies + namespace: flux-system +spec: + dependsOn: + - name: cluster-apps-kyverno + path: ./kubernetes/apps/kyverno/kyverno/policies + prune: true + sourceRef: + kind: GitRepository + name: valinor + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/kubernetes/apps/kyverno/kyverno/policies/kustomization.yaml b/kubernetes/apps/kyverno/kyverno/policies/kustomization.yaml new file mode 100644 index 0000000..5e3e3fa --- /dev/null +++ b/kubernetes/apps/kyverno/kyverno/policies/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./remove-cpu-limits.yaml diff --git a/kubernetes/apps/kyverno/kyverno/policies/remove-cpu-limits.yaml b/kubernetes/apps/kyverno/kyverno/policies/remove-cpu-limits.yaml new file mode 100644 index 0000000..6b48726 --- /dev/null +++ b/kubernetes/apps/kyverno/kyverno/policies/remove-cpu-limits.yaml @@ -0,0 +1,44 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/kyverno.io/clusterpolicy_v1.json +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: remove-cpu-limit + annotations: + policies.kyverno.io/title: Remove CPU limits + policies.kyverno.io/category: Best Practices + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + This policy removes CPU limits from all Pods. 
+ pod-policies.kyverno.io/autogen-controllers: none +spec: + generateExistingOnPolicyUpdate: true + rules: + - name: remove-containers-cpu-limits + match: + any: + - resources: + kinds: ["Pod"] + mutate: + foreach: + - list: "request.object.spec.containers" + patchesJson6902: |- + - path: /spec/containers/{{elementIndex}}/resources/limits/cpu + op: remove + - name: delete-initcontainers-cpu-limits + match: + any: + - resources: + kinds: ["Pod"] + preconditions: + all: + - key: "{{ request.object.spec.initContainers[] || `[]` | length(@) }}" + operator: GreaterThanOrEquals + value: 1 + mutate: + foreach: + - list: "request.object.spec.initContainers" + patchesJson6902: |- + - path: /spec/initContainers/{{elementIndex}}/resources/limits/cpu + op: remove diff --git a/kubernetes/apps/kyverno/namespace.yaml b/kubernetes/apps/kyverno/namespace.yaml new file mode 100644 index 0000000..263304d --- /dev/null +++ b/kubernetes/apps/kyverno/namespace.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: kyverno + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/network/echo-server/app/helmrelease.yaml b/kubernetes/apps/network/echo-server/app/helmrelease.yaml new file mode 100644 index 0000000..1950ffb --- /dev/null +++ b/kubernetes/apps/network/echo-server/app/helmrelease.yaml @@ -0,0 +1,61 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: echo-server + namespace: network +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 2.4.0 + interval: 30m + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + + values: + controllers: + main: + containers: + main: + image: + repository: ghcr.io/mendhak/http-https-echo + tag: "31" + env: + HTTP_PORT: &port 8080 + LOG_WITHOUT_NEWLINE: "true" + LOG_IGNORE_PATH: "/healthz" + + service: + main: + ports: + http: + port: *port + + ingress: + main: + enabled: true + className: hsn-nginx + annotations: + external-dns.alpha.kubernetes.io/cloudflare-proxied: "true" + hosts: + - host: &host "esv.hsn.dev" + paths: + - path: / + service: + name: main + port: http + tls: + - hosts: + - *host + + resources: + requests: + cpu: 15m + memory: 64M + limits: + memory: 128M diff --git a/kubernetes/apps/network/echo-server/app/kustomization.yaml b/kubernetes/apps/network/echo-server/app/kustomization.yaml new file mode 100644 index 0000000..689f842 --- /dev/null +++ b/kubernetes/apps/network/echo-server/app/kustomization.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: network +resources: + - ./helmrelease.yaml +commonLabels: + app.kubernetes.io/name: echo-server + app.kubernetes.io/instance: echo-server diff --git a/kubernetes/apps/network/echo-server/ks.yaml b/kubernetes/apps/network/echo-server/ks.yaml new file mode 100644 index 0000000..e4bd6a2 --- /dev/null +++ b/kubernetes/apps/network/echo-server/ks.yaml @@ -0,0 +1,17 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps-echo-server + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + interval: 10m + path: 
"./kubernetes/apps/network/echo-server/app" + prune: true + sourceRef: + kind: GitRepository + name: valinor + wait: true diff --git a/kubernetes/apps/network/external-dns/app/hsn-dev/externalsecret.yaml b/kubernetes/apps/network/external-dns/app/hsn-dev/externalsecret.yaml new file mode 100644 index 0000000..70873a3 --- /dev/null +++ b/kubernetes/apps/network/external-dns/app/hsn-dev/externalsecret.yaml @@ -0,0 +1,19 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: externaldns-hsn-dev-secrets + namespace: cert-manager +spec: + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + target: + name: externaldns-hsn-dev-secrets + creationPolicy: Owner + data: + - secretKey: cloudflare_api_token + remoteRef: + key: Cloudflare + property: hsn_api_token diff --git a/kubernetes/apps/network/external-dns/app/hsn-dev/helmrelease.yaml b/kubernetes/apps/network/external-dns/app/hsn-dev/helmrelease.yaml new file mode 100644 index 0000000..ecbcfc3 --- /dev/null +++ b/kubernetes/apps/network/external-dns/app/hsn-dev/helmrelease.yaml @@ -0,0 +1,69 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: &name externaldns-hsn-dev + namespace: network +spec: + interval: 30m + chart: + spec: + chart: external-dns + version: 1.13.1 + sourceRef: + kind: HelmRepository + name: kubernetes-sigs-external-dns + namespace: flux-system + interval: 30m + + values: + fullnameOverride: *name + + domainFilters: + - hsn.dev + + env: + - name: CF_API_TOKEN + valueFrom: + secretKeyRef: + name: externaldns-hsn-dev-secrets + key: cloudflare_api_token + + extraArgs: + - --crd-source-apiversion=externaldns.k8s.io/v1alpha1 + - --ingress-class=hsn-nginx + + podAnnotations: + secret.reloader.stakater.com/reload: externaldns-hsn-dev-secrets + + policy: sync + provider: cloudflare + + resources: + requests: + cpu: 5m + memory: 100Mi + limits: + memory: 100Mi + + serviceMonitor: + enabled: true + + sources: + - ingress + - crd + + txtPrefix: "k8s." 
+ + postRenderers: + - kustomize: + patches: + - target: + version: v1 + kind: Deployment + name: *name + patch: | + - op: add + path: /spec/template/spec/enableServiceLinks + value: false diff --git a/kubernetes/apps/network/external-dns/app/hsn-dev/kustomization.yaml b/kubernetes/apps/network/external-dns/app/hsn-dev/kustomization.yaml new file mode 100644 index 0000000..c5f31b8 --- /dev/null +++ b/kubernetes/apps/network/external-dns/app/hsn-dev/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: network +resources: + - ./helmrelease.yaml + - ./externalsecret.yaml diff --git a/kubernetes/apps/network/external-dns/app/shared/dns_endpoint-crd.yaml b/kubernetes/apps/network/external-dns/app/shared/dns_endpoint-crd.yaml new file mode 100644 index 0000000..2e0e45c --- /dev/null +++ b/kubernetes/apps/network/external-dns/app/shared/dns_endpoint-crd.yaml @@ -0,0 +1,93 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-sigs/external-dns/pull/2007" + creationTimestamp: null + name: dnsendpoints.externaldns.k8s.io +spec: + group: externaldns.k8s.io + names: + kind: DNSEndpoint + listKind: DNSEndpointList + plural: dnsendpoints + singular: dnsendpoint + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: DNSEndpointSpec defines the desired state of DNSEndpoint + properties: + endpoints: + items: + description: Endpoint is a high-level way of a connection between a service and an IP + properties: + dnsName: + description: The hostname of the DNS record + type: string + labels: + additionalProperties: + type: string + description: Labels stores labels defined for the Endpoint + type: object + providerSpecific: + description: ProviderSpecific stores provider specific config + items: + description: ProviderSpecificProperty holds the name and value of a configuration which is specific to individual DNS providers + properties: + name: + type: string + value: + type: string + type: object + type: array + recordTTL: + description: TTL for the record + format: int64 + type: integer + recordType: + description: RecordType type of record, e.g. CNAME, A, SRV, TXT etc + type: string + setIdentifier: + description: Identifier to distinguish multiple records with the same name and type (e.g. 
Route53 records with routing policies other than 'simple') + type: string + targets: + description: The targets the DNS record points to + items: + type: string + type: array + type: object + type: array + type: object + status: + description: DNSEndpointStatus defines the observed state of DNSEndpoint + properties: + observedGeneration: + description: The generation observed by the external-dns controller. + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/kubernetes/apps/network/external-dns/app/shared/kustomization.yaml b/kubernetes/apps/network/external-dns/app/shared/kustomization.yaml new file mode 100644 index 0000000..56ba502 --- /dev/null +++ b/kubernetes/apps/network/external-dns/app/shared/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: network +resources: + - ./dns_endpoint-crd.yaml diff --git a/kubernetes/apps/network/external-dns/app/valinor-social/externalsecret.yaml b/kubernetes/apps/network/external-dns/app/valinor-social/externalsecret.yaml new file mode 100644 index 0000000..4950bfd --- /dev/null +++ b/kubernetes/apps/network/external-dns/app/valinor-social/externalsecret.yaml @@ -0,0 +1,19 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: externaldns-valinor-social-secrets + namespace: cert-manager +spec: + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + target: + name: externaldns-valinor-social-secrets + creationPolicy: Owner + data: + - secretKey: dnsimple_api_token + remoteRef: + key: DNSimple + property: external-dns diff --git a/kubernetes/apps/network/external-dns/app/valinor-social/helmrelease.yaml b/kubernetes/apps/network/external-dns/app/valinor-social/helmrelease.yaml new file mode 100644 index 0000000..2c8f311 --- /dev/null +++ b/kubernetes/apps/network/external-dns/app/valinor-social/helmrelease.yaml @@ -0,0 +1,70 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: &name externaldns-valinor-social + namespace: network +spec: + interval: 30m + chart: + spec: + chart: external-dns + version: 1.13.1 + sourceRef: + kind: HelmRepository + name: kubernetes-sigs-external-dns + namespace: flux-system + interval: 30m + + values: + fullnameOverride: *name + + domainFilters: + - valinor.social + + env: + - name: DNSIMPLE_OAUTH + valueFrom: + secretKeyRef: + name: externaldns-valinor-social-secrets + key: dnsimple_api_token + + serviceMonitor: + enabled: true + + extraArgs: + - --crd-source-apiversion=externaldns.k8s.io/v1alpha1 + - --crd-source-kind=DNSEndpoint + - --annotation-filter=external-dns.alpha.kubernetes.io/target + + podAnnotations: + secret.reloader.stakater.com/reload: externaldns-valinor-social-secrets + + policy: sync + provider: dnsimple + + resources: + requests: + cpu: 5m + memory: 100Mi + limits: + memory: 100Mi + + sources: + - ingress + - crd + + txtPrefix: "k8s." 
+ + postRenderers: + - kustomize: + patches: + - target: + version: v1 + kind: Deployment + name: *name + patch: | + - op: add + path: /spec/template/spec/enableServiceLinks + value: false diff --git a/kubernetes/apps/network/external-dns/app/valinor-social/kustomization.yaml b/kubernetes/apps/network/external-dns/app/valinor-social/kustomization.yaml new file mode 100644 index 0000000..c5f31b8 --- /dev/null +++ b/kubernetes/apps/network/external-dns/app/valinor-social/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: network +resources: + - ./helmrelease.yaml + - ./externalsecret.yaml diff --git a/kubernetes/apps/network/external-dns/ks.yaml b/kubernetes/apps/network/external-dns/ks.yaml new file mode 100644 index 0000000..2557fce --- /dev/null +++ b/kubernetes/apps/network/external-dns/ks.yaml @@ -0,0 +1,57 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps-externaldns-hsn-dev + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + interval: 10m + path: "./kubernetes/apps/network/external-dns/app/hsn-dev" + prune: true + sourceRef: + kind: GitRepository + name: valinor + wait: true + dependsOn: + - name: cluster-apps-external-secrets-stores +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps-externaldns-valinor-social + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + interval: 10m + path: "./kubernetes/apps/network/external-dns/app/valinor-social" + prune: true + sourceRef: + kind: GitRepository + name: valinor + wait: true + dependsOn: + - name: cluster-apps-external-secrets-stores +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps-externaldns-shared + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + interval: 10m + path: "./kubernetes/apps/network/external-dns/app/shared" + prune: true + sourceRef: + kind: GitRepository + name: valinor + wait: true + dependsOn: + - name: cluster-apps-external-secrets-stores diff --git a/kubernetes/apps/network/ingress-nginx/app/certificate.yaml b/kubernetes/apps/network/ingress-nginx/app/certificate.yaml new file mode 100644 index 0000000..c44a471 --- /dev/null +++ b/kubernetes/apps/network/ingress-nginx/app/certificate.yaml @@ -0,0 +1,16 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/cert-manager.io/certificate_v1.json +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: "hsn-dev" + namespace: network +spec: + secretName: "hsn-dev-tls" + issuerRef: + name: letsencrypt-cloudflare-production + kind: ClusterIssuer + commonName: "hsn.dev" + dnsNames: + - "hsn.dev" + - "*.hsn.dev" diff --git a/kubernetes/apps/network/ingress-nginx/app/externalsecret.yaml b/kubernetes/apps/network/ingress-nginx/app/externalsecret.yaml new file mode 100644 index 0000000..b09b881 --- /dev/null +++ 
b/kubernetes/apps/network/ingress-nginx/app/externalsecret.yaml @@ -0,0 +1,19 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: nginx-ingress-secrets + namespace: network +spec: + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + target: + name: nginx-ingress-secrets + creationPolicy: Owner + data: + - secretKey: nginx-ingress-bouncer-apikey + remoteRef: + key: Crowdsec + property: nginx-ingress-bouncer diff --git a/kubernetes/apps/network/ingress-nginx/app/helmrelease.yaml b/kubernetes/apps/network/ingress-nginx/app/helmrelease.yaml new file mode 100644 index 0000000..2894e94 --- /dev/null +++ b/kubernetes/apps/network/ingress-nginx/app/helmrelease.yaml @@ -0,0 +1,99 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: ingress-nginx-hsn +spec: + interval: 30m + chart: + spec: + chart: ingress-nginx + version: 4.9.0 + sourceRef: + kind: HelmRepository + name: ingress-nginx + namespace: flux-system + interval: 30m + values: + controller: + replicaCount: 2 + updateStrategy: + type: RollingUpdate + allowSnippetAnnotations: true + enableAnnotationValidations: true + service: + enabled: true + type: LoadBalancer + annotations: + load-balancer.hetzner.cloud/location: fsn1 + load-balancer.hetzner.cloud/protocol: tcp + load-balancer.hetzner.cloud/name: hsn-nginx + load-balancer.hetzner.cloud/uses-proxyprotocol: true + + publishService: + enabled: true + + metrics: + enabled: true + serviceMonitor: + enabled: true + namespace: network + namespaceSelector: + any: true + + ingressClassResource: + name: hsn-nginx + default: true + + config: + block-user-agents: "GPTBot,~*GPTBot*,ChatGPT-User,~*ChatGPT-User*,Google-Extended,~*Google-Extended*,CCBot,~*CCBot*,Omgilibot,~*Omgilibot*,FacebookBot,~*FacebookBot*" # taken from https://github.com/superseriousbusiness/gotosocial/blob/main/internal/web/robots.go + client-header-timeout: 120 + client-body-buffer-size: "100M" + client-body-timeout: 120 + enable-brotli: "true" + enable-ocsp: "true" + enable-real-ip: "true" + use-proxy-protocol: "true" + hide-headers: Server,X-Powered-By + hsts-max-age: "31449600" + keep-alive: 120 + keep-alive-requests: 10000 + proxy-body-size: 0 + proxy-buffer-size: "16k" + ssl-protocols: "TLSv1.3 TLSv1.2" + use-forwarded-headers: "true" + + extraArgs: + default-ssl-certificate: "network/hsn-dev-tls" + + topologySpreadConstraints: + - maxSkew: 2 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + app.kubernetes.io/instance: ingress-nginx-hsn + app.kubernetes.io/component: controller + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - controller + - key: app.kubernetes.io/instance + operator: In + values: + - ingress-nginx-hsn + topologyKey: kubernetes.io/hostname + + resources: + requests: + cpu: 23m + memory: 381M + + defaultBackend: + enabled: false diff --git a/kubernetes/apps/network/ingress-nginx/app/kustomization.yaml b/kubernetes/apps/network/ingress-nginx/app/kustomization.yaml new file mode 100644 index 0000000..c9f7d6c --- /dev/null +++ b/kubernetes/apps/network/ingress-nginx/app/kustomization.yaml @@ -0,0 
+1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: network +resources: + - ./helmrelease.yaml + - ./certificate.yaml + - ./externalsecret.yaml diff --git a/kubernetes/apps/network/ingress-nginx/ks.yaml b/kubernetes/apps/network/ingress-nginx/ks.yaml new file mode 100644 index 0000000..6c40e13 --- /dev/null +++ b/kubernetes/apps/network/ingress-nginx/ks.yaml @@ -0,0 +1,38 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps-ingress-nginx + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + interval: 10m + path: "./kubernetes/apps/network/ingress-nginx/app" + prune: true + sourceRef: + kind: GitRepository + name: valinor + wait: true + dependsOn: + - name: cluster-apps-cert-manager-issuers +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +# apiVersion: kustomize.toolkit.fluxcd.io/v1 +# kind: Kustomization +# metadata: +# name: cluster-apps-ingress-nginx-peertube +# namespace: flux-system +# labels: +# substitution.flux.home.arpa/enabled: "true" +# spec: +# interval: 10m +# path: "./kubernetes/apps/network/ingress-nginx/peertube" +# prune: true +# sourceRef: +# kind: GitRepository +# name: valinor +# wait: true +# dependsOn: +# - name: cluster-apps-cert-manager-issuers diff --git a/kubernetes/apps/network/ingress-nginx/mastodon/certificate.yaml b/kubernetes/apps/network/ingress-nginx/mastodon/certificate.yaml new file mode 100644 index 0000000..7346e42 --- /dev/null +++ b/kubernetes/apps/network/ingress-nginx/mastodon/certificate.yaml @@ -0,0 +1,16 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/cert-manager.io/certificate_v1.json +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: "valinor-social" + namespace: network +spec: + secretName: "valinor-social-tls" + issuerRef: + name: letsencrypt-dnsimple-production + kind: ClusterIssuer + commonName: "valinor.social" + dnsNames: + - "valinor.social" + - "*.valinor.social" diff --git a/kubernetes/apps/network/ingress-nginx/peertube/certificate.yaml b/kubernetes/apps/network/ingress-nginx/peertube/certificate.yaml new file mode 100644 index 0000000..9160026 --- /dev/null +++ b/kubernetes/apps/network/ingress-nginx/peertube/certificate.yaml @@ -0,0 +1,16 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/cert-manager.io/certificate_v1.json +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: "khazadtube-tv" + namespace: network +spec: + secretName: "khazadtube-tv-tls" + issuerRef: + name: letsencrypt-dnsimple-production + kind: ClusterIssuer + commonName: "khazadtube.tv" + dnsNames: + - "khazadtube.tv" + - "*.khazadtube.tv" diff --git a/kubernetes/apps/network/ingress-nginx/peertube/helmrelease.yaml b/kubernetes/apps/network/ingress-nginx/peertube/helmrelease.yaml new file mode 100644 index 0000000..60b4f51 --- /dev/null +++ b/kubernetes/apps/network/ingress-nginx/peertube/helmrelease.yaml @@ -0,0 +1,108 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: ingress-nginx-peertube +spec: + interval: 
30m + chart: + spec: + chart: ingress-nginx + version: 4.9.0 + sourceRef: + kind: HelmRepository + name: ingress-nginx + namespace: flux-system + interval: 30m + values: + controller: + replicaCount: 3 + updateStrategy: + type: RollingUpdate + allowSnippetAnnotations: true + enableAnnotationValidations: true + service: + enabled: true + type: LoadBalancer + annotations: + load-balancer.hetzner.cloud/location: fsn1 + load-balancer.hetzner.cloud/protocol: tcp + load-balancer.hetzner.cloud/name: peertube-nginx + load-balancer.hetzner.cloud/use-private-ip: false + load-balancer.hetzner.cloud/uses-proxyprotocol: true + + publishService: + enabled: true + + metrics: + enabled: true + serviceMonitor: + enabled: true + namespace: network + namespaceSelector: + any: true + + ingressClassResource: + name: peertube-nginx + default: false + + config: + block-user-agents: "GPTBot,~*GPTBot*,ChatGPT-User,~*ChatGPT-User*,Google-Extended,~*Google-Extended*,CCBot,~*CCBot*,Omgilibot,~*Omgilibot*,FacebookBot,~*FacebookBot*" # taken from https://github.com/superseriousbusiness/gotosocial/blob/main/internal/web/robots.go + client-header-timeout: 120 + client-body-buffer-size: "100M" + client-body-timeout: 120 + enable-brotli: "true" + enable-ocsp: "true" + enable-real-ip: "true" + use-proxy-protocol: "true" + hide-headers: Server,X-Powered-By + hsts-max-age: "31449600" + keep-alive: 120 + keep-alive-requests: 10000 + proxy-body-size: 0 + proxy-buffer-size: "16k" + ssl-protocols: "TLSv1.3 TLSv1.2" + use-forwarded-headers: "true" + server-snippet: | + resolver local=on ipv6=off; + ssl_stapling on; + ssl_stapling_verify on; + ssl-echd-curve: "secp384r1" + ssl-session-timeout: "1d" + ssl-session-cache: "shared:SSL:10m" + ssl-session-tickets: "off" + + extraArgs: + default-ssl-certificate: "network/khazadtube-tv-tls" + + topologySpreadConstraints: + - maxSkew: 2 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + app.kubernetes.io/instance: ingress-nginx-peertube + app.kubernetes.io/component: controller + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - controller + - key: app.kubernetes.io/instance + operator: In + values: + - ingress-nginx-peertube + topologyKey: kubernetes.io/hostname + + resources: + requests: + cpu: 23m + memory: 381M + + defaultBackend: + enabled: false diff --git a/kubernetes/apps/network/ingress-nginx/peertube/kustomization.yaml b/kubernetes/apps/network/ingress-nginx/peertube/kustomization.yaml new file mode 100644 index 0000000..dac1ce5 --- /dev/null +++ b/kubernetes/apps/network/ingress-nginx/peertube/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: network +resources: + - ./helmrelease.yaml + - ./certificate.yaml diff --git a/kubernetes/apps/network/kustomization.yaml b/kubernetes/apps/network/kustomization.yaml new file mode 100644 index 0000000..a4de423 --- /dev/null +++ b/kubernetes/apps/network/kustomization.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./echo-server/ks.yaml + - ./external-dns/ks.yaml + - ./ingress-nginx/ks.yaml diff 
--git a/kubernetes/apps/network/namespace.yaml b/kubernetes/apps/network/namespace.yaml new file mode 100644 index 0000000..4d78d7b --- /dev/null +++ b/kubernetes/apps/network/namespace.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: network + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/kubernetes/apps/security/external-secrets/app/helmrelease.yaml b/kubernetes/apps/security/external-secrets/app/helmrelease.yaml new file mode 100644 index 0000000..01fd32f --- /dev/null +++ b/kubernetes/apps/security/external-secrets/app/helmrelease.yaml @@ -0,0 +1,25 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: external-secrets + namespace: security +spec: + interval: 30m + chart: + spec: + chart: external-secrets + version: 0.9.11 + interval: 30m + sourceRef: + kind: HelmRepository + name: external-secrets + namespace: flux-system + values: + installCRDs: true + replicaCount: 3 + leaderElect: true + serviceMonitor: + enabled: true + interval: 1m diff --git a/kubernetes/apps/security/external-secrets/app/kustomization.yaml b/kubernetes/apps/security/external-secrets/app/kustomization.yaml new file mode 100644 index 0000000..5a7bd4d --- /dev/null +++ b/kubernetes/apps/security/external-secrets/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: security +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/security/external-secrets/cluster-secrets/kustomization.yaml b/kubernetes/apps/security/external-secrets/cluster-secrets/kustomization.yaml new file mode 100644 index 0000000..1f608cc --- /dev/null +++ b/kubernetes/apps/security/external-secrets/cluster-secrets/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./pgo-s3-creds.yaml diff --git a/kubernetes/apps/security/external-secrets/cluster-secrets/pgo-s3-creds.yaml b/kubernetes/apps/security/external-secrets/cluster-secrets/pgo-s3-creds.yaml new file mode 100644 index 0000000..daba91e --- /dev/null +++ b/kubernetes/apps/security/external-secrets/cluster-secrets/pgo-s3-creds.yaml @@ -0,0 +1,41 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/external-secrets.io/clusterexternalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ClusterExternalSecret +metadata: + name: pgo-s3-creds +spec: + externalSecretName: pgo-s3-creds + + namespaceSelector: + matchLabels: + pgo-enabled-hsn.dev: "true" + + refreshTime: "1m" + + externalSecretSpec: + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + + target: + name: pgo-s3-creds + creationPolicy: Owner + template: + engineVersion: v2 + data: + s3.conf: | + [global] + repo1-s3-key={{ .pgo_crunchy_postgres_access_key }} + repo1-s3-key-secret={{ .pgo_crunchy_postgres_secret_key }} + + dataFrom: + - extract: + key: pgo-s3-creds + rewrite: + - regexp: + source: "[-]" + target: "_" + - regexp: + source: "(.*)" + target: "pgo_$1" diff --git a/kubernetes/apps/security/external-secrets/ks.yaml b/kubernetes/apps/security/external-secrets/ks.yaml new file mode 100644 index 0000000..a954601 --- /dev/null +++ 
b/kubernetes/apps/security/external-secrets/ks.yaml @@ -0,0 +1,50 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps-external-secrets + namespace: flux-system +spec: + interval: 10m + path: "./kubernetes/apps/security/external-secrets/app" + prune: true + sourceRef: + kind: GitRepository + name: valinor + wait: true +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps-external-secrets-stores + namespace: flux-system +spec: + interval: 10m + path: "./kubernetes/apps/security/external-secrets/stores" + prune: true + sourceRef: + kind: GitRepository + name: valinor + wait: true + dependsOn: + - name: cluster-apps-external-secrets +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps-external-secrets-cluster-secrets + namespace: flux-system +spec: + interval: 10m + path: "./kubernetes/apps/security/external-secrets/cluster-secrets" + prune: true + sourceRef: + kind: GitRepository + name: valinor + wait: true + dependsOn: + - name: cluster-apps-external-secrets + - name: cluster-apps-external-secrets-stores diff --git a/kubernetes/apps/security/external-secrets/stores/kustomization.yaml b/kubernetes/apps/security/external-secrets/stores/kustomization.yaml new file mode 100644 index 0000000..eb23e28 --- /dev/null +++ b/kubernetes/apps/security/external-secrets/stores/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./onepassword diff --git a/kubernetes/apps/security/external-secrets/stores/onepassword/clustersecretstore.yaml b/kubernetes/apps/security/external-secrets/stores/onepassword/clustersecretstore.yaml new file mode 100644 index 0000000..a669ffe --- /dev/null +++ b/kubernetes/apps/security/external-secrets/stores/onepassword/clustersecretstore.yaml @@ -0,0 +1,19 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/external-secrets.io/clustersecretstore_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ClusterSecretStore +metadata: + name: onepassword-connect + namespace: security +spec: + provider: + onepassword: + connectHost: http://onepassword-connect:8080 + vaults: + valinor: 1 + auth: + secretRef: + connectTokenSecretRef: + name: onepassword-connect-token + key: token + namespace: security diff --git a/kubernetes/apps/security/external-secrets/stores/onepassword/helmrelease.yaml b/kubernetes/apps/security/external-secrets/stores/onepassword/helmrelease.yaml new file mode 100644 index 0000000..515a64b --- /dev/null +++ b/kubernetes/apps/security/external-secrets/stores/onepassword/helmrelease.yaml @@ -0,0 +1,142 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: onepassword-connect + namespace: security +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 2.4.0 + interval: 30m + 
sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + + values: + controllers: + main: + annotations: + reloader.stakater.com/auto: "true" + containers: + main: + image: + repository: docker.io/1password/connect-api + tag: 1.7.2 + env: + OP_BUS_PORT: "11220" + OP_BUS_PEERS: "localhost:11221" + OP_HTTP_PORT: &port-connect 8080 + OP_SESSION: + valueFrom: + secretKeyRef: + name: onepassword-connect-secret + key: onepassword-credentials.json + probes: + liveness: + enabled: true + custom: true + spec: + httpGet: + path: /heartbeat + port: *port-connect + initialDelaySeconds: 15 + periodSeconds: 30 + failureThreshold: 3 + readiness: + enabled: true + custom: true + spec: + httpGet: + path: /health + port: *port-connect + initialDelaySeconds: 15 + startup: + enabled: true + custom: true + spec: + httpGet: + path: /health + port: *port-connect + failureThreshold: 30 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 1 + sync: + image: + repository: docker.io/1password/connect-sync + tag: 1.7.2 + env: + - name: OP_SESSION + valueFrom: + secretKeyRef: + name: onepassword-connect-secret + key: onepassword-credentials.json + - name: OP_HTTP_PORT + value: &port-sync 8081 + - name: OP_BUS_PORT + value: "11221" + - name: OP_BUS_PEERS + value: "localhost:11220" + probes: + readinessProbe: + httpGet: + path: /health + port: *port-sync + initialDelaySeconds: 15 + livenessProbe: + httpGet: + path: /heartbeat + port: *port-sync + failureThreshold: 3 + periodSeconds: 30 + initialDelaySeconds: 15 + volumeMounts: + - name: shared + mountPath: /home/opuser/.op/data + + service: + main: + ports: + http: + port: *port-connect + + ingress: + main: + classname: "nginx" + annotations: + nginx.ingress.kubernetes.io/whitelist-source-range: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" + hosts: + - host: &host "1pwconnect.hsn.dev" + paths: + - path: / + service: + name: main + port: http + + tls: + - hosts: + - *host + + defaultPodOptions: + securityContext: + runAsUser: 999 + runAsGroup: 999 + + persistence: + shared: + enabled: true + type: emptyDir + globalMounts: + - path: /home/opuser/.op/data + + resources: + requests: + cpu: 5m + memory: 10Mi + limits: + memory: 100Mi diff --git a/kubernetes/apps/security/external-secrets/stores/onepassword/kustomization.yaml b/kubernetes/apps/security/external-secrets/stores/onepassword/kustomization.yaml new file mode 100644 index 0000000..70fa87d --- /dev/null +++ b/kubernetes/apps/security/external-secrets/stores/onepassword/kustomization.yaml @@ -0,0 +1,15 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: security +resources: + - ./secret.sops.yaml + - ./helmrelease.yaml + - ./clustersecretstore.yaml + +labels: + - pairs: + app.kubernetes.io/name: stores + app.kubernetes.io/instance: onepassword + app.kubernetes.io/part-of: external-secrets diff --git a/kubernetes/apps/security/external-secrets/stores/onepassword/secret.sops.yaml b/kubernetes/apps/security/external-secrets/stores/onepassword/secret.sops.yaml new file mode 100644 index 0000000..cd48415 --- /dev/null +++ b/kubernetes/apps/security/external-secrets/stores/onepassword/secret.sops.yaml @@ -0,0 +1,55 @@ +apiVersion: v1 +kind: Secret +metadata: + name: onepassword-connect-token + namespace: security +stringData: + token: 
ENC[AES256_GCM,data:ks4uBrkQP+oqamaYE60ubJ6XjCdGXPPMaqfUWOhaFBehL0F2jgXKildwYsEOYtUNbsOmYRXGYaoZpkIqFd8/JlOX6eYoC0ibCdKJHexV4z4KQ/o9fKcGcP2VpfdqCYlJn/Lt716MHJHA70a0kI/whdtK+FAwFHn9ulMpM4EvOTgbK9RkPO5sh6kOjHywnu3Ri7F3bp8/HXrj0ZAsIijlhQFQiNzctXvD1Q/jEaLIXpFPS0yK9SDMMQ2LYwxTu/gde7X5+J76PWE53R9hm31/eDiW9c9I52XRqWVVVXzuVFvP5SEMS5DKjcfb8ZsZB+ahL+80FKnW0tpEGWqjfGUY9aGN4VOLp3q3EFu3YW54vmUq1Eg2f/i4TeT133NCgpuntaGtfkhv5YIJ182v5fo2F5J9FalaJXNS95CjxDHSC08mGZ8XITbi6oIuvjT1R1g3myalZ9WUXcP3BIQY/kkx2zrNbmTlk/mhchkgxQZObosAnbtpUsUrpKjlMzsvODuQg1iqcuMaooJ8yjovbhdsrp7SEafK5cEVfOTmfUuzm2jzxh4R1m2TJuAqPa/SOJ1sPlCXHa1Q10dxT4kkGjWZ9Muhf7SAkB7PDPcBXfX8/0gwwai2X4Gl+3Szsyt+deZOM60bp+XZ9xRjPWcIxl1uhDoG7gykpsKeDE6tf1+PC9B2GESFHFqbPVicZ/cZv/YHXcXJb5YRKPuE8/OWjzC0A4W5CC/IH3UAzBafiAu9UU/hk7ASqIv/k0wgTZboyRmzxiH06ahdj5qMUqd3schknIsuxgM0Hjv3W/sSaW3xAkR+3gfhcCLxc1IhhPRIO0OEeoaEBQD9UMVuRKRwPo+lpVU7n/UePjuDHpjXeBu2u83sLqaJrcSyId6ksJLoayCKch+46PazE/E/1vn054rml4hDA4Zuwgnyu/g=,iv:lerOeNOfahiAJX1WFUxu5aiw51q274Cz2fmiPtqC0go=,tag:o8eDvJXG+l/YB516m6GB7A==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1g786w8t40g9y29l33rfd4jqlwhrgsxsc7ped6uju60k54j0q3enql3kfve + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBRVjdHNjVDN1JCNEFqa203 + bVN6cStUeWFYUUZHZGhCSFFsemM4TnBkQ0g0CmdFZTBpUVgwMWFPbmZFT01BdUpu + NG1HZURFb0o3T2JwQ3U4YnJoYzhFOUkKLS0tIDhGVnhLRmhSZ3pQbGRvRWs5dWx4 + WWxwbndNQVBOeGRoandWL256Z2s2ZFEKtIKW60qNUBPMS0yWPEkDBMokemihiWQ7 + GqSGjNHDDlkKtd1jyY/qCZGM9t1ZiD9t34wAQVOrn9P/WGJg6X/FsQ== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2023-08-15T16:15:14Z" + mac: ENC[AES256_GCM,data:YVC+MuYp66Ej8XRpT/fsBPBz3laCjfoXikNzc4C5k4E3QbM68+jKX81sbJDGL0B3TSwcIxTc4e8GTisqVhxdH26y/g+xOK5/n6Y+FulDuMmvIiIqBhmQXlQii+DUcLZocRhwEkKDm344M3pRliSVVHa44JRY4qf3E9wKjQhg9tk=,iv:sBTtgB0QK52EFfIxJzFRvXP5MR4ARSfR8v/pha0rDDI=,tag:7KZI8DC967fFvO83KnXkPQ==,type:str] + pgp: [] + encrypted_regex: ^(data|stringData)$ + version: 3.7.3 +--- +apiVersion: v1 +kind: Secret +metadata: + name: onepassword-connect-secret + namespace: security +stringData: + onepassword-credentials.json: 
ENC[AES256_GCM,data:Ro43t6FQ22+gIg0VAlcuwBy+jNUNM8Q9HI64InTcDSo0HoF2n5ombH1XrVw8aHYacOMM7WlNNsU5DeecC68HpSo45tOiIQTiXDNQyHDsCkoCEqgUZnEY0ggN2hpycnSmWFn7Kj8HoAnG+5q1aGsKI8PzjTAJYdbF36pmXAm01Ge2IaJAKXaWZ2WA+wUqUXtgr4wtIWgfDacQYMhonnlzzvjFFCkynOhRdCC35jiwJA5bX+OnXnN39ANZufYqlWdhoQHOVgBrrFTX5ooBp3Nvf0Ent3L+zDVvTpmmQm19gxc5jYUGHAlwUEbA/rE/Z6yvMMQhsRxTZtIR/f+ehpgd3OBYCy60ub0dTsbafinOvRJp4xcuhmNX333Li/b+OYOgmKY/vdEUn/2cxbA4Uo/PIRWXmwIaweDShRegHGBYzoCOndfLjfkOwtD7jsShPQ5Vu/aTXQCIFjl2zvcZeR2TT+3Oi3uIiLXlQsOWyU1asQz23PSFeXsiUV8G4P8eomZxiX8/k3D3okkknqvi3GfoRXY49vrewurybLBhBSvRNxYrGouk+Vxy/HtZCwUV7q2TwTlI0eGpBpnbJMGDiuzPV4LFy9SGuroHhKhrNyh2n72IovKLAh//ml+4QVFcUJw7HUP3D7VfYRuohItXi5DDZz4566gNf9a4QkPB/2GHkkcQduDVuDqIFIvxjG7vVpBxi+dbyO6zEt7kI7qo0yLoGsVX7t1laiKD28skgKBOiHyGsMwkjxLtcjjOLfYVM/BgN9BAN5nu9Wqm9HdVM8Rfbjc7U/ts/BtfxRgWNM3qvS7NFkKlYKTtD0JmnzUh7ddgi7HUOczltSl6OgdZcFLEmuQIGdtNeILdHSjp1D3wIT5vgfmKnkhwNqX14K6jLmFaxmtzgYa+65DyWnoYgA1QbvTAPQIfVFvCe2NhwBG5XFNcAKOQbwYdwjmhlKVh+Y5uUwaeTCP9VueUpnrxDzkWOsp33XixkcjCfvrQmY5HQs+sGwr+Be1T+t3ET/8p377CjUf0K3U4PIYvB6NeoD9TU+chYPwpToZoe6vwDEP+GEqhLOBBc6Kmd0VgI1vkShqGtLE/92L5xM+kVJTdUCdLZ9//jFwhfgFFITGhju5UiR4FsPY/IwLTqf+VzXI19aBzrEWN0R88KBk6koL4bORXdd4OEKX36S/Vw9NLGLxQzHxw5mihhBksPdh+kyiDgBgktL2levNF7PV/epdbNRvva/hNKSEaPRCzFV5xlVlMOqiC8wfkMnbatiOX3vGRIhzKjWOMKg3v7MUi6zPRZxs1N4l+V8mddIXsf+BGpGEK4vnlvXQFCP7g97pa6unGv+L9D1RKU4Mp9+AU7Oo82k54oWpgg5DqiCWfnQhXGJLRZabr48Hglp8tdoqKnPCLK+TalofreZxBrtlK3T8qY8g7uUYLTe2nSwU8KKJbgMCtHLtifdrvtyY/S6nN5EUHJpQA2eNqVFQwLDuwJ6crO6ipGbaQ2U36ujhuqJ1+9a8OT4PkJK/aE6/koVRzZe50K+H+rbxCVQEpPM+kG4ClSuT47vxLujEm78Npf+PeIyS36RWICxeNyHhdFtbKDyFriBVi+pEEpXFhtpgBTqmB6MQ7te/L9kglkPOIOjbF4/rnzffvDEWZW5XeKz3vYujZ6vTgosf+rDsqsnyRWU2RHbhPQ6U7aS0ujAiBYdMQY4Mtlz+sa8f/b5nCzNPuv2Y1KdwIzCLLjIcqa/UnuTPNPcI8k6arXFWXS2Mq0R5QYfPKFCStVvHzHITHvZNt8bV1g2j5gYvnJkauu66yzUSDvglJVjjvI55nJQTryUDFtZX+lpiAZxwrsepYcP/EygKdZEdg3f8KbZWNQXxt2njLeJmad7Le5UHBhiGqP9H8eCLbwi0gvGwkGkveQHtCsTswOuY3l9E2WG0R9iamwDSC,iv:9QuqDosuTy7OoTfcSJ2mTYLQY9yTa9krJvvzqA7tH30=,tag:wtN/GsxxKhYgipOz8FqsCw==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1g786w8t40g9y29l33rfd4jqlwhrgsxsc7ped6uju60k54j0q3enql3kfve + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBRVjdHNjVDN1JCNEFqa203 + bVN6cStUeWFYUUZHZGhCSFFsemM4TnBkQ0g0CmdFZTBpUVgwMWFPbmZFT01BdUpu + NG1HZURFb0o3T2JwQ3U4YnJoYzhFOUkKLS0tIDhGVnhLRmhSZ3pQbGRvRWs5dWx4 + WWxwbndNQVBOeGRoandWL256Z2s2ZFEKtIKW60qNUBPMS0yWPEkDBMokemihiWQ7 + GqSGjNHDDlkKtd1jyY/qCZGM9t1ZiD9t34wAQVOrn9P/WGJg6X/FsQ== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2023-08-15T16:15:14Z" + mac: ENC[AES256_GCM,data:YVC+MuYp66Ej8XRpT/fsBPBz3laCjfoXikNzc4C5k4E3QbM68+jKX81sbJDGL0B3TSwcIxTc4e8GTisqVhxdH26y/g+xOK5/n6Y+FulDuMmvIiIqBhmQXlQii+DUcLZocRhwEkKDm344M3pRliSVVHa44JRY4qf3E9wKjQhg9tk=,iv:sBTtgB0QK52EFfIxJzFRvXP5MR4ARSfR8v/pha0rDDI=,tag:7KZI8DC967fFvO83KnXkPQ==,type:str] + pgp: [] + encrypted_regex: ^(data|stringData)$ + version: 3.7.3 diff --git a/kubernetes/apps/security/kustomization.yaml b/kubernetes/apps/security/kustomization.yaml new file mode 100644 index 0000000..ab51902 --- /dev/null +++ b/kubernetes/apps/security/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./external-secrets/ks.yaml diff --git a/kubernetes/apps/security/namespace.yaml 
b/kubernetes/apps/security/namespace.yaml new file mode 100644 index 0000000..791198c --- /dev/null +++ b/kubernetes/apps/security/namespace.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: security + labels: + kustomize.toolkit.fluxcd.io/prune: disabled + pgo-enabled-hsn.dev: "true" diff --git a/kubernetes/apps/system/kustomization.yaml b/kubernetes/apps/system/kustomization.yaml new file mode 100644 index 0000000..7d9520b --- /dev/null +++ b/kubernetes/apps/system/kustomization.yaml @@ -0,0 +1,12 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./intel-device-plugins/ks.yaml + - ./node-feature-discovery/ks.yaml + - ./reloader/ks.yaml + - ./snapshot-controller/ks.yaml diff --git a/kubernetes/apps/system/namespace.yaml b/kubernetes/apps/system/namespace.yaml new file mode 100644 index 0000000..210c268 --- /dev/null +++ b/kubernetes/apps/system/namespace.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: system + labels: + goldilocks.fairwinds.com/enabled: "true" diff --git a/kubernetes/apps/system/node-feature-discovery/app/helmrelease.yaml b/kubernetes/apps/system/node-feature-discovery/app/helmrelease.yaml new file mode 100644 index 0000000..6728f02 --- /dev/null +++ b/kubernetes/apps/system/node-feature-discovery/app/helmrelease.yaml @@ -0,0 +1,37 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: node-feature-discovery +spec: + interval: 30m + chart: + spec: + chart: node-feature-discovery + version: 0.15.0 + sourceRef: + kind: HelmRepository + name: kubernetes-sigs-nfd + namespace: flux-system + interval: 30m + install: + crds: CreateReplace + upgrade: + crds: CreateReplace + values: + master: + resources: + requests: + cpu: 21m + memory: 150Mi + limits: + memory: 150Mi + + worker: + resources: + requests: + cpu: 5m + memory: 150Mi + limits: + memory: 150Mi diff --git a/kubernetes/apps/system/node-feature-discovery/app/kustomization.yaml b/kubernetes/apps/system/node-feature-discovery/app/kustomization.yaml new file mode 100644 index 0000000..fbba51d --- /dev/null +++ b/kubernetes/apps/system/node-feature-discovery/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/system/node-feature-discovery/ks.yaml b/kubernetes/apps/system/node-feature-discovery/ks.yaml new file mode 100644 index 0000000..39ecff4 --- /dev/null +++ b/kubernetes/apps/system/node-feature-discovery/ks.yaml @@ -0,0 +1,19 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &appname node-feature-discovery + namespace: flux-system +spec: + targetNamespace: system + commonMetadata: + labels: + app.kubernetes.io/name: *appname + interval: 10m + path: "./kubernetes/apps/system/node-feature-discovery/app" + prune: true + sourceRef: + kind: GitRepository + name: valinor + wait: true diff --git 
a/kubernetes/apps/system/reloader/app/helmrelease.yaml b/kubernetes/apps/system/reloader/app/helmrelease.yaml new file mode 100644 index 0000000..485feac --- /dev/null +++ b/kubernetes/apps/system/reloader/app/helmrelease.yaml @@ -0,0 +1,30 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: reloader + namespace: system +spec: + interval: 30m + chart: + spec: + chart: reloader + version: 1.0.58 + sourceRef: + kind: HelmRepository + name: stakater + namespace: flux-system + interval: 30m + values: + reloader: + reloadStrategy: annotations + + deployment: + resources: + requests: + cpu: 15m + memory: 63Mi + limits: + cpu: 45m + memory: 63Mi diff --git a/kubernetes/apps/system/reloader/app/kustomization.yaml b/kubernetes/apps/system/reloader/app/kustomization.yaml new file mode 100644 index 0000000..045ec0c --- /dev/null +++ b/kubernetes/apps/system/reloader/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: system +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/system/reloader/ks.yaml b/kubernetes/apps/system/reloader/ks.yaml new file mode 100644 index 0000000..cf78456 --- /dev/null +++ b/kubernetes/apps/system/reloader/ks.yaml @@ -0,0 +1,17 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps-reloader + namespace: flux-system + labels: + substitution.flux.home.arpa/enabled: "true" +spec: + interval: 10m + path: "./kubernetes/apps/system/reloader/app" + prune: true + sourceRef: + kind: GitRepository + name: valinor + wait: true diff --git a/kubernetes/apps/system/snapshot-controller/app/helmrelease.yaml b/kubernetes/apps/system/snapshot-controller/app/helmrelease.yaml new file mode 100644 index 0000000..3e19789 --- /dev/null +++ b/kubernetes/apps/system/snapshot-controller/app/helmrelease.yaml @@ -0,0 +1,34 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: snapshot-controller +spec: + interval: 30m + chart: + spec: + chart: snapshot-controller + version: 2.0.4 + sourceRef: + kind: HelmRepository + name: piraeus + namespace: flux-system + maxHistory: 2 + install: + crds: CreateReplace + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + crds: CreateReplace + remediation: + retries: 3 + uninstall: + keepHistory: false + values: + controller: + serviceMonitor: + create: true + webhook: + enabled: false diff --git a/kubernetes/apps/system/snapshot-controller/app/kustomization.yaml b/kubernetes/apps/system/snapshot-controller/app/kustomization.yaml new file mode 100644 index 0000000..fbba51d --- /dev/null +++ b/kubernetes/apps/system/snapshot-controller/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/apps/system/snapshot-controller/app/pki.yaml b/kubernetes/apps/system/snapshot-controller/app/pki.yaml 
new file mode 100644 index 0000000..a2bb49e --- /dev/null +++ b/kubernetes/apps/system/snapshot-controller/app/pki.yaml @@ -0,0 +1,17 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/cert-manager.io/issuer_v1.json +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: snapshot-controller-webhook-selfsign +spec: + selfSigned: {} +--- +# yaml-language-server: $schema=https://ks.hsn.dev/cert-manager.io/issuer_v1.json +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: snapshot-controller-webhook-ca +spec: + ca: + secretName: snapshot-controller-webhook-ca diff --git a/kubernetes/apps/system/snapshot-controller/ks.yaml b/kubernetes/apps/system/snapshot-controller/ks.yaml new file mode 100644 index 0000000..1fcd60f --- /dev/null +++ b/kubernetes/apps/system/snapshot-controller/ks.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &appname snapshot-controller + namespace: flux-system +spec: + targetNamespace: system + commonMetadata: + labels: + app.kubernetes.io/name: *appname + interval: 10m + path: "./kubernetes/apps/system/snapshot-controller/app" + prune: true + sourceRef: + kind: GitRepository + name: valinor + wait: true + timeout: 2m + dependsOn: + - name: cluster-apps-cert-manager diff --git a/kubernetes/bootstrap/flux/age-key.sops.yaml b/kubernetes/bootstrap/flux/age-key.sops.yaml new file mode 100644 index 0000000..2a2f81d --- /dev/null +++ b/kubernetes/bootstrap/flux/age-key.sops.yaml @@ -0,0 +1,28 @@ +# yamllint disable +apiVersion: v1 +kind: Secret +metadata: + name: sops-age + namespace: flux-system +stringData: + age.agekey: ENC[AES256_GCM,data:DELuczoRtBQW58s5i8Nmb4Hp+XzZ35aiOfwBJDXaqgfQMFY63QXRzBVkTDS0GxFoGt3jvLILJPwde0OHiVrkNEZdDwRr3JZKnTs=,iv:DqAaHlJRT8SUItoceaIQ7smJUcmtTeu51AJt1WM0pKA=,tag:YGbmN4hRhWCCGLPvyDLsnA==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1g786w8t40g9y29l33rfd4jqlwhrgsxsc7ped6uju60k54j0q3enql3kfve + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAxMURBNzFadmc1ejZ4eStp + czlYTUtWUk52NzlaZ1NJSzU2R3R4VFB4TWtZCmc2SjZ1OVhNYXlXQ21WT1I0ZjU3 + V2RzRU5PUnYzMWlRcy9vTG5JNkIwVncKLS0tIHdjU0VSaVdBQ3A5ZDlybTBiUVB1 + YVE3NVptM1Q2ZjEyZHE3N2ZIaEtlRFUKQZEkNHDnlnZYXqK62SplHa7gEsEIBVNV + 4TYZQzf+fBmlxmDCwDLTNTJZZJfgLjYPfBStvGSx+VbW2HS6PoXMFQ== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2023-08-15T16:24:05Z" + mac: ENC[AES256_GCM,data:QxME2bUjRTBpPpMR1ZWANlF+EskMRJuyylOiRHcPzWu9Bve1rz+4mkNdlUYzf0gdLi8psRc6ko0Jb6IH9lLZxOkMAh2YYaMrzAf3hMRBytiJKX/nUs9tIJv8Lft21nXibeaT/TcT5YNwNvd3nTZgBJcJ5nYwmU1sTn3/Lay5jrY=,iv:0uVxxRg+Dp8oZ43DnbtEx25rQcJ23Ag13eKfvvXukVk=,tag:/4Ufpkh8DCONTEWy4pc5bw==,type:str] + pgp: [] + encrypted_regex: ^(data|stringData)$ + version: 3.7.3 diff --git a/kubernetes/bootstrap/flux/git-deploy-key.sops.yaml b/kubernetes/bootstrap/flux/git-deploy-key.sops.yaml new file mode 100644 index 0000000..e7d705b --- /dev/null +++ b/kubernetes/bootstrap/flux/git-deploy-key.sops.yaml @@ -0,0 +1,31 @@ +# yamllint disable +apiVersion: v1 +kind: Secret +metadata: + name: git-deploy-key + namespace: flux-system +stringData: + #ENC[AES256_GCM,data:O1eknYe94FguDRRTE4tIv0yQKVJcqHqrHe510i15Kw==,iv:aa5mj7DH/ZEXtqeG+7s/eThK8SYJDT8WmGtwDng9Zh4=,tag:kPuHF2ObA/8IlPzwsuuEqw==,type:comment] + identity: 
ENC[AES256_GCM,data:DSBU1rLXKta1xWzwagSPpHvT3f/j8ICa4iTkYkby63sYefFfQt9u0qFAYMQChfR9imjUtLmV4AvwF/c9pnOpyHTTLXdZMgkK/y8/17NAbmolUOFQfFvvU7WarxArOqk5QYon4vvIxI1d0Xtug46h7KXHQjJl3UWeBJ2IUnYWEfsPAoN4fNrAxI5DNtS+ppPcFZbJ6BMHx8rPadOhuKbgLR4KACg8PFKHW9RLvJ4s2Kykv9bYyHxUPaLMdL4UAC55i0+RGydZ/72CHokIXDKmwtQquf1vL5HVup/5aB9FQA4oLe0Pk5v2m001YfrvMaO+1GXodU3V2+VIWVcdjJciQsTk/IbbJbr0IIdLlT53WOKx1FNmb1HODUBAX8/mt1Kv5pNDm/BtZXk9ad0chheBB96GRyDTU+/qUzSh3WYGxGrLX+KTE3W8NCbzieOwVgwrDt5P09i0iBrAbXJzSwuDfF0mENzoNN1uRWgA+cm9FGbR+HjEv0UHHN8I9/BCBRMIeXXZ,iv:hn3PwE5mnIgzJNLw+ruu5/jUqFQOpQTYh2oZUdeOplM=,tag:2qttj/0hdChixM7rzaLr5g==,type:str] + #ENC[AES256_GCM,data:yG8yduTJrEB1oGbSQdLwFyDgjbmkT4fcbkvhMj0oCw3Yi9HvSdygq5Uo/2DQ0t+GRzpVqsedrLvB0yciVWpfEaKewXj6neGmMTcsT/llWbSvXS4dHWGBDL6Y/BXVNhyrYLRu,iv:K4dJKqM+AZE8giMcoBOlb9GDnLDCJSyhpWangKsNXkE=,tag:rfRpq8iv+2rwFRJY6sw19A==,type:comment] + known_hosts: ENC[AES256_GCM,data:Ej+HKpMCe/3FGSh/qd7cud4OyBAWpH9tAxP5lSq50kGdyNUz/pLWde4t8uxN6aLtV8Md0lewMtVQTIl3peQuOnroB+EOUnimJTTLLuno9G4zyzkcZRf1un9UY4Jvs9g1vT26DyE/paqIOvJ3u8pfotI4tyO1so60HpHc4lQoakp5L7VMzlPUY3g6hHlOGmqEMXfg8s07UUC0DRNyzBb5INXmL4YweGluI6ros1t1Rh0UKjfF7U0f/5xgzY80WvvcXmhd/NYuKaqUb5YMT7Gk/dUtD0RpxhH+iUi4WMaRyWcbwt+HvS4cUDrHi9ZypLBLxD0I6oiz8ZZYnbNAbe2vsqxiYOPbE+E+QXGY98B1eP18nVXe5YICMuclgAXV54Kcc+/Fw/25oNcJREgdvbxA0CGYwXooURan33kKAXFaBdGpGinj4mBXciepy+fPpxe3sa9YpY1pAF88lmBHzmBR77XltWebOmsagauI1gG/QEgItUaaMy3aDt1dNmrG1jzjkrXYJfCrmLdDXJ3kgd+ZS/95l+B0WfUralbZtQte1IzsgyCRTMhOv8NJ1okCetmi7i5kD2+hiZ7T90M4UMDczHfBXulq9mqlyqQKx1hzdKLP5CnmF0QLbupau+xOLvn8l8Qpt4pxZ6K/ul9jC6PlKZE2iTmgBb2WV1cXdG6sQfrxBFYKaibG4hv7jmIJAI+hzdFq9M+nQ0olIuU3FI3vQqSC05nzRMo0PH+408xnqAWKmY/43SfXpXzUa6eg1+GpoYZjK/BcWA7Nmcy1Fk1lf49JVu0eS3/dqxq+iBbPK9Zy/CQaaWXMg1wpu9y+13CwDuEYC/luz9SrryuY14T7hHldjSxDh+gp0qD2T2VrJA1f846Q7c2SWLcRSPuvYnRcGIS4gQpLjnNKLA14GqDhmFFA/cjkIkgnuILB08SxulMYFu0wu5s4p/vNYj+9BGj66CDvynyLZjCas0cT4ubfq8A=,iv:j1jftBGnQlln+7gECyaanotig27AzyHWLFOG5KWX53c=,tag:1NyHwqKx6RpruLKuYPYIxQ==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1g786w8t40g9y29l33rfd4jqlwhrgsxsc7ped6uju60k54j0q3enql3kfve + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBnbkdZamFHbTVoYXpCdGpx + a214aFQvUUxWSW43SHV2QWFzVjJTeTNiSXhrCnF2VmR5eFlpc3JlcGY0R2J3aWdr + aEZSL0gvRzZiYi9ELzZOeVkyRExkM0EKLS0tIGczRVRZY2U3S3F1ZVY2RnJwTWlw + L0s5YXNFUlhmTS9GSkdZNWNJeDlCSm8K8j+Pvu+DUYLjQ27N2dPU8rGXYaZORK4I + n6U4KG2qiRAZn1eVp4t/8/2A5/0UupsrcYyKvXAiMLrpsf9kaq3Xmw== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2023-08-21T21:51:49Z" + mac: ENC[AES256_GCM,data:lXLx3E5CrfeVN6/a9WDVie4Mfn0v7pcadSWmiKoge9B5obhgAIVChSG8d8KFPkAN6gCBi1D/O3ukSogAwASZ2q8t4yUes6YsD3t4aZrADw6YVgOjNDeJHMiaXMP6fQ0ze665NEgyGBnIRxDuaTXHpaNXsiqSHr+51rRHi0S6K2g=,iv:I616VwtsUKqqvDfmu2KiY9i2ODaTD0tZZHaYG8DjyZA=,tag:dKFmvDZWMBsfhnuqAyMm+g==,type:str] + pgp: [] + encrypted_regex: ^(data|stringData)$ + version: 3.7.3 diff --git a/kubernetes/bootstrap/flux/kustomization.yaml b/kubernetes/bootstrap/flux/kustomization.yaml new file mode 100644 index 0000000..f4d1834 --- /dev/null +++ b/kubernetes/bootstrap/flux/kustomization.yaml @@ -0,0 +1,17 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - github.com/fluxcd/flux2/manifests/install?ref=v2.2.2 +patches: + - patch: |- + $patch: delete + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: not-used + target: + group: networking.k8s.io + version: v1 
+ kind: NetworkPolicy diff --git a/kubernetes/bootstrap/hcloud.sops.yaml b/kubernetes/bootstrap/hcloud.sops.yaml new file mode 100644 index 0000000..27e964d --- /dev/null +++ b/kubernetes/bootstrap/hcloud.sops.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Secret +metadata: + name: hcloud + namespace: kube-system +stringData: + ROBOT_ENABLED: ENC[AES256_GCM,data:tTSnWw==,iv:rSrqYIiQSOv6G0QxSYVU6DtW7b3PT7XNF/1pWx68M1g=,tag:2m6YXewARCcyXTjZGimodQ==,type:str] + token: ENC[AES256_GCM,data:DzLwUiv5JH/S6OBrzgNp0NO5U/7w0Pq2YtQ7uOAfg7Iw90qzGlzc8CqzlQOw0jHv91LzCUgjpeZn9QP93Dgprw==,iv:T6rqz1HmdKATl+8ov5qclhAo/NzHQTIN6eRSiCEyiZU=,tag:39VZ8N96NEXgvXTPQ/vvBA==,type:str] + robot-password: ENC[AES256_GCM,data:OeITzLUpgj03MyQ2n+SYgwykcw==,iv:9ZdbQW4ZAtqmGEiR4KBsziRXMAoHGHcBYXiwjep5H2A=,tag:4eGKJTfn0+NARz1k7j8jXA==,type:str] + robot-user: ENC[AES256_GCM,data:Cy2ilSDCVNaxES0N,iv:fs/fu9OOhNPDwgnw1xV8SPtbzlbDkbynvL4Z5L6aO2o=,tag:n2+BeAx8HLtD4rFbKMdUqw==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1g786w8t40g9y29l33rfd4jqlwhrgsxsc7ped6uju60k54j0q3enql3kfve + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBSaVJMaEQvSGw1Y3h1WXVi + TGFnM1dTaHRaUEtOaVl5anpKazZjbVRpckIwCi9Bc1BueHYvMUljdWRrZFVpQldJ + bkRVMWJIdmdubGJXL2NOeUloV3RXQ0EKLS0tIEZadWZJcytYZW5ZdmtFbGcrUjZN + SGkvdTBIM1hxMTREL1JDT0NCcXo0ckUKW3fJ509OnrgKxLvWHALLvA4Ha91pN+GM + JRdKi8tSlyVEpFgumeOsan3fIrsi9urgqYjMuW5e6ApMZ8/2522MWA== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2023-12-12T18:16:51Z" + mac: ENC[AES256_GCM,data:m3jplww3Pv4UnCIdyJ2DEkA95U5+Ovddk2DhEG7KhVQ/PTtG31UFCHdoBIgHf0ZcYmAYRLeyvUfRmi19I+h0h1eDrlbTwpFSYByunLvJZqk2Dp9WWCyGnoJ2Wh/dzW/pcLRSJCZWPxUGPR48cyZTlzg+iZHm760kbXQmzAE+ZHc=,iv:xxyyd9IaTtd+Te+2T156/c+842GVeOoPEs+IBZibWrk=,tag:EruEq5+6kU+nme9NydF/bg==,type:str] + pgp: [] + encrypted_regex: ^(data|stringData)$ + version: 3.8.1 diff --git a/kubernetes/bootstrap/readme.md b/kubernetes/bootstrap/readme.md new file mode 100644 index 0000000..200a0fe --- /dev/null +++ b/kubernetes/bootstrap/readme.md @@ -0,0 +1,28 @@ +# Bootstrap + +## Flux + +### Install Flux + +```sh +kubectl apply --server-side --kustomize ./kubernetes/bootstrap/flux +``` + +### Apply Cluster Configuration + +_These cannot be applied with `kubectl` in the regular fashion because they are encrypted with sops._ + +```sh +sops --decrypt kubernetes/bootstrap/flux/age-key.sops.yaml | kubectl apply -f - +sops --decrypt kubernetes/bootstrap/flux/git-deploy-key.sops.yaml | kubectl apply -f - +sops --decrypt kubernetes/bootstrap/hcloud.sops.yaml | kubectl apply -f - +sops --decrypt kubernetes/flux/vars/cluster-secrets.sops.yaml | kubectl apply -f - +kubectl apply -f kubernetes/flux/vars/cluster-settings.yaml +kubectl apply -k kubernetes/apps/monitoring/kube-prometheus-stack/crds/ +``` + +### Kick off Flux applying this repository + +```sh +kubectl apply --server-side --kustomize ./kubernetes/flux/config +``` diff --git a/kubernetes/flux/cluster-apps.yaml b/kubernetes/flux/cluster-apps.yaml new file mode 100644 index 0000000..63f4801 --- /dev/null +++ b/kubernetes/flux/cluster-apps.yaml @@ -0,0 +1,45 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps + namespace: flux-system +spec: + interval: 10m + path: ./kubernetes/apps + prune: true + sourceRef: + kind: GitRepository + name: valinor + decryption: + provider: 
sops + secretRef: + name: sops-age + postBuild: + substituteFrom: + - kind: ConfigMap + name: cluster-settings + - kind: Secret + name: cluster-secrets + patches: + - patch: |- + apiVersion: kustomize.toolkit.fluxcd.io/v1 + kind: Kustomization + metadata: + name: not-used + spec: + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substituteFrom: + - kind: ConfigMap + name: cluster-settings + - kind: Secret + name: cluster-secrets + target: + group: kustomize.toolkit.fluxcd.io + kind: Kustomization + labelSelector: substitution.flux.home.arpa/disabled notin (true) diff --git a/kubernetes/flux/config/cluster.yaml b/kubernetes/flux/config/cluster.yaml new file mode 100644 index 0000000..87829e0 --- /dev/null +++ b/kubernetes/flux/config/cluster.yaml @@ -0,0 +1,46 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/gitrepository-source-v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: valinor + namespace: flux-system +spec: + interval: 30m + ref: + branch: main + secretRef: + name: git-deploy-key + url: ssh://git.hsn.dev/jahanson/valinor + ignore: | + # exclude all + /* + # include gitops dirs + !/kubernetes +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster + namespace: flux-system +spec: + interval: 30m + path: ./kubernetes/flux + prune: true + wait: false + sourceRef: + kind: GitRepository + name: valinor + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substituteFrom: + - kind: ConfigMap + name: cluster-settings + optional: false + - kind: Secret + name: cluster-secrets + optional: false diff --git a/kubernetes/flux/config/flux.yaml b/kubernetes/flux/config/flux.yaml new file mode 100644 index 0000000..6463b2b --- /dev/null +++ b/kubernetes/flux/config/flux.yaml @@ -0,0 +1,125 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/ocirepository-source-v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: flux-manifests + namespace: flux-system +spec: + interval: 10m + url: oci://ghcr.io/fluxcd/flux-manifests + ref: + tag: v2.2.2 +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: flux + namespace: flux-system +spec: + interval: 10m + path: ./ + prune: true + wait: true + sourceRef: + kind: OCIRepository + name: flux-manifests + patches: + - patch: | + $patch: delete + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: not-used + target: + group: networking.k8s.io + version: v1 + kind: NetworkPolicy + # Increase the number of reconciliations that can be performed in parallel and bump the resources limits + # https://fluxcd.io/flux/cheatsheets/bootstrap/#increase-the-number-of-workers + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --concurrent=8 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --kube-api-qps=500 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --kube-api-burst=1000 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --requeue-dependency=5s + target: + kind: Deployment 
+ name: "(kustomize-controller|helm-controller|source-controller)" + - patch: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: not-used + spec: + template: + spec: + containers: + - name: manager + resources: + limits: + memory: 2Gi + target: + kind: Deployment + name: "(kustomize-controller|helm-controller|source-controller)" + # Enable drift detection for HelmReleases and set the log level to debug + # https://fluxcd.io/flux/components/helm/helmreleases/#drift-detection + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --feature-gates=DetectDrift=true,CorrectDrift=false + - op: add + path: /spec/template/spec/containers/0/args/- + value: --log-level=debug + target: + kind: Deployment + name: helm-controller + # Enable Helm near OOM detection + # https://fluxcd.io/flux/cheatsheets/bootstrap/#enable-helm-near-oom-detection + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --feature-gates=OOMWatch=true + - op: add + path: /spec/template/spec/containers/0/args/- + value: --oom-watch-memory-threshold=95 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --oom-watch-interval=500ms + target: + kind: Deployment + name: helm-controller + # Enable notifications for 3rd party Flux controllers such as tf-controller + # https://fluxcd.io/flux/cheatsheets/bootstrap/#enable-notifications-for-third-party-controllers + - patch: | + - op: add + path: /spec/versions/1/schema/openAPIV3Schema/properties/spec/properties/eventSources/items/properties/kind/enum/- + value: Terraform + target: + kind: CustomResourceDefinition + name: alerts.notification.toolkit.fluxcd.io + - patch: | + - op: add + path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/resources/items/properties/kind/enum/- + value: Terraform + target: + kind: CustomResourceDefinition + name: receivers.notification.toolkit.fluxcd.io + - patch: | + - op: add + path: /rules/- + value: + apiGroups: ["infra.contrib.fluxcd.io"] + resources: ["*"] + verbs: ["*"] + target: + kind: ClusterRole + name: crd-controller-flux-system diff --git a/kubernetes/flux/config/kustomization.yaml b/kubernetes/flux/config/kustomization.yaml new file mode 100644 index 0000000..cdf235b --- /dev/null +++ b/kubernetes/flux/config/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./flux.yaml + - ./cluster.yaml diff --git a/kubernetes/flux/repositories/helm/authentik.yaml b/kubernetes/flux/repositories/helm/authentik.yaml new file mode 100644 index 0000000..caa4a7b --- /dev/null +++ b/kubernetes/flux/repositories/helm/authentik.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrepository-source-v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: authentik + namespace: flux-system +spec: + interval: 1h + url: https://charts.goauthentik.io diff --git a/kubernetes/flux/repositories/helm/bitnami.yaml b/kubernetes/flux/repositories/helm/bitnami.yaml new file mode 100644 index 0000000..fb7cde5 --- /dev/null +++ b/kubernetes/flux/repositories/helm/bitnami.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: bitnami + namespace: 
flux-system +spec: + interval: 30m + url: https://charts.bitnami.com/bitnami + timeout: 3m diff --git a/kubernetes/flux/repositories/helm/bjw-s.yaml b/kubernetes/flux/repositories/helm/bjw-s.yaml new file mode 100644 index 0000000..bdded62 --- /dev/null +++ b/kubernetes/flux/repositories/helm/bjw-s.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: bjw-s + namespace: flux-system +spec: + interval: 30m + url: https://bjw-s.github.io/helm-charts/ + timeout: 3m diff --git a/kubernetes/flux/repositories/helm/cilium.yaml b/kubernetes/flux/repositories/helm/cilium.yaml new file mode 100644 index 0000000..f5610f6 --- /dev/null +++ b/kubernetes/flux/repositories/helm/cilium.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: cilium + namespace: flux-system +spec: + interval: 30m + url: https://helm.cilium.io + timeout: 3m diff --git a/kubernetes/flux/repositories/helm/cloudnative-pg.yaml b/kubernetes/flux/repositories/helm/cloudnative-pg.yaml new file mode 100644 index 0000000..3711025 --- /dev/null +++ b/kubernetes/flux/repositories/helm/cloudnative-pg.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: cloudnative-pg + namespace: flux-system +spec: + interval: 2h + url: https://cloudnative-pg.github.io/charts diff --git a/kubernetes/flux/repositories/helm/crowdsec.yaml b/kubernetes/flux/repositories/helm/crowdsec.yaml new file mode 100644 index 0000000..7457b55 --- /dev/null +++ b/kubernetes/flux/repositories/helm/crowdsec.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: crowdsec + namespace: flux-system +spec: + interval: 30m + url: https://crowdsecurity.github.io/helm-charts + timeout: 3m diff --git a/kubernetes/flux/repositories/helm/crunchydata.yaml b/kubernetes/flux/repositories/helm/crunchydata.yaml new file mode 100644 index 0000000..54a79d7 --- /dev/null +++ b/kubernetes/flux/repositories/helm/crunchydata.yaml @@ -0,0 +1,12 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: crunchydata + namespace: flux-system +spec: + type: oci + interval: 30m + url: oci://registry.developers.crunchydata.com/crunchydata + timeout: 3m diff --git a/kubernetes/flux/repositories/helm/democratic-csi.yaml b/kubernetes/flux/repositories/helm/democratic-csi.yaml new file mode 100644 index 0000000..94f6e8f --- /dev/null +++ b/kubernetes/flux/repositories/helm/democratic-csi.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrepository-source-v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: democratic-csi + namespace: flux-system +spec: + interval: 30m + url: https://democratic-csi.github.io/charts/ + timeout: 3m diff --git a/kubernetes/flux/repositories/helm/dragonflydb.yaml 
b/kubernetes/flux/repositories/helm/dragonflydb.yaml new file mode 100644 index 0000000..1864e86 --- /dev/null +++ b/kubernetes/flux/repositories/helm/dragonflydb.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: dragonflydb + namespace: flux-system +spec: + type: oci + interval: 30m + url: oci://ghcr.io/dragonflydb/dragonfly/helm diff --git a/kubernetes/flux/repositories/helm/elastic.yaml b/kubernetes/flux/repositories/helm/elastic.yaml new file mode 100644 index 0000000..0519e69 --- /dev/null +++ b/kubernetes/flux/repositories/helm/elastic.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: elastic + namespace: flux-system +spec: + interval: 30m + url: https://helm.elastic.co + timeout: 3m diff --git a/kubernetes/flux/repositories/helm/external-secrets.yaml b/kubernetes/flux/repositories/helm/external-secrets.yaml new file mode 100644 index 0000000..2263ec9 --- /dev/null +++ b/kubernetes/flux/repositories/helm/external-secrets.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: external-secrets + namespace: flux-system +spec: + interval: 1h + url: https://charts.external-secrets.io diff --git a/kubernetes/flux/repositories/helm/fairwinds.yaml b/kubernetes/flux/repositories/helm/fairwinds.yaml new file mode 100644 index 0000000..d72d577 --- /dev/null +++ b/kubernetes/flux/repositories/helm/fairwinds.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: fairwinds + namespace: flux-system +spec: + interval: 30m + url: https://charts.fairwinds.com/stable + timeout: 3m diff --git a/kubernetes/flux/repositories/helm/grafana.yaml b/kubernetes/flux/repositories/helm/grafana.yaml new file mode 100644 index 0000000..5324848 --- /dev/null +++ b/kubernetes/flux/repositories/helm/grafana.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: grafana + namespace: flux-system +spec: + interval: 30m + url: https://grafana.github.io/helm-charts + timeout: 3m diff --git a/kubernetes/flux/repositories/helm/hetzner.yaml b/kubernetes/flux/repositories/helm/hetzner.yaml new file mode 100644 index 0000000..554be37 --- /dev/null +++ b/kubernetes/flux/repositories/helm/hetzner.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: hetzner + namespace: flux-system +spec: + interval: 30m + url: https://charts.hetzner.cloud + timeout: 3m diff --git a/kubernetes/flux/repositories/helm/ingress-nginx.yaml b/kubernetes/flux/repositories/helm/ingress-nginx.yaml new file mode 100644 index 0000000..ca4b65d --- /dev/null +++ b/kubernetes/flux/repositories/helm/ingress-nginx.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: 
$schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: ingress-nginx + namespace: flux-system +spec: + interval: 30m + url: https://kubernetes.github.io/ingress-nginx + timeout: 3m diff --git a/kubernetes/flux/repositories/helm/intel.yaml b/kubernetes/flux/repositories/helm/intel.yaml new file mode 100644 index 0000000..8dee3ba --- /dev/null +++ b/kubernetes/flux/repositories/helm/intel.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrepository-source-v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: intel + namespace: flux-system +spec: + interval: 30m + url: https://intel.github.io/helm-charts + timeout: 3m diff --git a/kubernetes/flux/repositories/helm/jahanson.yaml b/kubernetes/flux/repositories/helm/jahanson.yaml new file mode 100644 index 0000000..5a692e1 --- /dev/null +++ b/kubernetes/flux/repositories/helm/jahanson.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: jahanson + namespace: flux-system +spec: + interval: 30m + url: https://jahanson.github.io/cert-manager-webhook-dnsimple/ + timeout: 3m diff --git a/kubernetes/flux/repositories/helm/jetstack.yaml b/kubernetes/flux/repositories/helm/jetstack.yaml new file mode 100644 index 0000000..4a089fc --- /dev/null +++ b/kubernetes/flux/repositories/helm/jetstack.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: jetstack + namespace: flux-system +spec: + interval: 30m + url: https://charts.jetstack.io + timeout: 3m diff --git a/kubernetes/flux/repositories/helm/kubernetes-sigs-external-dns.yaml b/kubernetes/flux/repositories/helm/kubernetes-sigs-external-dns.yaml new file mode 100644 index 0000000..54422de --- /dev/null +++ b/kubernetes/flux/repositories/helm/kubernetes-sigs-external-dns.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: kubernetes-sigs-external-dns + namespace: flux-system +spec: + interval: 30m + url: https://kubernetes-sigs.github.io/external-dns + timeout: 3m diff --git a/kubernetes/flux/repositories/helm/kubernetes-sigs-metrics-server.yaml b/kubernetes/flux/repositories/helm/kubernetes-sigs-metrics-server.yaml new file mode 100644 index 0000000..737039e --- /dev/null +++ b/kubernetes/flux/repositories/helm/kubernetes-sigs-metrics-server.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: kubernetes-sigs-metrics-server + namespace: flux-system +spec: + interval: 30m + url: https://kubernetes-sigs.github.io/metrics-server/ + timeout: 3m diff --git a/kubernetes/flux/repositories/helm/kubernetes-sigs-nfd.yaml b/kubernetes/flux/repositories/helm/kubernetes-sigs-nfd.yaml new file mode 100644 index 0000000..5040aff --- /dev/null +++ b/kubernetes/flux/repositories/helm/kubernetes-sigs-nfd.yaml @@ -0,0 +1,11 @@ +--- 
+# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrepository-source-v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: kubernetes-sigs-nfd + namespace: flux-system +spec: + interval: 30m + url: https://kubernetes-sigs.github.io/node-feature-discovery/charts + timeout: 3m diff --git a/kubernetes/flux/repositories/helm/kustomization.yaml b/kubernetes/flux/repositories/helm/kustomization.yaml new file mode 100644 index 0000000..ed72665 --- /dev/null +++ b/kubernetes/flux/repositories/helm/kustomization.yaml @@ -0,0 +1,34 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - authentik.yaml + - bitnami.yaml + - bjw-s.yaml + - cilium.yaml + - cloudnative-pg.yaml + - crowdsec.yaml + - crunchydata.yaml + - democratic-csi.yaml + - dragonflydb.yaml + - elastic.yaml + - external-secrets.yaml + - fairwinds.yaml + - grafana.yaml + - hetzner.yaml + - ingress-nginx.yaml + - intel.yaml + - jahanson.yaml + - jetstack.yaml + - kubernetes-sigs-nfd.yaml +# - kubernetes-sigs-descheduler.yaml + - kyverno.yaml + - kubernetes-sigs-metrics-server.yaml + - kubernetes-sigs-external-dns.yaml + - piraeus.yaml + - postfinance.yaml + - prometheus-community.yaml + - rook-ceph.yaml + - stakater.yaml +# - weave-gitops.yaml diff --git a/kubernetes/flux/repositories/helm/kyverno.yaml b/kubernetes/flux/repositories/helm/kyverno.yaml new file mode 100644 index 0000000..a3b88c4 --- /dev/null +++ b/kubernetes/flux/repositories/helm/kyverno.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: kyverno + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://ghcr.io/kyverno/charts diff --git a/kubernetes/flux/repositories/helm/piraeus.yaml b/kubernetes/flux/repositories/helm/piraeus.yaml new file mode 100644 index 0000000..8ea2897 --- /dev/null +++ b/kubernetes/flux/repositories/helm/piraeus.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: piraeus + namespace: flux-system +spec: + interval: 30m + url: https://piraeus.io/helm-charts/ + timeout: 3m diff --git a/kubernetes/flux/repositories/helm/postfinance.yaml b/kubernetes/flux/repositories/helm/postfinance.yaml new file mode 100644 index 0000000..cdaf666 --- /dev/null +++ b/kubernetes/flux/repositories/helm/postfinance.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: postfinance + namespace: flux-system +spec: + interval: 30m + url: https://postfinance.github.io/kubelet-csr-approver + timeout: 3m diff --git a/kubernetes/flux/repositories/helm/prometheus-community.yaml b/kubernetes/flux/repositories/helm/prometheus-community.yaml new file mode 100644 index 0000000..23973fc --- /dev/null +++ b/kubernetes/flux/repositories/helm/prometheus-community.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: 
+ name: prometheus-community + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://ghcr.io/prometheus-community/charts diff --git a/kubernetes/flux/repositories/helm/rook-ceph.yaml b/kubernetes/flux/repositories/helm/rook-ceph.yaml new file mode 100644 index 0000000..b8003e6 --- /dev/null +++ b/kubernetes/flux/repositories/helm/rook-ceph.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: rook-ceph + namespace: flux-system +spec: + interval: 30m + url: https://charts.rook.io/release + timeout: 3m diff --git a/kubernetes/flux/repositories/helm/stakater.yaml b/kubernetes/flux/repositories/helm/stakater.yaml new file mode 100644 index 0000000..e8a14f7 --- /dev/null +++ b/kubernetes/flux/repositories/helm/stakater.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://ks.hsn.dev/source.toolkit.fluxcd.io/helmrepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: stakater + namespace: flux-system +spec: + interval: 30m + url: https://stakater.github.io/stakater-charts + timeout: 3m diff --git a/kubernetes/flux/repositories/kustomization.yaml b/kubernetes/flux/repositories/kustomization.yaml new file mode 100644 index 0000000..219c6e2 --- /dev/null +++ b/kubernetes/flux/repositories/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: flux-system +resources: + - ./helm diff --git a/kubernetes/flux/vars/cluster-secrets.sops.yaml b/kubernetes/flux/vars/cluster-secrets.sops.yaml new file mode 100644 index 0000000..d4d03f5 --- /dev/null +++ b/kubernetes/flux/vars/cluster-secrets.sops.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: Secret +metadata: + name: cluster-secrets + namespace: flux-system +stringData: + SECRET_PUSHOVER_USERKEY: ENC[AES256_GCM,data:MeaD8iRbieNr5W9PqpjZ5ywdbMijX9nYQJbbVj6s,iv:42QymFlr47PYNjorJc5tgDjzZ9WHPVIk543GGChalVM=,tag:qyk1chI/IpPdfyEMdOqsbQ==,type:str] + SECRET_PUSHOVER_ALERT_MANAGER_APIKEY: ENC[AES256_GCM,data:4+9e/tWQBszoPakAo+1vNhWsdKz8qfoioeUz+dTb,iv:sY4dkzMEmvi8kCLesBiknmoYHWq3uqXpWs5Y4FeFSuk=,tag:rPxH+5m6rPiSnhm2JrrT4w==,type:str] + SECRET_HEALTHCHECKS_WEBHOOK: ENC[AES256_GCM,data:a6hjTy2HRy7s2+KHxfop8077CgAzzILCF/g5I9TIXdhRiziUrLpJVzC0mqNmfdooJsZyErrJ9ihamFKLFoK8S/PmD5IgWuZu,iv:l5JTxmiWct5nr7eJM/Rtl7AclhCoIQ4KW6nJK6Slhg0=,tag:K5yGxYBTNSSoxYJt8Kmhyw==,type:str] + SECRET_CLOUDFLARE_ACCOUNT_ID: ENC[AES256_GCM,data:X63a7aMBMyd9Be6bik0knOyMXnYx/Kg3SoOrG0bkAHU=,iv:POcU1kIRWekrzUdzqPopKDovviK+fMZRVuZVWp9Vuuc=,tag:n9UamxITJCiLbH37Ta2lTg==,type:str] + K8S_SERVICE_ENDPOINT: ENC[AES256_GCM,data:mons7ADYFZv+PjnGpAg=,iv:vRkH6yn+nr2azS+kWOCG9rayB/X/02OlmQVhaIsJDkQ=,tag:RyPwMRcWgQV2kKFa6YQtMg==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1g786w8t40g9y29l33rfd4jqlwhrgsxsc7ped6uju60k54j0q3enql3kfve + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBzNUJOSGgzempjQS9ZQVlo + citDR1Vta2ZHWHJYNElySzA4a1ZIdktQREhFCnZyQlYvYlhRbDlwYVkxZmZJYm5S + TEU0c2R4WkFWZGNEcjYyTHE3MmVLT0kKLS0tIHZwQWNGYks1alNnYVAyOWZsL1J2 + dDhWMDZYait3UzNRZy9oVk85cHBPdEUKa7e22jHlW1chaLDKBB1in8ZTFnfKMXug + QJQ/9z6z/RjmnnFam2FWg++Xg2A8LQ7XTZcfR97csf59DQ/xwu7sVw== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2024-01-02T23:49:24Z" + 
mac: ENC[AES256_GCM,data:OZzwxpqsXk2tfWmDRjWdmRZaP1pc0HRAuxt1om1Q0yN0R7LTafyRaKdWRdDYi7g76/C8qvSwgT72If5u+M10Q/KKNDy/PavDKn9yMHLkYkdmnXCbyxuWCFqlDoVoOQyPG3H4+ahZkYDnXwzcScR8klTZxdG2n5xO6FJc3PKJFlk=,iv:f2d0J2vG3amQ5UCowNU4U9X+siuWq43uq3nLndoy76A=,tag:ZbfWo82UhiR1AOh93WkpLQ==,type:str] + pgp: [] + encrypted_regex: ^(data|stringData)$ + version: 3.8.1 diff --git a/kubernetes/flux/vars/cluster-settings.yaml b/kubernetes/flux/vars/cluster-settings.yaml new file mode 100644 index 0000000..0379794 --- /dev/null +++ b/kubernetes/flux/vars/cluster-settings.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cluster-settings + namespace: flux-system +data: + CLUSTER_NAME: valinor diff --git a/kubernetes/tools/kbench.yaml b/kubernetes/tools/kbench.yaml new file mode 100644 index 0000000..fa201f8 --- /dev/null +++ b/kubernetes/tools/kbench.yaml @@ -0,0 +1,48 @@ +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: kbench-pvc +spec: + storageClassName: ceph-block + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 33Gi +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: kbench +spec: + template: + metadata: + labels: + kbench: fio + spec: + containers: + - name: kbench + image: yasker/kbench:latest + imagePullPolicy: Always + env: + # - name: QUICK_MODE # for debugging + # value: "1" + - name: FILE_NAME + value: "/volume/test" + - name: SIZE + value: "30G" # must be 10% smaller than the PVC size because the filesystem also takes space + - name: CPU_IDLE_PROF + value: "disabled" # must be "enabled" or "disabled" + volumeMounts: + - name: vol + mountPath: /volume/ + # volumeDevices: + # - name: vol + # devicePath: /volume/test + restartPolicy: Never + volumes: + - name: vol + persistentVolumeClaim: + claimName: kbench-pvc + backoffLimit: 0 diff --git a/kubernetes/tools/wipe-rook-fast.yaml b/kubernetes/tools/wipe-rook-fast.yaml new file mode 100644 index 0000000..84f3e1a --- /dev/null +++ b/kubernetes/tools/wipe-rook-fast.yaml @@ -0,0 +1,108 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: disk-wipe-nessa +spec: + restartPolicy: Never + nodeName: nessa + containers: + - name: disk-wipe + image: ghcr.io/onedr0p/alpine:3.19.0@sha256:51632d23e56ae28a34f8e90df6fe8d02730b5462697ae52e8b01ad6484497819 + securityContext: + privileged: true + resources: {} + env: + - name: CEPH_DISK + value: "/dev/nvme0n1" + command: + [ + "/bin/sh", + "-c" + ] + args: + - apk add --no-cache sgdisk util-linux parted; + sgdisk --zap-all $CEPH_DISK; + blkdiscard $CEPH_DISK; + dd if=/dev/zero bs=1M count=1000 oflag=direct of=$CEPH_DISK; + partprobe $CEPH_DISK; + volumeMounts: + - mountPath: /mnt/host_var + name: host-var + volumes: + - name: host-var + hostPath: + path: /var + +--- +apiVersion: v1 +kind: Pod +metadata: + name: disk-wipe-nienna +spec: + restartPolicy: Never + nodeName: nienna + containers: + - name: disk-wipe + image: ghcr.io/onedr0p/alpine:3.19.0@sha256:51632d23e56ae28a34f8e90df6fe8d02730b5462697ae52e8b01ad6484497819 + securityContext: + privileged: true + resources: {} + env: + - name: CEPH_DISK + value: "/dev/nvme0n1" + command: + [ + "/bin/sh", + "-c" + ] + args: + - apk add --no-cache sgdisk util-linux parted; + sgdisk --zap-all $CEPH_DISK; + blkdiscard $CEPH_DISK; + dd if=/dev/zero bs=1M count=1000 oflag=direct of=$CEPH_DISK; + partprobe $CEPH_DISK; + volumeMounts: + - mountPath: /mnt/host_var + name: host-var + volumes: + - name: host-var + hostPath: + path: /var + +--- +apiVersion: v1 +kind: Pod +metadata: + name: disk-wipe-orome +spec: + 
restartPolicy: Never + nodeName: orome + containers: + - name: disk-wipe + image: ghcr.io/onedr0p/alpine:3.19.0@sha256:51632d23e56ae28a34f8e90df6fe8d02730b5462697ae52e8b01ad6484497819 + securityContext: + privileged: true + resources: {} + env: + - name: CEPH_DISK + value: "/dev/nvme0n1" + + command: + [ + "/bin/sh", + "-c" + ] + args: + - apk add --no-cache sgdisk util-linux parted; + sgdisk --zap-all $CEPH_DISK; + blkdiscard $CEPH_DISK; + dd if=/dev/zero bs=1M count=1000 oflag=direct of=$CEPH_DISK; + partprobe $CEPH_DISK; + volumeMounts: + - mountPath: /mnt/host_var + name: host-var + volumes: + - name: host-var + hostPath: + path: /var diff --git a/kubernetes/tools/wipe-rook-slow.yaml b/kubernetes/tools/wipe-rook-slow.yaml new file mode 100644 index 0000000..0b9e2ac --- /dev/null +++ b/kubernetes/tools/wipe-rook-slow.yaml @@ -0,0 +1,105 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: disk-wipe-slow-nessa +spec: + restartPolicy: Never + nodeName: nessa + containers: + - name: disk-wipe + image: ghcr.io/onedr0p/alpine:3.19.0@sha256:51632d23e56ae28a34f8e90df6fe8d02730b5462697ae52e8b01ad6484497819 + securityContext: + privileged: true + resources: {} + env: + - name: CEPH_DISK + value: "/dev/sda" + command: + [ + "/bin/sh", + "-c" + ] + args: + - apk add --no-cache sgdisk util-linux parted; + sgdisk --zap-all $CEPH_DISK; + dd if=/dev/zero bs=1M count=1000 oflag=direct of=$CEPH_DISK; + partprobe $CEPH_DISK; + volumeMounts: + - mountPath: /mnt/host_var + name: host-var + volumes: + - name: host-var + hostPath: + path: /var + +--- +apiVersion: v1 +kind: Pod +metadata: + name: disk-wipe-slow-nienna +spec: + restartPolicy: Never + nodeName: nienna + containers: + - name: disk-wipe + image: ghcr.io/onedr0p/alpine:3.19.0@sha256:51632d23e56ae28a34f8e90df6fe8d02730b5462697ae52e8b01ad6484497819 + securityContext: + privileged: true + resources: {} + env: + - name: CEPH_DISK + value: "/dev/sda" + command: + [ + "/bin/sh", + "-c" + ] + args: + - apk add --no-cache sgdisk util-linux parted; + sgdisk --zap-all $CEPH_DISK; + dd if=/dev/zero bs=1M count=1000 oflag=direct of=$CEPH_DISK; + partprobe $CEPH_DISK; + volumeMounts: + - mountPath: /mnt/host_var + name: host-var + volumes: + - name: host-var + hostPath: + path: /var + +--- +apiVersion: v1 +kind: Pod +metadata: + name: disk-wipe-slow-orome +spec: + restartPolicy: Never + nodeName: orome + containers: + - name: disk-wipe + image: ghcr.io/onedr0p/alpine:3.19.0@sha256:51632d23e56ae28a34f8e90df6fe8d02730b5462697ae52e8b01ad6484497819 + securityContext: + privileged: true + resources: {} + env: + - name: CEPH_DISK + value: "/dev/sda" + + command: + [ + "/bin/sh", + "-c" + ] + args: + - apk add --no-cache sgdisk util-linux parted; + sgdisk --zap-all $CEPH_DISK; + dd if=/dev/zero bs=1M count=1000 oflag=direct of=$CEPH_DISK; + partprobe $CEPH_DISK; + volumeMounts: + - mountPath: /mnt/host_var + name: host-var + volumes: + - name: host-var + hostPath: + path: /var diff --git a/renovate.json5 b/renovate.json5 new file mode 100644 index 0000000..0d49662 --- /dev/null +++ b/renovate.json5 @@ -0,0 +1,229 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "platform": "gitea", + "endpoint": "https://git.hsn.dev", + "extends": [ + "config:recommended", + "local>jahanson/valinor//.renovate/customManagers.json5" + ], + "repositories": [], + "flux": { + "fileMatch": [ + "kubernetes/.+\\.ya?ml$" + ] + }, + "helm-values": { + "fileMatch": [ + "kubernetes/.+\\.ya?ml$" + ] + }, + "kubernetes": { + "fileMatch": [ + "kubernetes/.+\\.ya?ml$" + ] + 
+  "regexManagers": [
+    {
+      "description": [
+        "Process CRD dependencies - Chart and Github Release are the same version"
+      ],
+      "fileMatch": [
+        "kubernetes/.+\\.ya?ml$"
+      ],
+      "matchStrings": [
+        "# renovate: registryUrl=(?<registryUrl>\\S+) chart=(?<depName>\\S+)\n.*?(?<currentValue>[^-\\s]*)\n",
+      ],
+      "datasourceTemplate": "helm"
+    },
+    {
+      "description": [
+        "Generic Docker image Regex manager"
+      ],
+      "fileMatch": [
+        "infrastructure/.+\\.ya?ml$",
+        "infrastructure/.+\\.tf$"
+      ],
+      "matchStrings": [
+        "# renovate: docker-image( versioning=(?<versioning>.*=?))?\n .*[:|=] \"?(?<depName>.*?):(?<currentValue>[^\"\n]*=?)\"?",
+      ],
+      "datasourceTemplate": "docker",
+      "versioningTemplate": "{{#if versioning}}{{{versioning}}}{{else}}semver{{/if}}"
+    },
+    {
+      "description": [
+        "Raw GitHub URL Regex manager"
+      ],
+      "fileMatch": [
+        "infrastructure/.+\\.ya?ml$",
+        "kubernetes/.+\\.ya?ml$"
+      ],
+      "matchStrings": [
+        "https:\\/\\/raw.githubusercontent.com\\/(?<depName>[\\w\\d\\-_]+\\/[\\w\\d\\-_]+)\\/(?<currentValue>[\\w\\d\\.\\-_]+)\\/.*",
+      ],
+      "datasourceTemplate": "github-releases",
+      "versioningTemplate": "semver"
+    }
+  ],
+  "packageRules": [
+    {
+      "registryAliases": {
+        "ghcr.io": "hub.hsn.dev/ghcr.io",
+        "docker.io": "hub.hsn.dev/docker.io"
+      },
+      "description": "Use custom versioning for Vector",
+      "matchDatasources": [
+        "docker"
+      ],
+      "matchPackageNames": [
+        "docker.io/timberio/vector"
+      ],
+      "versioning": "regex:^(?<major>\\d+)\\.(?<minor>\\d+)\\.(?<patch>\\d+)-(?<compatibility>.*)$"
+    },
+    {
+      "description": "Use custom versioning for Minio",
+      "matchDatasources": [
+        "docker"
+      ],
+      "versioning": "regex:^RELEASE\\.(?<major>\\d+)-(?<minor>\\d+)-(?<patch>\\d+)T.*Z(-(?<compatibility>.*))?$",
+      "matchPackageNames": [
+        "quay.io/minio/minio"
+      ]
+    },
+    {
+      "description": "Flux Group",
+      "groupName": "Flux",
+      "matchPackagePatterns": [
+        "^flux",
+        "^ghcr.io/fluxcd/"
+      ],
+      "matchDatasources": [
+        "docker",
+        "github-tags"
+      ],
+      "versioning": "semver",
+      "group": {
+        "commitMessageTopic": "{{{groupName}}} group",
+      },
+      "separateMinorPatch": true,
+    },
+    {
+      "description": "Mastodon images",
+      "groupName": "Mastodon",
+      "matchPackagePatterns": [
+        "mastodon",
+        "^ghcr.io/mastodon/"
+      ],
+      "matchDatasources": [
+        "docker",
+        "github-tags"
+      ],
+      "versioning": "semver",
+      "group": {
+        "commitMessageTopic": "{{{groupName}}} group",
+      },
+      "separateMinorPatch": true,
+    },
+    {
+      "description": "1Password Connect images",
+      "groupName": "1password-connect",
+      "matchPackageNames": [
+        "docker.io/1password/connect-sync",
+        "docker.io/1password/connect-api",
+      ],
+      "matchDatasources": [
+        "docker"
+      ],
+      "group": {
+        "commitMessageTopic": "{{{groupName}}} group"
+      },
+      "separateMinorPatch": true,
+    },
+    {
+      "description": "Rook-Ceph image and chart",
+      "groupName": "Rook Ceph",
+      "matchPackagePatterns": [
+        "rook.ceph"
+      ],
+      "matchDatasources": [
+        "docker",
+        "helm"
+      ],
+      "group": {
+        "commitMessageTopic": "{{{groupName}}} group"
+      },
+      "separateMinorPatch": true,
+    },
+    {
+      "description": "Cilium image and chart",
+      "groupName": "cilium",
+      "matchPackageNames": [
+        "quay.io/cilium/cilium",
+        "quay.io/cilium/operator-generic",
+        "cilium",
+      ],
+      "matchDatasources": [
+        "helm",
+        "docker"
+      ],
+      "group": {
+        "commitMessageTopic": "{{{groupName}}} group"
+      },
+      "separateMinorPatch": true,
+    },
+    {
+      "description": "External Snapshotter charts",
+      "groupName": "External Snapshotter",
+      "matchPackageNames": [
+        "snapshot-controller",
+        "snapshot-validation-webhook"
+      ],
+      "matchDatasources": [
+        "helm"
+      ],
+      "group": {
+        "commitMessageTopic": "{{{groupName}}} group"
+      },
+      "separateMinorPatch": true
+    },
+    {
+      "description": "Thanos image and chart - versions do not match",
+      "groupName": "Thanos",
+      "matchPackagePatterns": [
+        "quay.io/thanos/thanos",
+        "thanos"
+      ],
+      "matchDatasources": [
+        "docker",
+        "github-releases",
+        "helm"
+      ],
+      "matchUpdateTypes": [
+        "minor",
+        "patch"
+      ],
+      "group": {
+        "commitMessageTopic": "{{{groupName}}} group"
+      },
+    },
+    {
+      "description": "Vector image and chart - versions do not match",
+      "groupName": "Vector",
+      "matchPackagePatterns": [
+        "vector"
+      ],
+      "matchDatasources": [
+        "docker",
+        "github-releases",
+        "helm"
+      ],
+      "matchUpdateTypes": [
+        "minor",
+        "patch"
+      ],
+      "group": {
+        "commitMessageTopic": "{{{groupName}}} group"
+      },
+    },
+    // Version strategies
+  ]
+}
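
Note on the custom regexManagers in renovate.json5 above: they only act on manifest lines that carry a matching "# renovate:" annotation comment. The following is a minimal sketch of the annotation shapes those matchStrings are written to pick up; the image, chart, URL, and version values here are hypothetical placeholders, not taken from this repository.

    # renovate: docker-image versioning=semver
      image: "docker.io/timberio/vector:0.34.0-alpine"

    # renovate: registryUrl=https://charts.example.org chart=example-chart
    version: 1.2.3

The first pair of lines is the shape the "Generic Docker image Regex manager" expects (docker datasource; the tag after the colon becomes currentValue), and the second pair is the shape the CRD-dependency manager expects (helm datasource; the version on the line following the comment becomes currentValue).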