Compare commits: 9c7004417e ... c1b665fac2 (44 commits)
Commits:
c1b665fac2
761690ba5e
cb2e0d24c8
bc687b28fd
6c6ea27957
1166d4d687
ec4f619367
d870c79589
96179f13a2
2c348267c5
8222c32fe0
01f3eaa9fa
c51bd020bd
1f87e3c3db
20a2e63b30
8ed33b3671
2de07ac885
f1c79adc59
97829c6809
14287e4cad
7a72a530a7
c282512a8b
0d547458d3
ca43a3f4b3
851884bd94
2a06b673fb
dbb411bc42
53fc1d997f
270896cbd6
b64647cdc2
7f096e8b16
fd95d435df
191678bc36
8a369a96f0
4332d24615
6a3b358f26
139260eec1
af097c7dd3
3668207a96
50833f2dde
fddcb0198d
b49ed58d67
366747cfd1
5ae9e7a310
34 changed files with 249 additions and 185 deletions
.gitignore (vendored, +3)

@@ -24,3 +24,6 @@ omniconfig.yaml
 *.pem
 *.secrets
 config.xml
+
+# syncthing
+**/*sync-conflict*
@@ -7,7 +7,11 @@
       "automerge": true,
       "automergeType": "branch",
       "matchUpdateTypes": ["digest"],
-      "matchPackagePrefixes": ["ghcr.io/onedr0p", "ghcr.io/bjw-s", "ghcr.io/bjw-s-labs"],
+      "matchPackagePrefixes": [
+        "ghcr.io/onedr0p",
+        "ghcr.io/bjw-s",
+        "ghcr.io/bjw-s-labs"
+      ],
       "ignoreTests": true
     },
     {
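The reformatted rule is semantically identical: Renovate automerges digest-only updates, on a branch, for any package whose name starts with one of the three prefixes, skipping tests. As a sketch, an image such as the one below (taken from the home-assistant hunk later in this compare) falls under the "ghcr.io/onedr0p" prefix, so a pure @sha256 digest bump to it would merge without waiting on CI:

    # digest-only bump; matchUpdateTypes: ["digest"] + matchPackagePrefixes applies
    ghcr.io/onedr0p/home-assistant:2024.11.1@sha256:a3dd7577c28771702b21f817ad86600056467c2c7f45d261a1e7241910ddc2e2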
.taskfiles/talos/Taskfile.yaml

@@ -2,135 +2,122 @@
 # yaml-language-server: $schema=https://taskfile.dev/schema.json
 version: "3"

 vars:
   RESOURCES_DIR: "{{.ROOT_DIR}}/.taskfiles/talos/resources"
-  CONTROLLER:
-    sh: talosctl --context {{.cluster}} config info --output json | jq --raw-output '.endpoints[]' | shuf -n 1
-  cluster: theshire

 tasks:
   bootstrap:
     desc: Bootstrap Talos
     summary: |
       Args:
-        cluster: Cluster to run command against (default: theshire)
-        controller: Controller node to run command against (required) (IP/DNS)
-    dotenv: ["{{.RESOURCES_DIR}}/.env"]
-    prompt: Bootstrap Talos on the cluster... continue?
+        CONTROLLER: Controller node to run command against (required)
+    prompt: Bootstrap Talos on the '{{.K8S_CLUSTER}}' cluster... continue?
     cmds:
       - task: bootstrap-etcd
         vars: &vars
-          controller: "{{.controller}}"
+          CONTROLLER: "{{.CONTROLLER}}"
       - task: fetch-kubeconfig
         vars: *vars
       - task: bootstrap-integrations
         vars: *vars
     requires:
       vars:
-        - controller
+        - K8S_CLUSTER
+        - CONTROLLER

   bootstrap-etcd:
     desc: Bootstrap Etcd
-    dotenv: ["{{.RESOURCES_DIR}}/.env"]
-    cmd: until talosctl --context $CLUSTER --nodes {{.controller}} bootstrap; do sleep 10; done
+    cmd: until talosctl --nodes {{.CONTROLLER}} bootstrap; do sleep 10; done
     requires:
       vars:
-        - controller
+        - CONTROLLER

   bootstrap-integrations:
     desc: Bootstrap core integrations needed for Talos
-    dotenv: ["{{.RESOURCES_DIR}}/.env"]
     cmds:
-      - until kubectl --context $CLUSTER wait --for=condition=Ready=False nodes --all --timeout=600s; do sleep 10; done
-      - helmfile --kube-context $CLUSTER --file {{.KUBERNETES_DIR}}/bootstrap/helmfile.yaml apply --skip-diff-on-install --suppress-diff
-      - until kubectl --context $CLUSTER wait --for=condition=Ready nodes --all --timeout=600s; do sleep 10; done
+      - until kubectl wait --for=condition=Ready=False nodes --all --timeout=600s; do sleep 10; done
+      - helmfile --kube-context {{.K8S_CLUSTER}} --file {{.K8S_CLUSTER_DIR}}/bootstrap/helmfile.yaml apply --skip-diff-on-install --suppress-diff
+      - until kubectl wait --for=condition=Ready nodes --all --timeout=600s; do sleep 10; done
+    requires:
+      vars:
+        - K8S_CLUSTER
     preconditions:
       - which helmfile
-      - sh: kubectl config get-contexts $CLUSTER
-        msg: "Kubectl context $CLUSTER not found"
-      - test -f {{.KUBERNETES_DIR}}/bootstrap/helmfile.yaml
+      - sh: kubectl config get-contexts {{.K8S_CLUSTER}}
+        msg: "Kubectl context {{.K8S_CLUSTER}} not found"
+      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/helmfile.yaml

   fetch-kubeconfig:
     desc: Fetch kubeconfig from Talos controllers
-    dotenv: ["{{.RESOURCES_DIR}}/.env"]
-    env: *vars
     cmd: |
-      talosctl --context $CLUSTER kubeconfig --nodes {{ .CONTROLLER }} \
-        --force --force-context-name $CLUSTER {{.ROOT_DIR}}/kubeconfig
-    preconditions:
-      - talosctl config get-contexts | grep $CLUSTER
+      talosctl kubeconfig --nodes {{.CONTROLLER}} \
+        --force --force-context-name {{.K8S_CLUSTER}} {{.K8S_CLUSTER_DIR}}
+    requires:
+      vars:
+        - K8S_CLUSTER

   generate-clusterconfig:
     desc: Generate clusterconfig for Talos
-    dotenv: ["{{.RESOURCES_DIR}}/.env"]
     cmds:
       - talhelper genconfig
-        --env-file {{.KUBERNETES_DIR}}/bootstrap/talos/talenv.sops.yaml
-        --secret-file {{.KUBERNETES_DIR}}/bootstrap/talos/talsecret.sops.yaml
-        --config-file {{.KUBERNETES_DIR}}/bootstrap/talos/talconfig.yaml
-        --out-dir {{.KUBERNETES_DIR}}/bootstrap/talos/clusterconfig
+        --env-file {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talenv.sops.yaml
+        --secret-file {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talsecret.sops.yaml
+        --config-file {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml
+        --out-dir {{.K8S_CLUSTER_DIR}}/bootstrap/talos/clusterconfig
+    requires:
+      vars:
+        - K8S_CLUSTER
     preconditions:
       - which talhelper
-      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/talenv.sops.yaml
-      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/talsecret.sops.yaml
-      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/talconfig.yaml
+      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talenv.sops.yaml
+      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talsecret.sops.yaml
+      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml

   upgrade:
     desc: Upgrade Talos version for a node
-    dotenv: ["{{.RESOURCES_DIR}}/.env"]
     vars:
       TALOS_VERSION:
         sh: |
-          yq -r ".talosVersion" {{.KUBERNETES_DIR}}/bootstrap/talos/talconfig.yaml
+          yq -r ".talosVersion" {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml
       TALOS_IMAGE:
         sh: |
           talhelper genurl installer \
-            --env-file {{.KUBERNETES_DIR}}/bootstrap/talos/talenv.sops.yaml \
-            --config-file {{.KUBERNETES_DIR}}/bootstrap/talos/talconfig.yaml
+            --env-file {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talenv.sops.yaml \
+            --config-file {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml \
+          | grep {{.NODE}} \
+          | awk '{split($0,u," "); print u[2]}'
     cmds:
-      - talosctl --context $CLUSTER upgrade -n {{.node}} --image {{.TALOS_IMAGE }}
+      - talosctl upgrade -n {{.NODE}} --image {{.TALOS_IMAGE }}
     requires:
       vars:
-        - node
+        - K8S_CLUSTER
+        - NODE
     preconditions:
       - which talhelper
-      - talosctl config get-contexts | grep $CLUSTER
-      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/talenv.sops.yaml
-      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/talconfig.yaml
-      - msg: "Talos image could not be determined for {{.node}}"
+      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talenv.sops.yaml
+      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml
+      - msg: "Talos image could not be determined for node={{.NODE}}"
         sh: 'test -n "{{.TALOS_IMAGE}}"'

   upgrade-k8s:
     desc: Upgrade Kubernetes version for a Talos cluster
-    dotenv: ["{{.RESOURCES_DIR}}/.env"]
     silent: false
     vars:
       KUBERNETES_VERSION:
         sh: |
-          yq -r ".kubernetesVersion" {{.KUBERNETES_DIR}}/bootstrap/talos/talconfig.yaml
-      CONTROLPLANE_NODE:
-        sh: |
-          talosctl --context $CLUSTER config info \
-            | grep Endpoints: \
-            | awk '{split($0,u," "); print u[2]}' \
-            | sed -E 's/,//'
+          yq -r ".kubernetesVersion" {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml
+      TALOS_CONTROLLER:
+        sh: talosctl config info --output json | jq --raw-output '.endpoints[]' | shuf -n 1
     cmds:
-      - talosctl upgrade-k8s -n {{.CONTROLPLANE_NODE}} --to {{.KUBERNETES_VERSION}}
-      - until kubectl wait --timeout=5m --for=condition=Complete jobs --all --all-namespaces; do sleep 10; done
+      - talosctl upgrade-k8s -n {{.TALOS_CONTROLLER}} --to {{.KUBERNETES_VERSION}}
+    requires:
+      vars:
+        - K8S_CLUSTER
     preconditions:
-      - which talhelper
-      - talosctl config get-contexts | grep $CLUSTER
-      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/talenv.sops.yaml
-      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/talconfig.yaml
-      - msg: "Kubernetes version could not be determined for cluster $CLUSTER"
-        sh: 'test -n "{{.KUBERNETES_VERSION}}"'
-      - msg: "Control plane node could not be determined for cluster $CLUSTER"
-        sh: 'test -n "{{.CONTROLPLANE_NODE}}"'
+      - talosctl config info &>/dev/null
+      - talosctl --nodes {{.TALOS_CONTROLLER}} get machineconfig &>/dev/null

   apply-clusterconfig:
     desc: Apply clusterconfig for a Talos cluster
-    dotenv: ["{{.RESOURCES_DIR}}/.env"]
     vars:
       CLUSTERCONFIG_FILES:
-        sh: find {{.KUBERNETES_DIR}}/bootstrap/talos/clusterconfig -type f -name '*.yaml' -printf '%f\n'
+        sh: find {{.K8S_CLUSTER_DIR}}/bootstrap/talos/clusterconfig -type f -name '*.yaml' -printf '%f\n'
     cmds:
       - for:
           var: CLUSTERCONFIG_FILES

@@ -138,29 +125,24 @@ tasks:
         vars:
           filename: "{{.ITEM}}"
           hostname: |-
-            {{ trimPrefix (printf "%s-" .cluster) .ITEM | trimSuffix ".yaml" }}
-          dry_run: "{{ .dry_run }}"
-    preconditions:
-      - talosctl config get-contexts | grep $CLUSTER
-      - test -d {{.KUBERNETES_DIR}}/bootstrap/talos/clusterconfig
+            {{ trimPrefix (printf "%s-" .K8S_CLUSTER) .ITEM | trimSuffix ".yaml" }}
+          DRY_RUN: "{{ .DRY_RUN }}"
+    requires:
+      vars:
+        - K8S_CLUSTER

   _apply-machineconfig:
     internal: true
     desc: Apply a single Talos machineConfig to a Talos node
-    dotenv: ["{{.RESOURCES_DIR}}/.env"]
     cmds:
-      - talosctl --context theshire apply-config
+      - talosctl apply-config
         --nodes "{{.hostname}}"
-        --file "{{.KUBERNETES_DIR}}/bootstrap/talos/clusterconfig/{{.filename}}"
-        {{ if eq "true" .dry_run }}--dry-run{{ end }}
-        #--insecure
+        --file "{{.K8S_CLUSTER_DIR}}/bootstrap/talos/clusterconfig/{{.filename}}"
+        {{ if eq "true" .DRY_RUN }}--dry-run{{ end }}
     requires:
       vars:
+        - K8S_CLUSTER
         - hostname
         - filename
     preconditions:
-      - talosctl config get-contexts | grep $CLUSTER
-      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/clusterconfig/{{.filename}}
-
-  version:
-    desc: Show Talos version
-    cmd: talosctl version
+      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/clusterconfig/{{.filename}}
.taskfiles/talos/resources/.env (deleted)

@@ -1 +0,0 @@
-CLUSTER=theshire
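Together with the deleted resources/.env file, this removes the CLUSTER environment variable entirely: the cluster name now flows in as the K8S_CLUSTER Task variable (defaulted in the root Taskfile further down), and nodes are passed explicitly as CONTROLLER/NODE. A sketch of invocations under the new convention, assuming the file is included under a talos: namespace and using the controller IP from talconfig.yaml:

    task talos:bootstrap CONTROLLER=10.1.1.57
    task talos:upgrade NODE=10.1.1.57
    task talos:apply-clusterconfig DRY_RUN=true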
.taskfiles/volsync/Taskfile.yaml

@@ -2,11 +2,10 @@
 # yaml-language-server: $schema=https://taskfile.dev/schema.json
 version: '3'

-# This taskfile is used to manage certain VolSync tasks for a given application, limitations are described below.
+# Taskfile used to manage certain VolSync tasks for a given application, limitations are as follows.
 # 1. Fluxtomization, HelmRelease, PVC, ReplicationSource all have the same name (e.g. plex)
 # 2. ReplicationSource and ReplicationDestination are a Restic repository
-# 3. Applications are deployed as either a Kubernetes Deployment or StatefulSet
-# 4. Each application only has one PVC that is being replicated
+# 3. Each application only has one PVC that is being replicated

 vars:
   VOLSYNC_RESOURCES_DIR: '{{.ROOT_DIR}}/.taskfiles/volsync/resources'

@@ -14,39 +13,34 @@ tasks:
 tasks:

   state-*:
-    desc: Suspend or Resume Volsync
-    summary: |-
-      CLUSTER: Cluster to run command against (default: main)
-      STATE: resume or suspend (required)
+    desc: Suspend or resume Volsync [CLUSTER=main]
     cmds:
-      # - until kubectl wait jobs --all --all-namespaces --for=condition=complete --timeout=5m &>/dev/null; do sleep 5; done
-      - flux {{.STATE}} kustomization volsync
-      - flux --namespace {{.NS}} {{.STATE}} helmrelease volsync
-      - kubectl --namespace {{.NS}} scale deployment --all --replicas {{if eq .STATE "suspend"}}0{{else}}1{{end}}
+      - flux --namespace flux-system {{.STATE}} kustomization volsync
+      - flux --namespace volsync-system {{.STATE}} helmrelease volsync
+      - kubectl --namespace volsync-system scale deployment volsync --replicas {{if eq .STATE "suspend"}}0{{else}}1{{end}}
     vars:
-      NS: '{{.NS | default "volsync-system"}}'
       STATE: '{{index .MATCH 0}}'
     requires:
       vars: [CLUSTER]
     preconditions:
       - '[[ "{{.STATE}}" == "suspend" || "{{.STATE}}" == "resume" ]]'
       - which flux kubectl

   unlock:
-    desc: Unlock all Restic repositories
-    summary: |-
-      CLUSTER: Cluster to run command against (default: main)
-    cmd: >
-      kubectl get replicationsources --all-namespaces --no-headers -A | awk '{print $1, $2}'
-        | xargs --max-procs=2 -l bash -c 'kubectl --namespace "$0" patch --field-manager=flux-client-side-apply replicationsources "$1" --type merge --patch "{\"spec\":{\"restic\":{\"unlock\":\"{{now | unixEpoch}}\"}}}"'
+    desc: Unlock all restic source repos [CLUSTER=main]
+    cmds:
+      - for: { var: SOURCES, split: "\n" }
+        cmd: kubectl --namespace {{splitList "," .ITEM | first}} patch --field-manager=flux-client-side-apply replicationsources {{splitList "," .ITEM | last}} --type merge --patch "{\"spec\":{\"restic\":{\"unlock\":\"{{now | unixEpoch}}\"}}}"
+    vars:
+      SOURCES:
+        sh: kubectl get replicationsources --all-namespaces --no-headers --output=jsonpath='{range .items[*]}{.metadata.namespace},{.metadata.name}{"\n"}{end}'
     requires:
       vars: [CLUSTER]
     preconditions:
       - which kubectl

-  # To run backup jobs in parallel for all replicationsources:
-  #   - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:snapshot APP=$0 NS=$1'
   snapshot:
-    desc: Snapshot an application
-    summary: |-
-      CLUSTER: Cluster to run command against (default: main)
-      NS: Namespace the application is in (default: default)
-      APP: Application to snapshot (required)
+    desc: Snapshot an app [CLUSTER=main] [NS=default] [APP=required]
     cmds:
       - kubectl --namespace {{.NS}} patch replicationsources {{.APP}} --type merge -p '{"spec":{"trigger":{"manual":"{{now | unixEpoch}}"}}}'
       - until kubectl --namespace {{.NS}} get job/{{.JOB}} &>/dev/null; do sleep 5; done

@@ -58,47 +52,34 @@ tasks:
       vars: [CLUSTER, APP]
     preconditions:
       - kubectl --namespace {{.NS}} get replicationsources {{.APP}}
       - which kubectl

-  # To run restore jobs in parallel for all replicationdestinations:
-  #   - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:restore APP=$0 NS=$1'
   restore:
-    desc: Restore an application
-    summary: |-
-      CLUSTER: Cluster to run command against (default: main)
-      NS: Namespace the application is in (default: default)
-      APP: Application to restore (required)
-      PREVIOUS: Previous number of snapshots to restore (default: 2)
-    cmds:
-      - task: .suspend
-      - task: .restore
-      - task: .resume
-    requires:
-      vars: [CLUSTER, APP]
-
-  .suspend:
-    internal: true
+    desc: Restore an app [CLUSTER=main] [NS=default] [APP=required] [PREVIOUS=required]
     cmds:
+      # Suspend
       - flux --namespace flux-system suspend kustomization {{.APP}}
       - flux --namespace {{.NS}} suspend helmrelease {{.APP}}
       - kubectl --namespace {{.NS}} scale {{.CONTROLLER}}/{{.APP}} --replicas 0
       - kubectl --namespace {{.NS}} wait pod --for=delete --selector="app.kubernetes.io/name={{.APP}}" --timeout=5m
+      # Restore
+      - minijinja-cli {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2 | kubectl apply --server-side --filename -
+      - until kubectl --namespace {{.NS}} get job/volsync-dst-{{.APP}}-manual &>/dev/null; do sleep 5; done
+      - kubectl --namespace {{.NS}} wait job/volsync-dst-{{.APP}}-manual --for=condition=complete --timeout=120m
+      - kubectl --namespace {{.NS}} delete replicationdestination {{.APP}}-manual
+      # Resume
+      - flux --namespace flux-system resume kustomization {{.APP}}
+      - flux --namespace {{.NS}} resume helmrelease {{.APP}}
+      - flux --namespace {{.NS}} reconcile helmrelease {{.APP}} --force
+      - kubectl --namespace {{.NS}} wait pod --for=condition=ready --selector="app.kubernetes.io/name={{.APP}}" --timeout=5m
     vars:
       NS: '{{.NS | default "default"}}'
-      APP: '{{.APP}}'
       CONTROLLER:
         sh: kubectl --namespace {{.NS}} get deployment {{.APP}} &>/dev/null && echo deployment || echo statefulset

-  .restore:
-    internal: true
-    cmds:
-      - minijinja-cli --env --trim-blocks --lstrip-blocks --autoescape=none {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2 | kubectl apply --server-side --filename -
-      - until kubectl --namespace {{.NS}} get job/{{.JOB}} &>/dev/null; do sleep 5; done
-      - kubectl --namespace {{.NS}} wait job/{{.JOB}} --for=condition=complete --timeout=120m
-      - kubectl --namespace {{.NS}} delete replicationdestination {{.JOB}}
-    vars:
-      NS: '{{.NS | default "default"}}'
-      JOB: volsync-dst-{{.APP}}
       PREVIOUS: '{{.PREVIOUS | default 2}}'
+    env:
+      NS: '{{.NS}}'
+      APP: '{{.APP}}'
+      PREVIOUS: '{{.PREVIOUS}}'
       CLAIM:
         sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.sourcePVC}"
       ACCESS_MODES:

@@ -109,28 +90,8 @@ tasks:
         sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.restic.moverSecurityContext.runAsUser}"
       PGID:
         sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.restic.moverSecurityContext.runAsGroup}"
-    env:
-      NS: '{{.NS}}'
-      JOB: '{{.JOB}}'
-      APP: '{{.APP}}'
-      PREVIOUS: '{{.PREVIOUS}}'
-      CLAIM: '{{.CLAIM}}'
-      ACCESS_MODES: '{{.ACCESS_MODES}}'
-      STORAGE_CLASS_NAME: '{{.STORAGE_CLASS_NAME}}'
-      PUID: '{{.PUID}}'
-      PGID: '{{.PGID}}'
     requires:
       vars: [CLUSTER, APP, PREVIOUS]
     preconditions:
       - test -f {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2
-
-  .resume:
-    internal: true
-    cmds:
-      - flux --namespace {{.NS}} resume helmrelease {{.APP}}
-      - flux --namespace flux-system resume kustomization {{.APP}}
-      - kubectl --namespace {{.NS}} scale {{.CONTROLLER}}/{{.APP}} --replicas 1
-      - kubectl --namespace {{.NS}} wait pod --for=condition=ready --selector="app.kubernetes.io/name={{.APP}}" --timeout=5m
-    vars:
-      NS: '{{.NS | default "default"}}'
-      APP: '{{.APP}}'
-      CONTROLLER:
-        sh: kubectl --namespace {{.NS}} get deployment {{.APP}} &>/dev/null && echo deployment || echo statefulset
+      - which flux kubectl minijinja-cli
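The condensed desc strings now double as usage documentation (the volsync: task namespace is confirmed by the old inline comments). A sketch of invocations, with the application name assumed:

    task volsync:state-suspend CLUSTER=main
    task volsync:unlock CLUSTER=main
    task volsync:snapshot CLUSTER=main NS=default APP=plex
    task volsync:restore CLUSTER=main NS=default APP=plex PREVIOUS=2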
.taskfiles/volsync/resources/replicationdestination.yaml.j2

@@ -2,7 +2,7 @@
 apiVersion: volsync.backube/v1alpha1
 kind: ReplicationDestination
 metadata:
-  name: {{ ENV.JOB }}
+  name: {{ ENV.APP }}-manual
   namespace: {{ ENV.NS }}
 spec:
   trigger:
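The rename keeps the template aligned with the rewritten restore task: the manual ReplicationDestination is now named <app>-manual, so VolSync's destination mover job becomes volsync-dst-<app>-manual, which is exactly what the new restore cmds poll for and clean up. A sketch of checking an in-flight restore, app name assumed:

    kubectl --namespace default get replicationdestination plex-manual
    kubectl --namespace default get job volsync-dst-plex-manual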
Taskfile.yaml

@@ -5,6 +5,10 @@ vars:
   KUBERNETES_DIR: "{{.ROOT_DIR}}/kubernetes"
   CLUSTER_SECRETS_FILE: "{{.CLUSTER_DIR}}/flux/vars/cluster-secrets.sops.env"
   CLUSTER_SETTINGS_FILE: "{{.CLUSTER_DIR}}/flux/vars/cluster-settings.env"
+  K8S_CLUSTER: '{{.K8S_CLUSTER | default "theshire"}}'
+  K8S_CLUSTER_DIR: '{{.KUBERNETES_DIR}}'
+  CLUSTER: '{{.CLUSTER | default "theshire"}}'
+  CLUSTER_DIR: '{{.KUBERNETES_DIR}}'

 env:
   KUBECONFIG: "{{.ROOT_DIR}}/kubeconfig"
@@ -35,7 +35,7 @@ spec:
           app:
             image:
               repository: docker.io/ollama/ollama
-              tag: 0.3.14
+              tag: 0.4.1
             env:
               - name: OLLAMA_HOST
                 value: 0.0.0.0
@@ -9,7 +9,7 @@ spec:
   chart:
     spec:
       chart: coder
-      version: 2.17.0
+      version: 2.17.2
       sourceRef:
        kind: HelmRepository
        name: coder
@@ -31,7 +31,7 @@ spec:
           app:
             image:
               repository: ghcr.io/autobrr/autobrr
-              tag: v1.48.0@sha256:0ae19e3beedf491396e450b024c23e9e24df4d692286c0442a81fa699493def0
+              tag: v1.49.0@sha256:dc2195ccabf8438a8f8eb0581c5e6d2a40c061754e57552bc4f67f1b20a71970
             env:
               AUTOBRR__CHECK_FOR_UPDATES: "false"
               AUTOBRR__HOST: 0.0.0.0
@@ -36,7 +36,7 @@ spec:
           app:
             image:
               repository: ghcr.io/onedr0p/home-assistant
-              tag: 2024.10.4@sha256:d788b59a4ee584f7cbeee7cff896e922faa8f0673c83187045e77e0fc77c8457
+              tag: 2024.11.1@sha256:a3dd7577c28771702b21f817ad86600056467c2c7f45d261a1e7241910ddc2e2
             env:
               TZ: America/Chicago
             envFrom:

@@ -54,7 +54,7 @@ spec:
           code-server:
             image:
               repository: ghcr.io/coder/code-server
-              tag: 4.93.1@sha256:c69e398d1b64589b3b77a7becfd03f4ec524982def20e6bffbb51b1b839e72ba
+              tag: 4.95.1@sha256:d9bc7797d997e1b199e333676732e075bac4bae276dc0fe1baece2e313edfa09
             args: [
               "--auth", "none",
               "--user-data-dir", "/config/.vscode",
@@ -16,7 +16,6 @@ resources:
 - ./morphos/ks.yaml
 - ./omegabrr/ks.yaml
 - ./overseerr/ks.yaml
 - ./piped/ks.yaml
 - ./plex/ks.yaml
 - ./prowlarr/ks.yaml
 - ./radarr/ks.yaml
@@ -32,7 +32,7 @@ spec:
           app:
             image:
               repository: ghcr.io/jorenn92/maintainerr
-              tag: 2.2.0@sha256:fbb2c0341b8af502e4488f3664e34992f24947708c7dac10dcbee592f99a946c
+              tag: 2.2.1@sha256:13121a8292ef6db7560a931bf19b601cf3cc12df0a9dea9086b757798eea5b6d
             env:
               TZ: America/Chicago
             resources:
@@ -31,7 +31,7 @@ spec:
           app:
             image:
               repository: ghcr.io/autobrr/omegabrr
-              tag: v1.14.0@sha256:6f65c7967609746662815933ecc8168c8c25a3b82d909f49833fcce2b47ee052
+              tag: v1.15.0@sha256:4f6099a76ff9d248e9f032e29c04a92b483f21456e46f3b01eb20399f4732ad0
             env:
               TZ: America/Chicago
             securityContext:
@@ -33,7 +33,7 @@ spec:
           app:
             image:
               repository: ghcr.io/taxel/plextraktsync
-              tag: 0.32.0
+              tag: 0.32.1
             args:
               - sync
             env:
@@ -32,7 +32,7 @@ spec:
           app:
             image:
               repository: ghcr.io/koush/scrypted
-              tag: v0.123.0-jammy-nvidia
+              tag: v0.123.1-jammy-nvidia
             probes:
               liveness:
                 enabled: true
kubernetes/apps/kube-system/generic-device-plugin/app/helmrelease.yaml (new file, +67)

---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: generic-device-plugin
spec:
  interval: 30m
  chart:
    spec:
      chart: app-template
      version: 3.5.1
      sourceRef:
        kind: HelmRepository
        name: bjw-s
        namespace: flux-system
  driftDetection:
    mode: enabled
  install:
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      strategy: rollback
      retries: 3
  values:
    defaultPodOptions:
      priorityClassName: system-node-critical
    controllers:
      generic-device-plugin:
        type: daemonset
        strategy: RollingUpdate
        annotations:
          reloader.stakater.com/auto: "true"
        containers:
          generic-device-plugin:
            image:
              repository: ghcr.io/squat/generic-device-plugin
              tag: latest@sha256:ba6f0b4cf6c858d6ad29ba4d32e4da11638abbc7d96436bf04f582a97b2b8821
            args:
              - --config=/config/config.yml
            ports:
              - containerPort: 8080
                name: http
            securityContext:
              allowPrivilegeEscalation: false
              readOnlyRootFilesystem: true
              capabilities: { drop: ["ALL"] }
    persistence:
      config:
        type: configMap
        name: generic-device-plugin-configmap
        globalMounts:
          - path: /config/config.yml
            subPath: config.yml
            readOnly: true
      dev:
        type: hostPath
        hostPath: /dev
        globalMounts:
          - path: /dev
      device-plugin:
        type: hostPath
        hostPath: /var/lib/kubelet/device-plugins
        globalMounts:
          - path: /var/lib/kubelet/device-plugins
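The two hostPath mounts are what make a device plugin work: /var/lib/kubelet/device-plugins is where the plugin registers its gRPC socket with the kubelet, and /dev is where the advertised devices actually live. A hedged way to confirm registration on a Talos node (node IP assumed):

    talosctl --nodes 10.1.1.57 logs kubelet | grep -i device-plugin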
kubernetes/apps/kube-system/generic-device-plugin/app/kustomization.yaml (new file, +12)

---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ./helmrelease.yaml
configMapGenerator:
  - name: generic-device-plugin-configmap
    files:
      - ./resources/config.yml
generatorOptions:
  disableNameSuffixHash: true
kubernetes/apps/kube-system/generic-device-plugin/app/resources/config.yml (new file, +9)

---
log-level: info
domain: kernel.org
devices:
  - name: tun
    groups:
      - count: 1000
        paths:
          - path: /dev/net/tun
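With domain: kernel.org and a tun device backed by /dev/net/tun, the plugin advertises an extended resource named kernel.org/tun, sliced into 1000 shareable slots per node; a pod opts in by requesting that resource under resources.limits. A quick check that nodes picked it up (node name assumed):

    kubectl describe node talos-node-1 | grep kernel.org/tun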
kubernetes/apps/kube-system/generic-device-plugin/ks.yaml (new file, +20)

---
# yaml-language-server: $schema=https://ks.hsn.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: &app generic-device-plugin
  namespace: flux-system
spec:
  targetNamespace: kube-system
  commonMetadata:
    labels:
      app.kubernetes.io/name: *app
  path: "./kubernetes/apps/kube-system/generic-device-plugin/app"
  prune: true
  sourceRef:
    kind: GitRepository
    name: theshire
  wait: true
  interval: 30m
  timeout: 5m
@@ -12,6 +12,7 @@ resources:
 - ./descheduler/ks.yaml
 - ./dnsimple-webhook-rbac.yaml
 - ./fstrim/ks.yaml
+- ./generic-device-plugin/ks.yaml
 - ./kubelet-csr-approver/ks.yaml
 - ./metrics-server/ks.yaml
 - ./node-feature-discovery/ks.yaml
@@ -36,7 +36,7 @@ spec:
           app:
             image:
               repository: docker.io/cloudflare/cloudflared
-              tag: 2024.10.1@sha256:52b9529db08f7ef827a2bce04b91945b475c651e46f583c30b70dd6773262ae3
+              tag: 2024.11.0@sha256:2c78df02e1f23ab19d4c636921f05b9ebec163b887e946f98e22e56254a5540f
             env:
               NO_AUTOUPDATE: "true"
               TUNNEL_CRED_FILE: /etc/cloudflared/creds/credentials.json
@@ -247,7 +247,7 @@ spec:
         victoria-agent:
           # renovate: depName="VictoriaMetrics - vmagent"
           gnetId: 12683
-          revision: 20
+          revision: 21
           datasource: Prometheus
         victoria-alert:
           # renovate: depName="VictoriaMetrics - vmalert"

@@ -262,7 +262,7 @@ spec:
         victoria-single:
           # renovate: depName="VictoriaMetrics - single-node"
           gnetId: 10229
-          revision: 36
+          revision: 37
           datasource: Prometheus
         postgres:
           crunchy-pgbackrest:
@@ -9,7 +9,7 @@ spec:
   chart:
     spec:
       chart: prometheus-operator-crds
-      version: 15.0.0
+      version: 16.0.0
      sourceRef:
        kind: HelmRepository
        name: prometheus-community
@@ -43,7 +43,7 @@ spec:
           app:
             image:
               repository: jesec/flood
-              tag: master@sha256:8d04ec24abcc879f14e744e809520f7a7ec3c66395e1f6efa4179c9399803fbe
+              tag: master@sha256:8a18a3509a6c1557b769873a1ef85dcd5fa4cbce1a939be2c6c87f97eb79de45
             envFrom:
               - secretRef:
                   name: flood-secret
@@ -10,7 +10,7 @@ spec:
   chart:
     spec:
      chart: rook-ceph
-      version: v1.15.4
+      version: v1.15.5
      sourceRef:
        kind: HelmRepository
        name: rook-ceph
@@ -10,7 +10,7 @@ spec:
   chart:
     spec:
      chart: rook-ceph-cluster
-      version: v1.15.4
+      version: v1.15.5
      sourceRef:
        kind: HelmRepository
        name: rook-ceph
kubernetes/bootstrap/helmfile.yaml

@@ -19,7 +19,7 @@ releases:
   - name: prometheus-operator-crds
     namespace: observability
     chart: oci://ghcr.io/prometheus-community/charts/prometheus-operator-crds
-    version: 15.0.0
+    version: 16.0.0
   - name: cilium
     namespace: kube-system
     chart: cilium/cilium
kubernetes/bootstrap/talos/talconfig.yaml

@@ -1,9 +1,11 @@
 ---
-# yaml-language-server: $schema=https://ks.hsn.dev/talconfig.json
+# yaml-language-server: $schema=https://raw.githubusercontent.com/budimanjojo/talhelper/master/pkg/config/schemas/talconfig.json
 clusterName: theshire

+# renovate: datasource=github-releases depName=siderolabs/talos
 talosVersion: v1.8.1
-kubernetesVersion: 1.30.2
+# renovate: datasource=docker depName=ghcr.io/siderolabs/kubelet
+kubernetesVersion: 1.31.2
 endpoint: "https://10.1.1.57:6444"

 additionalApiServerCertSans:

@@ -169,8 +171,8 @@ worker:
             fs.inotify.max_queued_events: "65536"
             fs.inotify.max_user_instances: "8192"
             fs.inotify.max_user_watches: "524288"
-            net.core.rmem_max: "2500000"
-            net.core.wmem_max: "2500000"
+            net.core.rmem_max: "7500000"
+            net.core.wmem_max: "7500000"
   - &nfsMountOptions |-
     machine:
       files:
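The diff does not say why the socket buffers grow from 2.5 MB to 7.5 MB, but 7500000 is the value commonly recommended for QUIC/HTTP3 workloads (e.g. the quic-go project's UDP buffer guidance), so this likely unblocks a UDP-heavy service. One way to verify on a live node, IP assumed:

    talosctl --nodes 10.1.1.57 read /proc/sys/net/core/rmem_max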
shell.nix

@@ -19,5 +19,6 @@ pkgs.mkShell {
     age
     mqttui
     kustomize
+    yq-go
   ];
 }