upgrade volsync taskfile

Joseph Hanson 2024-11-06 10:46:07 -06:00
parent 3668207a96
commit af097c7dd3
Signed by: jahanson
SSH key fingerprint: SHA256:vy6dKBECV522aPAwklFM3ReKAVB086rT3oWwiuiFG7o
3 changed files with 42 additions and 79 deletions

@@ -2,11 +2,10 @@
# yaml-language-server: $schema=https://taskfile.dev/schema.json
version: '3'
# This taskfile is used to manage certain VolSync tasks for a given application, limitations are described below.
# Taskfile used to manage certain VolSync tasks for a given application; limitations are as follows.
# 1. Fluxtomization, HelmRelease, PVC, ReplicationSource all have the same name (e.g. plex)
# 2. ReplicationSource and ReplicationDestination are a Restic repository
# 3. Applications are deployed as either a Kubernetes Deployment or StatefulSet
# 4. Each application only has one PVC that is being replicated
# 3. Each application only has one PVC that is being replicated
vars:
VOLSYNC_RESOURCES_DIR: '{{.ROOT_DIR}}/.taskfiles/volsync/resources'
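
These limitations amount to a naming convention: for an app such as plex, the Fluxtomization, HelmRelease, PVC and ReplicationSource all share one name, and a single PVC is backed up to a Restic repository. A minimal sketch of what such a ReplicationSource could look like (the namespace, schedule and secret name are hypothetical, not taken from this repo):

  apiVersion: volsync.backube/v1alpha1
  kind: ReplicationSource
  metadata:
    name: plex                # matches the Fluxtomization, HelmRelease and PVC name
    namespace: default
  spec:
    sourcePVC: plex           # the single PVC being replicated
    trigger:
      schedule: "0 * * * *"   # hypothetical hourly backup
    restic:
      repository: plex-volsync-secret   # hypothetical Secret holding the Restic repo config
      copyMethod: Snapshot
      moverSecurityContext:
        runAsUser: 1000
        runAsGroup: 1000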
@@ -14,39 +13,34 @@ vars:
tasks:
state-*:
desc: Suspend or Resume Volsync
summary: |-
CLUSTER: Cluster to run command against (default: main)
STATE: resume or suspend (required)
desc: Suspend or resume Volsync [CLUSTER=main]
cmds:
# - until kubectl wait jobs --all --all-namespaces --for=condition=complete --timeout=5m &>/dev/null; do sleep 5; done
- flux {{.STATE}} kustomization volsync
- flux --namespace {{.NS}} {{.STATE}} helmrelease volsync
- kubectl --namespace {{.NS}} scale deployment --all --replicas {{if eq .STATE "suspend"}}0{{else}}1{{end}}
- flux --namespace flux-system {{.STATE}} kustomization volsync
- flux --namespace volsync-system {{.STATE}} helmrelease volsync
- kubectl --namespace volsync-system scale deployment volsync --replicas {{if eq .STATE "suspend"}}0{{else}}1{{end}}
vars:
NS: '{{.NS | default "volsync-system"}}'
STATE: '{{index .MATCH 0}}'
requires:
vars: [CLUSTER]
preconditions:
- '[[ "{{.STATE}}" == "suspend" || "{{.STATE}}" == "resume" ]]'
- which flux kubectl
unlock:
desc: Unlock all Restic repositories
summary: |-
CLUSTER: Cluster to run command against (default: main)
cmd: >
kubectl get replicationsources --all-namespaces --no-headers -A | awk '{print $1, $2}'
| xargs --max-procs=2 -l bash -c 'kubectl --namespace "$0" patch --field-manager=flux-client-side-apply replicationsources "$1" --type merge --patch "{\"spec\":{\"restic\":{\"unlock\":\"{{now | unixEpoch}}\"}}}"'
desc: Unlock all restic source repos [CLUSTER=main]
cmds:
- for: { var: SOURCES, split: "\n" }
cmd: kubectl --namespace {{splitList "," .ITEM | first}} patch --field-manager=flux-client-side-apply replicationsources {{splitList "," .ITEM | last}} --type merge --patch "{\"spec\":{\"restic\":{\"unlock\":\"{{now | unixEpoch}}\"}}}"
vars:
SOURCES:
sh: kubectl get replicationsources --all-namespaces --no-headers --output=jsonpath='{range .items[*]}{.metadata.namespace},{.metadata.name}{"\n"}{end}'
requires:
vars: [CLUSTER]
preconditions:
- which kubectl
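
With the wildcard state-* task and the unlock loop rewritten as above, invocation is unchanged; a few hypothetical runs (the cluster name is only an example):

  task volsync:state-suspend CLUSTER=main   # suspend the volsync Kustomization, HelmRelease and controller Deployment
  task volsync:state-resume CLUSTER=main    # bring them back
  task volsync:unlock CLUSTER=main          # patch an unlock timestamp onto every ReplicationSource

The for/SOURCES loop does the same work as the old xargs pipeline, but keeps the iteration inside Task instead of shelling out to bash.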
# To run backup jobs in parallel for all replicationsources:
# - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:snapshot APP=$0 NS=$1'
snapshot:
desc: Snapshot an application
summary: |-
CLUSTER: Cluster to run command against (default: main)
NS: Namespace the application is in (default: default)
APP: Application to snapshot (required)
desc: Snapshot an app [CLUSTER=main] [NS=default] [APP=required]
cmds:
- kubectl --namespace {{.NS}} patch replicationsources {{.APP}} --type merge -p '{"spec":{"trigger":{"manual":"{{now | unixEpoch}}"}}}'
- until kubectl --namespace {{.NS}} get job/{{.JOB}} &>/dev/null; do sleep 5; done
@@ -58,47 +52,34 @@ tasks:
vars: [CLUSTER, APP]
preconditions:
- kubectl --namespace {{.NS}} get replicationsources {{.APP}}
- which kubectl
# To run restore jobs in parallel for all replicationdestinations:
# - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:restore APP=$0 NS=$1'
restore:
desc: Restore an application
summary: |-
CLUSTER: Cluster to run command against (default: main)
NS: Namespace the application is in (default: default)
APP: Application to restore (required)
PREVIOUS: Previous number of snapshots to restore (default: 2)
cmds:
- task: .suspend
- task: .restore
- task: .resume
requires:
vars: [CLUSTER, APP]
.suspend:
internal: true
desc: Restore an app [CLUSTER=main] [NS=default] [APP=required] [PREVIOUS=required]
cmds:
# Suspend
- flux --namespace flux-system suspend kustomization {{.APP}}
- flux --namespace {{.NS}} suspend helmrelease {{.APP}}
- kubectl --namespace {{.NS}} scale {{.CONTROLLER}}/{{.APP}} --replicas 0
- kubectl --namespace {{.NS}} wait pod --for=delete --selector="app.kubernetes.io/name={{.APP}}" --timeout=5m
# Restore
- minijinja-cli {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2 | kubectl apply --server-side --filename -
- until kubectl --namespace {{.NS}} get job/volsync-dst-{{.APP}}-manual &>/dev/null; do sleep 5; done
- kubectl --namespace {{.NS}} wait job/volsync-dst-{{.APP}}-manual --for=condition=complete --timeout=120m
- kubectl --namespace {{.NS}} delete replicationdestination {{.APP}}-manual
# Resume
- flux --namespace flux-system resume kustomization {{.APP}}
- flux --namespace {{.NS}} resume helmrelease {{.APP}}
- flux --namespace {{.NS}} reconcile helmrelease {{.APP}} --force
- kubectl --namespace {{.NS}} wait pod --for=condition=ready --selector="app.kubernetes.io/name={{.APP}}" --timeout=5m
vars:
NS: '{{.NS | default "default"}}'
APP: '{{.APP}}'
CONTROLLER:
sh: kubectl --namespace {{.NS}} get deployment {{.APP}} &>/dev/null && echo deployment || echo statefulset
.restore:
internal: true
cmds:
- minijinja-cli --env --trim-blocks --lstrip-blocks --autoescape=none {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2 | kubectl apply --server-side --filename -
- until kubectl --namespace {{.NS}} get job/{{.JOB}} &>/dev/null; do sleep 5; done
- kubectl --namespace {{.NS}} wait job/{{.JOB}} --for=condition=complete --timeout=120m
- kubectl --namespace {{.NS}} delete replicationdestination {{.JOB}}
vars:
NS: '{{.NS | default "default"}}'
JOB: volsync-dst-{{.APP}}
PREVIOUS: '{{.PREVIOUS | default 2}}'
env:
NS: '{{.NS}}'
APP: '{{.APP}}'
PREVIOUS: '{{.PREVIOUS}}'
CLAIM:
sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.sourcePVC}"
ACCESS_MODES:
@@ -109,28 +90,8 @@ tasks:
sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.restic.moverSecurityContext.runAsUser}"
PGID:
sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.restic.moverSecurityContext.runAsGroup}"
env:
NS: '{{.NS}}'
JOB: '{{.JOB}}'
APP: '{{.APP}}'
PREVIOUS: '{{.PREVIOUS}}'
CLAIM: '{{.CLAIM}}'
ACCESS_MODES: '{{.ACCESS_MODES}}'
STORAGE_CLASS_NAME: '{{.STORAGE_CLASS_NAME}}'
PUID: '{{.PUID}}'
PGID: '{{.PGID}}'
requires:
vars: [CLUSTER, APP, PREVIOUS]
preconditions:
- test -f {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2
.resume:
internal: true
cmds:
- flux --namespace {{.NS}} resume helmrelease {{.APP}}
- flux --namespace flux-system resume kustomization {{.APP}}
- kubectl --namespace {{.NS}} scale {{.CONTROLLER}}/{{.APP}} --replicas 1
- kubectl --namespace {{.NS}} wait pod --for=condition=ready --selector="app.kubernetes.io/name={{.APP}}" --timeout=5m
vars:
NS: '{{.NS | default "default"}}'
APP: '{{.APP}}'
CONTROLLER:
sh: kubectl --namespace {{.NS}} get deployment {{.APP}} &>/dev/null && echo deployment || echo statefulset
- which flux kubectl minijinja-cli
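
The snapshot and restore tasks follow the same pattern; restore now folds the old .suspend/.restore/.resume helper tasks into a single command list. Typical invocations might look like this (app name and snapshot count are illustrative):

  task volsync:snapshot APP=plex CLUSTER=main             # trigger a manual Restic backup and wait for the job
  task volsync:restore APP=plex PREVIOUS=2 CLUSTER=main   # suspend Flux, restore from two snapshots back, then resume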

@@ -2,7 +2,7 @@
apiVersion: volsync.backube/v1alpha1
kind: ReplicationDestination
metadata:
name: {{ ENV.JOB }}
name: {{ ENV.APP }}-manual
namespace: {{ ENV.NS }}
spec:
trigger:
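
The template is rendered with minijinja-cli --env, so everything it needs arrives as environment variables (NS, APP, PREVIOUS, CLAIM, ACCESS_MODES, STORAGE_CLASS_NAME, PUID, PGID). A rough sketch of how the rendered manifest could come out for a hypothetical app; the actual field layout depends on the rest of the template, which this diff does not show:

  apiVersion: volsync.backube/v1alpha1
  kind: ReplicationDestination
  metadata:
    name: plex-manual                   # {{ ENV.APP }}-manual
    namespace: default                  # {{ ENV.NS }}
  spec:
    trigger:
      manual: restore-once              # hypothetical manual trigger value
    restic:
      repository: plex-volsync-secret   # hypothetical Secret name
      destinationPVC: plex              # CLAIM, read from the ReplicationSource's sourcePVC
      copyMethod: Direct
      storageClassName: ceph-block      # STORAGE_CLASS_NAME (illustrative)
      accessModes: ["ReadWriteOnce"]    # ACCESS_MODES
      previous: 2                       # PREVIOUS
      moverSecurityContext:
        runAsUser: 1000                 # PUID
        runAsGroup: 1000                # PGID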

@@ -7,6 +7,8 @@ vars:
CLUSTER_SETTINGS_FILE: "{{.CLUSTER_DIR}}/flux/vars/cluster-settings.env"
K8S_CLUSTER: '{{.K8S_CLUSTER | default "theshire"}}'
K8S_CLUSTER_DIR: '{{.KUBERNETES_DIR}}'
CLUSTER: '{{.CLUSTER | default "theshire"}}'
CLUSTER_DIR: '{{.KUBERNETES_DIR}}'
env:
KUBECONFIG: "{{.ROOT_DIR}}/kubeconfig"
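
The new CLUSTER and CLUSTER_DIR vars sit alongside the existing K8S_CLUSTER pair, presumably so the volsync tasks' required CLUSTER var picks up a default here while still accepting a command-line override, for example:

  task volsync:unlock CLUSTER=theshire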