upgrade volsync taskfile

Joseph Hanson 2024-11-06 10:46:07 -06:00
parent 3668207a96
commit af097c7dd3
Signed by: jahanson
SSH key fingerprint: SHA256:vy6dKBECV522aPAwklFM3ReKAVB086rT3oWwiuiFG7o
3 changed files with 42 additions and 79 deletions

View file

@@ -2,11 +2,10 @@
 # yaml-language-server: $schema=https://taskfile.dev/schema.json
 version: '3'

-# This taskfile is used to manage certain VolSync tasks for a given application, limitations are described below.
+# Taskfile used to manage certain VolSync tasks for a given application; limitations are as follows.
 # 1. Fluxtomization, HelmRelease, PVC, ReplicationSource all have the same name (e.g. plex)
 # 2. ReplicationSource and ReplicationDestination are a Restic repository
-# 3. Applications are deployed as either a Kubernetes Deployment or StatefulSet
-# 4. Each application only has one PVC that is being replicated
+# 3. Each application only has one PVC that is being replicated

 vars:
   VOLSYNC_RESOURCES_DIR: '{{.ROOT_DIR}}/.taskfiles/volsync/resources'
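To make limitation 1 concrete with the plex example from the comment: the Flux Kustomization, HelmRelease, PVC, and ReplicationSource would all be named plex, which could be sanity-checked with something like the commands below (the namespace and app name are illustrative, not taken from this repo):

$ flux get kustomizations plex
$ kubectl --namespace default get helmrelease/plex pvc/plex replicationsource/plex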
@@ -14,39 +13,34 @@ vars:
 tasks:

   state-*:
-    desc: Suspend or Resume Volsync
-    summary: |-
-      CLUSTER: Cluster to run command against (default: main)
-      STATE: resume or suspend (required)
+    desc: Suspend or resume Volsync [CLUSTER=main]
     cmds:
-      # - until kubectl wait jobs --all --all-namespaces --for=condition=complete --timeout=5m &>/dev/null; do sleep 5; done
-      - flux {{.STATE}} kustomization volsync
-      - flux --namespace {{.NS}} {{.STATE}} helmrelease volsync
-      - kubectl --namespace {{.NS}} scale deployment --all --replicas {{if eq .STATE "suspend"}}0{{else}}1{{end}}
+      - flux --namespace flux-system {{.STATE}} kustomization volsync
+      - flux --namespace volsync-system {{.STATE}} helmrelease volsync
+      - kubectl --namespace volsync-system scale deployment volsync --replicas {{if eq .STATE "suspend"}}0{{else}}1{{end}}
     vars:
-      NS: '{{.NS | default "volsync-system"}}'
       STATE: '{{index .MATCH 0}}'
     requires:
       vars: [CLUSTER]
+    preconditions:
+      - '[[ "{{.STATE}}" == "suspend" || "{{.STATE}}" == "resume" ]]'
+      - which flux kubectl

   unlock:
-    desc: Unlock all Restic repositories
-    summary: |-
-      CLUSTER: Cluster to run command against (default: main)
-    cmd: >
-      kubectl get replicationsources --all-namespaces --no-headers -A | awk '{print $1, $2}'
-      | xargs --max-procs=2 -l bash -c 'kubectl --namespace "$0" patch --field-manager=flux-client-side-apply replicationsources "$1" --type merge --patch "{\"spec\":{\"restic\":{\"unlock\":\"{{now | unixEpoch}}\"}}}"'
+    desc: Unlock all restic source repos [CLUSTER=main]
+    cmds:
+      - for: { var: SOURCES, split: "\n" }
+        cmd: kubectl --namespace {{splitList "," .ITEM | first}} patch --field-manager=flux-client-side-apply replicationsources {{splitList "," .ITEM | last}} --type merge --patch "{\"spec\":{\"restic\":{\"unlock\":\"{{now | unixEpoch}}\"}}}"
+    vars:
+      SOURCES:
+        sh: kubectl get replicationsources --all-namespaces --no-headers --output=jsonpath='{range .items[*]}{.metadata.namespace},{.metadata.name}{"\n"}{end}'
     requires:
       vars: [CLUSTER]
+    preconditions:
+      - which kubectl

-  # To run backup jobs in parallel for all replicationsources:
-  # - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:snapshot APP=$0 NS=$1'
   snapshot:
-    desc: Snapshot an application
-    summary: |-
-      CLUSTER: Cluster to run command against (default: main)
-      NS: Namespace the application is in (default: default)
-      APP: Application to snapshot (required)
+    desc: Snapshot an app [CLUSTER=main] [NS=default] [APP=required]
     cmds:
       - kubectl --namespace {{.NS}} patch replicationsources {{.APP}} --type merge -p '{"spec":{"trigger":{"manual":"{{now | unixEpoch}}"}}}'
       - until kubectl --namespace {{.NS}} get job/{{.JOB}} &>/dev/null; do sleep 5; done
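For reference, these tasks are driven by the wildcard and the variables named in each desc. Assuming the file is included under the volsync: namespace (the removed parallel-run comments invoke it that way) and using the plex example app from the header comment, typical calls would look like:

$ task volsync:state-suspend CLUSTER=main
$ task volsync:state-resume CLUSTER=main
$ task volsync:unlock CLUSTER=main
$ task volsync:snapshot CLUSTER=main NS=default APP=plex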
@@ -58,47 +52,34 @@ tasks:
       vars: [CLUSTER, APP]
     preconditions:
       - kubectl --namespace {{.NS}} get replicationsources {{.APP}}
+      - which kubectl

-  # To run restore jobs in parallel for all replicationdestinations:
-  # - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:restore APP=$0 NS=$1'
   restore:
-    desc: Restore an application
-    summary: |-
-      CLUSTER: Cluster to run command against (default: main)
-      NS: Namespace the application is in (default: default)
-      APP: Application to restore (required)
-      PREVIOUS: Previous number of snapshots to restore (default: 2)
-    cmds:
-      - task: .suspend
-      - task: .restore
-      - task: .resume
-    requires:
-      vars: [CLUSTER, APP]
-
-  .suspend:
-    internal: true
+    desc: Restore an app [CLUSTER=main] [NS=default] [APP=required] [PREVIOUS=required]
     cmds:
+      # Suspend
       - flux --namespace flux-system suspend kustomization {{.APP}}
       - flux --namespace {{.NS}} suspend helmrelease {{.APP}}
       - kubectl --namespace {{.NS}} scale {{.CONTROLLER}}/{{.APP}} --replicas 0
       - kubectl --namespace {{.NS}} wait pod --for=delete --selector="app.kubernetes.io/name={{.APP}}" --timeout=5m
+      # Restore
+      - minijinja-cli {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2 | kubectl apply --server-side --filename -
+      - until kubectl --namespace {{.NS}} get job/volsync-dst-{{.APP}}-manual &>/dev/null; do sleep 5; done
+      - kubectl --namespace {{.NS}} wait job/volsync-dst-{{.APP}}-manual --for=condition=complete --timeout=120m
+      - kubectl --namespace {{.NS}} delete replicationdestination {{.APP}}-manual
+      # Resume
+      - flux --namespace flux-system resume kustomization {{.APP}}
+      - flux --namespace {{.NS}} resume helmrelease {{.APP}}
+      - flux --namespace {{.NS}} reconcile helmrelease {{.APP}} --force
+      - kubectl --namespace {{.NS}} wait pod --for=condition=ready --selector="app.kubernetes.io/name={{.APP}}" --timeout=5m
     vars:
       NS: '{{.NS | default "default"}}'
-      APP: '{{.APP}}'
       CONTROLLER:
         sh: kubectl --namespace {{.NS}} get deployment {{.APP}} &>/dev/null && echo deployment || echo statefulset
-
-  .restore:
-    internal: true
-    cmds:
-      - minijinja-cli --env --trim-blocks --lstrip-blocks --autoescape=none {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2 | kubectl apply --server-side --filename -
-      - until kubectl --namespace {{.NS}} get job/{{.JOB}} &>/dev/null; do sleep 5; done
-      - kubectl --namespace {{.NS}} wait job/{{.JOB}} --for=condition=complete --timeout=120m
-      - kubectl --namespace {{.NS}} delete replicationdestination {{.JOB}}
-    vars:
-      NS: '{{.NS | default "default"}}'
-      JOB: volsync-dst-{{.APP}}
-      PREVIOUS: '{{.PREVIOUS | default 2}}'
+    env:
+      NS: '{{.NS}}'
+      APP: '{{.APP}}'
+      PREVIOUS: '{{.PREVIOUS}}'
       CLAIM:
         sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.sourcePVC}"
       ACCESS_MODES:
@@ -109,28 +90,8 @@
         sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.restic.moverSecurityContext.runAsUser}"
       PGID:
         sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.restic.moverSecurityContext.runAsGroup}"
-    env:
-      NS: '{{.NS}}'
-      JOB: '{{.JOB}}'
-      APP: '{{.APP}}'
-      PREVIOUS: '{{.PREVIOUS}}'
-      CLAIM: '{{.CLAIM}}'
-      ACCESS_MODES: '{{.ACCESS_MODES}}'
-      STORAGE_CLASS_NAME: '{{.STORAGE_CLASS_NAME}}'
-      PUID: '{{.PUID}}'
-      PGID: '{{.PGID}}'
+    requires:
+      vars: [CLUSTER, APP, PREVIOUS]
     preconditions:
       - test -f {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2
+      - which flux kubectl minijinja-cli
-
-  .resume:
-    internal: true
-    cmds:
-      - flux --namespace {{.NS}} resume helmrelease {{.APP}}
-      - flux --namespace flux-system resume kustomization {{.APP}}
-      - kubectl --namespace {{.NS}} scale {{.CONTROLLER}}/{{.APP}} --replicas 1
-      - kubectl --namespace {{.NS}} wait pod --for=condition=ready --selector="app.kubernetes.io/name={{.APP}}" --timeout=5m
-    vars:
-      NS: '{{.NS | default "default"}}'
-      APP: '{{.APP}}'
-      CONTROLLER:
-        sh: kubectl --namespace {{.NS}} get deployment {{.APP}} &>/dev/null && echo deployment || echo statefulset
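A typical restore invocation, going by the bracket hints in the new desc (PREVIOUS is the number of snapshots to go back, which the removed .restore task used to default to 2; plex is again just the example app):

$ task volsync:restore CLUSTER=main NS=default APP=plex PREVIOUS=2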

View file

@@ -2,7 +2,7 @@
 apiVersion: volsync.backube/v1alpha1
 kind: ReplicationDestination
 metadata:
-  name: {{ ENV.JOB }}
+  name: {{ ENV.APP }}-manual
   namespace: {{ ENV.NS }}
 spec:
   trigger:
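The restore task renders this Jinja template with minijinja-cli and pipes the result straight into kubectl, with the values supplied by the env block shown in the Taskfile diff above. A minimal sketch of doing the same render by hand, assuming the resources path from the diff and placeholder values for everything else:

$ export NS=default APP=plex PREVIOUS=2
$ export CLAIM=plex ACCESS_MODES=ReadWriteOnce STORAGE_CLASS_NAME=ceph-block PUID=1000 PGID=1000
$ minijinja-cli .taskfiles/volsync/resources/replicationdestination.yaml.j2 | kubectl apply --server-side --filename -

Whether minijinja-cli needs an extra flag to expose ENV depends on the installed version; the old command in this commit passed --env explicitly, the new one does not.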

View file

@@ -7,6 +7,8 @@ vars:
   CLUSTER_SETTINGS_FILE: "{{.CLUSTER_DIR}}/flux/vars/cluster-settings.env"
   K8S_CLUSTER: '{{.K8S_CLUSTER | default "theshire"}}'
   K8S_CLUSTER_DIR: '{{.KUBERNETES_DIR}}'
+  CLUSTER: '{{.CLUSTER | default "theshire"}}'
+  CLUSTER_DIR: '{{.KUBERNETES_DIR}}'

 env:
   KUBECONFIG: "{{.ROOT_DIR}}/kubeconfig"
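These two root-level vars appear to back the reworked volsync tasks: CLUSTER_DIR gives {{.CLUSTER_DIR}} (used by CLUSTER_SETTINGS_FILE) a value, and a global CLUSTER default presumably satisfies the requires: vars: [CLUSTER] checks in the included volsync taskfile without passing the variable on every call, for example:

$ task volsync:unlock   # CLUSTER falls back to "theshire" from the root Taskfile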