Compare commits

8d0fc88a7d ... bc53134cb5

3 commits:
- bc53134cb5
- c3443e87be
- a070f8381b

10 changed files with 119 additions and 319 deletions
@@ -1,6 +1,6 @@
 ---
 # yaml-language-server: $schema=https://taskfile.dev/schema.json
-version: "3"
+version: '3'
 
 # This taskfile is used to manage certain VolSync tasks for a given application, limitations are described below.
 # 1. Fluxtomization, HelmRelease, PVC, ReplicationSource all have the same name (e.g. plex)

@@ -8,215 +8,129 @@ version: "3"
 # 3. Applications are deployed as either a Kubernetes Deployment or StatefulSet
 # 4. Each application only has one PVC that is being replicated
 
-x-env-vars: &env-vars
-  app: "{{.app}}"
-  claim: "{{.claim}}"
-  controller: "{{.controller}}"
-  job: "{{.job}}"
-  ns: "{{.ns}}"
-  pgid: "{{.pgid}}"
-  previous: "{{.previous}}"
-  puid: "{{.puid}}"
-
 vars:
-  VOLSYNC_RESOURCES_DIR: "{{.ROOT_DIR}}/.taskfiles/volsync/resources"
+  VOLSYNC_RESOURCES_DIR: '{{.ROOT_DIR}}/.taskfiles/volsync/resources'
 
 tasks:
 
   state-*:
     desc: Suspend or Resume Volsync
-    summary: |
-      state: resume or suspend (required)
-    dotenv: ['{{.VOLSYNC_RESOURCES_DIR}}/.env']
+    summary: |-
+      CLUSTER: Cluster to run command against (default: main)
+      STATE: resume or suspend (required)
     cmds:
-      - flux --context $CLUSTER {{.state}} kustomization volsync
-      - flux --context $CLUSTER -n {{.ns}} {{.state}} helmrelease volsync
-      - kubectl --context $CLUSTER -n {{.ns}} scale deployment volsync --replicas {{if eq "suspend" .state}}0{{else}}1{{end}}
-    env: *env-vars
+      # - until kubectl wait jobs --all --all-namespaces --for=condition=complete --timeout=5m &>/dev/null; do sleep 5; done
+      - flux {{.STATE}} kustomization volsync
+      - flux --namespace {{.NS}} {{.STATE}} helmrelease volsync
+      - kubectl --namespace {{.NS}} scale deployment --all --replicas {{if eq .STATE "suspend"}}0{{else}}1{{end}}
     vars:
-      ns: '{{.ns | default "volsync-system"}}'
-      state: '{{index .MATCH 0}}'
-
-  list:
-    desc: List snapshots for an application
-    summary: |
-      ns: Namespace the PVC is in (default: default)
-      app: Application to list snapshots for (required)
-    dotenv: ['{{.VOLSYNC_RESOURCES_DIR}}/.env']
-    cmds:
-      - /etc/profiles/per-user/jahanson/bin/envsubst < <(cat {{.VOLSYNC_RESOURCES_DIR}}/list.tmpl.yaml) | kubectl --context $CLUSTER apply -f -
-      - bash {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh {{.job}} {{.ns}} $CLUSTER
-      - kubectl --context $CLUSTER -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=1m
-      - kubectl --context $CLUSTER -n {{.ns}} logs job/{{.job}} --container main
-      - kubectl --context $CLUSTER -n {{.ns}} delete job {{.job}}
-    env: *env-vars
+      NS: '{{.NS | default "volsync-system"}}'
+      STATE: '{{index .MATCH 0}}'
     requires:
-      vars: ["app"]
-    vars:
-      ns: '{{.ns | default "default"}}'
-      job: volsync-list-{{.app}}
-    preconditions:
-      - test -f /etc/profiles/per-user/jahanson/bin/envsubst
-      - test -f {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh
-      - test -f {{.VOLSYNC_RESOURCES_DIR}}/list.tmpl.yaml
-    silent: true
+      vars: [CLUSTER]
 
   unlock:
-    desc: Unlock a Restic repository for an application
-    summary: |
-      ns: Namespace the PVC is in (default: default)
-      app: Application to unlock (required)
-    dotenv: ['{{.VOLSYNC_RESOURCES_DIR}}/.env']
-    cmds:
-      - /etc/profiles/per-user/jahanson/bin/envsubst < <(cat {{.VOLSYNC_RESOURCES_DIR}}/unlock.tmpl.yaml) | kubectl --context $CLUSTER apply -f -
-      - bash {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh {{.job}} {{.ns}} $CLUSTER
-      - kubectl --context $CLUSTER -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=1m
-      - kubectl --context $CLUSTER -n {{.ns}} logs job/{{.job}} --container minio
-      - kubectl --context $CLUSTER -n {{.ns}} logs job/{{.job}} --container r2
-      - kubectl --context $CLUSTER -n {{.ns}} delete job {{.job}}
-    env: *env-vars
+    desc: Unlock all Restic repositories
+    summary: |-
+      CLUSTER: Cluster to run command against (default: main)
+    cmd: >
+      kubectl get replicationsources --all-namespaces --no-headers -A | awk '{print $1, $2}'
+      | xargs --max-procs=2 -l bash -c 'kubectl --namespace "$0" patch --field-manager=flux-client-side-apply replicationsources "$1" --type merge --patch "{\"spec\":{\"restic\":{\"unlock\":\"{{now | unixEpoch}}\"}}}"'
     requires:
-      vars: ["app"]
-    vars:
-      ns: '{{.ns | default "default"}}'
-      job: volsync-unlock-{{.app}}
-    preconditions:
-      - test -f /etc/profiles/per-user/jahanson/bin/envsubst
-      - test -f {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh
-      - test -f {{.VOLSYNC_RESOURCES_DIR}}/unlock.tmpl.yaml
-    silent: true
+      vars: [CLUSTER]
 
   # To run backup jobs in parallel for all replicationsources:
-  # - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:snapshot app=$0 ns=$1'
+  # - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:snapshot APP=$0 NS=$1'
   snapshot:
-    desc: Snapshot a PVC for an application
-    summary: |
-      cluster: Cluster to run command against (required)
-      ns: Namespace the PVC is in (default: default)
-      app: Application to snapshot (required)
+    desc: Snapshot an application
+    summary: |-
+      CLUSTER: Cluster to run command against (default: main)
+      NS: Namespace the application is in (default: default)
+      APP: Application to snapshot (required)
     cmds:
-      - kubectl --context {{.cluster}} -n {{.ns}} patch replicationsources {{.app}} --type merge -p '{"spec":{"trigger":{"manual":"{{.now}}"}}}'
-      - bash {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh {{.job}} {{.ns}} {{.cluster}}
-      - kubectl --context {{.cluster}} -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=120m
-    env: *env-vars
-    requires:
-      vars: ["cluster", "app"]
+      - kubectl --namespace {{.NS}} patch replicationsources {{.APP}} --type merge -p '{"spec":{"trigger":{"manual":"{{now | unixEpoch}}"}}}'
+      - until kubectl --namespace {{.NS}} get job/{{.JOB}} &>/dev/null; do sleep 5; done
+      - kubectl --namespace {{.NS}} wait job/{{.JOB}} --for=condition=complete --timeout=120m
     vars:
-      now: '{{now | date "150405"}}'
-      ns: '{{.ns | default "default"}}'
-      job: volsync-src-{{.app}}
-      controller:
-        sh: true && {{.VOLSYNC_RESOURCES_DIR}}/which-controller.sh {{.app}} {{.ns}} {{.cluster}}
+      NS: '{{.NS | default "default"}}'
+      JOB: volsync-src-{{.APP}}
+    requires:
+      vars: [CLUSTER, APP]
     preconditions:
-      - test -f {{.VOLSYNC_RESOURCES_DIR}}/which-controller.sh
-      - test -f {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh
-      - kubectl --context {{.cluster}} -n {{.ns}} get replicationsources {{.app}}
+      - kubectl --namespace {{.NS}} get replicationsources {{.APP}}
 
   # To run restore jobs in parallel for all replicationdestinations:
-  # - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:restore app=$0 ns=$1'
+  # - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:restore APP=$0 NS=$1'
   restore:
-    desc: Restore a PVC for an application
-    summary: |
-      cluster: Cluster to run command against (required)
-      ns: Namespace the PVC is in (default: default)
-      app: Application to restore (required)
-      previous: Previous number of snapshots to restore (default: 2)
+    desc: Restore an application
+    summary: |-
+      CLUSTER: Cluster to run command against (default: main)
+      NS: Namespace the application is in (default: default)
+      APP: Application to restore (required)
+      PREVIOUS: Previous number of snapshots to restore (default: 2)
     cmds:
-      - { task: .suspend, vars: *env-vars }
-      - { task: .wipe, vars: *env-vars }
-      - { task: .restore, vars: *env-vars }
-      - { task: .resume, vars: *env-vars }
-    env: *env-vars
+      - task: .suspend
+      - task: .restore
+      - task: .resume
     requires:
-      vars: ["cluster", "app"]
-    vars:
-      ns: '{{.ns | default "default"}}'
-      previous: '{{.previous | default 2}}'
-      controller:
-        sh: "{{.VOLSYNC_RESOURCES_DIR}}/which-controller.sh {{.app}} {{.ns}}"
-      claim:
-        sh: kubectl --context {{.cluster}} -n {{.ns}} get replicationsources/{{.app}} -o jsonpath="{.spec.sourcePVC}"
-      puid:
-        sh: kubectl --context {{.cluster}} -n {{.ns}} get replicationsources/{{.app}} -o jsonpath="{.spec.restic.moverSecurityContext.runAsUser}"
-      pgid:
-        sh: kubectl --context {{.cluster}} -n {{.ns}} get replicationsources/{{.app}} -o jsonpath="{.spec.restic.moverSecurityContext.runAsGroup}"
-    preconditions:
-      - test -f {{.VOLSYNC_RESOURCES_DIR}}/which-controller.sh
-
-  cleanup:
-    desc: Delete volume populator PVCs in all namespaces
-    summary: |
-      cluster: Cluster to run command against (required)
-    cmds:
-      - for: { var: dest }
-        cmd: |
-          {{- $items := (split "/" .ITEM) }}
-          kubectl --context {{.cluster}} delete pvc -n {{ $items._0 }} {{ $items._1 }}
-      - for: { var: cache }
-        cmd: |
-          {{- $items := (split "/" .ITEM) }}
-          kubectl --context {{.cluster}} delete pvc -n {{ $items._0 }} {{ $items._1 }}
-      - for: { var: snaps }
-        cmd: |
-          {{- $items := (split "/" .ITEM) }}
-          kubectl --context {{.cluster}} delete volumesnapshot -n {{ $items._0 }} {{ $items._1 }}
-    env: *env-vars
-    requires:
-      vars: ["cluster"]
-    vars:
-      dest:
-        sh: kubectl --context {{.cluster}} get pvc --all-namespaces --no-headers | grep "dst-dest" | awk '{print $1 "/" $2}'
-      cache:
-        sh: kubectl --context {{.cluster}} get pvc --all-namespaces --no-headers | grep "dst-cache" | awk '{print $1 "/" $2}'
-      snaps:
-        sh: kubectl --context {{.cluster}} get volumesnapshot --all-namespaces --no-headers | grep "dst-dest" | awk '{print $1 "/" $2}'
+      vars: [CLUSTER, APP]
 
-  # Suspend the Flux ks and hr
   .suspend:
     internal: true
     cmds:
-      - flux --context {{.cluster}} -n flux-system suspend kustomization {{.app}}
-      - flux --context {{.cluster}} -n {{.ns}} suspend helmrelease {{.app}}
-      - kubectl --context {{.cluster}} -n {{.ns}} scale {{.controller}} --replicas 0
-      - kubectl --context {{.cluster}} -n {{.ns}} wait pod --for delete --selector="app.kubernetes.io/name={{.app}}" --timeout=2m
-    env: *env-vars
-
-  # Wipe the PVC of all data
-  .wipe:
-    internal: true
-    cmds:
-      - /etc/profiles/per-user/jahanson/bin/envsubst < <(cat {{.VOLSYNC_RESOURCES_DIR}}/wipe.tmpl.yaml) | kubectl --context {{.cluster}} apply -f -
-      - bash {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh {{.job}} {{.ns}} {{.cluster}}
-      - kubectl --context {{.cluster}} -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=120m
-      - kubectl --context {{.cluster}} -n {{.ns}} logs job/{{.job}} --container main
-      - kubectl --context {{.cluster}} -n {{.ns}} delete job {{.job}}
-    env: *env-vars
+      - flux --namespace flux-system suspend kustomization {{.APP}}
+      - flux --namespace {{.NS}} suspend helmrelease {{.APP}}
+      - kubectl --namespace {{.NS}} scale {{.CONTROLLER}}/{{.APP}} --replicas 0
+      - kubectl --namespace {{.NS}} wait pod --for=delete --selector="app.kubernetes.io/name={{.APP}}" --timeout=5m
     vars:
-      job: volsync-wipe-{{.app}}
-    preconditions:
-      - test -f /etc/profiles/per-user/jahanson/bin/envsubst
-      - test -f {{.VOLSYNC_RESOURCES_DIR}}/wipe.tmpl.yaml
-      - test -f {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh
+      NS: '{{.NS | default "default"}}'
+      APP: '{{.APP}}'
+      CONTROLLER:
+        sh: kubectl --namespace {{.NS}} get deployment {{.APP}} &>/dev/null && echo deployment || echo statefulset
 
-  # Create VolSync replicationdestination CR to restore data
   .restore:
     internal: true
     cmds:
-      - /etc/profiles/per-user/jahanson/bin/envsubst < <(cat {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.tmpl.yaml) | kubectl --context {{.cluster}} apply -f -
-      - bash {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh {{.job}} {{.ns}} {{.cluster}}
-      - kubectl --context {{.cluster}} -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=120m
-      - kubectl --context {{.cluster}} -n {{.ns}} delete replicationdestination {{.job}}
-    env: *env-vars
+      - minijinja-cli --env --trim-blocks --lstrip-blocks --autoescape=none {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2 | kubectl apply --server-side --filename -
+      - until kubectl --namespace {{.NS}} get job/{{.JOB}} &>/dev/null; do sleep 5; done
+      - kubectl --namespace {{.NS}} wait job/{{.JOB}} --for=condition=complete --timeout=120m
+      - kubectl --namespace {{.NS}} delete replicationdestination {{.JOB}}
     vars:
-      job: volsync-dst-{{.app}}
+      NS: '{{.NS | default "default"}}'
+      JOB: volsync-dst-{{.APP}}
+      PREVIOUS: '{{.PREVIOUS | default 2}}'
+      CLAIM:
+        sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.sourcePVC}"
+      ACCESS_MODES:
+        sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.restic.accessModes}"
+      STORAGE_CLASS_NAME:
+        sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.restic.storageClassName}"
+      PUID:
+        sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.restic.moverSecurityContext.runAsUser}"
+      PGID:
+        sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.restic.moverSecurityContext.runAsGroup}"
+    env:
+      NS: '{{.NS}}'
+      JOB: '{{.JOB}}'
+      APP: '{{.APP}}'
+      PREVIOUS: '{{.PREVIOUS}}'
+      CLAIM: '{{.CLAIM}}'
+      ACCESS_MODES: '{{.ACCESS_MODES}}'
+      STORAGE_CLASS_NAME: '{{.STORAGE_CLASS_NAME}}'
+      PUID: '{{.PUID}}'
+      PGID: '{{.PGID}}'
     preconditions:
-      - test -f /etc/profiles/per-user/jahanson/bin/envsubst
-      - test -f {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.tmpl.yaml
-      - test -f {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh
+      - test -f {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2
 
-  # Resume Flux ks and hr
   .resume:
     internal: true
     cmds:
-      - flux --context {{.cluster}} -n {{.ns}} resume helmrelease {{.app}}
-      - flux --context {{.cluster}} -n flux-system resume kustomization {{.app}}
-    env: *env-vars
+      - flux --namespace {{.NS}} resume helmrelease {{.APP}}
+      - flux --namespace flux-system resume kustomization {{.APP}}
+      - kubectl --namespace {{.NS}} scale {{.CONTROLLER}}/{{.APP}} --replicas 1
+      - kubectl --namespace {{.NS}} wait pod --for=condition=ready --selector="app.kubernetes.io/name={{.APP}}" --timeout=5m
+    vars:
+      NS: '{{.NS | default "default"}}'
+      APP: '{{.APP}}'
+      CONTROLLER:
+        sh: kubectl --namespace {{.NS}} get deployment {{.APP}} &>/dev/null && echo deployment || echo statefulset
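
Taken together, the Taskfile changes rename the lowercase `app`/`ns`/`cluster` variables to uppercase, drop the shared `*env-vars` anchor, and stop passing `--context` (the commands now run against the active kubeconfig context, although `CLUSTER` is still declared under `requires`). A rough sketch of invocations against the new interface, assuming the file is included under the `volsync` task namespace as the in-file comments suggest; the APP/NS values are illustrative:

    # Illustrative only: task and variable names come from the diff above,
    # values are made up, and the kubeconfig context is assumed to already
    # point at the target cluster.
    task volsync:state-suspend CLUSTER=main   # STATE comes from the task-name wildcard
    task volsync:state-resume CLUSTER=main
    task volsync:snapshot CLUSTER=main NS=default APP=plex
    task volsync:restore CLUSTER=main NS=default APP=plex PREVIOUS=2
    task volsync:unlock CLUSTER=main          # bumps spec.restic.unlock on every ReplicationSource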
@@ -1 +0,0 @@
-CLUSTER=theshire
@@ -1,20 +0,0 @@
----
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: ${job}
-  namespace: ${ns}
-spec:
-  ttlSecondsAfterFinished: 3600
-  template:
-    spec:
-      automountServiceAccountToken: false
-      restartPolicy: OnFailure
-      containers:
-        - name: main
-          image: docker.io/restic/restic:latest
-          args: ["snapshots"]
-          envFrom:
-            - secretRef:
-                name: ${app}-volsync-r2-secret
-          resources: {}
@@ -1,31 +0,0 @@
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationDestination
-metadata:
-  name: ${job}
-  namespace: ${ns}
-spec:
-  trigger:
-    manual: restore-once
-  restic:
-    repository: ${app}-volsync-r2-secret
-    destinationPVC: ${claim}
-    copyMethod: Direct
-    storageClassName: ceph-block
-    # storageClassName: ceph-filesystem
-    # accessModes: ["ReadWriteMany"]
-    # IMPORTANT NOTE:
-    #   Set to the last X number of snapshots to restore from
-    previous: ${previous}
-    # OR;
-    # IMPORTANT NOTE:
-    #   On bootstrap set `restoreAsOf` to the time the old cluster was destroyed.
-    #   This will essentially prevent volsync from trying to restore a backup
-    #   from a application that started with default data in the PVC.
-    #   Do not restore snapshots made after the following RFC3339 Timestamp.
-    #   date --rfc-3339=seconds (--utc)
-    # restoreAsOf: "2022-12-10T16:00:00-05:00"
-    moverSecurityContext:
-      runAsUser: ${puid}
-      runAsGroup: ${pgid}
-      fsGroup: ${pgid}
.taskfiles/volsync/resources/replicationdestination.yaml.j2 (new file, 23 lines)
@@ -0,0 +1,23 @@
+---
+apiVersion: volsync.backube/v1alpha1
+kind: ReplicationDestination
+metadata:
+  name: {{ ENV.JOB }}
+  namespace: {{ ENV.NS }}
+spec:
+  trigger:
+    manual: restore-once
+  restic:
+    repository: {{ ENV.APP }}-volsync-secret
+    destinationPVC: {{ ENV.CLAIM }}
+    copyMethod: Direct
+    storageClassName: {{ ENV.STORAGE_CLASS_NAME }}
+    accessModes: {{ ENV.ACCESS_MODES }}
+    previous: {{ ENV.PREVIOUS }}
+    enableFileDeletion: true
+    cleanupCachePVC: true
+    cleanupTempPVC: true
+    moverSecurityContext:
+      runAsUser: {{ ENV.PUID }}
+      runAsGroup: {{ ENV.PGID }}
+      fsGroup: {{ ENV.PGID }}
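
The template pulls every value from `ENV`, which is how `minijinja-cli --env` exposes environment variables; the `.restore` task's `env:` map above supplies each key. A minimal render sketch with made-up values:

    # Sketch of the render step performed by the .restore task; all values
    # are illustrative and the template is assumed to be in the current dir.
    export NS=default JOB=volsync-dst-plex APP=plex PREVIOUS=2
    export CLAIM=plex ACCESS_MODES='["ReadWriteOnce"]'
    export STORAGE_CLASS_NAME=ceph-block PUID=1000 PGID=1000
    minijinja-cli --env --trim-blocks --lstrip-blocks --autoescape=none \
      replicationdestination.yaml.j2 | kubectl apply --server-side --filename -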
@@ -1,27 +0,0 @@
----
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: ${job}
-  namespace: ${ns}
-spec:
-  ttlSecondsAfterFinished: 3600
-  template:
-    spec:
-      automountServiceAccountToken: false
-      restartPolicy: OnFailure
-      containers:
-        - name: minio
-          image: docker.io/restic/restic:latest
-          args: ["unlock", "--remove-all"]
-          envFrom:
-            - secretRef:
-                name: ${app}-volsync-secret
-          resources: {}
-        - name: r2
-          image: docker.io/restic/restic:latest
-          args: ["unlock", "--remove-all"]
-          envFrom:
-            - secretRef:
-                name: ${app}-volsync-r2-secret
-          resources: {}
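
This deleted two-container unlock Job is superseded by the new `unlock` task, which patches `spec.restic.unlock` on each ReplicationSource so the VolSync controller performs the unlock itself. A one-off equivalent for a single application might look like the following; the namespace and name are hypothetical, and the patch mirrors the task's xargs pipeline:

    # Bump spec.restic.unlock on one ReplicationSource; VolSync re-runs the
    # unlock whenever this value changes. Names are illustrative.
    kubectl --namespace default patch replicationsources plex \
      --type merge --field-manager=flux-client-side-apply \
      --patch "{\"spec\":{\"restic\":{\"unlock\":\"$(date +%s)\"}}}"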
@@ -1,14 +0,0 @@
-#!/usr/bin/env bash
-
-JOB=$1
-NAMESPACE="${2:-default}"
-CLUSTER="${3:-main}"
-
-[[ -z "${JOB}" ]] && echo "Job name not specified" && exit 1
-while true; do
-    STATUS="$(kubectl --context "${CLUSTER}" -n "${NAMESPACE}" get pod -l job-name="${JOB}" -o jsonpath='{.items[*].status.phase}')"
-    if [ "${STATUS}" == "Pending" ]; then
-        break
-    fi
-    sleep 1
-done
@@ -1,22 +0,0 @@
-#!/usr/bin/env bash
-
-APP=$1
-NAMESPACE="${2:-default}"
-CLUSTER="${3:-theshire}"
-
-is_deployment() {
-    kubectl --context "${CLUSTER}" -n "${NAMESPACE}" get deployment "${APP}" >/dev/null 2>&1
-}
-
-is_statefulset() {
-    kubectl --context "${CLUSTER}" -n "${NAMESPACE}" get statefulset "${APP}" >/dev/null 2>&1
-}
-
-if is_deployment; then
-    echo "deployment.apps/${APP}"
-elif is_statefulset; then
-    echo "statefulset.apps/${APP}"
-else
-    echo "No deployment or statefulset found for ${APP}"
-    exit 1
-fi
@@ -1,26 +0,0 @@
----
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: ${job}
-  namespace: ${ns}
-spec:
-  ttlSecondsAfterFinished: 3600
-  template:
-    spec:
-      automountServiceAccountToken: false
-      restartPolicy: OnFailure
-      containers:
-        - name: main
-          image: docker.io/library/alpine:latest
-          command: ["/bin/sh", "-c", "cd /config; find . -delete"]
-          volumeMounts:
-            - name: config
-              mountPath: /config
-          securityContext:
-            privileged: true
-          resources: {}
-      volumes:
-        - name: config
-          persistentVolumeClaim:
-            claimName: ${claim}
@@ -14,13 +14,17 @@ spec:
   rules:
     - name: set-volsync-movers-custom-config
       match:
-        any:
+        all:
           - resources:
               kinds: ["batch/v1/Job"]
-              namespaces: ["default"]
               selector:
                 matchLabels:
                   app.kubernetes.io/created-by: volsync
+          - resources:
+              kinds: ["batch/v1/Job"]
+              namespaceSelector:
+                matchLabels:
+                  volsync.backube/privileged-movers: "true"
       mutate:
         patchStrategicMerge:
           spec:
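
Since `any:` became `all:`, both resource filters must now match: the Job has to carry the VolSync created-by label and live in a namespace labeled `volsync.backube/privileged-movers: "true"`, instead of being limited to the hard-coded `default` namespace. A namespace would opt in with something like:

    # Illustrative: label a namespace (the name is hypothetical) so the
    # policy's new namespaceSelector matches VolSync mover Jobs created in it.
    kubectl label namespace media volsync.backube/privileged-movers=true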