Compare commits
1 commit: c1b665fac2...9c7004417e
34 changed files with 185 additions and 249 deletions
.gitignore (vendored, 3 deletions)
@@ -24,6 +24,3 @@ omniconfig.yaml
 *.pem
 *.secrets
 config.xml
-
-# syncthing
-**/*sync-conflict*
Renovate config
@@ -7,11 +7,7 @@
   "automerge": true,
   "automergeType": "branch",
   "matchUpdateTypes": ["digest"],
-  "matchPackagePrefixes": [
-    "ghcr.io/onedr0p",
-    "ghcr.io/bjw-s",
-    "ghcr.io/bjw-s-labs"
-  ],
+  "matchPackagePrefixes": ["ghcr.io/onedr0p", "ghcr.io/bjw-s", "ghcr.io/bjw-s-labs"],
   "ignoreTests": true
 },
 {
Talos Taskfile (.taskfiles/talos)
@@ -2,122 +2,135 @@
 # yaml-language-server: $schema=https://taskfile.dev/schema.json
 version: "3"

+vars:
+  RESOURCES_DIR: "{{.ROOT_DIR}}/.taskfiles/talos/resources"
+  CONTROLLER:
+    sh: talosctl --context {{.cluster}} config info --output json | jq --raw-output '.endpoints[]' | shuf -n 1
+  cluster: theshire
+
 tasks:
   bootstrap:
     desc: Bootstrap Talos
     summary: |
       Args:
-        CONTROLLER: Controller node to run command against (required)
-    prompt: Bootstrap Talos on the '{{.K8S_CLUSTER}}' cluster... continue?
+        cluster: Cluster to run command against (default: theshire)
+        controller: Controller node to run command against (required) (IP/DNS)
+    dotenv: ["{{.RESOURCES_DIR}}/.env"]
+    prompt: Bootstrap Talos on the cluster... continue?
     cmds:
       - task: bootstrap-etcd
         vars: &vars
-          CONTROLLER: "{{.CONTROLER}}"
+          controller: "{{.controller}}"
       - task: fetch-kubeconfig
         vars: *vars
       - task: bootstrap-integrations
         vars: *vars
     requires:
       vars:
-        - K8S_CLUSTER
-        - CONTROLLER
+        - controller

   bootstrap-etcd:
     desc: Bootstrap Etcd
-    cmd: until talosctl --nodes {{.CONTROLLER}} bootstrap; do sleep 10; done
+    dotenv: ["{{.RESOURCES_DIR}}/.env"]
+    cmd: until talosctl --context $CLUSTER --nodes {{.controller}} bootstrap; do sleep 10; done
     requires:
       vars:
-        - CONTROLLER
+        - controller

   bootstrap-integrations:
     desc: Bootstrap core integrations needed for Talos
+    dotenv: ["{{.RESOURCES_DIR}}/.env"]
     cmds:
-      - until kubectl wait --for=condition=Ready=False nodes --all --timeout=600s; do sleep 10; done
-      - helmfile --kube-context {{.K8S_CLUSTER}} --file {{.K8S_CLUSTER_DIR}}/bootstrap/helmfile.yaml apply --skip-diff-on-install --suppress-diff
-      - until kubectl wait --for=condition=Ready nodes --all --timeout=600s; do sleep 10; done
-    requires:
-      vars:
-        - K8S_CLUSTER
+      - until kubectl --context $CLUSTER wait --for=condition=Ready=False nodes --all --timeout=600s; do sleep 10; done
+      - helmfile --kube-context $CLUSTER --file {{.KUBERNETES_DIR}}/bootstrap/helmfile.yaml apply --skip-diff-on-install --suppress-diff
+      - until kubectl --context $CLUSTER wait --for=condition=Ready nodes --all --timeout=600s; do sleep 10; done
     preconditions:
       - which helmfile
-      - sh: kubectl config get-contexts {{.K8S_CLUSTER}}
-        msg: "Kubectl context {{.K8S_CLUSTER}} not found"
-      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/helmfile.yaml
+      - sh: kubectl config get-contexts $CLUSTER
+        msg: "Kubectl context $CLUSTER not found"
+      - test -f {{.KUBERNETES_DIR}}/bootstrap/helmfile.yaml

   fetch-kubeconfig:
     desc: Fetch kubeconfig from Talos controllers
+    dotenv: ["{{.RESOURCES_DIR}}/.env"]
+    env: *vars
     cmd: |
-      talosctl kubeconfig --nodes {{.CONTROLLER}} \
-        --force --force-context-name {{.K8S_CLUSTER}} {{.K8S_CLUSTER_DIR}}
-    requires:
-      vars:
-        - K8S_CLUSTER
+      talosctl --context $CLUSTER kubeconfig --nodes {{ .CONTROLLER }} \
+        --force --force-context-name $CLUSTER {{.ROOT_DIR}}/kubeconfig
+    preconditions:
+      - talosctl config get-contexts | grep $CLUSTER

   generate-clusterconfig:
     desc: Generate clusterconfig for Talos
+    dotenv: ["{{.RESOURCES_DIR}}/.env"]
     cmds:
       - talhelper genconfig
-          --env-file {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talenv.sops.yaml
-          --secret-file {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talsecret.sops.yaml
-          --config-file {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml
-          --out-dir {{.K8S_CLUSTER_DIR}}/bootstrap/talos/clusterconfig
-    requires:
-      vars:
-        - K8S_CLUSTER
+          --env-file {{.KUBERNETES_DIR}}/bootstrap/talos/talenv.sops.yaml
+          --secret-file {{.KUBERNETES_DIR}}/bootstrap/talos/talsecret.sops.yaml
+          --config-file {{.KUBERNETES_DIR}}/bootstrap/talos/talconfig.yaml
+          --out-dir {{.KUBERNETES_DIR}}/bootstrap/talos/clusterconfig
     preconditions:
-      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talenv.sops.yaml
-      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talsecret.sops.yaml
-      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml
+      - which talhelper
+      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/talenv.sops.yaml
+      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/talsecret.sops.yaml
+      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/talconfig.yaml

   upgrade:
     desc: Upgrade Talos version for a node
+    dotenv: ["{{.RESOURCES_DIR}}/.env"]
     vars:
       TALOS_VERSION:
         sh: |
-          yq -r ".talosVersion" {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml
+          yq -r ".talosVersion" {{.KUBERNETES_DIR}}/bootstrap/talos/talconfig.yaml
       TALOS_IMAGE:
         sh: |
           talhelper genurl installer \
-            --env-file {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talenv.sops.yaml \
-            --config-file {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml \
-            | grep {{.NODE}} \
-            | awk '{split($0,u," "); print u[2]}'
+            --env-file {{.KUBERNETES_DIR}}/bootstrap/talos/talenv.sops.yaml \
+            --config-file {{.KUBERNETES_DIR}}/bootstrap/talos/talconfig.yaml
     cmds:
-      - talosctl upgrade -n {{.NODE}} --image {{.TALOS_IMAGE }}
+      - talosctl --context $CLUSTER upgrade -n {{.node}} --image {{.TALOS_IMAGE }}
     requires:
       vars:
-        - K8S_CLUSTER
-        - NODE
+        - node
     preconditions:
-      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talenv.sops.yaml
-      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml
-      - msg: "Talos image could not be determined for node={{.NODE}}"
+      - which talhelper
+      - talosctl config get-contexts | grep $CLUSTER
+      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/talenv.sops.yaml
+      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/talconfig.yaml
+      - msg: "Talos image could not be determined for {{.node}}"
         sh: 'test -n "{{.TALOS_IMAGE}}"'

   upgrade-k8s:
     desc: Upgrade Kubernetes version for a Talos cluster
-    silent: false
+    dotenv: ["{{.RESOURCES_DIR}}/.env"]
     vars:
       KUBERNETES_VERSION:
         sh: |
-          yq -r ".kubernetesVersion" {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml
-      TALOS_CONTROLLER:
-        sh: talosctl config info --output json | jq --raw-output '.endpoints[]' | shuf -n 1
+          yq -r ".kubernetesVersion" {{.KUBERNETES_DIR}}/bootstrap/talos/talconfig.yaml
+      CONTROLPLANE_NODE:
+        sh: |
+          talosctl --context $CLUSTER config info \
+            | grep Endpoints: \
+            | awk '{split($0,u," "); print u[2]}' \
+            | sed -E 's/,//'
     cmds:
-      - until kubectl wait --timeout=5m --for=condition=Complete jobs --all --all-namespaces; do sleep 10; done
-      - talosctl upgrade-k8s -n {{.TALOS_CONTROLLER}} --to {{.KUBERNETES_VERSION}}
-    requires:
-      vars:
-        - K8S_CLUSTER
+      - talosctl upgrade-k8s -n {{.CONTROLPLANE_NODE}} --to {{.KUBERNETES_VERSION}}
     preconditions:
-      - talosctl config info &>/dev/null
-      - talosctl --nodes {{.TALOS_CONTROLLER}} get machineconfig &>/dev/null
+      - which talhelper
+      - talosctl config get-contexts | grep $CLUSTER
+      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/talenv.sops.yaml
+      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/talconfig.yaml
+      - msg: "Kubernetes version could not be determined for cluster $CLUSTER"
+        sh: 'test -n "{{.KUBERNETES_VERSION}}"'
+      - msg: "Control plane node could not be determined for cluster $CLUSTER"
+        sh: 'test -n "{{.CONTROLPLANE_NODE}}"'

   apply-clusterconfig:
     desc: Apply clusterconfig for a Talos cluster
+    dotenv: ["{{.RESOURCES_DIR}}/.env"]
     vars:
       CLUSTERCONFIG_FILES:
-        sh: find {{.K8S_CLUSTER_DIR}}/bootstrap/talos/clusterconfig -type f -name '*.yaml' -printf '%f\n'
+        sh: find {{.KUBERNETES_DIR}}/bootstrap/talos/clusterconfig -type f -name '*.yaml' -printf '%f\n'
     cmds:
       - for:
           var: CLUSTERCONFIG_FILES
@@ -125,24 +138,29 @@ tasks:
         vars:
           filename: "{{.ITEM}}"
           hostname: |-
-            {{ trimPrefix (printf "%s-" .K8S_CLUSTER) .ITEM | trimSuffix ".yaml" }}
-          DRY_RUN: "{{ .DRY_RUN }}"
+            {{ trimPrefix (printf "%s-" .cluster) .ITEM | trimSuffix ".yaml" }}
+          dry_run: "{{ .dry_run }}"
-    requires:
-      vars:
-        - K8S_CLUSTER
+    preconditions:
+      - talosctl config get-contexts | grep $CLUSTER
+      - test -d {{.KUBERNETES_DIR}}/bootstrap/talos/clusterconfig

   _apply-machineconfig:
     internal: true
     desc: Apply a single Talos machineConfig to a Talos node
+    dotenv: ["{{.RESOURCES_DIR}}/.env"]
     cmds:
-      - talosctl apply-config
+      - talosctl --context theshire apply-config
           --nodes "{{.hostname}}"
-          --file "{{.K8S_CLUSTER_DIR}}/bootstrap/talos/clusterconfig/{{.filename}}"
-          {{ if eq "true" .DRY_RUN }}--dry-run{{ end }}
+          --file "{{.KUBERNETES_DIR}}/bootstrap/talos/clusterconfig/{{.filename}}"
+          {{ if eq "true" .dry_run }}--dry-run{{ end }}
+          #--insecure
     requires:
       vars:
-        - K8S_CLUSTER
         - hostname
         - filename
     preconditions:
-      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/clusterconfig/{{.filename}}
+      - talosctl config get-contexts | grep $CLUSTER
+      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/clusterconfig/{{.filename}}
+
+  version:
+    desc: Show Talos version
+    cmd: talosctl version
.taskfiles/talos/resources/.env (new file, 1 line)
@@ -0,0 +1 @@
+CLUSTER=theshire
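With CLUSTER now supplied by this .env file through each task's dotenv declaration, the Talos tasks no longer take a K8S_CLUSTER variable; the remaining inputs are lowercase task vars. A usage sketch of the reworked tasks (the talos: namespace and the controller address are illustrative assumptions, not taken from this diff):

  # $CLUSTER is read from .taskfiles/talos/resources/.env
  task talos:bootstrap controller=10.1.1.57
  task talos:generate-clusterconfig
  task talos:apply-clusterconfig dry_run=true
  task talos:upgrade node=10.1.1.57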
VolSync Taskfile (.taskfiles/volsync)
@@ -2,10 +2,11 @@
 # yaml-language-server: $schema=https://taskfile.dev/schema.json
 version: '3'

-# Taskfile used to manage certain VolSync tasks for a given application, limitations are as followed.
+# This taskfile is used to manage certain VolSync tasks for a given application, limitations are described below.
 # 1. Fluxtomization, HelmRelease, PVC, ReplicationSource all have the same name (e.g. plex)
 # 2. ReplicationSource and ReplicationDestination are a Restic repository
-# 3. Each application only has one PVC that is being replicated
+# 3. Applications are deployed as either a Kubernetes Deployment or StatefulSet
+# 4. Each application only has one PVC that is being replicated

 vars:
   VOLSYNC_RESOURCES_DIR: '{{.ROOT_DIR}}/.taskfiles/volsync/resources'
@@ -13,34 +14,39 @@ vars:
 tasks:

   state-*:
-    desc: Suspend or resume Volsync [CLUSTER=main]
+    desc: Suspend or Resume Volsync
+    summary: |-
+      CLUSTER: Cluster to run command against (default: main)
+      STATE: resume or suspend (required)
     cmds:
-      - flux --namespace flux-system {{.STATE}} kustomization volsync
-      - flux --namespace volsync-system {{.STATE}} helmrelease volsync
-      - kubectl --namespace volsync-system scale deployment volsync --replicas {{if eq .STATE "suspend"}}0{{else}}1{{end}}
+      # - until kubectl wait jobs --all --all-namespaces --for=condition=complete --timeout=5m &>/dev/null; do sleep 5; done
+      - flux {{.STATE}} kustomization volsync
+      - flux --namespace {{.NS}} {{.STATE}} helmrelease volsync
+      - kubectl --namespace {{.NS}} scale deployment --all --replicas {{if eq .STATE "suspend"}}0{{else}}1{{end}}
     vars:
+      NS: '{{.NS | default "volsync-system"}}'
       STATE: '{{index .MATCH 0}}'
     requires:
       vars: [CLUSTER]
-    preconditions:
-      - '[[ "{{.STATE}}" == "suspend" || "{{.STATE}}" == "resume" ]]'
-      - which flux kubectl

   unlock:
-    desc: Unlock all restic source repos [CLUSTER=main]
-    cmds:
-      - for: { var: SOURCES, split: "\n" }
-        cmd: kubectl --namespace {{splitList "," .ITEM | first}} patch --field-manager=flux-client-side-apply replicationsources {{splitList "," .ITEM | last}} --type merge --patch "{\"spec\":{\"restic\":{\"unlock\":\"{{now | unixEpoch}}\"}}}"
-    vars:
-      SOURCES:
-        sh: kubectl get replicationsources --all-namespaces --no-headers --output=jsonpath='{range .items[*]}{.metadata.namespace},{.metadata.name}{"\n"}{end}'
+    desc: Unlock all Restic repositories
+    summary: |-
+      CLUSTER: Cluster to run command against (default: main)
+    cmd: >
+      kubectl get replicationsources --all-namespaces --no-headers -A | awk '{print $1, $2}'
+      | xargs --max-procs=2 -l bash -c 'kubectl --namespace "$0" patch --field-manager=flux-client-side-apply replicationsources "$1" --type merge --patch "{\"spec\":{\"restic\":{\"unlock\":\"{{now | unixEpoch}}\"}}}"'
     requires:
       vars: [CLUSTER]
-    preconditions:
-      - which kubectl

+  # To run backup jobs in parallel for all replicationsources:
+  # - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:snapshot APP=$0 NS=$1'
   snapshot:
-    desc: Snapshot an app [CLUSTER=main] [NS=default] [APP=required]
+    desc: Snapshot an application
+    summary: |-
+      CLUSTER: Cluster to run command against (default: main)
+      NS: Namespace the application is in (default: default)
+      APP: Application to snapshot (required)
     cmds:
       - kubectl --namespace {{.NS}} patch replicationsources {{.APP}} --type merge -p '{"spec":{"trigger":{"manual":"{{now | unixEpoch}}"}}}'
       - until kubectl --namespace {{.NS}} get job/{{.JOB}} &>/dev/null; do sleep 5; done
@@ -52,34 +58,47 @@ tasks:
       vars: [CLUSTER, APP]
     preconditions:
       - kubectl --namespace {{.NS}} get replicationsources {{.APP}}
-      - which kubectl

+  # To run restore jobs in parallel for all replicationdestinations:
+  # - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:restore APP=$0 NS=$1'
   restore:
-    desc: Restore an app [CLUSTER=main] [NS=default] [APP=required] [PREVIOUS=required]
+    desc: Restore an application
+    summary: |-
+      CLUSTER: Cluster to run command against (default: main)
+      NS: Namespace the application is in (default: default)
+      APP: Application to restore (required)
+      PREVIOUS: Previous number of snapshots to restore (default: 2)
     cmds:
-      # Suspend
+      - task: .suspend
+      - task: .restore
+      - task: .resume
+    requires:
+      vars: [CLUSTER, APP]
+
+  .suspend:
+    internal: true
+    cmds:
       - flux --namespace flux-system suspend kustomization {{.APP}}
       - flux --namespace {{.NS}} suspend helmrelease {{.APP}}
       - kubectl --namespace {{.NS}} scale {{.CONTROLLER}}/{{.APP}} --replicas 0
       - kubectl --namespace {{.NS}} wait pod --for=delete --selector="app.kubernetes.io/name={{.APP}}" --timeout=5m
-      # Restore
-      - minijinja-cli {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2 | kubectl apply --server-side --filename -
-      - until kubectl --namespace {{.NS}} get job/volsync-dst-{{.APP}}-manual &>/dev/null; do sleep 5; done
-      - kubectl --namespace {{.NS}} wait job/volsync-dst-{{.APP}}-manual --for=condition=complete --timeout=120m
-      - kubectl --namespace {{.NS}} delete replicationdestination {{.APP}}-manual
-      # Resume
-      - flux --namespace flux-system resume kustomization {{.APP}}
-      - flux --namespace {{.NS}} resume helmrelease {{.APP}}
-      - flux --namespace {{.NS}} reconcile helmrelease {{.APP}} --force
-      - kubectl --namespace {{.NS}} wait pod --for=condition=ready --selector="app.kubernetes.io/name={{.APP}}" --timeout=5m
     vars:
       NS: '{{.NS | default "default"}}'
+      APP: '{{.APP}}'
       CONTROLLER:
         sh: kubectl --namespace {{.NS}} get deployment {{.APP}} &>/dev/null && echo deployment || echo statefulset
-    env:
-      NS: '{{.NS}}'
-      APP: '{{.APP}}'
-      PREVIOUS: '{{.PREVIOUS}}'
+
+  .restore:
+    internal: true
+    cmds:
+      - minijinja-cli --env --trim-blocks --lstrip-blocks --autoescape=none {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2 | kubectl apply --server-side --filename -
+      - until kubectl --namespace {{.NS}} get job/{{.JOB}} &>/dev/null; do sleep 5; done
+      - kubectl --namespace {{.NS}} wait job/{{.JOB}} --for=condition=complete --timeout=120m
+      - kubectl --namespace {{.NS}} delete replicationdestination {{.JOB}}
+    vars:
+      NS: '{{.NS | default "default"}}'
+      JOB: volsync-dst-{{.APP}}
+      PREVIOUS: '{{.PREVIOUS | default 2}}'
       CLAIM:
         sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.sourcePVC}"
       ACCESS_MODES:
@@ -90,8 +109,28 @@ tasks:
         sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.restic.moverSecurityContext.runAsUser}"
       PGID:
         sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.restic.moverSecurityContext.runAsGroup}"
-    requires:
-      vars: [CLUSTER, APP, PREVIOUS]
+    env:
+      NS: '{{.NS}}'
+      JOB: '{{.JOB}}'
+      APP: '{{.APP}}'
+      PREVIOUS: '{{.PREVIOUS}}'
+      CLAIM: '{{.CLAIM}}'
+      ACCESS_MODES: '{{.ACCESS_MODES}}'
+      STORAGE_CLASS_NAME: '{{.STORAGE_CLASS_NAME}}'
+      PUID: '{{.PUID}}'
+      PGID: '{{.PGID}}'
     preconditions:
       - test -f {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2
-      - which flux kubectl minijinja-cli
+
+  .resume:
+    internal: true
+    cmds:
+      - flux --namespace {{.NS}} resume helmrelease {{.APP}}
+      - flux --namespace flux-system resume kustomization {{.APP}}
+      - kubectl --namespace {{.NS}} scale {{.CONTROLLER}}/{{.APP}} --replicas 1
+      - kubectl --namespace {{.NS}} wait pod --for=condition=ready --selector="app.kubernetes.io/name={{.APP}}" --timeout=5m
+    vars:
+      NS: '{{.NS | default "default"}}'
+      APP: '{{.APP}}'
+      CONTROLLER:
+        sh: kubectl --namespace {{.NS}} get deployment {{.APP}} &>/dev/null && echo deployment || echo statefulset
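The restore flow is now composed of three internal tasks (.suspend, .restore, .resume) chained by restore, and the unlock loop collapses into a single kubectl | awk | xargs pipeline. A usage sketch of the public tasks (the volsync: namespace and the plex app name are illustrative, following the "e.g. plex" convention above):

  task volsync:state-suspend CLUSTER=main
  task volsync:unlock CLUSTER=main
  task volsync:snapshot CLUSTER=main NS=default APP=plex
  task volsync:restore CLUSTER=main NS=default APP=plex PREVIOUS=2
  task volsync:state-resume CLUSTER=main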
VolSync template: replicationdestination.yaml.j2
@@ -2,7 +2,7 @@
 apiVersion: volsync.backube/v1alpha1
 kind: ReplicationDestination
 metadata:
-  name: {{ ENV.APP }}-manual
+  name: {{ ENV.JOB }}
   namespace: {{ ENV.NS }}
 spec:
   trigger:
|
||||||
KUBERNETES_DIR: "{{.ROOT_DIR}}/kubernetes"
|
KUBERNETES_DIR: "{{.ROOT_DIR}}/kubernetes"
|
||||||
CLUSTER_SECRETS_FILE: "{{.CLUSTER_DIR}}/flux/vars/cluster-secrets.sops.env"
|
CLUSTER_SECRETS_FILE: "{{.CLUSTER_DIR}}/flux/vars/cluster-secrets.sops.env"
|
||||||
CLUSTER_SETTINGS_FILE: "{{.CLUSTER_DIR}}/flux/vars/cluster-settings.env"
|
CLUSTER_SETTINGS_FILE: "{{.CLUSTER_DIR}}/flux/vars/cluster-settings.env"
|
||||||
K8S_CLUSTER: '{{.K8S_CLUSTER | default "theshire"}}'
|
|
||||||
K8S_CLUSTER_DIR: '{{.KUBERNETES_DIR}}'
|
|
||||||
CLUSTER: '{{.CLUSTER | default "theshire"}}'
|
|
||||||
CLUSTER_DIR: '{{.KUBERNETES_DIR}}'
|
|
||||||
|
|
||||||
env:
|
env:
|
||||||
KUBECONFIG: "{{.ROOT_DIR}}/kubeconfig"
|
KUBECONFIG: "{{.ROOT_DIR}}/kubeconfig"
|
||||||
|
|
|
@ -35,7 +35,7 @@ spec:
|
||||||
app:
|
app:
|
||||||
image:
|
image:
|
||||||
repository: docker.io/ollama/ollama
|
repository: docker.io/ollama/ollama
|
||||||
tag: 0.4.1
|
tag: 0.3.14
|
||||||
env:
|
env:
|
||||||
- name: OLLAMA_HOST
|
- name: OLLAMA_HOST
|
||||||
value: 0.0.0.0
|
value: 0.0.0.0
|
||||||
|
|
|
HelmRelease: coder
@@ -9,7 +9,7 @@
   chart:
     spec:
      chart: coder
-      version: 2.17.2
+      version: 2.17.0
       sourceRef:
         kind: HelmRepository
         name: coder
HelmRelease values: autobrr
@@ -31,7 +31,7 @@ spec:
         app:
           image:
             repository: ghcr.io/autobrr/autobrr
-            tag: v1.49.0@sha256:dc2195ccabf8438a8f8eb0581c5e6d2a40c061754e57552bc4f67f1b20a71970
+            tag: v1.48.0@sha256:0ae19e3beedf491396e450b024c23e9e24df4d692286c0442a81fa699493def0
           env:
             AUTOBRR__CHECK_FOR_UPDATES: "false"
             AUTOBRR__HOST: 0.0.0.0
HelmRelease values: home-assistant
@@ -36,7 +36,7 @@ spec:
         app:
           image:
             repository: ghcr.io/onedr0p/home-assistant
-            tag: 2024.11.1@sha256:a3dd7577c28771702b21f817ad86600056467c2c7f45d261a1e7241910ddc2e2
+            tag: 2024.10.4@sha256:d788b59a4ee584f7cbeee7cff896e922faa8f0673c83187045e77e0fc77c8457
           env:
             TZ: America/Chicago
           envFrom:
HelmRelease values: code-server (second hunk of the same release)
@@ -54,7 +54,7 @@ spec:
         code-server:
           image:
             repository: ghcr.io/coder/code-server
-            tag: 4.95.1@sha256:d9bc7797d997e1b199e333676732e075bac4bae276dc0fe1baece2e313edfa09
+            tag: 4.93.1@sha256:c69e398d1b64589b3b77a7becfd03f4ec524982def20e6bffbb51b1b839e72ba
           args: [
             "--auth", "none",
             "--user-data-dir", "/config/.vscode",
Kustomization resources (apps)
@@ -16,6 +16,7 @@ resources:
   - ./morphos/ks.yaml
   - ./omegabrr/ks.yaml
   - ./overseerr/ks.yaml
+  - ./piped/ks.yaml
   - ./plex/ks.yaml
   - ./prowlarr/ks.yaml
   - ./radarr/ks.yaml
|
||||||
app:
|
app:
|
||||||
image:
|
image:
|
||||||
repository: ghcr.io/jorenn92/maintainerr
|
repository: ghcr.io/jorenn92/maintainerr
|
||||||
tag: 2.2.1@sha256:13121a8292ef6db7560a931bf19b601cf3cc12df0a9dea9086b757798eea5b6d
|
tag: 2.2.0@sha256:fbb2c0341b8af502e4488f3664e34992f24947708c7dac10dcbee592f99a946c
|
||||||
env:
|
env:
|
||||||
TZ: America/Chicago
|
TZ: America/Chicago
|
||||||
resources:
|
resources:
|
||||||
|
|
|
@ -31,7 +31,7 @@ spec:
|
||||||
app:
|
app:
|
||||||
image:
|
image:
|
||||||
repository: ghcr.io/autobrr/omegabrr
|
repository: ghcr.io/autobrr/omegabrr
|
||||||
tag: v1.15.0@sha256:4f6099a76ff9d248e9f032e29c04a92b483f21456e46f3b01eb20399f4732ad0
|
tag: v1.14.0@sha256:6f65c7967609746662815933ecc8168c8c25a3b82d909f49833fcce2b47ee052
|
||||||
env:
|
env:
|
||||||
TZ: America/Chicago
|
TZ: America/Chicago
|
||||||
securityContext:
|
securityContext:
|
||||||
|
|
|
@ -33,7 +33,7 @@ spec:
|
||||||
app:
|
app:
|
||||||
image:
|
image:
|
||||||
repository: ghcr.io/taxel/plextraktsync
|
repository: ghcr.io/taxel/plextraktsync
|
||||||
tag: 0.32.1
|
tag: 0.32.0
|
||||||
args:
|
args:
|
||||||
- sync
|
- sync
|
||||||
env:
|
env:
|
||||||
|
|
|
@ -32,7 +32,7 @@ spec:
|
||||||
app:
|
app:
|
||||||
image:
|
image:
|
||||||
repository: ghcr.io/koush/scrypted
|
repository: ghcr.io/koush/scrypted
|
||||||
tag: v0.123.1-jammy-nvidia
|
tag: v0.123.0-jammy-nvidia
|
||||||
probes:
|
probes:
|
||||||
liveness:
|
liveness:
|
||||||
enabled: true
|
enabled: true
|
||||||
|
|
|
@ -1,67 +0,0 @@
|
||||||
---
|
|
||||||
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json
|
|
||||||
apiVersion: helm.toolkit.fluxcd.io/v2
|
|
||||||
kind: HelmRelease
|
|
||||||
metadata:
|
|
||||||
name: generic-device-plugin
|
|
||||||
spec:
|
|
||||||
interval: 30m
|
|
||||||
chart:
|
|
||||||
spec:
|
|
||||||
chart: app-template
|
|
||||||
version: 3.5.1
|
|
||||||
sourceRef:
|
|
||||||
kind: HelmRepository
|
|
||||||
name: bjw-s
|
|
||||||
namespace: flux-system
|
|
||||||
driftDetection:
|
|
||||||
mode: enabled
|
|
||||||
install:
|
|
||||||
remediation:
|
|
||||||
retries: 3
|
|
||||||
upgrade:
|
|
||||||
cleanupOnFail: true
|
|
||||||
remediation:
|
|
||||||
strategy: rollback
|
|
||||||
retries: 3
|
|
||||||
values:
|
|
||||||
defaultPodOptions:
|
|
||||||
priorityClassName: system-node-critical
|
|
||||||
controllers:
|
|
||||||
generic-device-plugin:
|
|
||||||
type: daemonset
|
|
||||||
strategy: RollingUpdate
|
|
||||||
annotations:
|
|
||||||
reloader.stakater.com/auto: "true"
|
|
||||||
containers:
|
|
||||||
generic-device-plugin:
|
|
||||||
image:
|
|
||||||
repository: ghcr.io/squat/generic-device-plugin
|
|
||||||
tag: latest@sha256:ba6f0b4cf6c858d6ad29ba4d32e4da11638abbc7d96436bf04f582a97b2b8821
|
|
||||||
args:
|
|
||||||
- --config=/config/config.yml
|
|
||||||
ports:
|
|
||||||
- containerPort: 8080
|
|
||||||
name: http
|
|
||||||
securityContext:
|
|
||||||
allowPrivilegeEscalation: false
|
|
||||||
readOnlyRootFilesystem: true
|
|
||||||
capabilities: { drop: ["ALL"] }
|
|
||||||
persistence:
|
|
||||||
config:
|
|
||||||
type: configMap
|
|
||||||
name: generic-device-plugin-configmap
|
|
||||||
globalMounts:
|
|
||||||
- path: /config/config.yml
|
|
||||||
subPath: config.yml
|
|
||||||
readOnly: true
|
|
||||||
dev:
|
|
||||||
type: hostPath
|
|
||||||
hostPath: /dev
|
|
||||||
globalMounts:
|
|
||||||
- path: /dev
|
|
||||||
device-plugin:
|
|
||||||
type: hostPath
|
|
||||||
hostPath: /var/lib/kubelet/device-plugins
|
|
||||||
globalMounts:
|
|
||||||
- path: /var/lib/kubelet/device-plugins
|
|
|
@ -1,12 +0,0 @@
|
||||||
---
|
|
||||||
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
|
|
||||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
|
||||||
kind: Kustomization
|
|
||||||
resources:
|
|
||||||
- ./helmrelease.yaml
|
|
||||||
configMapGenerator:
|
|
||||||
- name: generic-device-plugin-configmap
|
|
||||||
files:
|
|
||||||
- ./resources/config.yml
|
|
||||||
generatorOptions:
|
|
||||||
disableNameSuffixHash: true
|
|
|
@ -1,9 +0,0 @@
|
||||||
---
|
|
||||||
log-level: info
|
|
||||||
domain: kernel.org
|
|
||||||
devices:
|
|
||||||
- name: tun
|
|
||||||
groups:
|
|
||||||
- count: 1000
|
|
||||||
paths:
|
|
||||||
- path: /dev/net/tun
|
|
|
@ -1,20 +0,0 @@
|
||||||
---
|
|
||||||
# yaml-language-server: $schema=https://ks.hsn.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: &app generic-device-plugin
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
targetNamespace: kube-system
|
|
||||||
commonMetadata:
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: *app
|
|
||||||
path: "./kubernetes/apps/kube-system/generic-device-plugin/app"
|
|
||||||
prune: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: theshire
|
|
||||||
wait: true
|
|
||||||
interval: 30m
|
|
||||||
timeout: 5m
|
|
|
@ -12,7 +12,6 @@ resources:
|
||||||
- ./descheduler/ks.yaml
|
- ./descheduler/ks.yaml
|
||||||
- ./dnsimple-webhook-rbac.yaml
|
- ./dnsimple-webhook-rbac.yaml
|
||||||
- ./fstrim/ks.yaml
|
- ./fstrim/ks.yaml
|
||||||
- ./generic-device-plugin/ks.yaml
|
|
||||||
- ./kubelet-csr-approver/ks.yaml
|
- ./kubelet-csr-approver/ks.yaml
|
||||||
- ./metrics-server/ks.yaml
|
- ./metrics-server/ks.yaml
|
||||||
- ./node-feature-discovery/ks.yaml
|
- ./node-feature-discovery/ks.yaml
|
||||||
|
|
|
@ -36,7 +36,7 @@ spec:
|
||||||
app:
|
app:
|
||||||
image:
|
image:
|
||||||
repository: docker.io/cloudflare/cloudflared
|
repository: docker.io/cloudflare/cloudflared
|
||||||
tag: 2024.11.0@sha256:2c78df02e1f23ab19d4c636921f05b9ebec163b887e946f98e22e56254a5540f
|
tag: 2024.10.1@sha256:52b9529db08f7ef827a2bce04b91945b475c651e46f583c30b70dd6773262ae3
|
||||||
env:
|
env:
|
||||||
NO_AUTOUPDATE: "true"
|
NO_AUTOUPDATE: "true"
|
||||||
TUNNEL_CRED_FILE: /etc/cloudflared/creds/credentials.json
|
TUNNEL_CRED_FILE: /etc/cloudflared/creds/credentials.json
|
||||||
|
|
|
@ -247,7 +247,7 @@ spec:
|
||||||
victoria-agent:
|
victoria-agent:
|
||||||
# renovate: depName="VictoriaMetrics - vmagent"
|
# renovate: depName="VictoriaMetrics - vmagent"
|
||||||
gnetId: 12683
|
gnetId: 12683
|
||||||
revision: 21
|
revision: 20
|
||||||
datasource: Prometheus
|
datasource: Prometheus
|
||||||
victoria-alert:
|
victoria-alert:
|
||||||
# renovate: depName="VictoriaMetrics - vmalert"
|
# renovate: depName="VictoriaMetrics - vmalert"
|
||||||
|
@ -262,7 +262,7 @@ spec:
|
||||||
victoria-single:
|
victoria-single:
|
||||||
# renovate: depName="VictoriaMetrics - single-node"
|
# renovate: depName="VictoriaMetrics - single-node"
|
||||||
gnetId: 10229
|
gnetId: 10229
|
||||||
revision: 37
|
revision: 36
|
||||||
datasource: Prometheus
|
datasource: Prometheus
|
||||||
postgres:
|
postgres:
|
||||||
crunchy-pgbackrest:
|
crunchy-pgbackrest:
|
||||||
|
|
|
@ -9,7 +9,7 @@ spec:
|
||||||
chart:
|
chart:
|
||||||
spec:
|
spec:
|
||||||
chart: prometheus-operator-crds
|
chart: prometheus-operator-crds
|
||||||
version: 16.0.0
|
version: 15.0.0
|
||||||
sourceRef:
|
sourceRef:
|
||||||
kind: HelmRepository
|
kind: HelmRepository
|
||||||
name: prometheus-community
|
name: prometheus-community
|
||||||
|
|
|
@ -43,7 +43,7 @@ spec:
|
||||||
app:
|
app:
|
||||||
image:
|
image:
|
||||||
repository: jesec/flood
|
repository: jesec/flood
|
||||||
tag: master@sha256:8a18a3509a6c1557b769873a1ef85dcd5fa4cbce1a939be2c6c87f97eb79de45
|
tag: master@sha256:8d04ec24abcc879f14e744e809520f7a7ec3c66395e1f6efa4179c9399803fbe
|
||||||
envFrom:
|
envFrom:
|
||||||
- secretRef:
|
- secretRef:
|
||||||
name: flood-secret
|
name: flood-secret
|
||||||
|
|
|
@ -10,7 +10,7 @@ spec:
|
||||||
chart:
|
chart:
|
||||||
spec:
|
spec:
|
||||||
chart: rook-ceph
|
chart: rook-ceph
|
||||||
version: v1.15.5
|
version: v1.15.4
|
||||||
sourceRef:
|
sourceRef:
|
||||||
kind: HelmRepository
|
kind: HelmRepository
|
||||||
name: rook-ceph
|
name: rook-ceph
|
||||||
|
|
|
@ -10,7 +10,7 @@ spec:
|
||||||
chart:
|
chart:
|
||||||
spec:
|
spec:
|
||||||
chart: rook-ceph-cluster
|
chart: rook-ceph-cluster
|
||||||
version: v1.15.5
|
version: v1.15.4
|
||||||
sourceRef:
|
sourceRef:
|
||||||
kind: HelmRepository
|
kind: HelmRepository
|
||||||
name: rook-ceph
|
name: rook-ceph
|
||||||
|
|
|
@ -19,7 +19,7 @@ releases:
|
||||||
- name: prometheus-operator-crds
|
- name: prometheus-operator-crds
|
||||||
namespace: observability
|
namespace: observability
|
||||||
chart: oci://ghcr.io/prometheus-community/charts/prometheus-operator-crds
|
chart: oci://ghcr.io/prometheus-community/charts/prometheus-operator-crds
|
||||||
version: 16.0.0
|
version: 15.0.0
|
||||||
- name: cilium
|
- name: cilium
|
||||||
namespace: kube-system
|
namespace: kube-system
|
||||||
chart: cilium/cilium
|
chart: cilium/cilium
|
||||||
|
|
|
@ -1,11 +1,9 @@
|
||||||
---
|
---
|
||||||
# yaml-language-server: $schema=https://raw.githubusercontent.com/budimanjojo/talhelper/master/pkg/config/schemas/talconfig.json
|
# yaml-language-server: $schema=https://ks.hsn.dev/talconfig.json
|
||||||
clusterName: theshire
|
clusterName: theshire
|
||||||
|
|
||||||
# renovate: datasource=github-releases depName=siderolabs/talos
|
|
||||||
talosVersion: v1.8.1
|
talosVersion: v1.8.1
|
||||||
# renovate: datasource=docker depName=ghcr.io/siderolabs/kubelet
|
kubernetesVersion: 1.30.2
|
||||||
kubernetesVersion: 1.31.2
|
|
||||||
endpoint: "https://10.1.1.57:6444"
|
endpoint: "https://10.1.1.57:6444"
|
||||||
|
|
||||||
additionalApiServerCertSans:
|
additionalApiServerCertSans:
|
||||||
|
@ -171,8 +169,8 @@ worker:
|
||||||
fs.inotify.max_queued_events: "65536"
|
fs.inotify.max_queued_events: "65536"
|
||||||
fs.inotify.max_user_instances: "8192"
|
fs.inotify.max_user_instances: "8192"
|
||||||
fs.inotify.max_user_watches: "524288"
|
fs.inotify.max_user_watches: "524288"
|
||||||
net.core.rmem_max: "7500000"
|
net.core.rmem_max: "2500000"
|
||||||
net.core.wmem_max: "7500000"
|
net.core.wmem_max: "2500000"
|
||||||
- &nfsMountOptions |-
|
- &nfsMountOptions |-
|
||||||
machine:
|
machine:
|
||||||
files:
|
files:
|
||||||
|
|
|
@ -19,6 +19,5 @@ pkgs.mkShell {
|
||||||
age
|
age
|
||||||
mqttui
|
mqttui
|
||||||
kustomize
|
kustomize
|
||||||
yq-go
|
|
||||||
];
|
];
|
||||||
}
|
}
|
||||||
|
|