Compare commits

...

44 commits

SHA1 Message Date
c1b665fac2 Update chart coder to 2.17.2 2024-11-12 19:34:03 +00:00
761690ba5e Merge pull request 'Update image ghcr.io/siderolabs/kubelet to v1.31.2' (#838) from renovate/ghcr.io-siderolabs-kubelet-1.x into main
Reviewed-on: #838
2024-11-08 19:40:40 -06:00
cb2e0d24c8 Merge pull request 'Update image ghcr.io/onedr0p/home-assistant to v2024.11.1' (#841) from renovate/ghcr.io-onedr0p-home-assistant-2024.x into main
Reviewed-on: #841
2024-11-08 19:08:54 -06:00
bc687b28fd Merge pull request 'Update image docker.io/ollama/ollama to v0.4.1' (#840) from renovate/docker.io-ollama-ollama-0.x into main
Reviewed-on: #840
2024-11-08 19:07:48 -06:00
6c6ea27957 Merge pull request 'Update image ghcr.io/autobrr/autobrr to v1.49.0' (#834) from renovate/ghcr.io-autobrr-autobrr-1.x into main
Reviewed-on: #834
2024-11-08 19:07:33 -06:00
1166d4d687 Update image ghcr.io/onedr0p/home-assistant to v2024.11.1 2024-11-09 01:06:18 +00:00
ec4f619367 Update image docker.io/ollama/ollama to v0.4.1 2024-11-09 01:06:15 +00:00
d870c79589 Merge pull request 'Update image jesec/flood to 8a18a35' (#824) from renovate/jesec-flood-master into main
Reviewed-on: #824
2024-11-08 18:56:08 -06:00
96179f13a2 Merge pull request 'Update image ghcr.io/autobrr/omegabrr to v1.15.0' (#825) from renovate/ghcr.io-autobrr-omegabrr-1.x into main
Reviewed-on: #825
2024-11-08 18:55:33 -06:00
2c348267c5 Merge pull request 'Update image ghcr.io/coder/code-server to v4.95.1' (#835) from renovate/ghcr.io-coder-code-server-4.x into main
Reviewed-on: #835
2024-11-08 18:55:07 -06:00
8222c32fe0 Merge pull request 'Update image prometheus-operator-crds to v16' (#836) from renovate/prometheus-operator-crds-16.x into main
Reviewed-on: #836
2024-11-08 18:54:44 -06:00
01f3eaa9fa Merge pull request 'Update image ghcr.io/koush/scrypted to v0.123.1' (#837) from renovate/ghcr.io-koush-scrypted-0.x into main
Reviewed-on: #837
2024-11-08 18:54:28 -06:00
c51bd020bd Merge pull request 'Update image ghcr.io/taxel/plextraktsync to v0.32.1' (#839) from renovate/ghcr.io-taxel-plextraktsync-0.x into main
Reviewed-on: #839
2024-11-08 18:53:48 -06:00
1f87e3c3db Update image ghcr.io/taxel/plextraktsync to v0.32.1 2024-11-08 19:06:35 +00:00
20a2e63b30 Update image prometheus-operator-crds to v16 2024-11-08 18:21:08 +00:00
8ed33b3671 Update image ghcr.io/siderolabs/kubelet to v1.31.2 2024-11-08 18:20:42 +00:00
2de07ac885 renovate test 2024-11-08 12:13:27 -06:00
f1c79adc59 Update image ghcr.io/koush/scrypted to v0.123.1 2024-11-08 18:06:32 +00:00
97829c6809 ignore sync-conflicts 2024-11-07 22:20:12 -06:00
14287e4cad Merge pull request 'Update image ghcr.io/jorenn92/maintainerr to v2.2.1' (#833) from renovate/ghcr.io-jorenn92-maintainerr-2.x into main
Reviewed-on: #833
2024-11-07 16:08:38 -06:00
7a72a530a7 undeploy piped 2024-11-07 16:07:58 -06:00
c282512a8b add generic-device-plugin 2024-11-07 15:49:06 -06:00
0d547458d3 format 2024-11-07 15:45:03 -06:00
ca43a3f4b3 Merge pull request 'Update image docker.io/cloudflare/cloudflared to v2024.11.0' (#826) from renovate/docker.io-cloudflare-cloudflared-2024.x into main
Reviewed-on: #826
2024-11-07 14:48:17 -06:00
851884bd94 Update image ghcr.io/coder/code-server to v4.95.1 2024-11-07 20:06:55 +00:00
2a06b673fb Update image ghcr.io/autobrr/autobrr to v1.49.0 2024-11-07 20:06:47 +00:00
dbb411bc42 Update image ghcr.io/jorenn92/maintainerr to v2.2.1 2024-11-07 19:07:54 +00:00
53fc1d997f Merge pull request 'Update image docker.io/ollama/ollama to v0.4.0' (#827) from renovate/docker.io-ollama-ollama-0.x into main
Reviewed-on: #827
2024-11-07 12:57:01 -06:00
270896cbd6 Merge pull request 'Update Rook Ceph group to v1.15.5 (patch)' (#829) from renovate/patch-rook-ceph into main
Reviewed-on: #829
2024-11-07 12:55:25 -06:00
b64647cdc2 Merge pull request 'Update image ghcr.io/onedr0p/home-assistant to v2024.11.0' (#828) from renovate/ghcr.io-onedr0p-home-assistant-2024.x into main
Reviewed-on: #828
2024-11-07 12:12:39 -06:00
7f096e8b16 Merge pull request 'Update dashboard VictoriaMetrics - single-node ( 36 → 37 )' (#830) from renovate/victoriametrics-single-node-37.x into main
Reviewed-on: #830
2024-11-07 12:11:53 -06:00
fd95d435df Merge pull request 'Update dashboard VictoriaMetrics - vmagent ( 20 → 21 )' (#831) from renovate/victoriametrics-vmagent-21.x into main
Reviewed-on: #831
2024-11-07 12:11:43 -06:00
191678bc36 Update image ghcr.io/onedr0p/home-assistant to v2024.11.0 2024-11-07 10:37:02 +00:00
8a369a96f0 Update Rook Ceph group to v1.15.5 2024-11-06 21:06:19 +00:00
4332d24615 Update dashboard VictoriaMetrics - vmagent ( 20 → 21 ) 2024-11-06 17:06:46 +00:00
6a3b358f26 Update dashboard VictoriaMetrics - single-node ( 36 → 37 ) 2024-11-06 17:06:42 +00:00
139260eec1 Update image docker.io/ollama/ollama to v0.4.0 2024-11-06 17:06:36 +00:00
af097c7dd3 upgrade volsync taskfile 2024-11-06 10:46:07 -06:00
3668207a96 upgrade to talos v1.8.2 2024-11-06 10:45:46 -06:00
50833f2dde Update image docker.io/cloudflare/cloudflared to v2024.11.0 2024-11-06 16:11:41 +00:00
fddcb0198d update to k8s 1.31.1 2024-11-06 08:04:23 -06:00
b49ed58d67 update talos taskfile and yq version 2024-11-06 08:04:05 -06:00
366747cfd1 Update image ghcr.io/autobrr/omegabrr to v1.15.0 2024-11-06 12:05:44 +00:00
5ae9e7a310 Update image jesec/flood to 8a18a35 2024-11-06 05:35:26 +00:00
34 changed files with 249 additions and 185 deletions

.gitignore vendored
View file

@@ -24,3 +24,6 @@ omniconfig.yaml
 *.pem
 *.secrets
 config.xml
+# syncthing
+**/*sync-conflict*
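For context, Syncthing writes duplicated copies with a sync-conflict marker in the filename, so the new pattern keeps hypothetical leftovers like the following out of the repo (example filename only, not from the change itself):

    config.sync-conflict-20241107-221530-ABCD123.xml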

View file

@@ -7,7 +7,11 @@
   "automerge": true,
   "automergeType": "branch",
   "matchUpdateTypes": ["digest"],
-  "matchPackagePrefixes": ["ghcr.io/onedr0p", "ghcr.io/bjw-s", "ghcr.io/bjw-s-labs"],
+  "matchPackagePrefixes": [
+    "ghcr.io/onedr0p",
+    "ghcr.io/bjw-s",
+    "ghcr.io/bjw-s-labs"
+  ],
   "ignoreTests": true
 },
 {

View file

@@ -2,135 +2,122 @@
 # yaml-language-server: $schema=https://taskfile.dev/schema.json
 version: "3"
-vars:
-  RESOURCES_DIR: "{{.ROOT_DIR}}/.taskfiles/talos/resources"
-  CONTROLLER:
-    sh: talosctl --context {{.cluster}} config info --output json | jq --raw-output '.endpoints[]' | shuf -n 1
-  cluster: theshire
 tasks:
   bootstrap:
     desc: Bootstrap Talos
     summary: |
       Args:
-        cluster: Cluster to run command against (default: theshire)
-        controller: Controller node to run command against (required) (IP/DNS)
-    dotenv: ["{{.RESOURCES_DIR}}/.env"]
-    prompt: Bootstrap Talos on the cluster... continue?
+        CONTROLLER: Controller node to run command against (required)
+    prompt: Bootstrap Talos on the '{{.K8S_CLUSTER}}' cluster... continue?
     cmds:
       - task: bootstrap-etcd
         vars: &vars
-          controller: "{{.controller}}"
+          CONTROLLER: "{{.CONTROLER}}"
       - task: fetch-kubeconfig
         vars: *vars
       - task: bootstrap-integrations
         vars: *vars
     requires:
       vars:
-        - controller
+        - K8S_CLUSTER
+        - CONTROLLER
   bootstrap-etcd:
     desc: Bootstrap Etcd
-    dotenv: ["{{.RESOURCES_DIR}}/.env"]
-    cmd: until talosctl --context $CLUSTER --nodes {{.controller}} bootstrap; do sleep 10; done
+    cmd: until talosctl --nodes {{.CONTROLLER}} bootstrap; do sleep 10; done
     requires:
       vars:
-        - controller
+        - CONTROLLER
   bootstrap-integrations:
     desc: Bootstrap core integrations needed for Talos
-    dotenv: ["{{.RESOURCES_DIR}}/.env"]
     cmds:
-      - until kubectl --context $CLUSTER wait --for=condition=Ready=False nodes --all --timeout=600s; do sleep 10; done
-      - helmfile --kube-context $CLUSTER --file {{.KUBERNETES_DIR}}/bootstrap/helmfile.yaml apply --skip-diff-on-install --suppress-diff
-      - until kubectl --context $CLUSTER wait --for=condition=Ready nodes --all --timeout=600s; do sleep 10; done
+      - until kubectl wait --for=condition=Ready=False nodes --all --timeout=600s; do sleep 10; done
+      - helmfile --kube-context {{.K8S_CLUSTER}} --file {{.K8S_CLUSTER_DIR}}/bootstrap/helmfile.yaml apply --skip-diff-on-install --suppress-diff
+      - until kubectl wait --for=condition=Ready nodes --all --timeout=600s; do sleep 10; done
+    requires:
+      vars:
+        - K8S_CLUSTER
     preconditions:
       - which helmfile
-      - sh: kubectl config get-contexts $CLUSTER
-        msg: "Kubectl context $CLUSTER not found"
-      - test -f {{.KUBERNETES_DIR}}/bootstrap/helmfile.yaml
+      - sh: kubectl config get-contexts {{.K8S_CLUSTER}}
+        msg: "Kubectl context {{.K8S_CLUSTER}} not found"
+      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/helmfile.yaml
   fetch-kubeconfig:
     desc: Fetch kubeconfig from Talos controllers
-    dotenv: ["{{.RESOURCES_DIR}}/.env"]
-    env: *vars
     cmd: |
-      talosctl --context $CLUSTER kubeconfig --nodes {{ .CONTROLLER }} \
-        --force --force-context-name $CLUSTER {{.ROOT_DIR}}/kubeconfig
-    preconditions:
-      - talosctl config get-contexts | grep $CLUSTER
+      talosctl kubeconfig --nodes {{.CONTROLLER}} \
+        --force --force-context-name {{.K8S_CLUSTER}} {{.K8S_CLUSTER_DIR}}
+    requires:
+      vars:
+        - K8S_CLUSTER
   generate-clusterconfig:
     desc: Generate clusterconfig for Talos
-    dotenv: ["{{.RESOURCES_DIR}}/.env"]
     cmds:
       - talhelper genconfig
-        --env-file {{.KUBERNETES_DIR}}/bootstrap/talos/talenv.sops.yaml
-        --secret-file {{.KUBERNETES_DIR}}/bootstrap/talos/talsecret.sops.yaml
-        --config-file {{.KUBERNETES_DIR}}/bootstrap/talos/talconfig.yaml
-        --out-dir {{.KUBERNETES_DIR}}/bootstrap/talos/clusterconfig
+        --env-file {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talenv.sops.yaml
+        --secret-file {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talsecret.sops.yaml
+        --config-file {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml
+        --out-dir {{.K8S_CLUSTER_DIR}}/bootstrap/talos/clusterconfig
+    requires:
+      vars:
+        - K8S_CLUSTER
     preconditions:
-      - which talhelper
-      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/talenv.sops.yaml
-      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/talsecret.sops.yaml
-      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/talconfig.yaml
+      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talenv.sops.yaml
+      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talsecret.sops.yaml
+      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml
   upgrade:
     desc: Upgrade Talos version for a node
-    dotenv: ["{{.RESOURCES_DIR}}/.env"]
     vars:
       TALOS_VERSION:
         sh: |
-          yq -r ".talosVersion" {{.KUBERNETES_DIR}}/bootstrap/talos/talconfig.yaml
+          yq -r ".talosVersion" {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml
       TALOS_IMAGE:
         sh: |
           talhelper genurl installer \
-            --env-file {{.KUBERNETES_DIR}}/bootstrap/talos/talenv.sops.yaml \
-            --config-file {{.KUBERNETES_DIR}}/bootstrap/talos/talconfig.yaml
+            --env-file {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talenv.sops.yaml \
+            --config-file {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml \
+            | grep {{.NODE}} \
+            | awk '{split($0,u," "); print u[2]}'
     cmds:
-      - talosctl --context $CLUSTER upgrade -n {{.node}} --image {{.TALOS_IMAGE }}
+      - talosctl upgrade -n {{.NODE}} --image {{.TALOS_IMAGE }}
     requires:
      vars:
-        - node
+        - K8S_CLUSTER
+        - NODE
     preconditions:
-      - which talhelper
-      - talosctl config get-contexts | grep $CLUSTER
-      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/talenv.sops.yaml
-      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/talconfig.yaml
-      - msg: "Talos image could not be determined for {{.node}}"
+      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talenv.sops.yaml
+      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml
+      - msg: "Talos image could not be determined for node={{.NODE}}"
         sh: 'test -n "{{.TALOS_IMAGE}}"'
   upgrade-k8s:
     desc: Upgrade Kubernetes version for a Talos cluster
-    dotenv: ["{{.RESOURCES_DIR}}/.env"]
+    silent: false
     vars:
       KUBERNETES_VERSION:
         sh: |
-          yq -r ".kubernetesVersion" {{.KUBERNETES_DIR}}/bootstrap/talos/talconfig.yaml
-      CONTROLPLANE_NODE:
-        sh: |
-          talosctl --context $CLUSTER config info \
-            | grep Endpoints: \
-            | awk '{split($0,u," "); print u[2]}' \
-            | sed -E 's/,//'
+          yq -r ".kubernetesVersion" {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml
+      TALOS_CONTROLLER:
+        sh: talosctl config info --output json | jq --raw-output '.endpoints[]' | shuf -n 1
     cmds:
-      - talosctl upgrade-k8s -n {{.CONTROLPLANE_NODE}} --to {{.KUBERNETES_VERSION}}
+      - until kubectl wait --timeout=5m --for=condition=Complete jobs --all --all-namespaces; do sleep 10; done
+      - talosctl upgrade-k8s -n {{.TALOS_CONTROLLER}} --to {{.KUBERNETES_VERSION}}
+    requires:
+      vars:
+        - K8S_CLUSTER
     preconditions:
-      - which talhelper
-      - talosctl config get-contexts | grep $CLUSTER
-      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/talenv.sops.yaml
-      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/talconfig.yaml
-      - msg: "Kubernetes version could not be determined for cluster $CLUSTER"
-        sh: 'test -n "{{.KUBERNETES_VERSION}}"'
-      - msg: "Control plane node could not be determined for cluster $CLUSTER"
-        sh: 'test -n "{{.CONTROLPLANE_NODE}}"'
+      - talosctl config info &>/dev/null
+      - talosctl --nodes {{.TALOS_CONTROLLER}} get machineconfig &>/dev/null
   apply-clusterconfig:
     desc: Apply clusterconfig for a Talos cluster
-    dotenv: ["{{.RESOURCES_DIR}}/.env"]
     vars:
       CLUSTERCONFIG_FILES:
-        sh: find {{.KUBERNETES_DIR}}/bootstrap/talos/clusterconfig -type f -name '*.yaml' -printf '%f\n'
+        sh: find {{.K8S_CLUSTER_DIR}}/bootstrap/talos/clusterconfig -type f -name '*.yaml' -printf '%f\n'
     cmds:
       - for:
           var: CLUSTERCONFIG_FILES
@@ -138,29 +125,24 @@ tasks:
         vars:
           filename: "{{.ITEM}}"
           hostname: |-
-            {{ trimPrefix (printf "%s-" .cluster) .ITEM | trimSuffix ".yaml" }}
-          dry_run: "{{ .dry_run }}"
-    preconditions:
-      - talosctl config get-contexts | grep $CLUSTER
-      - test -d {{.KUBERNETES_DIR}}/bootstrap/talos/clusterconfig
+            {{ trimPrefix (printf "%s-" .K8S_CLUSTER) .ITEM | trimSuffix ".yaml" }}
+          DRY_RUN: "{{ .DRY_RUN }}"
+    requires:
+      vars:
+        - K8S_CLUSTER
   _apply-machineconfig:
     internal: true
     desc: Apply a single Talos machineConfig to a Talos node
-    dotenv: ["{{.RESOURCES_DIR}}/.env"]
     cmds:
-      - talosctl --context theshire apply-config
+      - talosctl apply-config
         --nodes "{{.hostname}}"
-        --file "{{.KUBERNETES_DIR}}/bootstrap/talos/clusterconfig/{{.filename}}"
-        {{ if eq "true" .dry_run }}--dry-run{{ end }}
-        #--insecure
+        --file "{{.K8S_CLUSTER_DIR}}/bootstrap/talos/clusterconfig/{{.filename}}"
+        {{ if eq "true" .DRY_RUN }}--dry-run{{ end }}
     requires:
       vars:
+        - K8S_CLUSTER
        - hostname
        - filename
     preconditions:
-      - talosctl config get-contexts | grep $CLUSTER
-      - test -f {{.KUBERNETES_DIR}}/bootstrap/talos/clusterconfig/{{.filename}}
-  version:
-    desc: Show Talos version
-    cmd: talosctl version
+      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/clusterconfig/{{.filename}}
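For illustration only — assuming these tasks are included under a `talos` namespace and that K8S_CLUSTER picks up the "theshire" default from the root Taskfile — the reworked tasks would be driven roughly like this (the node name is a placeholder, not taken from the change):

    task talos:generate-clusterconfig
    task talos:apply-clusterconfig DRY_RUN=true
    task talos:upgrade NODE=node-1
    task talos:upgrade-k8s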

View file

@@ -1 +0,0 @@
-CLUSTER=theshire

View file

@@ -2,11 +2,10 @@
 # yaml-language-server: $schema=https://taskfile.dev/schema.json
 version: '3'
-# This taskfile is used to manage certain VolSync tasks for a given application, limitations are described below.
+# Taskfile used to manage certain VolSync tasks for a given application, limitations are as followed.
 # 1. Fluxtomization, HelmRelease, PVC, ReplicationSource all have the same name (e.g. plex)
 # 2. ReplicationSource and ReplicationDestination are a Restic repository
-# 3. Applications are deployed as either a Kubernetes Deployment or StatefulSet
-# 4. Each application only has one PVC that is being replicated
+# 3. Each application only has one PVC that is being replicated
 vars:
   VOLSYNC_RESOURCES_DIR: '{{.ROOT_DIR}}/.taskfiles/volsync/resources'
@@ -14,39 +13,34 @@ vars:
 tasks:
   state-*:
-    desc: Suspend or Resume Volsync
-    summary: |-
-      CLUSTER: Cluster to run command against (default: main)
-      STATE: resume or suspend (required)
+    desc: Suspend or resume Volsync [CLUSTER=main]
     cmds:
-      # - until kubectl wait jobs --all --all-namespaces --for=condition=complete --timeout=5m &>/dev/null; do sleep 5; done
-      - flux {{.STATE}} kustomization volsync
-      - flux --namespace {{.NS}} {{.STATE}} helmrelease volsync
-      - kubectl --namespace {{.NS}} scale deployment --all --replicas {{if eq .STATE "suspend"}}0{{else}}1{{end}}
+      - flux --namespace flux-system {{.STATE}} kustomization volsync
+      - flux --namespace volsync-system {{.STATE}} helmrelease volsync
+      - kubectl --namespace volsync-system scale deployment volsync --replicas {{if eq .STATE "suspend"}}0{{else}}1{{end}}
     vars:
-      NS: '{{.NS | default "volsync-system"}}'
       STATE: '{{index .MATCH 0}}'
     requires:
       vars: [CLUSTER]
+    preconditions:
+      - '[[ "{{.STATE}}" == "suspend" || "{{.STATE}}" == "resume" ]]'
+      - which flux kubectl
   unlock:
-    desc: Unlock all Restic repositories
-    summary: |-
-      CLUSTER: Cluster to run command against (default: main)
-    cmd: >
-      kubectl get replicationsources --all-namespaces --no-headers -A | awk '{print $1, $2}'
-      | xargs --max-procs=2 -l bash -c 'kubectl --namespace "$0" patch --field-manager=flux-client-side-apply replicationsources "$1" --type merge --patch "{\"spec\":{\"restic\":{\"unlock\":\"{{now | unixEpoch}}\"}}}"'
+    desc: Unlock all restic source repos [CLUSTER=main]
+    cmds:
+      - for: { var: SOURCES, split: "\n" }
+        cmd: kubectl --namespace {{splitList "," .ITEM | first}} patch --field-manager=flux-client-side-apply replicationsources {{splitList "," .ITEM | last}} --type merge --patch "{\"spec\":{\"restic\":{\"unlock\":\"{{now | unixEpoch}}\"}}}"
+    vars:
+      SOURCES:
+        sh: kubectl get replicationsources --all-namespaces --no-headers --output=jsonpath='{range .items[*]}{.metadata.namespace},{.metadata.name}{"\n"}{end}'
     requires:
       vars: [CLUSTER]
-  # To run backup jobs in parallel for all replicationsources:
-  # - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:snapshot APP=$0 NS=$1'
+    preconditions:
+      - which kubectl
   snapshot:
-    desc: Snapshot an application
-    summary: |-
-      CLUSTER: Cluster to run command against (default: main)
-      NS: Namespace the application is in (default: default)
-      APP: Application to snapshot (required)
+    desc: Snapshot an app [CLUSTER=main] [NS=default] [APP=required]
     cmds:
       - kubectl --namespace {{.NS}} patch replicationsources {{.APP}} --type merge -p '{"spec":{"trigger":{"manual":"{{now | unixEpoch}}"}}}'
       - until kubectl --namespace {{.NS}} get job/{{.JOB}} &>/dev/null; do sleep 5; done
@@ -58,47 +52,34 @@ tasks:
       vars: [CLUSTER, APP]
     preconditions:
       - kubectl --namespace {{.NS}} get replicationsources {{.APP}}
-  # To run restore jobs in parallel for all replicationdestinations:
-  # - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:restore APP=$0 NS=$1'
+      - which kubectl
   restore:
-    desc: Restore an application
-    summary: |-
-      CLUSTER: Cluster to run command against (default: main)
-      NS: Namespace the application is in (default: default)
-      APP: Application to restore (required)
-      PREVIOUS: Previous number of snapshots to restore (default: 2)
-    cmds:
-      - task: .suspend
-      - task: .restore
-      - task: .resume
-    requires:
-      vars: [CLUSTER, APP]
-  .suspend:
-    internal: true
+    desc: Restore an app [CLUSTER=main] [NS=default] [APP=required] [PREVIOUS=required]
     cmds:
+      # Suspend
       - flux --namespace flux-system suspend kustomization {{.APP}}
       - flux --namespace {{.NS}} suspend helmrelease {{.APP}}
       - kubectl --namespace {{.NS}} scale {{.CONTROLLER}}/{{.APP}} --replicas 0
       - kubectl --namespace {{.NS}} wait pod --for=delete --selector="app.kubernetes.io/name={{.APP}}" --timeout=5m
+      # Restore
+      - minijinja-cli {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2 | kubectl apply --server-side --filename -
+      - until kubectl --namespace {{.NS}} get job/volsync-dst-{{.APP}}-manual &>/dev/null; do sleep 5; done
+      - kubectl --namespace {{.NS}} wait job/volsync-dst-{{.APP}}-manual --for=condition=complete --timeout=120m
+      - kubectl --namespace {{.NS}} delete replicationdestination {{.APP}}-manual
+      # Resume
+      - flux --namespace flux-system resume kustomization {{.APP}}
+      - flux --namespace {{.NS}} resume helmrelease {{.APP}}
+      - flux --namespace {{.NS}} reconcile helmrelease {{.APP}} --force
+      - kubectl --namespace {{.NS}} wait pod --for=condition=ready --selector="app.kubernetes.io/name={{.APP}}" --timeout=5m
     vars:
       NS: '{{.NS | default "default"}}'
-      APP: '{{.APP}}'
       CONTROLLER:
         sh: kubectl --namespace {{.NS}} get deployment {{.APP}} &>/dev/null && echo deployment || echo statefulset
-  .restore:
-    internal: true
-    cmds:
-      - minijinja-cli --env --trim-blocks --lstrip-blocks --autoescape=none {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2 | kubectl apply --server-side --filename -
-      - until kubectl --namespace {{.NS}} get job/{{.JOB}} &>/dev/null; do sleep 5; done
-      - kubectl --namespace {{.NS}} wait job/{{.JOB}} --for=condition=complete --timeout=120m
-      - kubectl --namespace {{.NS}} delete replicationdestination {{.JOB}}
-    vars:
-      NS: '{{.NS | default "default"}}'
-      JOB: volsync-dst-{{.APP}}
-      PREVIOUS: '{{.PREVIOUS | default 2}}'
+    env:
+      NS: '{{.NS}}'
+      APP: '{{.APP}}'
+      PREVIOUS: '{{.PREVIOUS}}'
       CLAIM:
         sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.sourcePVC}"
       ACCESS_MODES:
@@ -109,28 +90,8 @@
         sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.restic.moverSecurityContext.runAsUser}"
       PGID:
         sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.restic.moverSecurityContext.runAsGroup}"
-    env:
-      NS: '{{.NS}}'
-      JOB: '{{.JOB}}'
-      APP: '{{.APP}}'
-      PREVIOUS: '{{.PREVIOUS}}'
-      CLAIM: '{{.CLAIM}}'
-      ACCESS_MODES: '{{.ACCESS_MODES}}'
-      STORAGE_CLASS_NAME: '{{.STORAGE_CLASS_NAME}}'
-      PUID: '{{.PUID}}'
-      PGID: '{{.PGID}}'
+    requires:
+      vars: [CLUSTER, APP, PREVIOUS]
     preconditions:
       - test -f {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2
-  .resume:
-    internal: true
-    cmds:
-      - flux --namespace {{.NS}} resume helmrelease {{.APP}}
-      - flux --namespace flux-system resume kustomization {{.APP}}
-      - kubectl --namespace {{.NS}} scale {{.CONTROLLER}}/{{.APP}} --replicas 1
-      - kubectl --namespace {{.NS}} wait pod --for=condition=ready --selector="app.kubernetes.io/name={{.APP}}" --timeout=5m
-    vars:
-      NS: '{{.NS | default "default"}}'
-      APP: '{{.APP}}'
-      CONTROLLER:
-        sh: kubectl --namespace {{.NS}} get deployment {{.APP}} &>/dev/null && echo deployment || echo statefulset
+      - which flux kubectl minijinja-cli
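For illustration only — assuming the file is wired in as the `volsync` task namespace, with "plex" as the placeholder app name the new desc strings already use as an example — typical invocations would look like:

    task volsync:unlock CLUSTER=main
    task volsync:snapshot CLUSTER=main NS=default APP=plex
    task volsync:restore CLUSTER=main NS=default APP=plex PREVIOUS=2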

View file

@@ -2,7 +2,7 @@
 apiVersion: volsync.backube/v1alpha1
 kind: ReplicationDestination
 metadata:
-  name: {{ ENV.JOB }}
+  name: {{ ENV.APP }}-manual
   namespace: {{ ENV.NS }}
 spec:
   trigger:

View file

@@ -5,6 +5,10 @@ vars:
   KUBERNETES_DIR: "{{.ROOT_DIR}}/kubernetes"
   CLUSTER_SECRETS_FILE: "{{.CLUSTER_DIR}}/flux/vars/cluster-secrets.sops.env"
   CLUSTER_SETTINGS_FILE: "{{.CLUSTER_DIR}}/flux/vars/cluster-settings.env"
+  K8S_CLUSTER: '{{.K8S_CLUSTER | default "theshire"}}'
+  K8S_CLUSTER_DIR: '{{.KUBERNETES_DIR}}'
+  CLUSTER: '{{.CLUSTER | default "theshire"}}'
+  CLUSTER_DIR: '{{.KUBERNETES_DIR}}'
 env:
   KUBECONFIG: "{{.ROOT_DIR}}/kubeconfig"
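For illustration, these defaults mean a task that declares K8S_CLUSTER under requires can still run with no extra arguments, while another cluster name (hypothetical here) can be passed explicitly on the command line:

    task talos:generate-clusterconfig                     # uses the theshire default
    task talos:generate-clusterconfig K8S_CLUSTER=staging # hypothetical override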

View file

@@ -35,7 +35,7 @@
 app:
   image:
     repository: docker.io/ollama/ollama
-    tag: 0.3.14
+    tag: 0.4.1
   env:
     - name: OLLAMA_HOST
       value: 0.0.0.0

View file

@@ -9,7 +9,7 @@
 chart:
   spec:
     chart: coder
-    version: 2.16.1
+    version: 2.17.2
     sourceRef:
       kind: HelmRepository
       name: coder

View file

@@ -31,7 +31,7 @@
 app:
   image:
     repository: ghcr.io/autobrr/autobrr
-    tag: v1.48.0@sha256:0ae19e3beedf491396e450b024c23e9e24df4d692286c0442a81fa699493def0
+    tag: v1.49.0@sha256:dc2195ccabf8438a8f8eb0581c5e6d2a40c061754e57552bc4f67f1b20a71970
   env:
     AUTOBRR__CHECK_FOR_UPDATES: "false"
     AUTOBRR__HOST: 0.0.0.0

View file

@@ -36,7 +36,7 @@
 app:
   image:
     repository: ghcr.io/onedr0p/home-assistant
-    tag: 2024.10.4@sha256:d788b59a4ee584f7cbeee7cff896e922faa8f0673c83187045e77e0fc77c8457
+    tag: 2024.11.1@sha256:a3dd7577c28771702b21f817ad86600056467c2c7f45d261a1e7241910ddc2e2
   env:
     TZ: America/Chicago
   envFrom:
@@ -54,7 +54,7 @@
 code-server:
   image:
     repository: ghcr.io/coder/code-server
-    tag: 4.93.1@sha256:c69e398d1b64589b3b77a7becfd03f4ec524982def20e6bffbb51b1b839e72ba
+    tag: 4.95.1@sha256:d9bc7797d997e1b199e333676732e075bac4bae276dc0fe1baece2e313edfa09
   args: [
     "--auth", "none",
     "--user-data-dir", "/config/.vscode",

View file

@@ -16,7 +16,6 @@ resources:
   - ./morphos/ks.yaml
   - ./omegabrr/ks.yaml
   - ./overseerr/ks.yaml
-  - ./piped/ks.yaml
   - ./plex/ks.yaml
   - ./prowlarr/ks.yaml
   - ./radarr/ks.yaml

View file

@@ -32,7 +32,7 @@
 app:
   image:
     repository: ghcr.io/jorenn92/maintainerr
-    tag: 2.2.0@sha256:fbb2c0341b8af502e4488f3664e34992f24947708c7dac10dcbee592f99a946c
+    tag: 2.2.1@sha256:13121a8292ef6db7560a931bf19b601cf3cc12df0a9dea9086b757798eea5b6d
   env:
     TZ: America/Chicago
   resources:

View file

@@ -31,7 +31,7 @@
 app:
   image:
     repository: ghcr.io/autobrr/omegabrr
-    tag: v1.14.0@sha256:6f65c7967609746662815933ecc8168c8c25a3b82d909f49833fcce2b47ee052
+    tag: v1.15.0@sha256:4f6099a76ff9d248e9f032e29c04a92b483f21456e46f3b01eb20399f4732ad0
   env:
     TZ: America/Chicago
   securityContext:

View file

@@ -33,7 +33,7 @@
 app:
   image:
     repository: ghcr.io/taxel/plextraktsync
-    tag: 0.32.0
+    tag: 0.32.1
   args:
     - sync
   env:

View file

@@ -32,7 +32,7 @@
 app:
   image:
     repository: ghcr.io/koush/scrypted
-    tag: v0.123.0-jammy-nvidia
+    tag: v0.123.1-jammy-nvidia
   probes:
     liveness:
       enabled: true

View file

@@ -0,0 +1,67 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: generic-device-plugin
spec:
  interval: 30m
  chart:
    spec:
      chart: app-template
      version: 3.5.1
      sourceRef:
        kind: HelmRepository
        name: bjw-s
        namespace: flux-system
  driftDetection:
    mode: enabled
  install:
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      strategy: rollback
      retries: 3
  values:
    defaultPodOptions:
      priorityClassName: system-node-critical
    controllers:
      generic-device-plugin:
        type: daemonset
        strategy: RollingUpdate
        annotations:
          reloader.stakater.com/auto: "true"
        containers:
          generic-device-plugin:
            image:
              repository: ghcr.io/squat/generic-device-plugin
              tag: latest@sha256:ba6f0b4cf6c858d6ad29ba4d32e4da11638abbc7d96436bf04f582a97b2b8821
            args:
              - --config=/config/config.yml
            ports:
              - containerPort: 8080
                name: http
            securityContext:
              allowPrivilegeEscalation: false
              readOnlyRootFilesystem: true
              capabilities: { drop: ["ALL"] }
    persistence:
      config:
        type: configMap
        name: generic-device-plugin-configmap
        globalMounts:
          - path: /config/config.yml
            subPath: config.yml
            readOnly: true
      dev:
        type: hostPath
        hostPath: /dev
        globalMounts:
          - path: /dev
      device-plugin:
        type: hostPath
        hostPath: /var/lib/kubelet/device-plugins
        globalMounts:
          - path: /var/lib/kubelet/device-plugins

View file

@@ -0,0 +1,12 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ./helmrelease.yaml
configMapGenerator:
  - name: generic-device-plugin-configmap
    files:
      - ./resources/config.yml
generatorOptions:
  disableNameSuffixHash: true

View file

@@ -0,0 +1,9 @@
---
log-level: info
domain: kernel.org
devices:
  - name: tun
    groups:
      - count: 1000
        paths:
          - path: /dev/net/tun
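For context, generic-device-plugin advertises each configured device as an extended resource under the configured domain, so a workload would claim the tun device with a resource limit roughly like the sketch below (pod name, container, and image are hypothetical; the resource name assumes the kernel.org domain shown above):

    apiVersion: v1
    kind: Pod
    metadata:
      name: tun-consumer   # hypothetical example
    spec:
      containers:
        - name: app
          image: docker.io/library/busybox:1.36
          command: ["sleep", "infinity"]
          resources:
            limits:
              kernel.org/tun: 1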

View file

@@ -0,0 +1,20 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: &app generic-device-plugin
  namespace: flux-system
spec:
  targetNamespace: kube-system
  commonMetadata:
    labels:
      app.kubernetes.io/name: *app
  path: "./kubernetes/apps/kube-system/generic-device-plugin/app"
  prune: true
  sourceRef:
    kind: GitRepository
    name: theshire
  wait: true
  interval: 30m
  timeout: 5m

View file

@@ -12,6 +12,7 @@ resources:
   - ./descheduler/ks.yaml
   - ./dnsimple-webhook-rbac.yaml
   - ./fstrim/ks.yaml
+  - ./generic-device-plugin/ks.yaml
   - ./kubelet-csr-approver/ks.yaml
   - ./metrics-server/ks.yaml
   - ./node-feature-discovery/ks.yaml

View file

@@ -36,7 +36,7 @@
 app:
   image:
     repository: docker.io/cloudflare/cloudflared
-    tag: 2024.10.1@sha256:52b9529db08f7ef827a2bce04b91945b475c651e46f583c30b70dd6773262ae3
+    tag: 2024.11.0@sha256:2c78df02e1f23ab19d4c636921f05b9ebec163b887e946f98e22e56254a5540f
   env:
     NO_AUTOUPDATE: "true"
     TUNNEL_CRED_FILE: /etc/cloudflared/creds/credentials.json

View file

@@ -247,7 +247,7 @@
 victoria-agent:
   # renovate: depName="VictoriaMetrics - vmagent"
   gnetId: 12683
-  revision: 20
+  revision: 21
   datasource: Prometheus
 victoria-alert:
   # renovate: depName="VictoriaMetrics - vmalert"
@@ -262,7 +262,7 @@
 victoria-single:
   # renovate: depName="VictoriaMetrics - single-node"
   gnetId: 10229
-  revision: 36
+  revision: 37
   datasource: Prometheus
 postgres:
   crunchy-pgbackrest:

View file

@@ -9,7 +9,7 @@
 chart:
   spec:
     chart: prometheus-operator-crds
-    version: 15.0.0
+    version: 16.0.0
     sourceRef:
       kind: HelmRepository
       name: prometheus-community

View file

@@ -43,7 +43,7 @@
 app:
   image:
     repository: jesec/flood
-    tag: master@sha256:8d04ec24abcc879f14e744e809520f7a7ec3c66395e1f6efa4179c9399803fbe
+    tag: master@sha256:8a18a3509a6c1557b769873a1ef85dcd5fa4cbce1a939be2c6c87f97eb79de45
   envFrom:
     - secretRef:
         name: flood-secret

View file

@@ -10,7 +10,7 @@
 chart:
   spec:
     chart: rook-ceph
-    version: v1.15.4
+    version: v1.15.5
     sourceRef:
       kind: HelmRepository
       name: rook-ceph

View file

@@ -10,7 +10,7 @@
 chart:
   spec:
     chart: rook-ceph-cluster
-    version: v1.15.4
+    version: v1.15.5
     sourceRef:
       kind: HelmRepository
       name: rook-ceph

View file

@@ -19,7 +19,7 @@ releases:
   - name: prometheus-operator-crds
     namespace: observability
     chart: oci://ghcr.io/prometheus-community/charts/prometheus-operator-crds
-    version: 15.0.0
+    version: 16.0.0
   - name: cilium
     namespace: kube-system
     chart: cilium/cilium

View file

@@ -1,9 +1,11 @@
 ---
-# yaml-language-server: $schema=https://ks.hsn.dev/talconfig.json
+# yaml-language-server: $schema=https://raw.githubusercontent.com/budimanjojo/talhelper/master/pkg/config/schemas/talconfig.json
 clusterName: theshire
+# renovate: datasource=github-releases depName=siderolabs/talos
 talosVersion: v1.8.1
-kubernetesVersion: 1.30.2
+# renovate: datasource=docker depName=ghcr.io/siderolabs/kubelet
+kubernetesVersion: 1.31.2
 endpoint: "https://10.1.1.57:6444"
 additionalApiServerCertSans:
@@ -169,8 +171,8 @@ worker:
       fs.inotify.max_queued_events: "65536"
       fs.inotify.max_user_instances: "8192"
      fs.inotify.max_user_watches: "524288"
-      net.core.rmem_max: "2500000"
-      net.core.wmem_max: "2500000"
+      net.core.rmem_max: "7500000"
+      net.core.wmem_max: "7500000"
   - &nfsMountOptions |-
     machine:
       files:

View file

@@ -19,5 +19,6 @@ pkgs.mkShell {
     age
     mqttui
     kustomize
+    yq-go
   ];
 }