Compare commits: main ... renovate/c (1 commit)
Commit 616744ba7d
550 changed files with 26010 additions and 19573 deletions
.ansible-lint — Normal file, 9 lines
@@ -0,0 +1,9 @@
---
skip_list:
  - yaml[line-length]
  - var-naming
warn_list:
  - command-instead-of-shell
  - deprecated-command-syntax
  - experimental
  - no-changed-when
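For context: warn_list downgrades those four rules from errors to warnings, so a lint run still reports them without failing. A minimal illustration (this task is hypothetical, not part of the diff) of a task that would now only warn under no-changed-when:

# Hypothetical playbook task: ansible.builtin.command with no changed_when
# triggers the no-changed-when rule, which this config downgrades to a warning.
- name: Print kernel version
  ansible.builtin.command: uname -r
  register: kernel_version
# Adding "changed_when: false" to the task would silence the warning entirely.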
.archive/.taskfiles/Ansible/Taskfile.yaml — Normal file, 52 lines
@@ -0,0 +1,52 @@
---
# yaml-language-server: $schema=https://taskfile.dev/schema.json
version: "3"

vars:
  PYTHON_BIN: python3

env:
  PATH: "{{.ROOT_DIR}}/.venv/bin:$PATH"
  VIRTUAL_ENV: "{{.ROOT_DIR}}/.venv"
  ANSIBLE_COLLECTIONS_PATH: "{{.ROOT_DIR}}/.venv/galaxy"
  ANSIBLE_ROLES_PATH: "{{.ROOT_DIR}}/.venv/galaxy/ansible_roles"
  ANSIBLE_VARS_ENABLED: "host_group_vars,community.sops.sops"

tasks:

  deps:
    desc: Set up Ansible dependencies for the environment
    cmds:
      - task: .venv

  run:
    desc: Run an Ansible playbook for configuring a cluster
    summary: |
      Args:
        cluster: Cluster to run command against (required)
        playbook: Playbook to run (required)
    prompt: Run Ansible playbook '{{.playbook}}' against the '{{.cluster}}' cluster... continue?
    deps: ["deps"]
    cmd: |
      .venv/bin/ansible-playbook \
        --inventory {{.ANSIBLE_DIR}}/{{.cluster}}/inventory/hosts.yaml \
        {{.ANSIBLE_DIR}}/{{.cluster}}/playbooks/{{.playbook}}.yaml {{.CLI_ARGS}}
    preconditions:
      - { msg: "Argument (cluster) is required", sh: "test -n {{.cluster}}" }
      - { msg: "Argument (playbook) is required", sh: "test -n {{.playbook}}" }
      - { msg: "Venv not found", sh: "test -d {{.ROOT_DIR}}/.venv" }
      - { msg: "Inventory not found", sh: "test -f {{.ANSIBLE_DIR}}/{{.cluster}}/inventory/hosts.yaml" }
      - { msg: "Playbook not found", sh: "test -f {{.ANSIBLE_DIR}}/{{.cluster}}/playbooks/{{.playbook}}.yaml" }

  .venv:
    internal: true
    cmds:
      - true && {{.PYTHON_BIN}} -m venv {{.ROOT_DIR}}/.venv
      - .venv/bin/python3 -m pip install --upgrade pip setuptools wheel
      - .venv/bin/python3 -m pip install --upgrade --requirement {{.ANSIBLE_DIR}}/requirements.txt
      - .venv/bin/ansible-galaxy install --role-file "{{.ANSIBLE_DIR}}/requirements.yaml" --force
    sources:
      - "{{.ANSIBLE_DIR}}/requirements.txt"
      - "{{.ANSIBLE_DIR}}/requirements.yaml"
    generates:
      - "{{.ROOT_DIR}}/.venv/pyvenv.cfg"
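For context (not shown in this diff): a taskfile under .taskfiles/ is normally wired into the repository's root Taskfile via an include, which would also be where the {{.ANSIBLE_DIR}} variable referenced above gets defined. A minimal sketch, assuming an "ansible" namespace and an illustrative ANSIBLE_DIR value:

# Hypothetical root Taskfile.yaml include; the tasks above would then be
# invoked as e.g. "task ansible:run cluster=<name> playbook=<name>".
version: "3"
vars:
  ANSIBLE_DIR: "{{.ROOT_DIR}}/ansible"  # illustrative; the real path is not in this diff
includes:
  ansible:
    taskfile: .taskfiles/Ansible/Taskfile.yaml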
@@ -1,87 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: &app ollama
spec:
  interval: 30m
  chart:
    spec:
      chart: app-template
      version: 3.5.1
      sourceRef:
        kind: HelmRepository
        name: bjw-s
        namespace: flux-system
  install:
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      retries: 3
      strategy: rollback
  values:
    controllers:
      ollama:
        annotations:
          reloader.stakater.com/auto: "true"
        pod:
          nodeSelector:
            nvidia.com/gpu.present: "true"
          runtimeClassName: nvidia
        containers:
          app:
            image:
              repository: docker.io/ollama/ollama
              tag: 0.4.2
            env:
              - name: OLLAMA_HOST
                value: 0.0.0.0
              - name: OLLAMA_ORIGINS
                value: "*"
              - name: OLLAMA_MODELS
                value: &modelPath "/models"
              - name: OLLAMA_KEEP_ALIVE
                value: "24h"
            resources:
              requests:
                cpu: 500m
                memory: 2Gi
              limits:
                memory: 16Gi
                nvidia.com/gpu: 1 # requesting 1 GPU
    service:
      app:
        controller: ollama
        ports:
          http:
            port: 11434
    ingress:
      app:
        enabled: true
        className: internal-nginx
        hosts:
          - host: &host "{{ .Release.Name }}.jahanson.tech"
            paths:
              - path: /
                service:
                  identifier: app
                  port: http
        tls:
          - hosts:
              - *host
    persistence:
      models:
        enabled: true
        existingClaim: ollama-models
        advancedMounts:
          ollama:
            app:
              - path: *modelPath
      config:
        enabled: true
        existingClaim: ollama
        globalMounts:
          - path: /root/.ollama
@@ -1,12 +0,0 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ollama-models
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Gi
  storageClassName: openebs-hostpath
@@ -1,29 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: &app ollama
  namespace: flux-system
spec:
  targetNamespace: ai
  commonMetadata:
    labels:
      app.kubernetes.io/name: *app
  dependsOn:
    - name: nvidia-device-plugin
    - name: node-feature-discovery
    - name: volsync
    - name: rook-ceph-cluster
  path: ./kubernetes/apps/ai/ollama/app
  prune: true
  sourceRef:
    kind: GitRepository
    name: theshire
  wait: false
  interval: 30m
  timeout: 5m
  postBuild:
    substitute:
      APP: *app
      VOLSYNC_CAPACITY: 1Gi
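For context: Flux applies postBuild.substitute to every manifest this Kustomization builds, replacing bash-style ${APP} and ${VOLSYNC_CAPACITY} variables at apply time. A hedged sketch of how the shared volsync template (not shown in this diff) would consume them:

# Hypothetical excerpt of templates/volsync; the repository's real template
# may differ — this only illustrates the substitution mechanism.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ${APP}
spec:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: ${VOLSYNC_CAPACITY}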
@@ -1,81 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: &app comfyui
spec:
  interval: 30m
  chart:
    spec:
      chart: app-template
      version: 3.5.1
      sourceRef:
        kind: HelmRepository
        name: bjw-s
        namespace: flux-system
  install:
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      retries: 3
      strategy: rollback
  values:
    controllers:
      comfyui:
        annotations:
          reloader.stakater.com/auto: "true"
        pod:
          nodeSelector:
            nvidia.com/gpu.present: "true"
          runtimeClassName: nvidia
        containers:
          app:
            image:
              repository: docker.io/jahanson/comfyui
              tag: v0.0.1
            resources:
              requests:
                cpu: 500m
                memory: 2Gi
              limits:
                memory: 60Gi
                nvidia.com/gpu: 1 # requesting 1 GPU
    service:
      app:
        controller: comfyui
        ports:
          http:
            port: 7860
    ingress:
      app:
        enabled: true
        className: internal-nginx
        hosts:
          - host: &host "{{ .Release.Name }}.jahanson.tech"
            paths:
              - path: /
                service:
                  identifier: app
                  port: http
        tls:
          - hosts:
              - *host
    persistence:
      models:
        enabled: true
        existingClaim: stablediffusion-checkpoints
        globalMounts:
          - path: /data/models
      config:
        enabled: true
        existingClaim: comfyui
        globalMounts:
          - path: /data/config
      output:
        enabled: true
        type: emptyDir
        globalMounts:
          - path: /output
@@ -1,9 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ./helmrelease.yaml
  - ./pvc.yaml
  - ../../../../templates/volsync
  - ../../../../templates/gatus/internal
@@ -1,12 +0,0 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: stablediffusion-checkpoints
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 300Gi
  storageClassName: openebs-hostpath
@@ -1,31 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: &app comfyui
  namespace: flux-system
spec:
  targetNamespace: ai
  commonMetadata:
    labels:
      app.kubernetes.io/name: *app
  dependsOn:
    - name: nvidia-device-plugin
    - name: node-feature-discovery
    - name: volsync
    - name: rook-ceph-cluster
  path: ./kubernetes/apps/ai/stable-diffusion/comfyui
  prune: true
  sourceRef:
    kind: GitRepository
    name: theshire
  wait: false
  interval: 30m
  retryInterval: 1m
  timeout: 5m
  postBuild:
    substitute:
      APP: *app
      VOLSYNC_CAPACITY: 5Gi
      GATUS_SUBDOMAIN: comfyui
@@ -1,8 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ./helmrelease.yaml
  - ../../../../templates/gatus/external
  - ../../../../templates/volsync
@@ -1,29 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: &app jellyfin
  namespace: flux-system
spec:
  targetNamespace: anime
  commonMetadata:
    labels:
      app.kubernetes.io/name: *app
  dependsOn:
    - name: node-feature-discovery
    - name: nvidia-device-plugin
    - name: volsync
  path: ./kubernetes/apps/anime/jellyfin/app
  prune: true
  sourceRef:
    kind: GitRepository
    name: theshire
  wait: false
  interval: 30m
  timeout: 5m
  postBuild:
    substitute:
      APP: *app
      GATUS_PATH: /web/index.html
      VOLSYNC_CAPACITY: 20Gi
@@ -1,20 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/datreeio/CRDs-catalog/main/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: nicehash
spec:
  refreshInterval: 1m
  secretStoreRef:
    kind: ClusterSecretStore
    name: onepassword-connect
  target:
    name: nicehash-secret
    template:
      type: Opaque
      data:
        MINING_ADDRESS: "{{ .MINING_ADDRESS }}"
  dataFrom:
    - extract:
        key: nicehash
@@ -1,72 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: nicehash
spec:
  interval: 30m
  chart:
    spec:
      chart: app-template
      version: 3.5.1
      sourceRef:
        kind: HelmRepository
        name: bjw-s
        namespace: flux-system
  install:
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      strategy: rollback
      retries: 3
  values:
    controllers:
      nicehash:
        annotations:
          reloader.stakater.com/auto: "true"
        containers:
          app:
            image:
              repository: docker.io/dockerhubnh/nicehash
              tag: latest
            envFrom:
              - secretRef:
                  name: nicehash-secret
            env:
              TZ: America/Chicago
              MINING_WORKER_NAME: shadowfax
            securityContext:
              allowPrivilegeEscalation: false
              readOnlyRootFilesystem: true
              capabilities: { drop: ["ALL"] }
            resources:
              requests:
                cpu: 10m
              limits:
                nvidia.com/gpu: 1 # requesting 1 GPU
                memory: 10Gi
    defaultPodOptions:
      securityContext:
        runAsNonRoot: true
        runAsUser: 568
        runAsGroup: 568
        fsGroup: 568
        fsGroupChangePolicy: OnRootMismatch
        seccompProfile: { type: RuntimeDefault }
      nodeSelector:
        nvidia.com/gpu.present: "true"
      runtimeClassName: nvidia
    persistence:
      logs:
        type: emptyDir
        globalMounts:
          - path: /var/log/
      tmp:
        type: emptyDir
      cache:
        existingClaim: nicehash
        globalMounts:
          - path: /var/cache/nhm4/
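For context: the ExternalSecret earlier in this diff materializes a Kubernetes Secret named nicehash-secret, and the container's envFrom/secretRef injects every key of that Secret as an environment variable. A hedged sketch of the rendered Secret (the value is a placeholder, not real data):

# Hypothetical Secret produced by external-secrets from the "nicehash"
# 1Password item; MINING_ADDRESS then reaches the container via envFrom.
apiVersion: v1
kind: Secret
metadata:
  name: nicehash-secret
type: Opaque
stringData:
  MINING_ADDRESS: "<mining address from the 1Password item>"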
@@ -1,8 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ./externalsecret.yaml
  - ./helmrelease.yaml
  - ../../../../templates/volsync
@@ -1,27 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: &app nicehash
  namespace: flux-system
spec:
  targetNamespace: default
  commonMetadata:
    labels:
      app.kubernetes.io/name: *app
  dependsOn:
    - name: external-secrets-stores
    - name: rook-ceph-cluster
  path: ./kubernetes/apps/default/nicehash/app
  prune: true
  sourceRef:
    kind: GitRepository
    name: theshire
  wait: false
  interval: 30m
  timeout: 5m
  postBuild:
    substitute:
      APP: *app
      VOLSYNC_CAPACITY: 1Gi
@@ -1,34 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/datreeio/CRDs-catalog/main/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: piped
spec:
  refreshInterval: 1m
  secretStoreRef:
    name: crunchy-pgo-secrets
    kind: ClusterSecretStore
  target:
    name: piped-secret
    template:
      type: Opaque
      data:
        config.properties: |
          API_URL: https://piped-api.hsn.dev
          COMPROMISED_PASSWORD_CHECK: true
          DISABLE_REGISTRATION: true
          FEED_RETENTION: 30
          FRONTEND_URL: https://piped.hsn.dev
          HTTP_WORKERS: 4
          MATRIX_SERVER: https://element.infosec.exchange
          PORT: 8080
          PROXY_PART: https://piped-proxy.jahanson.tech
          SENTRY_DSN:
          hibernate.connection.driver_class: org.postgresql.Driver
          hibernate.connection.url: jdbc:postgresql://{{ index . "host" }}:5432/{{ index . "dbname" }}
          hibernate.connection.username: {{ index . "user" }}
          hibernate.connection.password: {{ index . "password" }}
  dataFrom:
    - extract:
        key: postgres-pguser-piped
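For context: the {{ index . "..." }} expressions are Go templates that external-secrets renders against the keys extracted from the Crunchy PGO user secret postgres-pguser-piped. A hedged sketch of the rendered JDBC lines, with invented values purely for illustration:

# Hypothetical rendered lines inside config.properties (values invented):
# hibernate.connection.url: jdbc:postgresql://postgres-primary.default.svc:5432/piped
# hibernate.connection.username: piped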
@@ -1,182 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: piped
spec:
  chart:
    spec:
      chart: app-template
      version: 3.5.1
      interval: 30m
      sourceRef:
        kind: HelmRepository
        name: bjw-s
        namespace: flux-system
  interval: 30m
  values:
    defaultPodOptions:
      automountServiceAccountToken: false
      securityContext:
        runAsUser: 1000
        runAsGroup: 1000
        fsGroup: 1000
        fsGroupChangePolicy: "OnRootMismatch"

    controllers:
      backend:
        strategy: RollingUpdate
        annotations:
          secret.reloader.stakater.com/reload: piped-secret

        containers:
          app:
            image:
              repository: 1337kavin/piped
              tag: latest@sha256:18e77857414236edc7245bebb3fb8ab3ac49c44bd76701bfce24f6ba0170d4b8
            probes:
              liveness:
                enabled: true
              readiness:
                enabled: true
            resources:
              requests:
                cpu: 10m
                memory: 500Mi
              limits:
                memory: 2000Mi
            securityContext:
              allowPrivilegeEscalation: false
              capabilities:
                drop:
                  - ALL
              seccompProfile:
                type: RuntimeDefault

      frontend:
        strategy: RollingUpdate

        containers:
          app:
            image:
              repository: ghcr.io/bjw-s-labs/piped-frontend
              tag: 2024.11.4@sha256:0e413986606f39cdc6afa0379feca912d4a4abbdcbe67b408c9fbe19fbabd10f
            env:
              BACKEND_HOSTNAME: piped-api.hsn.dev
            probes:
              liveness:
                enabled: true
              readiness:
                enabled: true
            resources:
              requests:
                cpu: 10m
                memory: 32Mi
              limits:
                memory: 256Mi
            securityContext:
              allowPrivilegeEscalation: false
              capabilities:
                drop:
                  - ALL
              readOnlyRootFilesystem: true

      ytproxy:
        strategy: RollingUpdate

        containers:
          app:
            image:
              repository: 1337kavin/piped-proxy
              tag: latest@sha256:ab9e472107337886d71b0151b6e777fc4cba0dd8251a21d4788a7a7f165f545a
            command:
              - /app/piped-proxy
            probes:
              liveness:
                enabled: true
              readiness:
                enabled: true
            resources:
              requests:
                cpu: 10m
                memory: 500Mi
              limits:
                memory: 2000Mi
            securityContext:
              allowPrivilegeEscalation: false
              capabilities:
                drop:
                  - ALL
              seccompProfile:
                type: RuntimeDefault

    service:
      backend:
        controller: backend
        ports:
          http:
            port: 8080
      frontend:
        controller: frontend
        ports:
          http:
            port: 8080
      ytproxy:
        controller: ytproxy
        ports:
          http:
            port: 8080

    ingress:
      backend:
        className: "external-nginx"
        annotations:
          external-dns.alpha.kubernetes.io/target: external.hsn.dev
          external-dns.alpha.kubernetes.io/cloudflare-proxied: "true"
          nginx.ingress.kubernetes.io/enable-cors: "true"
          nginx.ingress.kubernetes.io/cors-allow-origin: "https://piped.hsn.dev, https://piped-api.hsn.dev, https://piped-proxy.jahanson.tech"
        hosts:
          - host: piped-api.hsn.dev
            paths:
              - path: /
                service:
                  identifier: backend
                  port: http
      frontend:
        className: "external-nginx"
        annotations:
          external-dns.alpha.kubernetes.io/target: external.hsn.dev
          external-dns.alpha.kubernetes.io/cloudflare-proxied: "true"
          nginx.ingress.kubernetes.io/enable-cors: "true"
          nginx.ingress.kubernetes.io/cors-allow-origin: "https://piped.hsn.dev, https://piped-api.hsn.dev, https://piped-proxy.jahanson.tech"
        hosts:
          - host: piped.hsn.dev
            paths:
              - path: /
                service:
                  identifier: frontend
                  port: http
      ytproxy:
        className: "internal-nginx"
        annotations:
          nginx.ingress.kubernetes.io/enable-cors: "true"
          nginx.ingress.kubernetes.io/cors-allow-origin: "https://piped.hsn.dev, https://piped-api.hsn.dev, https://piped-proxy.jahanson.tech"
        hosts:
          - host: piped-proxy.jahanson.tech
            paths:
              - path: /
                service:
                  identifier: ytproxy
                  port: http

    persistence:
      config:
        type: secret
        name: piped-secret
        advancedMounts:
          backend:
            app:
              - path: /app/config.properties
                subPath: config.properties
                readOnly: true
@@ -1,134 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: plex
spec:
  interval: 30m
  chart:
    spec:
      chart: app-template
      version: 3.5.1
      sourceRef:
        kind: HelmRepository
        name: bjw-s
        namespace: flux-system
  install:
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      strategy: rollback
      retries: 3
  dependsOn:
    - name: nvidia-device-plugin
      namespace: kube-system
    - name: rook-ceph-cluster
      namespace: rook-ceph
    - name: volsync
      namespace: volsync-system
  values:
    controllers:
      plex:
        annotations:
          reloader.stakater.com/auto: "true"
        containers:
          app:
            image:
              repository: ghcr.io/onedr0p/plex
              tag: 1.41.2.9200-c6bbc1b53@sha256:47c6f3d85f4e739210860934a0bb24126170fa2f6a602fb909467f17a035c311
            env:
              TZ: America/Chicago
              PLEX_ADVERTISE_URL: https://plex.hsn.dev:443,http://10.1.1.39:32400
              PLEX_NO_AUTH_NETWORKS: 10.1.1.0/24,10.244.0.0/16
            probes:
              liveness: &probes
                enabled: true
                custom: true
                spec:
                  httpGet:
                    path: /identity
                    port: 32400
                  initialDelaySeconds: 0
                  periodSeconds: 10
                  timeoutSeconds: 1
                  failureThreshold: 3
              readiness: *probes
              startup:
                enabled: true
                spec:
                  failureThreshold: 30
                  periodSeconds: 10
            securityContext:
              allowPrivilegeEscalation: false
              readOnlyRootFilesystem: true
              capabilities: { drop: ["ALL"] }
            resources:
              requests:
                cpu: 100m
              limits:
                nvidia.com/gpu: 1 # requesting 1 GPU
                memory: 16Gi
    defaultPodOptions:
      securityContext:
        runAsNonRoot: true
        runAsUser: 568
        runAsGroup: 568
        fsGroup: 568
        fsGroupChangePolicy: OnRootMismatch
        supplementalGroups: [44, 10000]
        seccompProfile: { type: RuntimeDefault }
      nodeSelector:
        nvidia.com/gpu.present: "true"
      runtimeClassName: nvidia
    service:
      app:
        controller: plex
        type: LoadBalancer
        annotations:
          io.cilium/lb-ipam-ips: 10.1.1.39
        ports:
          http:
            port: 32400
    ingress:
      app:
        annotations:
          external-dns.alpha.kubernetes.io/target: external.hsn.dev
          nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
          external-dns.alpha.kubernetes.io/cloudflare-proxied: "true"
        className: external-nginx
        hosts:
          - host: "{{ .Release.Name }}.hsn.dev"
            paths:
              - path: /
                service:
                  identifier: app
                  port: http
    persistence:
      config:
        existingClaim: plex
        # TODO: If setting up Plex for the first time, you'll want to add the globalMounts section
        globalMounts:
          - path: /config/Library/Application Support/Plex Media Server
      # Separate PVC for cache to avoid backing up cache files
      cache:
        existingClaim: plex-cache
        globalMounts:
          - path: /config/Library/Application Support/Plex Media Server/Cache
      logs:
        type: emptyDir
        globalMounts:
          - path: /config/Library/Application Support/Plex Media Server/Logs
      tmp:
        type: emptyDir
      transcode:
        type: emptyDir
      media:
        type: nfs
        server: 10.1.1.61
        path: /moria/media
        globalMounts:
          - path: /media
            readOnly: true
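For context: "liveness: &probes" and "readiness: *probes" in the HelmRelease above are plain YAML anchor/alias reuse, so the readiness probe is byte-for-byte the liveness probe. Expanded, the alias is equivalent to:

# Equivalent expansion of "readiness: *probes" from the HelmRelease above:
readiness:
  enabled: true
  custom: true
  spec:
    httpGet:
      path: /identity
      port: 32400
    initialDelaySeconds: 0
    periodSeconds: 10
    timeoutSeconds: 1
    failureThreshold: 3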
@@ -1,11 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ./pvc.yaml
  - ./helmrelease.yaml
  - ../../../../templates/gatus/external
  - ../../../../templates/volsync
generatorOptions:
  disableNameSuffixHash: true
@@ -1,11 +0,0 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: plex-cache
spec:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 75Gi
  storageClassName: ceph-block
@@ -1,19 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: kometa-image-maid
spec:
  refreshInterval: 5m
  secretStoreRef:
    kind: ClusterSecretStore
    name: onepassword-connect
  target:
    name: kometa-image-maid-secret
    creationPolicy: Owner
  data:
    - secretKey: PLEX_TOKEN
      remoteRef:
        key: Plex
        property: token
@@ -1,93 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: plex-image-cleanup
spec:
  chart:
    spec:
      chart: app-template
      version: 3.5.1
      interval: 30m
      sourceRef:
        kind: HelmRepository
        name: bjw-s
        namespace: flux-system
  interval: 30m
  values:
    controllers:
      kometa-image-maid:
        type: cronjob
        annotations:
          reloader.stakater.com/auto: "true"
        cronjob:
          schedule: "30 8 * * 6"
        pod:
          affinity:
            podAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                - labelSelector:
                    matchExpressions:
                      - key: app.kubernetes.io/name
                        operator: In
                        values:
                          - plex
                  topologyKey: kubernetes.io/hostname
          securityContext:
            runAsUser: 568
            runAsGroup: 568
            runAsNonRoot: true

        containers:
          app:
            image:
              repository: docker.io/kometateam/imagemaid
              tag: v1.1.1
            env:
              PLEX_URL: http://plex.default.svc.cluster.local:32400
              PLEX_TOKEN:
                valueFrom:
                  secretKeyRef:
                    name: kometa-image-maid-secret
                    key: PLEX_TOKEN
              PLEX_PATH: /data/plex_config/Library/Application Support/Plex Media Server
              MODE: remove
              PHOTO_TRANSCODER: true
            probes:
              liveness:
                enabled: false
              readiness:
                enabled: false
              startup:
                enabled: false
            resources:
              requests:
                cpu: 25m
                memory: 128Mi
            securityContext:
              allowPrivilegeEscalation: false
              readOnlyRootFilesystem: true
              capabilities:
                drop:
                  - ALL

    persistence:
      config:
        type: emptyDir
        advancedMounts:
          kometa-image-maid:
            app:
              - path: /config
      plex-config:
        existingClaim: plex
        advancedMounts:
          kometa-image-maid:
            app:
              - path: /data/plex_config/Library/Application Support/Plex Media Server/
      plex-cache:
        existingClaim: plex-cache
        advancedMounts:
          kometa-image-maid:
            app:
              - path: /data/plex_config/Library/Application Support/Plex Media Server/Cache
@@ -1,55 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: &app plex
  namespace: flux-system
spec:
  targetNamespace: default
  commonMetadata:
    labels:
      app.kubernetes.io/name: *app
  path: ./kubernetes/apps/default/plex/app
  prune: true
  sourceRef:
    kind: GitRepository
    name: theshire
  wait: true
  dependsOn:
    - name: rook-ceph-cluster
    - name: volsync
    - name: external-secrets-stores
  interval: 30m
  timeout: 5m
  postBuild:
    substitute:
      APP: *app
      GATUS_PATH: /web/index.html
      VOLSYNC_CAPACITY: 30Gi
---
# yaml-language-server: $schema=https://ks.hsn.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: &app kometa-image-maid
  namespace: flux-system
spec:
  targetNamespace: default
  commonMetadata:
    labels:
      app.kubernetes.io/name: *app
  interval: 30m
  timeout: 5m
  path: "./kubernetes/apps/default/plex/kometa-image-maid"
  prune: true
  sourceRef:
    kind: GitRepository
    name: theshire
  wait: false
  dependsOn:
    - name: external-secrets-stores
    - name: plex
  postBuild:
    substitute:
      APP: *app
@@ -1,120 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: &app scrypted
spec:
  interval: 30m
  chart:
    spec:
      chart: app-template
      version: 3.5.1
      interval: 30m
      sourceRef:
        kind: HelmRepository
        name: bjw-s
        namespace: flux-system

  values:
    controllers:
      scrypted:
        annotations:
          reloader.stakater.com/auto: "true"
        pod:
          nodeSelector:
            google.feature.node.kubernetes.io/coral: "true"
            nvidia.com/gpu.present: "true"
          securityContext:
            supplementalGroups:
              - 568
        containers:
          app:
            image:
              repository: ghcr.io/koush/scrypted
              tag: v0.123.31-jammy-nvidia
            probes:
              liveness:
                enabled: true
              readiness:
                enabled: true
              startup:
                enabled: true
                spec:
                  failureThreshold: 30
                  periodSeconds: 5
            resources:
              requests:
                cpu: 136m
                memory: 1024Mi
              limits:
                nvidia.com/gpu: 1
                memory: 8192Mi
            securityContext:
              privileged: true
    service:
      app:
        controller: *app
        type: LoadBalancer
        annotations:
          io.cilium/lb-ipam-ips: 10.1.1.33
        nameOverride: *app
        ports:
          http:
            port: 11080
            primary: true
          rebroadcast1: # driveway
            port: 39655
          rebroadcast2: # sideyard
            port: 46561
          rebroadcast3: # doorbell
            port: 44759
          homekit: # homekit
            port: 42010
          homekit-bridge: # bridge
            port: 33961

    ingress:
      app:
        className: "internal-nginx"
        annotations:
        hosts:
          - host: &host scrypted.jahanson.tech
            paths:
              - path: /
                service:
                  identifier: app
                  port: http
        tls:
          - hosts:
              - *host
    persistence:
      config:
        existingClaim: scrypted
        advancedMounts:
          scrypted:
            app:
              - path: /server/volume
      cache:
        type: emptyDir
        globalMounts:
          - path: /.cache
      cache-npm:
        type: emptyDir
        globalMounts:
          - path: /.npm
      dev-bus-usb:
        type: hostPath
        hostPath: /dev/bus/usb
        hostPathType: Directory
      sys-bus-usb:
        type: hostPath
        hostPath: /sys/bus/usb
        hostPathType: Directory
      recordings:
        type: nfs
        server: shadowfax.jahanson.tech
        path: /nahar/scrypted
        globalMounts:
          - path: /recordings
@@ -1,8 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ./helmrelease.yaml
  - ../../../../templates/gatus/internal
  - ../../../../templates/volsync
@@ -1,30 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: &appname scrypted
  namespace: flux-system
spec:
  targetNamespace: default
  commonMetadata:
    labels:
      app.kubernetes.io/name: *appname
  interval: 30m
  timeout: 5m
  path: "./kubernetes/apps/default/scrypted/app"
  prune: true
  sourceRef:
    kind: GitRepository
    name: theshire
  wait: false
  dependsOn:
    - name: rook-ceph-cluster
    - name: volsync
    - name: external-secrets-stores
  postBuild:
    substitute:
      APP: *appname
      APP_UID: "0"
      APP_GID: "0"
      VOLSYNC_CAPACITY: 5Gi
@@ -1,15 +1,16 @@
 ---
-# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json
-apiVersion: helm.toolkit.fluxcd.io/v2
+# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2beta2.schema.json
+apiVersion: helm.toolkit.fluxcd.io/v2beta2
 kind: HelmRelease
 metadata:
-  name: &app jellyfin
+  name: jellyfin
+  namespace: default
 spec:
   interval: 30m
   chart:
     spec:
       chart: app-template
-      version: 3.5.1
+      version: 3.1.0
       sourceRef:
         kind: HelmRepository
         name: bjw-s
@@ -20,31 +21,26 @@ spec:
   upgrade:
     cleanupOnFail: true
     remediation:
-      strategy: rollback
       retries: 3
-  dependsOn:
-    - name: nvidia-device-plugin
-      namespace: kube-system
-    - name: node-feature-discovery
-      namespace: kube-system
-    - name: rook-ceph-cluster
-      namespace: rook-ceph
-    - name: volsync
-      namespace: volsync-system
+      strategy: rollback
   values:
     controllers:
       jellyfin:
+        type: statefulset
         annotations:
           reloader.stakater.com/auto: "true"
         containers:
           app:
             image:
-              repository: ghcr.io/jellyfin/jellyfin
-              tag: 10.10.3@sha256:17c3a8d9dddb97789b5f37112840ebf96566442c14d4754193a6c2eb154bc221
+              repository: jellyfin/jellyfin
+              tag: 10.8.13
             env:
+              NVIDIA_VISIBLE_DEVICES: "all"
+              NVIDIA_DRIVER_CAPABILITIES: "compute,video,utility"
               DOTNET_SYSTEM_IO_DISABLEFILELOCKING: "true"
               JELLYFIN_FFmpeg__probesize: 50000000
               JELLYFIN_FFmpeg__analyzeduration: 50000000
+              JELLYFIN_PublishedServerUrl: jelly.hsn.dev
               TZ: America/Chicago
             probes:
               liveness: &probes
@@ -63,76 +59,58 @@ spec:
                 enabled: false
             resources:
               requests:
-                cpu: 100m
-              limits:
                 nvidia.com/gpu: 1 # requesting 1 GPU
+                cpu: 100m
+                memory: 512Mi
+              limits:
+                nvidia.com/gpu: 1
                 memory: 4Gi
-    defaultPodOptions:
-      securityContext:
-        runAsNonRoot: true
-        runAsUser: 568
-        runAsGroup: 568
-        fsGroup: 568
-        fsGroupChangePolicy: OnRootMismatch
-        supplementalGroups: [44, 10000]
-        seccompProfile: { type: RuntimeDefault }
-      nodeSelector:
-        nvidia.com/gpu.present: "true"
-      runtimeClassName: nvidia
+        pod:
+          runtimeClassName: nvidia
+          enableServiceLinks: false
+          nodeSelector:
+            nvidia.com/gpu.present: "true"
+          securityContext:
+            runAsUser: 568
+            runAsGroup: 568
+            fsGroup: 568
+            fsGroupChangePolicy: OnRootMismatch
+            supplementalGroups: [44, 105, 10000]
     service:
       app:
-        controller: *app
-        type: LoadBalancer
-        annotations:
-          io.cilium/lb-ipam-ips: 10.1.1.40
+        controller: jellyfin
         ports:
           http:
             port: *port
     ingress:
       app:
-        annotations:
-          external-dns.alpha.kubernetes.io/target: external.hsn.dev
-          external-dns.alpha.kubernetes.io/cloudflare-proxied: "true"
+        enabled: true
         className: external-nginx
+        annotations:
+          external-dns.alpha.kubernetes.io/cloudflare-proxied: "true"
+          external-dns.alpha.kubernetes.io/target: external.hsn.dev
         hosts:
-          - host: "{{ .Release.Name }}.hsn.dev"
+          - host: &host "jelly.hsn.dev"
             paths:
               - path: /
                 service:
                   identifier: app
-                  port: *port
+                  port: http
-      internal:
-        className: internal-nginx
-        hosts:
-          - host: &host "{{ .Release.Name }}.jahanson.tech"
-            paths:
-              - path: /
-                service:
-                  identifier: app
-                  port: *port
         tls:
           - hosts:
               - *host
     persistence:
       config:
+        existingClaim: jellyfin
         enabled: true
-        existingClaim: *app
-        globalMounts:
-          - path: /config
-      media:
-        type: nfs
-        server: shadowfax.jahanson.tech
-        path: /moria/media
-        globalMounts:
-          - path: /media
-            readOnly: true
       transcode:
-        enabled: true
         type: emptyDir
         globalMounts:
           - path: /transcode
-      cache:
+      media:
         enabled: true
-        type: emptyDir
+        type: nfs
+        server: 10.1.1.12
+        path: /mnt/users/Media
         globalMounts:
-          - path: /cache
+          - path: /media
@@ -2,7 +2,7 @@
 # yaml-language-server: $schema=https://json.schemastore.org/kustomization
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
+namespace: default
 resources:
-  - ./gatus.yaml
   - ./helmrelease.yaml
   - ../../../../templates/volsync
@@ -3,23 +3,21 @@
 apiVersion: kustomize.toolkit.fluxcd.io/v1
 kind: Kustomization
 metadata:
-  name: &app redlib
+  name: &app jellyfin
   namespace: flux-system
 spec:
-  targetNamespace: default
-  commonMetadata:
-    labels:
-      app.kubernetes.io/name: *app
   dependsOn:
     - name: external-secrets-stores
-  path: ./kubernetes/apps/default/redlib/app
+  path: ./kubernetes/apps/default/jellyfin/app
   prune: true
   sourceRef:
     kind: GitRepository
-    name: theshire
+    name: homelab
   wait: false
   interval: 30m
+  retryInterval: 1m
   timeout: 5m
   postBuild:
     substitute:
       APP: *app
+      VOLSYNC_CAPACITY: 10Gi
.archive/kubernetes/kube-system/cilium/app/bgpcrd.yaml — Normal file, 588 lines
@@ -0,0 +1,588 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.14.0
  creationTimestamp: null
  name: ciliumbgppeeringpolicies.cilium.io
spec:
  group: cilium.io
  names:
    categories:
      - cilium
      - ciliumbgp
    kind: CiliumBGPPeeringPolicy
    listKind: CiliumBGPPeeringPolicyList
    plural: ciliumbgppeeringpolicies
    shortNames:
      - bgpp
    singular: ciliumbgppeeringpolicy
  scope: Cluster
  versions:
    - additionalPrinterColumns:
        - jsonPath: .metadata.creationTimestamp
          name: Age
          type: date
      name: v2alpha1
      schema:
        openAPIV3Schema:
          description: CiliumBGPPeeringPolicy is a Kubernetes third-party resource for instructing Cilium's BGP control plane to create virtual BGP routers.
          properties:
            apiVersion:
              description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
              type: string
            kind:
              description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
              type: string
            metadata:
              type: object
            spec:
              description: Spec is a human readable description of a BGP peering policy
              properties:
                nodeSelector:
                  description: "NodeSelector selects a group of nodes where this BGP Peering Policy applies. \n If empty / nil this policy applies to all nodes."
                  properties:
                    matchExpressions:
                      description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
                      items:
                        description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
                        properties:
                          key:
                            description: key is the label key that the selector applies to.
                            type: string
                          operator:
                            description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
                            enum:
                              - In
                              - NotIn
                              - Exists
                              - DoesNotExist
                            type: string
                          values:
                            description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
                            items:
                              type: string
                            type: array
                        required:
                          - key
                          - operator
                        type: object
                      type: array
                    matchLabels:
                      additionalProperties:
                        description: MatchLabelsValue represents the value from the MatchLabels {key,value} pair.
                        maxLength: 63
                        pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$
                        type: string
                      description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
                      type: object
                  type: object
                virtualRouters:
                  description: A list of CiliumBGPVirtualRouter(s) which instructs the BGP control plane how to instantiate virtual BGP routers.
                  items:
                    description: CiliumBGPVirtualRouter defines a discrete BGP virtual router configuration.
                    properties:
                      exportPodCIDR:
                        default: false
                        description: ExportPodCIDR determines whether to export the Node's private CIDR block to the configured neighbors.
                        type: boolean
                      localASN:
                        description: LocalASN is the ASN of this virtual router. Supports extended 32bit ASNs
                        format: int64
                        maximum: 4294967295
                        minimum: 0
                        type: integer
                      neighbors:
                        description: Neighbors is a list of neighboring BGP peers for this virtual router
                        items:
                          description: CiliumBGPNeighbor is a neighboring peer for use in a CiliumBGPVirtualRouter configuration.
                          properties:
                            advertisedPathAttributes:
                              description: AdvertisedPathAttributes can be used to apply additional path attributes to selected routes when advertising them to the peer. If empty / nil, no additional path attributes are advertised.
                              items:
                                description: CiliumBGPPathAttributes can be used to apply additional path attributes to matched routes when advertising them to a BGP peer.
                                properties:
                                  communities:
                                    description: Communities defines a set of community values advertised in the supported BGP Communities path attributes. If nil / not set, no BGP Communities path attribute will be advertised.
                                    properties:
                                      large:
                                        description: Large holds a list of the BGP Large Communities Attribute (RFC 8092) values.
                                        items:
                                          description: BGPLargeCommunity type represents a value of the BGP Large Communities Attribute (RFC 8092), as three 4-byte decimal numbers separated by colons.
                                          pattern: ^([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5]):([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5]):([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5])$
                                          type: string
                                        type: array
                                      standard:
                                        description: Standard holds a list of "standard" 32-bit BGP Communities Attribute (RFC 1997) values defined as numeric values.
                                        items:
                                          description: BGPStandardCommunity type represents a value of the "standard" 32-bit BGP Communities Attribute (RFC 1997) as a 4-byte decimal number or two 2-byte decimal numbers separated by a colon (<0-65535>:<0-65535>). For example, no-export community value is 65553:65281.
                                          pattern: ^([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5])$|^([0-9]|[1-9][0-9]{1,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5]):([0-9]|[1-9][0-9]{1,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
                                          type: string
                                        type: array
                                      wellKnown:
                                        description: WellKnown holds a list "standard" 32-bit BGP Communities Attribute (RFC 1997) values defined as well-known string aliases to their numeric values.
                                        items:
                                          description: "BGPWellKnownCommunity type represents a value of the \"standard\" 32-bit BGP Communities Attribute (RFC 1997) as a well-known string alias to its numeric value. Allowed values and their mapping to the numeric values: \n internet = 0x00000000 (0:0) planned-shut = 0xffff0000 (65535:0) accept-own = 0xffff0001 (65535:1) route-filter-translated-v4 = 0xffff0002 (65535:2) route-filter-v4 = 0xffff0003 (65535:3) route-filter-translated-v6 = 0xffff0004 (65535:4) route-filter-v6 = 0xffff0005 (65535:5) llgr-stale = 0xffff0006 (65535:6) no-llgr = 0xffff0007 (65535:7) blackhole = 0xffff029a (65535:666) no-export = 0xffffff01\t(65535:65281) no-advertise = 0xffffff02 (65535:65282) no-export-subconfed \ = 0xffffff03 (65535:65283) no-peer \ = 0xffffff04 (65535:65284)"
                                          enum:
                                            - internet
                                            - planned-shut
                                            - accept-own
                                            - route-filter-translated-v4
                                            - route-filter-v4
                                            - route-filter-translated-v6
                                            - route-filter-v6
                                            - llgr-stale
                                            - no-llgr
                                            - blackhole
                                            - no-export
                                            - no-advertise
                                            - no-export-subconfed
                                            - no-peer
                                          type: string
                                        type: array
                                    type: object
                                  localPreference:
                                    description: LocalPreference defines the preference value advertised in the BGP Local Preference path attribute. As Local Preference is only valid for iBGP peers, this value will be ignored for eBGP peers (no Local Preference path attribute will be advertised). If nil / not set, the default Local Preference of 100 will be advertised in the Local Preference path attribute for iBGP peers.
                                    format: int64
                                    maximum: 4294967295
                                    minimum: 0
                                    type: integer
                                  selector:
                                    description: Selector selects a group of objects of the SelectorType resulting into routes that will be announced with the configured Attributes. If nil / not set, all objects of the SelectorType are selected.
                                    properties:
                                      matchExpressions:
                                        description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
                                        items:
                                          description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
                                          properties:
                                            key:
                                              description: key is the label key that the selector applies to.
                                              type: string
                                            operator:
                                              description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
                                              enum:
                                                - In
                                                - NotIn
                                                - Exists
                                                - DoesNotExist
                                              type: string
                                            values:
                                              description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
                                              items:
                                                type: string
                                              type: array
                                          required:
                                            - key
                                            - operator
                                          type: object
                                        type: array
                                      matchLabels:
                                        additionalProperties:
                                          description: MatchLabelsValue represents the value from the MatchLabels {key,value} pair.
                                          maxLength: 63
                                          pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$
                                          type: string
                                        description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
                                        type: object
                                    type: object
                                  selectorType:
                                    description: 'SelectorType defines the object type on which the Selector applies: - For "PodCIDR" the Selector matches k8s CiliumNode resources (path attributes apply to routes announced for PodCIDRs of selected CiliumNodes. Only affects routes of cluster scope / Kubernetes IPAM CIDRs, not Multi-Pool IPAM CIDRs. - For "CiliumLoadBalancerIPPool" the Selector matches CiliumLoadBalancerIPPool custom resources (path attributes apply to routes announced for selected CiliumLoadBalancerIPPools). - For "CiliumPodIPPool" the Selector matches CiliumPodIPPool custom resources (path attributes apply to routes announced for allocated CIDRs of selected CiliumPodIPPools).'
                                    enum:
                                      - PodCIDR
                                      - CiliumLoadBalancerIPPool
                                      - CiliumPodIPPool
                                    type: string
                                required:
                                  - selectorType
                                type: object
                              type: array
                            authSecretRef:
                              description: AuthSecretRef is the name of the secret to use to fetch a TCP authentication password for this peer.
                              type: string
                            connectRetryTimeSeconds:
                              default: 120
                              description: ConnectRetryTimeSeconds defines the initial value for the BGP ConnectRetryTimer (RFC 4271, Section 8).
                              format: int32
                              maximum: 2147483647
                              minimum: 1
                              type: integer
                            eBGPMultihopTTL:
                              default: 1
                              description: EBGPMultihopTTL controls the multi-hop feature for eBGP peers. Its value defines the Time To Live (TTL) value used in BGP packets sent to the neighbor. The value 1 implies that eBGP multi-hop feature is disabled (only a single hop is allowed). This field is ignored for iBGP peers.
                              format: int32
                              maximum: 255
                              minimum: 1
                              type: integer
                            families:
                              description: "Families, if provided, defines a set of AFI/SAFIs the speaker will negotiate with it's peer. \n If this slice is not provided the default families of IPv6 and IPv4 will be provided."
                              items:
                                description: CiliumBGPFamily represents a AFI/SAFI address family pair.
                                properties:
|
properties:
|
||||||
|
afi:
|
||||||
|
description: Afi is the Address Family Identifier
|
||||||
|
(AFI) of the family.
|
||||||
|
enum:
|
||||||
|
- ipv4
|
||||||
|
- ipv6
|
||||||
|
- l2vpn
|
||||||
|
- ls
|
||||||
|
- opaque
|
||||||
|
type: string
|
||||||
|
safi:
|
||||||
|
description: Safi is the Subsequent Address Family
|
||||||
|
Identifier (SAFI) of the family.
|
||||||
|
enum:
|
||||||
|
- unicast
|
||||||
|
- multicast
|
||||||
|
- mpls_label
|
||||||
|
- encapsulation
|
||||||
|
- vpls
|
||||||
|
- evpn
|
||||||
|
- ls
|
||||||
|
- sr_policy
|
||||||
|
- mup
|
||||||
|
- mpls_vpn
|
||||||
|
- mpls_vpn_multicast
|
||||||
|
- route_target_constraints
|
||||||
|
- flowspec_unicast
|
||||||
|
- flowspec_vpn
|
||||||
|
- key_value
|
||||||
|
type: string
|
||||||
|
required:
|
||||||
|
- afi
|
||||||
|
- safi
|
||||||
|
type: object
|
||||||
|
type: array
|
||||||
|
gracefulRestart:
|
||||||
|
description: GracefulRestart defines graceful restart
|
||||||
|
parameters which are negotiated with this neighbor.
|
||||||
|
If empty / nil, the graceful restart capability is disabled.
|
||||||
|
properties:
|
||||||
|
enabled:
|
||||||
|
description: Enabled flag, when set enables graceful
|
||||||
|
restart capability.
|
||||||
|
type: boolean
|
||||||
|
restartTimeSeconds:
|
||||||
|
default: 120
|
||||||
|
description: RestartTimeSeconds is the estimated time
|
||||||
|
it will take for the BGP session to be re-established
|
||||||
|
with peer after a restart. After this period, peer
|
||||||
|
will remove stale routes. This is described RFC
|
||||||
|
4724 section 4.2.
|
||||||
|
format: int32
|
||||||
|
maximum: 4095
|
||||||
|
minimum: 1
|
||||||
|
type: integer
|
||||||
|
required:
|
||||||
|
- enabled
|
||||||
|
type: object
|
||||||
|
holdTimeSeconds:
|
||||||
|
default: 90
|
||||||
|
description: HoldTimeSeconds defines the initial value
|
||||||
|
for the BGP HoldTimer (RFC 4271, Section 4.2). Updating
|
||||||
|
this value will cause a session reset.
|
||||||
|
format: int32
|
||||||
|
maximum: 65535
|
||||||
|
minimum: 3
|
||||||
|
type: integer
|
||||||
|
keepAliveTimeSeconds:
|
||||||
|
default: 30
|
||||||
|
description: KeepaliveTimeSeconds defines the initial
|
||||||
|
value for the BGP KeepaliveTimer (RFC 4271, Section
|
||||||
|
8). It can not be larger than HoldTimeSeconds. Updating
|
||||||
|
this value will cause a session reset.
|
||||||
|
format: int32
|
||||||
|
maximum: 65535
|
||||||
|
minimum: 1
|
||||||
|
type: integer
|
||||||
|
peerASN:
|
||||||
|
description: PeerASN is the ASN of the peer BGP router.
|
||||||
|
Supports extended 32bit ASNs
|
||||||
|
format: int64
|
||||||
|
maximum: 4294967295
|
||||||
|
minimum: 0
|
||||||
|
type: integer
|
||||||
|
peerAddress:
|
||||||
|
description: PeerAddress is the IP address of the peer.
|
||||||
|
This must be in CIDR notation and use a /32 to express
|
||||||
|
a single host.
|
||||||
|
format: cidr
|
||||||
|
type: string
|
||||||
|
peerPort:
|
||||||
|
default: 179
|
||||||
|
description: PeerPort is the TCP port of the peer. 1-65535
|
||||||
|
is the range of valid port numbers that can be specified.
|
||||||
|
If unset, defaults to 179.
|
||||||
|
format: int32
|
||||||
|
maximum: 65535
|
||||||
|
minimum: 1
|
||||||
|
type: integer
|
||||||
|
required:
|
||||||
|
- peerASN
|
||||||
|
- peerAddress
|
||||||
|
type: object
|
||||||
|
minItems: 1
|
||||||
|
type: array
|
||||||
|
podIPPoolSelector:
|
||||||
|
description: "PodIPPoolSelector selects CiliumPodIPPools based
|
||||||
|
on labels. The virtual router will announce allocated CIDRs
|
||||||
|
of matching CiliumPodIPPools. \n If empty / nil no CiliumPodIPPools
|
||||||
|
will be announced."
|
||||||
|
properties:
|
||||||
|
matchExpressions:
|
||||||
|
description: matchExpressions is a list of label selector
|
||||||
|
requirements. The requirements are ANDed.
|
||||||
|
items:
|
||||||
|
description: A label selector requirement is a selector
|
||||||
|
that contains values, a key, and an operator that relates
|
||||||
|
the key and values.
|
||||||
|
properties:
|
||||||
|
key:
|
||||||
|
description: key is the label key that the selector
|
||||||
|
applies to.
|
||||||
|
type: string
|
||||||
|
operator:
|
||||||
|
description: operator represents a key's relationship
|
||||||
|
to a set of values. Valid operators are In, NotIn,
|
||||||
|
Exists and DoesNotExist.
|
||||||
|
enum:
|
||||||
|
- In
|
||||||
|
- NotIn
|
||||||
|
- Exists
|
||||||
|
- DoesNotExist
|
||||||
|
type: string
|
||||||
|
values:
|
||||||
|
description: values is an array of string values.
|
||||||
|
If the operator is In or NotIn, the values array
|
||||||
|
must be non-empty. If the operator is Exists or
|
||||||
|
DoesNotExist, the values array must be empty. This
|
||||||
|
array is replaced during a strategic merge patch.
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
type: array
|
||||||
|
required:
|
||||||
|
- key
|
||||||
|
- operator
|
||||||
|
type: object
|
||||||
|
type: array
|
||||||
|
matchLabels:
|
||||||
|
additionalProperties:
|
||||||
|
description: MatchLabelsValue represents the value from
|
||||||
|
the MatchLabels {key,value} pair.
|
||||||
|
maxLength: 63
|
||||||
|
pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$
|
||||||
|
type: string
|
||||||
|
description: matchLabels is a map of {key,value} pairs.
|
||||||
|
A single {key,value} in the matchLabels map is equivalent
|
||||||
|
to an element of matchExpressions, whose key field is
|
||||||
|
"key", the operator is "In", and the values array contains
|
||||||
|
only "value". The requirements are ANDed.
|
||||||
|
type: object
|
||||||
|
type: object
|
||||||
|
serviceSelector:
|
||||||
|
description: "ServiceSelector selects a group of load balancer
|
||||||
|
services which this virtual router will announce. The loadBalancerClass
|
||||||
|
for a service must be nil or specify a class supported by
|
||||||
|
Cilium, e.g. \"io.cilium/bgp-control-plane\". Refer to the
|
||||||
|
following document for additional details regarding load balancer
|
||||||
|
classes: \n https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class
|
||||||
|
\n If empty / nil no services will be announced."
|
||||||
|
properties:
|
||||||
|
matchExpressions:
|
||||||
|
description: matchExpressions is a list of label selector
|
||||||
|
requirements. The requirements are ANDed.
|
||||||
|
items:
|
||||||
|
description: A label selector requirement is a selector
|
||||||
|
that contains values, a key, and an operator that relates
|
||||||
|
the key and values.
|
||||||
|
properties:
|
||||||
|
key:
|
||||||
|
description: key is the label key that the selector
|
||||||
|
applies to.
|
||||||
|
type: string
|
||||||
|
operator:
|
||||||
|
description: operator represents a key's relationship
|
||||||
|
to a set of values. Valid operators are In, NotIn,
|
||||||
|
Exists and DoesNotExist.
|
||||||
|
enum:
|
||||||
|
- In
|
||||||
|
- NotIn
|
||||||
|
- Exists
|
||||||
|
- DoesNotExist
|
||||||
|
type: string
|
||||||
|
values:
|
||||||
|
description: values is an array of string values.
|
||||||
|
If the operator is In or NotIn, the values array
|
||||||
|
must be non-empty. If the operator is Exists or
|
||||||
|
DoesNotExist, the values array must be empty. This
|
||||||
|
array is replaced during a strategic merge patch.
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
type: array
|
||||||
|
required:
|
||||||
|
- key
|
||||||
|
- operator
|
||||||
|
type: object
|
||||||
|
type: array
|
||||||
|
matchLabels:
|
||||||
|
additionalProperties:
|
||||||
|
description: MatchLabelsValue represents the value from
|
||||||
|
the MatchLabels {key,value} pair.
|
||||||
|
maxLength: 63
|
||||||
|
pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$
|
||||||
|
type: string
|
||||||
|
description: matchLabels is a map of {key,value} pairs.
|
||||||
|
A single {key,value} in the matchLabels map is equivalent
|
||||||
|
to an element of matchExpressions, whose key field is
|
||||||
|
"key", the operator is "In", and the values array contains
|
||||||
|
only "value". The requirements are ANDed.
|
||||||
|
type: object
|
||||||
|
type: object
|
||||||
|
required:
|
||||||
|
- localASN
|
||||||
|
- neighbors
|
||||||
|
type: object
|
||||||
|
minItems: 1
|
||||||
|
type: array
|
||||||
|
required:
|
||||||
|
- virtualRouters
|
||||||
|
type: object
|
||||||
|
required:
|
||||||
|
- metadata
|
||||||
|
type: object
|
||||||
|
served: true
|
||||||
|
storage: true
|
||||||
|
subresources: {}
|
||||||
|
status:
|
||||||
|
acceptedNames:
|
||||||
|
kind: ""
|
||||||
|
plural: ""
|
||||||
|
conditions: []
|
||||||
|
storedVersions: []
|
|
@@ -0,0 +1,36 @@
---
apiVersion: cilium.io/v2alpha1
kind: CiliumBGPPeeringPolicy
# comments courtesy of JJGadgets
# MAKE SURE CRDs ARE INSTALLED IN CLUSTER VIA cilium-config ConfigMap OR Cilium HelmRelease/values.yaml (bgpControlPlane.enabled: true), BEFORE THIS IS APPLIED!
# "CiliumBGPPeeringPolicy" Custom Resource will replace the old MetalLB BGP's "bgp-config" ConfigMap
# "CiliumBGPPeeringPolicy" is used with `bgpControlPlane.enabled: true` which uses GoBGP, NOT the old `bgp.enabled: true` which uses MetalLB
metadata:
  name: bgp-loadbalancer-ip-main
spec:
  nodeSelector:
    matchLabels:
      kubernetes.io/os: "linux" # match all Linux nodes, change this to match more granularly if more than 1 PeeringPolicy is to be used throughout cluster
  virtualRouters:
    - localASN: 64512
      exportPodCIDR: false
      serviceSelector: # this replaces address-pools, instead of defining the range of IPs that can be assigned to LoadBalancer services, now services have to match below selectors for their LB IPs to be announced
        matchExpressions:
          - {
              key: thisFakeSelector,
              operator: NotIn,
              values: ["will-match-and-announce-all-services"],
            }
      neighbors:
        - peerAddress: "10.1.1.1/32" # unlike bgp-config ConfigMap, peerAddress needs to be in CIDR notation
          peerASN: 64512

---
# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumloadbalancerippool_v2alpha1.json
apiVersion: "cilium.io/v2alpha1"
kind: CiliumLoadBalancerIPPool
metadata:
  name: main-pool
spec:
  cidrs:
    - cidr: 10.45.0.1/24
78
.archive/kubernetes/kube-system/cilium/app/helmrelease.yaml
Normal file
@@ -0,0 +1,78 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json
apiVersion: helm.toolkit.fluxcd.io/v2beta2
kind: HelmRelease
metadata:
  name: cilium
  namespace: kube-system
spec:
  interval: 30m
  chart:
    spec:
      chart: cilium
      version: 1.15.3
      sourceRef:
        kind: HelmRepository
        name: cilium
        namespace: flux-system
  maxHistory: 2
  install:
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      retries: 3
  uninstall:
    keepHistory: false
  values:
    cluster:
      name: homelab
      id: 1
    hubble:
      relay:
        enabled: true
      ui:
        enabled: true
      metrics:
        enableOpenMetrics: true
    prometheus:
      enabled: true
    operator:
      prometheus:
        enabled: true
    ipam:
      mode: kubernetes
    kubeProxyReplacement: true
    k8sServiceHost: 127.0.0.1
    k8sServicePort: 7445
    rollOutCiliumPods: true
    cgroup:
      automount:
        enabled: false
      hostRoot: /sys/fs/cgroup
    bgp:
      enabled: false
      announce:
        loadbalancerIP: true
        podCIDR: false
    bgpControlPlane:
      enabled: true
    securityContext:
      capabilities:
        ciliumAgent:
          - CHOWN
          - KILL
          - NET_ADMIN
          - NET_RAW
          - IPC_LOCK
          - SYS_ADMIN
          - SYS_RESOURCE
          - DAC_OVERRIDE
          - FOWNER
          - SETGID
          - SETUID
        cleanCiliumState:
          - NET_ADMIN
          - SYS_ADMIN
          - SYS_RESOURCE
@@ -0,0 +1,23 @@
# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumclusterwidenetworkpolicy_v2.json
---
apiVersion: cilium.io/v2
kind: CiliumClusterwideNetworkPolicy
metadata:
  name: allow-ssh
spec:
  description: ""
  nodeSelector:
    matchLabels:
      # node-access: ssh
      node-role.kubernetes.io/control-plane: "true"
  ingress:
    - fromEntities:
        - cluster
    - toPorts:
        - ports:
            - port: "22"
              protocol: TCP
    - icmps:
        - fields:
            - type: 8
              family: IPv4
@@ -0,0 +1,27 @@
# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumclusterwidenetworkpolicy_v2.json
---
apiVersion: cilium.io/v2
kind: CiliumClusterwideNetworkPolicy
metadata:
  name: api-server
spec:
  nodeSelector:
    # apply to master nodes
    matchLabels:
      node-role.kubernetes.io/control-plane: 'true'
  ingress:
    # load balancer -> api server
    - fromCIDR:
        - 167.235.217.82/32
      toPorts:
        - ports:
            - port: '6443'
              protocol: TCP
  egress:
    # api server -> kubelet
    - toEntities:
        - remote-node
      toPorts:
        - ports:
            - port: '10250'
              protocol: TCP
@@ -0,0 +1,41 @@
# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumclusterwidenetworkpolicy_v2.json
---
apiVersion: cilium.io/v2
kind: CiliumClusterwideNetworkPolicy
metadata:
  name: cilium-health
specs:
  - endpointSelector:
      # apply to health endpoints
      matchLabels:
        reserved:health: ''
    ingress:
      # cilium agent -> cilium agent
      - fromEntities:
          - host
          - remote-node
        toPorts:
          - ports:
              - port: '4240'
                protocol: TCP
  - nodeSelector:
      # apply to all nodes
      matchLabels: {}
    ingress:
      # cilium agent -> cilium agent
      - fromEntities:
          - health
          - remote-node
        toPorts:
          - ports:
              - port: '4240'
                protocol: TCP
    egress:
      # cilium agent -> cilium agent
      - toEntities:
          - health
          - remote-node
        toPorts:
          - ports:
              - port: '4240'
                protocol: TCP
@@ -0,0 +1,26 @@
# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumclusterwidenetworkpolicy_v2.json
---
apiVersion: cilium.io/v2
kind: CiliumClusterwideNetworkPolicy
metadata:
  name: cilium-vxlan
spec:
  nodeSelector:
    # apply to all nodes
    matchLabels: {}
  ingress:
    # node -> vxlan
    - fromEntities:
        - remote-node
      toPorts:
        - ports:
            - port: '8472'
              protocol: UDP
  egress:
    # node -> vxlan
    - toEntities:
        - remote-node
      toPorts:
        - ports:
            - port: '8472'
              protocol: UDP
@@ -0,0 +1,65 @@
# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumnetworkpolicy_v2.json
---
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: core-dns
  namespace: kube-system
specs:
  - nodeSelector:
      # apply to master nodes
      matchLabels:
        node-role.kubernetes.io/control-plane: 'true'
    ingress:
      # core dns -> api server
      - fromEndpoints:
          - matchLabels:
              io.cilium.k8s.policy.serviceaccount: coredns
        toPorts:
          - ports:
              - port: '6443'
                protocol: TCP
  - nodeSelector:
      # apply to all nodes
      matchLabels: {}
    egress:
      # kubelet -> core dns probes
      - toEndpoints:
          - matchLabels:
              io.cilium.k8s.policy.serviceaccount: coredns
        toPorts:
          - ports:
              - port: '8080'
                protocol: TCP
              - port: '8181'
                protocol: TCP
  - endpointSelector:
      # apply to core dns pods
      matchLabels:
        io.cilium.k8s.policy.serviceaccount: coredns
    ingress:
      # kubelet -> core dns probes
      - fromEntities:
          - host
        toPorts:
          - ports:
              - port: '8080'
                protocol: TCP
              - port: '8181'
                protocol: TCP
    egress:
      # core dns -> api server
      - toEntities:
          - kube-apiserver
        toPorts:
          - ports:
              - port: '6443'
                protocol: TCP
      # core dns -> upstream DNS
      - toCIDR:
          - 185.12.64.1/32
          - 185.12.64.2/32
        toPorts:
          - ports:
              - port: '53'
                protocol: UDP
27
.archive/kubernetes/kube-system/cilium/app/netpols/etcd.yaml
Normal file
@@ -0,0 +1,27 @@
# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumclusterwidenetworkpolicy_v2.json
---
apiVersion: cilium.io/v2
kind: CiliumClusterwideNetworkPolicy
metadata:
  name: etcd
spec:
  nodeSelector:
    # apply to master nodes
    matchLabels:
      node-role.kubernetes.io/control-plane: 'true'
  ingress:
    # etcd peer -> etcd peer
    - fromEntities:
        - remote-node
      toPorts:
        - ports:
            - port: '2380'
              protocol: TCP
  egress:
    # etcd peer -> etcd peer
    - toEntities:
        - remote-node
      toPorts:
        - ports:
            - port: '2380'
              protocol: TCP
@@ -0,0 +1,15 @@
# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumclusterwidenetworkpolicy_v2.json
---
apiVersion: "cilium.io/v2"
kind: CiliumClusterwideNetworkPolicy
metadata:
  name: allow-specific-traffic
spec:
  endpointSelector: {}
  ingress:
    - fromEntities:
        - host
      toPorts:
        - ports:
            - port: '6443'
              protocol: TCP
@@ -0,0 +1,50 @@
# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumnetworkpolicy_v2.json
---
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: hubble-relay
  namespace: kube-system
specs:
  - nodeSelector:
      # apply to all nodes
      matchLabels: {}
    ingress:
      # hubble relay -> hubble agent
      - fromEndpoints:
          - matchLabels:
              io.cilium.k8s.policy.serviceaccount: hubble-relay
        toPorts:
          - ports:
              - port: '4244'
                protocol: TCP
    egress:
      # kubelet -> hubble relay probes
      - toEndpoints:
          - matchLabels:
              io.cilium.k8s.policy.serviceaccount: hubble-relay
        toPorts:
          - ports:
              - port: '4245'
                protocol: TCP
  - endpointSelector:
      # apply to hubble relay pods
      matchLabels:
        io.cilium.k8s.policy.serviceaccount: hubble-relay
    ingress:
      # kubelet -> hubble relay probes
      - fromEntities:
          - host
        toPorts:
          - ports:
              - port: '4245'
                protocol: TCP
    egress:
      # hubble relay -> hubble agent
      - toEntities:
          - host
          - remote-node
        toPorts:
          - ports:
              - port: '4244'
                protocol: TCP
@@ -0,0 +1,75 @@
# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumnetworkpolicy_v2.json
---
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: hubble-ui
  namespace: kube-system
specs:
  - nodeSelector:
      # apply to master nodes
      matchLabels:
        node-role.kubernetes.io/control-plane: ''
    ingress:
      # hubble ui -> api server
      - fromEndpoints:
          - matchLabels:
              io.cilium.k8s.policy.serviceaccount: hubble-ui
        toPorts:
          - ports:
              - port: '6443'
                protocol: TCP
  - endpointSelector:
      # apply to core dns endpoints
      matchLabels:
        io.cilium.k8s.policy.serviceaccount: coredns
    ingress:
      # hubble ui -> core dns
      - fromEndpoints:
          - matchLabels:
              io.cilium.k8s.policy.serviceaccount: hubble-ui
        toPorts:
          - ports:
              - port: '53'
                protocol: UDP
  - endpointSelector:
      # apply to hubble relay endpoints
      matchLabels:
        io.cilium.k8s.policy.serviceaccount: hubble-relay
    ingress:
      # hubble ui -> hubble relay
      - fromEndpoints:
          - matchLabels:
              io.cilium.k8s.policy.serviceaccount: hubble-ui
        toPorts:
          - ports:
              - port: '4245'
                protocol: TCP
  - endpointSelector:
      # apply to hubble ui endpoints
      matchLabels:
        io.cilium.k8s.policy.serviceaccount: hubble-ui
    egress:
      # hubble ui -> api server
      - toEntities:
          - kube-apiserver
        toPorts:
          - ports:
              - port: '6443'
                protocol: TCP
      # hubble ui -> hubble relay
      - toEndpoints:
          - matchLabels:
              io.cilium.k8s.policy.serviceaccount: hubble-relay
        toPorts:
          - ports:
              - port: '4245'
                protocol: TCP
      # hubble ui -> core dns
      - toEndpoints:
          - matchLabels:
              io.cilium.k8s.policy.serviceaccount: coredns
        toPorts:
          - ports:
              - port: '53'
                protocol: UDP
@@ -0,0 +1,28 @@
# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumclusterwidenetworkpolicy_v2.json
---
apiVersion: cilium.io/v2
kind: CiliumClusterwideNetworkPolicy
metadata:
  name: kubelet
spec:
  nodeSelector:
    # apply to all nodes
    matchLabels: {}
  ingress:
    # api server -> kubelet
    - fromEntities:
        - kube-apiserver
      toPorts:
        - ports:
            - port: '10250'
              protocol: TCP
  egress:
    # kubelet -> load balancer
    - toCIDR:
        - 167.235.217.82/32
      toEntities:
        - host
      toPorts:
        - ports:
            - port: '6443'
              protocol: TCP
@@ -0,0 +1,16 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kube-system
resources:
  - ./allow-ssh.yaml
  - ./apiserver.yaml
  - ./cilium-health.yaml
  - ./cilium-vxlan.yaml
  - ./core-dns.yaml
  - ./etcd.yaml
  - ./hubble-relay.yaml
  - ./hubble-ui.yaml
  - ./kubelet.yaml
@@ -3,18 +3,15 @@
 apiVersion: kustomize.toolkit.fluxcd.io/v1
 kind: Kustomization
 metadata:
-  name: &app vault
+  name: cilium
   namespace: flux-system
 spec:
-  commonMetadata:
-    labels:
-      app.kubernetes.io/name: *app
-  interval: 1m
-  path: "./kubernetes/apps/security/vault/app"
+  interval: 30m
+  retryInterval: 1m
+  timeout: 5m
+  path: "./kubernetes/apps/kube-system/cilium/app"
   prune: true
   sourceRef:
     kind: GitRepository
-    name: theshire
+    name: homelab
   wait: false
-  dependsOn:
-    - name: rook-ceph-cluster
@@ -1,6 +1,6 @@
 ---
 # yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json
-apiVersion: helm.toolkit.fluxcd.io/v2
+apiVersion: helm.toolkit.fluxcd.io/v2beta2
 kind: HelmRelease
 metadata:
   name: rook-ceph-operator
@@ -10,7 +10,7 @@ spec:
   chart:
     spec:
       chart: rook-ceph
-      version: v1.15.6
+      version: v1.14.2
       sourceRef:
         kind: HelmRepository
         name: rook-ceph
@@ -29,6 +29,8 @@ spec:
       namespace: volsync-system
   values:
     csi:
+      provisioner:
+        image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.1
       cephFSKernelMountOptions: ms_mode=prefer-crc
       enableLiveness: true
       serviceMonitor:
@@ -0,0 +1,26 @@
apiVersion: v1
kind: Secret
metadata:
  name: rook-ceph-dashboard-password
stringData:
  password: ENC[AES256_GCM,data:WWTt7SN6ssndLahsOA1gujEeGAM=,iv:YbHGNN+11wA/MLq9vFVM6v4mhPO58JmwXBDj0Qs7+Wk=,tag:5Xn0tqpiIiEt8ZWZHRTM3w==,type:str]
sops:
  kms: []
  gcp_kms: []
  azure_kv: []
  hc_vault: []
  age:
    - recipient: age1eqlaq205y5jre9hu5hvulywa7w3d4qyxwmafneamxcn7nejesedsf4q9g6
      enc: |
        -----BEGIN AGE ENCRYPTED FILE-----
        YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAzb2ZpaDd0azNHNTJoUTB6
        VVpKbm94ZEprSHplb2UrQnkzTzdGUEFjcGxBCnhxR1BwNmFIOExtMW5GRkVJWTl5
        blQzSmZ0Tm5CWTk3N25nUUM0dFpKUTQKLS0tIEgwSHNlVXNRdHZvcE10VzExU0hE
        L0dGK1lFd0ZSQ0lTcEdMNTBkSDJ6WWsKQuiJmRSLbvmgenlu4F2/CQYCCbZTtS/K
        nz7NsY2om+mWMvPSvLAp1pOHDAdFW79ggQAiCyslDi9iOkaD8MOnxQ==
        -----END AGE ENCRYPTED FILE-----
  lastmodified: "2024-01-16T23:22:39Z"
  mac: ENC[AES256_GCM,data:djsWoz/MuUhEKsM03+iaGV/dZUjRAGkiBEz4hROi+rfNWeHLJG2/xXPSKYYgT3h7JOZGh2Gnz7NXiB7TuixlWrAfT2BUBzd+2o9/hzg3xQzLAjApSfZdyap6oafatKxZAR/JHBSw7s0saVNnop9d/DZK4c1Fb1qNKoTrnWqqrF8=,iv:oitjHdZl07CaoBtNtX/sOPLHu7AS/R4YE4TKBJKrUBw=,tag:Br8mBH+mATEwsLzSZmoVYg==,type:str]
  pgp: []
  encrypted_regex: ^(data|stringData)$
  version: 3.8.1
@@ -1,6 +1,6 @@
 ---
 # yaml-language-server: $schema=https://ks.hsn.dev/helm.toolkit.fluxcd.io/helmrelease_v2beta2.json
-apiVersion: helm.toolkit.fluxcd.io/v2
+apiVersion: helm.toolkit.fluxcd.io/v2beta2
 kind: HelmRelease
 metadata:
   name: rook-ceph-cluster
@@ -10,7 +10,7 @@ spec:
   chart:
     spec:
       chart: rook-ceph-cluster
-      version: v1.15.6
+      version: v1.14.2
       sourceRef:
         kind: HelmRepository
         name: rook-ceph
@@ -49,11 +49,8 @@ spec:
       bdev_enable_discard = true
       bdev_async_discard = true
       osd_class_update_on_start = false
+      osd_pool_default_size = 1
     cephClusterSpec:
-      mgr:
-        modules:
-          - name: pg_autoscaler
-            enabled: true
       network:
         provider: host
         connections:
@@ -67,35 +64,33 @@ spec:
       storage:
         useAllNodes: true
        useAllDevices: false
-        deviceFilter: "nvme0n1"
+        deviceFilter: "nvme2n1"
      resources:
        mgr:
          requests:
-            cpu: 10m
+            cpu: 500m
            memory: 512Mi
          limits:
            cpu: 2000m
            memory: 2Gi
        mon:
          requests:
-            cpu: 10m
+            cpu: 500m
            memory: 1Gi
          limits:
            cpu: 4000m
            memory: 4Gi
        osd:
          requests:
-            cpu: 10m
-            memory: 1Gi
+            cpu: 500m
+            memory: 4Gi
          limits:
            cpu: 4000m
-            memory: 3Gi
+            memory: 8Gi
    cephBlockPools:
      - name: ceph-blockpool
        spec:
          failureDomain: host
-          replicated:
-            size: 3
          storageClass:
            enabled: true
            name: ceph-block
@@ -121,20 +116,16 @@ spec:
      - name: ceph-filesystem
        spec:
          metadataPool:
-            replicated:
-              size: 3
          dataPools:
            - failureDomain: host
-              replicated:
-                size: 3
              name: data0
          metadataServer:
            activeCount: 1
            activeStandby: true
            resources:
              requests:
-                cpu: 10m
-                memory: 1Gi
+                cpu: 1000m
+                memory: 4Gi
              limits:
                memory: 4Gi
          storageClass:
@@ -162,19 +153,14 @@ spec:
        spec:
          metadataPool:
            failureDomain: host
-            replicated:
-              size: 3
          dataPool:
            failureDomain: host
-            erasureCoded:
-              dataChunks: 2
-              codingChunks: 1
          preservePoolsOnDelete: true
          gateway:
            port: 80
            resources:
              requests:
-                cpu: 10m
+                cpu: 1000m
                memory: 1Gi
              limits:
                memory: 2Gi
@@ -14,9 +14,10 @@ spec:
   prune: false # never should be deleted
   sourceRef:
     kind: GitRepository
-    name: theshire
+    name: homelab
   wait: false
   interval: 30m
+  retryInterval: 1m
   timeout: 5m
 ---
 # yaml-language-server: $schema=https://ks.hsn.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
@@ -34,7 +35,8 @@ spec:
   prune: false # never should be deleted
   sourceRef:
     kind: GitRepository
-    name: theshire
+    name: homelab
   wait: false
   interval: 30m
+  retryInterval: 1m
   timeout: 15m
@@ -1,200 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: kyverno
  namespace: kyverno
spec:
  interval: 30m
  chart:
    spec:
      chart: kyverno
      version: 3.3.2
      sourceRef:
        kind: HelmRepository
        name: kyverno
        namespace: flux-system
  maxHistory: 2
  install:
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      retries: 3
  uninstall:
    keepHistory: false
  values:
    crds:
      install: true
    grafana:
      enabled: true
      annotations:
        grafana_folder: System
    backgroundController:
      clusterRole:
        extraResources:
          - apiGroups:
              - '*'
            resources:
              - '*'
            verbs:
              - get
              - list
              - watch
              - update
              - patch
      resources:
        requests:
          cpu: 100m
        limits:
          memory: 1Gi
    cleanupController:
      serviceMonitor:
        enabled: true
    reportsController:
      clusterRole:
        extraResources:
          - apiGroups:
              - '*'
            resources:
              - '*'
            verbs:
              - get
              - list
              - watch
      serviceMonitor:
        enabled: true
    admissionController:
      clusterRole:
        extraResources:
          - apiGroups:
              - '*'
            resources:
              - '*'
            verbs:
              - get
              - list
              - watch
      serviceMonitor:
        enabled: true
      topologySpreadConstraints:
        - maxSkew: 1
          topologyKey: kubernetes.io/hostname
          whenUnsatisfiable: DoNotSchedule
          labelSelector:
            matchLabels:
              app.kubernetes.io/instance: kyverno
              app.kubernetes.io/component: kyverno
    config:
      # -- Resource types to be skipped by the Kyverno policy engine.
      # Make sure to surround each entry in quotes so that it doesn't get parsed as a nested YAML list.
      # These are joined together without spaces, run through `tpl`, and the result is set in the config map.
      # @default -- See [values.yaml](https://github.com/kyverno/kyverno/blob/ed1906a0dc281c2aeb9b7046b843708825310330/charts/kyverno/values.yaml#L207C3-L316C1)
      resourceFilters:
        - "[Event,*,*]"
        # - "[*/*,kube-system,*]"
        - "[*/*,kube-public,*]"
        - "[*/*,kube-node-lease,*]"
        - "[Node,*,*]"
        - "[Node/*,*,*]"
        - "[APIService,*,*]"
        - "[APIService/*,*,*]"
        - "[TokenReview,*,*]"
        - "[SubjectAccessReview,*,*]"
        - "[SelfSubjectAccessReview,*,*]"
        # remove the following to allow for schematic-to-pod.yaml to work
        # - '[Binding,*,*]'
        # - '[Pod/binding,*,*]'
        - "[ReplicaSet,*,*]"
        - "[ReplicaSet/*,*,*]"
        - "[EphemeralReport,*,*]"
        - "[ClusterEphemeralReport,*,*]"
        # exclude resources from the chart
        - '[ClusterRole,*,{{ template "kyverno.admission-controller.roleName" . }}]'
        - '[ClusterRole,*,{{ template "kyverno.admission-controller.roleName" . }}:core]'
        - '[ClusterRole,*,{{ template "kyverno.admission-controller.roleName" . }}:additional]'
        - '[ClusterRole,*,{{ template "kyverno.background-controller.roleName" . }}]'
        - '[ClusterRole,*,{{ template "kyverno.background-controller.roleName" . }}:core]'
        - '[ClusterRole,*,{{ template "kyverno.background-controller.roleName" . }}:additional]'
        - '[ClusterRole,*,{{ template "kyverno.cleanup-controller.roleName" . }}]'
        - '[ClusterRole,*,{{ template "kyverno.cleanup-controller.roleName" . }}:core]'
        - '[ClusterRole,*,{{ template "kyverno.cleanup-controller.roleName" . }}:additional]'
        - '[ClusterRole,*,{{ template "kyverno.reports-controller.roleName" . }}]'
        - '[ClusterRole,*,{{ template "kyverno.reports-controller.roleName" . }}:core]'
        - '[ClusterRole,*,{{ template "kyverno.reports-controller.roleName" . }}:additional]'
        - '[ClusterRoleBinding,*,{{ template "kyverno.admission-controller.roleName" . }}]'
        - '[ClusterRoleBinding,*,{{ template "kyverno.background-controller.roleName" . }}]'
        - '[ClusterRoleBinding,*,{{ template "kyverno.cleanup-controller.roleName" . }}]'
        - '[ClusterRoleBinding,*,{{ template "kyverno.reports-controller.roleName" . }}]'
        - '[ServiceAccount,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceAccountName" . }}]'
        - '[ServiceAccount/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceAccountName" . }}]'
        - '[ServiceAccount,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.serviceAccountName" . }}]'
        - '[ServiceAccount/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.serviceAccountName" . }}]'
        - '[ServiceAccount,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.serviceAccountName" . }}]'
        - '[ServiceAccount/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.serviceAccountName" . }}]'
        - '[ServiceAccount,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.serviceAccountName" . }}]'
        - '[ServiceAccount/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.serviceAccountName" . }}]'
        - '[Role,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.roleName" . }}]'
        - '[Role,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.roleName" . }}]'
        - '[Role,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.roleName" . }}]'
        - '[Role,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.roleName" . }}]'
        - '[RoleBinding,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.roleName" . }}]'
        - '[RoleBinding,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.roleName" . }}]'
        - '[RoleBinding,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.roleName" . }}]'
        - '[RoleBinding,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.roleName" . }}]'
        - '[ConfigMap,{{ include "kyverno.namespace" . }},{{ template "kyverno.config.configMapName" . }}]'
        - '[ConfigMap,{{ include "kyverno.namespace" . }},{{ template "kyverno.config.metricsConfigMapName" . }}]'
        - '[Deployment,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}]'
        - '[Deployment/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}]'
        - '[Deployment,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}]'
        - '[Deployment/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}]'
        - '[Deployment,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]'
        - '[Deployment/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]'
        - '[Deployment,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}]'
        - '[Deployment/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}]'
        - '[Pod,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}-*]'
        - '[Pod/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}-*]'
        - '[Pod,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}-*]'
        - '[Pod/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}-*]'
        - '[Pod,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}-*]'
        - '[Pod/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}-*]'
        - '[Pod,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}-*]'
        - '[Pod/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}-*]'
        - '[Job,{{ include "kyverno.namespace" . }},{{ template "kyverno.fullname" . }}-hook-pre-delete]'
        - '[Job/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.fullname" . }}-hook-pre-delete]'
        - '[NetworkPolicy,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}]'
        - '[NetworkPolicy/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}]'
        - '[NetworkPolicy,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}]'
        - '[NetworkPolicy/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}]'
        - '[NetworkPolicy,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]'
        - '[NetworkPolicy/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]'
        - '[NetworkPolicy,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}]'
        - '[NetworkPolicy/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}]'
        - '[PodDisruptionBudget,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}]'
        - '[PodDisruptionBudget/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}]'
        - '[PodDisruptionBudget,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}]'
        - '[PodDisruptionBudget/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}]'
        - '[PodDisruptionBudget,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]'
        - '[PodDisruptionBudget/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]'
        - '[PodDisruptionBudget,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}]'
        - '[PodDisruptionBudget/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}]'
        - '[Service,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceName" . }}]'
        - '[Service/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceName" . }}]'
        - '[Service,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceName" . }}-metrics]'
        - '[Service/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceName" . }}-metrics]'
        - '[Service,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}-metrics]'
        - '[Service/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}-metrics]'
        - '[Service,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]'
        - '[Service/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]'
        - '[Service,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}-metrics]'
        - '[Service/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}-metrics]'
        - '[Service,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}-metrics]'
        - '[Service/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}-metrics]'
        - '[ServiceMonitor,{{ if .Values.admissionController.serviceMonitor.namespace }}{{ .Values.admissionController.serviceMonitor.namespace }}{{ else }}{{ template "kyverno.namespace" . }}{{ end }},{{ template "kyverno.admission-controller.name" . }}]'
        - '[ServiceMonitor,{{ if .Values.admissionController.serviceMonitor.namespace }}{{ .Values.admissionController.serviceMonitor.namespace }}{{ else }}{{ template "kyverno.namespace" . }}{{ end }},{{ template "kyverno.background-controller.name" . }}]'
        - '[ServiceMonitor,{{ if .Values.admissionController.serviceMonitor.namespace }}{{ .Values.admissionController.serviceMonitor.namespace }}{{ else }}{{ template "kyverno.namespace" . }}{{ end }},{{ template "kyverno.cleanup-controller.name" . }}]'
        - '[ServiceMonitor,{{ if .Values.admissionController.serviceMonitor.namespace }}{{ .Values.admissionController.serviceMonitor.namespace }}{{ else }}{{ template "kyverno.namespace" . }}{{ end }},{{ template "kyverno.reports-controller.name" . }}]'
        - '[Secret,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceName" . }}.{{ template "kyverno.namespace" . }}.svc.*]'
        - '[Secret,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}.{{ template "kyverno.namespace" . }}.svc.*]'
@@ -1,8 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  # - ./remove-cpu-limits.yaml
  - ./schematic-to-pod.yaml
  - ./volsync-movers.yaml
@@ -1,39 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/kyverno.io/clusterpolicy_v1.json
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: add-talos-factory-schematic-to-node
  annotations:
    pod-policies.kyverno.io/autogen-controllers: none
    policies.kyverno.io/title: Adds talos factory schematic to node
    policies.kyverno.io/category: Other
    policies.kyverno.io/subject: Pod
    kyverno.io/kyverno-version: 1.10.0
    policies.kyverno.io/minversion: 1.10.0
    kyverno.io/kubernetes-version: "1.30"
spec:
  background: false
  rules:
    - name: project-foo
      match:
        any:
          - resources:
              kinds:
                - Pod/binding
              names:
                - apply-talos*
      context:
        - name: node
          variable:
            jmesPath: request.object.target.name
            default: ""
        - name: schematic
          apiCall:
            urlPath: "/api/v1/nodes/{{node}}"
            jmesPath: 'metadata.annotations."extensions.talos.dev/schematic" || ''empty'''
      mutate:
        patchStrategicMerge:
          metadata:
            annotations:
              extensions.talos.dev/schematic: "{{ schematic }}"
@@ -1,76 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/kyverno.io/clusterpolicy_v1.json
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: volsync-movers
  annotations:
    policies.kyverno.io/title: Set custom config on the Volsync mover Jobs
    policies.kyverno.io/description: |
      This policy sets custom configuration on the Volsync mover Jobs.
    policies.kyverno.io/subject: Pod
spec:
  rules:
    - name: mutate-volsync-src-movers
      match:
        any:
          - resources:
              kinds: ["batch/v1/Job"]
              names: ["volsync-src-*"]
              namespaces: ["*"]
              selector:
                matchLabels:
                  app.kubernetes.io/created-by: volsync
      mutate:
        patchStrategicMerge:
          spec:
            podReplacementPolicy: Failed
            podFailurePolicy:
              rules:
                - action: FailJob
                  onExitCodes:
                    containerName: restic
                    operator: In
                    values: [11]
            template:
              spec:
                initContainers:
                  - name: jitter
                    image: docker.io/library/busybox:latest
                    imagePullPolicy: IfNotPresent
                    command: ['sh', '-c', 'sleep $(shuf -i 0-60 -n 1)']
                containers:
                  - name: restic
                    volumeMounts:
                      - name: repository
                        mountPath: /repository
                volumes:
                  - name: repository
                    nfs:
                      server: shadowfax.jahanson.tech
                      path: /nahar/volsync
    - name: mutate-volsync-dst-movers
      match:
        any:
          - resources:
              kinds: ["batch/v1/Job"]
              names: ["volsync-dst-*"]
              namespaces: ["*"]
              selector:
                matchLabels:
                  app.kubernetes.io/created-by: volsync
      mutate:
        patchStrategicMerge:
          spec:
            template:
              spec:
                containers:
                  - name: restic
                    volumeMounts:
                      - name: repository
                        mountPath: /repository
                volumes:
                  - name: repository
                    nfs:
                      server: shadowfax.jahanson.tech
                      path: /nahar/volsync
@@ -1,38 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: system-upgrade
  annotations:
    kustomize.toolkit.fluxcd.io/prune: disabled
    volsync.backube/privileged-movers: "true"
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json
apiVersion: notification.toolkit.fluxcd.io/v1beta3
kind: Provider
metadata:
  name: alert-manager
  namespace: system-upgrade
spec:
  type: alertmanager
  address: http://alertmanager.observability.svc.cluster.local:9093/api/v2/alerts/
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json
apiVersion: notification.toolkit.fluxcd.io/v1beta3
kind: Alert
metadata:
  name: alert-manager
  namespace: system-upgrade
spec:
  providerRef:
    name: alert-manager
  eventSeverity: error
  eventSources:
    - kind: HelmRelease
      name: "*"
  exclusionList:
    - "error.*lookup github\\.com"
    - "error.*lookup raw\\.githubusercontent\\.com"
    - "dial.*tcp.*timeout"
    - "waiting.*socket"
  suspend: false
@@ -1,101 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: &app system-upgrade-controller
spec:
  interval: 30m
  chart:
    spec:
      chart: app-template
      version: 3.5.1
      sourceRef:
        kind: HelmRepository
        name: bjw-s
        namespace: flux-system
  install:
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      strategy: rollback
      retries: 3
  values:
    controllers:
      system-upgrade-controller:
        strategy: RollingUpdate
        containers:
          app:
            image:
              repository: docker.io/rancher/system-upgrade-controller
              tag: v0.14.2@sha256:3cdbfdd90f814702cefb832fc4bdb09ea93865a4d06c6bafd019d1dc6a9f34c9
            env:
              SYSTEM_UPGRADE_CONTROLLER_DEBUG: false
              SYSTEM_UPGRADE_CONTROLLER_THREADS: 2
              SYSTEM_UPGRADE_JOB_ACTIVE_DEADLINE_SECONDS: 900
              SYSTEM_UPGRADE_JOB_BACKOFF_LIMIT: 99
              SYSTEM_UPGRADE_JOB_IMAGE_PULL_POLICY: IfNotPresent
              SYSTEM_UPGRADE_JOB_KUBECTL_IMAGE: registry.k8s.io/kubectl:v1.31.1
              SYSTEM_UPGRADE_JOB_POD_REPLACEMENT_POLICY: Failed
              SYSTEM_UPGRADE_JOB_PRIVILEGED: true
              SYSTEM_UPGRADE_JOB_TTL_SECONDS_AFTER_FINISH: 900
              SYSTEM_UPGRADE_PLAN_POLLING_INTERVAL: 15m
              SYSTEM_UPGRADE_CONTROLLER_NAME: *app
              SYSTEM_UPGRADE_CONTROLLER_NAMESPACE:
                valueFrom:
                  fieldRef:
                    fieldPath: metadata.namespace
            securityContext:
              allowPrivilegeEscalation: false
              readOnlyRootFilesystem: true
              capabilities: { drop: ["ALL"] }
              seccompProfile:
                type: RuntimeDefault
    defaultPodOptions:
      securityContext:
        runAsNonRoot: true
        runAsUser: 65534
        runAsGroup: 65534
        seccompProfile: { type: RuntimeDefault }
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: node-role.kubernetes.io/control-plane
                    operator: Exists
      tolerations:
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
    serviceAccount:
      create: true
      name: system-upgrade
    persistence:
      tmp:
        type: emptyDir
      etc-ssl:
        type: hostPath
        hostPath: /etc/ssl
        hostPathType: DirectoryOrCreate
        globalMounts:
          - readOnly: true
      etc-pki:
        type: hostPath
        hostPath: /etc/pki
        hostPathType: DirectoryOrCreate
        globalMounts:
          - readOnly: true
      etc-ca-certificates:
        type: hostPath
        hostPath: /etc/ca-certificates
        hostPathType: DirectoryOrCreate
        globalMounts:
          - readOnly: true
@ -1,7 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - helmrelease.yaml
  - rbac.yaml
@ -1,21 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system-upgrade
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: system-upgrade
    namespace: system-upgrade
---
apiVersion: talos.dev/v1alpha1
kind: ServiceAccount
metadata:
  name: talos
spec:
  roles:
    - os:admin
@ -1,50 +0,0 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: &app system-upgrade-controller
  namespace: flux-system
spec:
  targetNamespace: system-upgrade
  commonMetadata:
    labels:
      app.kubernetes.io/name: *app
  dependsOn:
    - name: node-feature-discovery-rules
  path: ./kubernetes/apps/system-upgrade/system-upgrade-controller/app
  prune: true
  sourceRef:
    kind: GitRepository
    name: theshire
  wait: true
  interval: 30m
  timeout: 5m
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: &app system-upgrade-controller-plans
  namespace: flux-system
spec:
  targetNamespace: system-upgrade
  commonMetadata:
    labels:
      app.kubernetes.io/name: *app
  dependsOn:
    - name: system-upgrade-controller
  path: ./kubernetes/apps/system-upgrade/system-upgrade-controller/plans
  prune: true
  sourceRef:
    kind: GitRepository
    name: theshire
  wait: false
  interval: 30m
  timeout: 5m
  postBuild:
    substitute:
      # renovate: datasource=docker depName=ghcr.io/siderolabs/installer
      TALOS_VERSION: v1.8.2
      # renovate: datasource=docker depName=ghcr.io/siderolabs/kubelet
      KUBERNETES_VERSION: v1.30.2
@ -1,45 +0,0 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/upgrade.cattle.io/plan_v1.json
apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
  name: kubernetes
spec:
  version: ${KUBERNETES_VERSION}
  serviceAccountName: system-upgrade
  secrets:
    - name: talos
      path: /var/run/secrets/talos.dev
      ignoreUpdates: true
  concurrency: 1
  exclusive: true
  nodeSelector:
    matchExpressions:
      - key: feature.node.kubernetes.io/system-os_release.ID
        operator: In
        values: ["talos"]
      - key: node-role.kubernetes.io/control-plane
        operator: Exists
  tolerations:
    - key: CriticalAddonsOnly
      operator: Exists
    - key: node-role.kubernetes.io/control-plane
      operator: Exists
      effect: NoSchedule
  prepare: &prepare
    image: ghcr.io/siderolabs/talosctl:${TALOS_VERSION}
    envs:
      - name: NODE_IP
        valueFrom:
          fieldRef:
            fieldPath: status.hostIP
    args:
      - --nodes=$(NODE_IP)
      - health
      - --server=false
  upgrade:
    <<: *prepare
    args:
      - --nodes=$(NODE_IP)
      - upgrade-k8s
      - --to=$(SYSTEM_UPGRADE_PLAN_LATEST_VERSION)
@ -1,51 +0,0 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/upgrade.cattle.io/plan_v1.json
apiVersion: upgrade.cattle.io/v1
kind: Plan
metadata:
  name: talos
spec:
  version: ${TALOS_VERSION}
  serviceAccountName: system-upgrade
  secrets:
    - name: talos
      path: /var/run/secrets/talos.dev
      ignoreUpdates: true
  concurrency: 1
  exclusive: true
  nodeSelector:
    matchExpressions:
      - key: feature.node.kubernetes.io/system-os_release.ID
        operator: In
        values: ["talos"]
      - key: feature.node.kubernetes.io/system-os_release.VERSION_ID
        operator: NotIn
        values: ["${TALOS_VERSION}"]
  tolerations:
    - key: CriticalAddonsOnly
      operator: Exists
    - key: node-role.kubernetes.io/control-plane
      operator: Exists
      effect: NoSchedule
  prepare: &prepare
    image: ghcr.io/siderolabs/talosctl:${TALOS_VERSION}
    envs:
      - name: NODE_IP
        valueFrom:
          fieldRef:
            fieldPath: status.hostIP
      - name: TALOS_SCHEMATIC_ID
        valueFrom:
          fieldRef:
            fieldPath: metadata.annotations['extensions.talos.dev/schematic']
    args:
      - --nodes=$(NODE_IP)
      - health
      - --server=false
  upgrade:
    <<: *prepare
    args:
      - --nodes=$(NODE_IP)
      - upgrade
      - --image=factory.talos.dev/installer/$(TALOS_SCHEMATIC_ID):$(SYSTEM_UPGRADE_PLAN_LATEST_VERSION)
      - --wait=false
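Once both Plans are reconciled, upgrade progress can be followed from the controller's namespace; a sketch (plan and job names come from the manifests above):

  kubectl -n system-upgrade get plans
  kubectl -n system-upgrade get jobs --watch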
@ -1,27 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: vault
  namespace: security
spec:
  secretStoreRef:
    kind: ClusterSecretStore
    name: onepassword-connect
  target:
    name: vault-secret
    creationPolicy: Owner
  data:
    - secretKey: AWS_SECRET_ACCESS_KEY
      remoteRef:
        key: vault
        property: AWS_SECRET_ACCESS_KEY
    - secretKey: AWS_ACCESS_KEY_ID
      remoteRef:
        key: vault
        property: AWS_ACCESS_KEY_ID
    - secretKey: VAULT_AWSKMS_SEAL_KEY_ID
      remoteRef:
        key: vault
        property: VAULT_AWSKMS_SEAL_KEY_ID
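After the ClusterSecretStore syncs, the generated Secret can be verified; a sketch:

  kubectl -n security get externalsecret vault
  kubectl -n security get secret vault-secret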
@ -1,141 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: vault
spec:
  interval: 30m
  chart:
    spec:
      chart: vault
      version: 0.28.1
      sourceRef:
        kind: HelmRepository
        name: hashicorp
        namespace: flux-system
  install:
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      retries: 3
      strategy: uninstall
  values:
    server:
      image:
        repository: public.ecr.aws/hashicorp/vault
        tag: "1.17.5"
      logLevel: "info"
      logFormat: "json"
      ingress:
        enabled: true
        ingressClassName: internal-nginx
        hosts:
          - host: &host "vault.jahanson.tech"
            paths: []
        tls:
          - hosts:
              - *host
      service:
        type: "ClusterIP"
        port: &port 8200
        targetPort: *port
      # off until it's online for the first time
      readinessProbe:
        enabled: true
        path: "/v1/sys/health?standbyok=true&sealedcode=204&uninitcode=204"
      livenessProbe:
        enabled: true
        path: "/v1/sys/health?standbyok=true"
        initialDelaySeconds: 60
        # If you need to use a http path instead of the default exec
        # path: /v1/sys/health?standbyok=true
        # Port number on which readinessProbe will be checked.
        port: *port
      extraEnvironmentVars:
        # This is required because they will lose their values when the pod is upgraded in my experience.
        # Probably a Flux thing.
        VAULT_CLUSTER_ADDR: http://$(HOSTNAME).vault-internal:8201
      extraSecretEnvironmentVars:
        - envName: AWS_SECRET_ACCESS_KEY
          secretName: vault-secret
          secretKey: AWS_SECRET_ACCESS_KEY
        - envName: AWS_ACCESS_KEY_ID
          secretName: vault-secret
          secretKey: AWS_ACCESS_KEY_ID
        - envName: VAULT_AWSKMS_SEAL_KEY_ID
          secretName: vault-secret
          secretKey: VAULT_AWSKMS_SEAL_KEY_ID
      # These are defaults but explicitly set here for clarity.
      dataStorage:
        size: 4Gi
        mountPath: /vault/data
        storageClass: ceph-block
      auditStorage:
        enabled: true
        size: 10Gi
        mountPath: /vault/audit
        storageClass: ceph-block
      # We want high availability. If standalone is true it sets the storage backend to file
      # and the max replicas can only be 1.
      standalone:
        enabled: false
      ha:
        enabled: true
        # maxUnavailable will default to (n/2)-1 where n is the number of replicas
        # so if you have 6 replicas, maxUnavailable will be 2 unless you set it specifically.
        replicas: 3
        config: ""
        raft:
          enabled: true
          config: |
            ui = true

            listener "tcp" {
              tls_disable = 1
              address = "[::]:8200"
              cluster_address = "[::]:8201"
              # For prometheus!
              telemetry {
                unauthenticated_metrics_access = "true"
              }
            }

            storage "raft" {
              path = "/vault/data"
              retry_join {
                auto_join = "provider=k8s label_selector=\"app.kubernetes.io/name=vault,component=server\" namespace=\"security\""
                auto_join_scheme = "http"
              }
            }

            seal "awskms" {
              region = "us-east-2"
            }

            service_registration "kubernetes" {}
      statefulSet:
        securityContext:
          pod:
            runAsUser: 568
            runAsGroup: 568
            runAsNonRoot: true
            fsGroup: 568
            fsGroupChangePolicy: OnRootMismatch
            supplementalGroups: [10000]
          container:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: false
            capabilities:
              drop:
                - "ALL"
    ui:
      enabled: true
      publishNotReadyAddresses: true
      # The service should only contain selectors for active Vault pod
      activeVaultPodOnly: true
      serviceType: "LoadBalancer"
      externalPort: *port
      targetPort: *port
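With the awskms seal configured above, pods should auto-unseal once KMS is reachable; a quick status check (a sketch, assuming the release runs in the security namespace with the default StatefulSet pod naming):

  kubectl -n security exec vault-0 -- vault status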
@ -1,8 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: security
resources:
  - ./externalsecret.yaml
  - ./helmrelease.yaml
12 .envrc
@ -1,13 +1,5 @@
 #shellcheck disable=SC2148,SC2155
 export KUBECONFIG="$(expand_path ./kubeconfig)"
 export SOPS_AGE_KEY_FILE="$(expand_path ./age.key)"
-export TALOSCONFIG="$(expand_path ./kubernetes/bootstrap/talos/clusterconfig/talosconfig)"
-export KREW_ROOT="$(expand_path ~/.krew/bin)"
-export CLUSTER="theshire"
-export KUBERNETES_DIR="$(expand_path ./kubernetes)"
-#export MQTTUI_BROKER="mqtt://10.1.1.38"
-#export MQTTUI_BROKER=$(op item get "emqx [jahanson]" --fields broker)
-#export MQTTUI_USERNAME=$(op item get "emqx [jahanson]" --fields username)
-#export MQTTUI_PASSWORD=$(op item get "emqx [jahanson]" --fields mqtt-password)
-PATH_add $KREW_ROOT
-use nix
+export TALOSCONFIG="$(expand_path ./talosconfig.yaml)"
+export OMNICONFIG="$(expand_path ./omniconfig.yaml)"
59 .forgejo/actions/renovate/action.yaml Normal file
@ -0,0 +1,59 @@
# action.yml
name: renovate
description: 'renovate with caching'

inputs:
  endpoint:
    description: 'Renovate Endpoint'
    required: true
  token:
    description: 'Renovate Token'
    required: true
  github-token:
    description: 'GitHub Token'
    required: true
  args:
    description: 'Renovate Args'
    required: false
    default: ''
  save-cache:
    description: 'Save cache'
    required: false
    default: 'false'

runs:
  using: 'composite'
  steps:
    - uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
      with:
        path: |
          .tmp/cache/renovate/repository
        key: repo-cache-${{ inputs.endpoint }}-${{ github.run_id }}
        restore-keys: |
          repo-cache-${{ inputs.endpoint }}-

    - run: |
        renovate ${{ inputs.args }}
      shell: bash
      env:
        GITHUB_COM_TOKEN: ${{ inputs.github-token }}
        LOG_LEVEL: debug
        RENOVATE_BASE_DIR: ${{ github.workspace }}/.tmp
        RENOVATE_ENDPOINT: ${{ inputs.endpoint }}
        RENOVATE_PLATFORM: gitea
        RENOVATE_REPOSITORY_CACHE: 'enabled'
        RENOVATE_TOKEN: ${{ inputs.token }}
        RENOVATE_GIT_AUTHOR: 'Renovate Bot <bot@kriese.eu>'

        GIT_AUTHOR_NAME: 'Renovate Bot'
        GIT_AUTHOR_EMAIL: 'bot@kriese.eu'
        GIT_COMMITTER_NAME: 'Renovate Bot'
        GIT_COMMITTER_EMAIL: 'bot@kriese.eu'

    - name: Save renovate repo cache
      if: always() && inputs.save-cache == 'true'
      uses: actions/cache/save@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
      with:
        path: |
          .tmp/cache/renovate/repository
        key: repo-cache-${{ inputs.endpoint }}-${{ github.run_id }}
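The composite action above just wraps the renovate CLI, so the same invocation can be exercised locally before wiring up secrets; a sketch (the repository slug and flag values are illustrative):

  LOG_LEVEL=debug RENOVATE_TOKEN=<token> \
    renovate --platform=gitea --endpoint=https://git.hsn.dev/api/v1/ --dry-run=full jahanson/theshire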
@ -2,55 +2,62 @@
   "$schema": "https://docs.renovatebot.com/renovate-schema.json",
   "extends": [
     "config:recommended",
-    "local>jahanson/theshire//.renovate/autoMerge.json5",
-    "local>jahanson/theshire//.renovate/commitMessage.json5",
-    "local>jahanson/theshire//.renovate/customManagers.json5",
-    "local>jahanson/theshire//.renovate/grafanaDashboards.json5",
-    "local>jahanson/theshire//.renovate/groups.json5",
-    "local>jahanson/theshire//.renovate/labels.json5",
-    "local>jahanson/theshire//.renovate/packageRules.json5"
+    "local>jahanson/homelab//.renovate/customManagers.json5",
+    "local>jahanson/homelab//.renovate/autoMerge.json5"
   ],
-  "ignorePaths": ["**/*.sops.*", "**/.archive/**", "**/resources/**"],
+  "ignorePaths": [".archive/**"],
   "flux": {
-    "fileMatch": ["kubernetes/.+\\.ya?ml$"]
+    "fileMatch": [
+      "kubernetes/.+\\.ya?ml$"
+    ]
   },
   "helm-values": {
-    "fileMatch": ["kubernetes/.+\\.ya?ml$"]
+    "fileMatch": [
+      "kubernetes/.+\\.ya?ml$"
+    ]
   },
   "kubernetes": {
     "fileMatch": [
-      "(^|/)kubernetes/.+\\.ya?ml(?:\\.j2)?$",
-      "(^|/)\\.taskfiles/.+\\.ya?ml(?:\\.j2)?$"
+      "kubernetes/.+\\.ya?ml$"
     ]
   },
-  "customManagers": [
+  "regexManagers": [
     {
-      "customType": "regex",
       "description": [
         "Process CRD dependencies - Chart and Github Release are the same version"
       ],
-      "fileMatch": ["kubernetes/.+\\.ya?ml$"],
+      "fileMatch": [
+        "kubernetes/.+\\.ya?ml$"
+      ],
       "matchStrings": [
-        "# renovate: registryUrl=(?<registryUrl>\\S+) chart=(?<depName>\\S+)\n.*?(?<currentValue>[^-\\s]*)\n"
+        "# renovate: registryUrl=(?<registryUrl>\\S+) chart=(?<depName>\\S+)\n.*?(?<currentValue>[^-\\s]*)\n",
       ],
       "datasourceTemplate": "helm"
     },
     {
-      "customType": "regex",
-      "description": ["Generic Docker image Regex manager"],
-      "fileMatch": ["infrastructure/.+\\.ya?ml$", "infrastructure/.+\\.tf$"],
+      "description": [
+        "Generic Docker image Regex manager"
+      ],
+      "fileMatch": [
+        "infrastructure/.+\\.ya?ml$",
+        "infrastructure/.+\\.tf$"
+      ],
       "matchStrings": [
-        "# renovate: docker-image( versioning=(?<versioning>.*=?))?\\\n .*[:|=] \"?(?<depName>.*?):(?<currentValue>[^\"\\n]*=?)\"?"
+        "# renovate: docker-image( versioning=(?<versioning>.*=?))?\n .*[:|=] \"?(?<depName>.*?):(?<currentValue>[^\"\n]*=?)\"?",
       ],
       "datasourceTemplate": "docker",
       "versioningTemplate": "{{#if versioning}}{{{versioning}}}{{else}}semver{{/if}}"
     },
     {
-      "customType": "regex",
-      "description": ["Raw GitHub URL Regex manager"],
-      "fileMatch": ["infrastructure/.+\\.ya?ml$", "kubernetes/.+\\.ya?ml$"],
+      "description": [
+        "Raw GitHub URL Regex manager"
+      ],
+      "fileMatch": [
+        "infrastructure/.+\\.ya?ml$",
+        "kubernetes/.+\\.ya?ml$"
+      ],
       "matchStrings": [
-        "https:\\/\\/raw.githubusercontent.com\\/(?<depName>[\\w\\d\\-_]+\\/[\\w\\d\\-_]+)\\/(?<currentValue>[\\w\\d\\.\\-_]+)\\/.*"
+        "https:\\/\\/raw.githubusercontent.com\\/(?<depName>[\\w\\d\\-_]+\\/[\\w\\d\\-_]+)\\/(?<currentValue>[\\w\\d\\.\\-_]+)\\/.*",
       ],
       "datasourceTemplate": "github-releases",
       "versioningTemplate": "semver"
@ -59,60 +66,87 @@
   "packageRules": [
     {
       "description": "Use custom versioning for Vector",
-      "matchDatasources": ["docker"],
-      "matchPackageNames": ["docker.io/timberio/vector"],
+      "matchDatasources": [
+        "docker"
+      ],
+      "matchPackageNames": [
+        "docker.io/timberio/vector"
+      ],
       "versioning": "regex:^(?<major>\\d+)\\.(?<minor>\\d+)\\.(?<patch>\\d+)-(?<compatibility>.*)$"
     },
     {
       "description": "Use custom versioning for Minio",
-      "matchDatasources": ["docker"],
+      "matchDatasources": [
+        "docker"
+      ],
       "versioning": "regex:^RELEASE\\.(?<major>\\d+)-(?<minor>\\d+)-(?<patch>\\d+)T.*Z(-(?<compatibility>.*))?$",
-      "matchPackageNames": ["quay.io/minio/minio"]
+      "matchPackageNames": [
+        "quay.io/minio/minio"
+      ]
     },
     {
       "description": "Flux Group",
       "groupName": "Flux",
-      "matchDatasources": ["docker", "github-tags"],
+      "matchPackagePatterns": [
+        "^flux",
+        "^ghcr.io/fluxcd/"
+      ],
+      "matchDatasources": [
+        "docker",
+        "github-tags"
+      ],
       "versioning": "semver",
       "group": {
-        "commitMessageTopic": "{{{groupName}}} group"
+        "commitMessageTopic": "{{{groupName}}} group",
       },
       "separateMinorPatch": true,
-      "matchPackageNames": ["/^flux/", "/^ghcr.io/fluxcd//"]
     },
     {
       "description": "Mastodon images",
       "groupName": "Mastodon",
-      "matchDatasources": ["docker", "github-tags"],
+      "matchPackagePatterns": [
+        "mastodon",
+        "^ghcr.io/mastodon/"
+      ],
+      "matchDatasources": [
+        "docker",
+        "github-tags"
+      ],
       "versioning": "semver",
       "group": {
-        "commitMessageTopic": "{{{groupName}}} group"
+        "commitMessageTopic": "{{{groupName}}} group",
       },
       "separateMinorPatch": true,
-      "matchPackageNames": ["/mastodon/", "/^ghcr.io/mastodon//"]
     },
     {
       "description": "1Password Connect images",
       "groupName": "1password-connect",
       "matchPackageNames": [
         "docker.io/1password/connect-sync",
-        "docker.io/1password/connect-api"
+        "docker.io/1password/connect-api",
+      ],
+      "matchDatasources": [
+        "docker"
+      ],
+      "group": {
+        "commitMessageTopic": "{{{groupName}}} group"
+      },
+      "separateMinorPatch": true,
+    },
+    {
+      "description": "Rook-Ceph image and chart",
+      "groupName": "Rook Ceph",
+      "matchPackagePatterns": [
+        "rook.ceph"
+      ],
+      "matchDatasources": [
+        "docker",
+        "helm"
       ],
-      "matchDatasources": ["docker"],
-      "group": {
-        "commitMessageTopic": "{{{groupName}}} group"
-      },
-      "separateMinorPatch": true
-    },
-    {
-      "description": "Rook-Ceph image and chart",
-      "groupName": "Rook Ceph",
-      "matchDatasources": ["docker", "helm"],
       "group": {
         "commitMessageTopic": "{{{groupName}}} group"
       },
       "separateMinorPatch": true,
-      "matchPackageNames": ["/rook.ceph/"]
     },
     {
       "description": "Cilium image and chart",
@ -120,19 +154,27 @@
       "matchPackageNames": [
         "quay.io/cilium/cilium",
         "quay.io/cilium/operator-generic",
-        "cilium"
+        "cilium",
+      ],
+      "matchDatasources": [
+        "helm",
+        "docker"
       ],
-      "matchDatasources": ["helm", "docker"],
       "group": {
         "commitMessageTopic": "{{{groupName}}} group"
       },
-      "separateMinorPatch": true
+      "separateMinorPatch": true,
     },
     {
       "description": "External Snapshotter charts",
       "groupName": "External Snapshotter",
-      "matchPackageNames": ["snapshot-controller", "snapshot-validation-webhook"],
-      "matchDatasources": ["helm"],
+      "matchPackageNames": [
+        "snapshot-controller",
+        "snapshot-validation-webhook"
+      ],
+      "matchDatasources": [
+        "helm"
+      ],
       "group": {
         "commitMessageTopic": "{{{groupName}}} group"
       },
@ -141,22 +183,42 @@
     {
       "description": "Thanos image and chart - versions do not match",
       "groupName": "Thanos",
-      "matchDatasources": ["docker", "github-releases", "helm"],
-      "matchUpdateTypes": ["minor", "patch"],
+      "matchPackagePatterns": [
+        "quay.io/thanos/thanos",
+        "thanos"
+      ],
+      "matchDatasources": [
+        "docker",
+        "github-releases",
+        "helm"
+      ],
+      "matchUpdateTypes": [
+        "minor",
+        "patch"
+      ],
       "group": {
         "commitMessageTopic": "{{{groupName}}} group"
       },
-      "matchPackageNames": ["/quay.io/thanos/thanos/", "/thanos/"]
     },
     {
       "description": "Vector image and chart - versions do not match",
       "groupName": "Vector",
-      "matchDatasources": ["docker", "github-releases", "helm"],
-      "matchUpdateTypes": ["minor", "patch"],
+      "matchPackagePatterns": [
+        "vector"
+      ],
+      "matchDatasources": [
+        "docker",
+        "github-releases",
+        "helm"
+      ],
+      "matchUpdateTypes": [
+        "minor",
+        "patch"
+      ],
       "group": {
         "commitMessageTopic": "{{{groupName}}} group"
       },
-      "matchPackageNames": ["/vector/"]
-    }
+    },
+    // Version strategies
   ]
 }
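Either side of this config can be checked with the validator that ships in the renovate npm package; a sketch, run from the repository root:

  npx --yes --package renovate -- renovate-config-validator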
24 .forgejo/workflows/renovate.yaml Normal file
@ -0,0 +1,24 @@
on:
  push:
  schedule:
    - cron: '*/30 * * * *'
jobs:
  renovate:
    name: Renovate
    runs-on: ubuntu-latest
    container:
      image: ghcr.io/renovatebot/renovate:37.363.8@sha256:d5d20fda77bffb65bb4099389d6cb064e1e06b085bc547e10330bf82317c9693
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          show-progress: false

      - name: Renovate
        uses: ./.forgejo/actions/renovate
        with:
          endpoint: https://git.hsn.dev/api/v1/
          token: ${{ secrets.RENOVATE_TOKEN }}
          github-token: ${{ secrets.GH_TOKEN }}
          args: ${{ github.repository }} --platform gitea # --autodiscover
          save-cache: true
@ -1,138 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
name: "K8S json Schemas --> Cloudflare R2"

on:
  workflow_dispatch:
  schedule:
    - cron: "0 0 * * *" # Every day at midnight
  push:
    branches: ["main"]
    paths: [".forgejo/workflows/schemas.yaml"]

jobs:
  publish:
    name: Schemas
    runs-on: ["ubuntu-x86_64"]
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout
        uses: https://github.com/actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Setup Workflow Tools
        shell: bash
        run: |
          curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl"
          chmod +x kubectl
          mv kubectl /usr/local/bin/

          curl -LO "https://dl.min.io/client/mc/release/linux-amd64/mc"
          chmod +x mc
          mv mc /usr/local/bin/

      - name: Setup Python
        run: |
          apt-get update
          apt-get install -y python3 python3-pip python3-yaml
          pip3 install --upgrade pip

      - name: Write kubeconfig
        id: kubeconfig
        uses: https://github.com/timheuer/base64-to-file@v1
        with:
          encodedString: "${{ secrets.KUBECONFIG }}"
          fileName: kubeconfig
          fileDir: ${{ env.GITHUB_WORKSPACE }}
      - name: Write mc
        id: mcconfig
        uses: https://github.com/timheuer/base64-to-file@v1
        with:
          encodedString: "${{ secrets.MCCONFIG }}"
          fileName: config.json
          fileDir: ${{ env.GITHUB_WORKSPACE }}

      - name: Extracting CRDs to yaml and converting to JSON schema
        env:
          KUBECONFIG: "${{ steps.kubeconfig.outputs.filePath }}"
        run: |
          # kubeconfig
          echo "kubeconfig location: $KUBECONFIG"
          # Create temp folder for CRDs
          TMP_CRD_DIR=$(mktemp -d)
          echo "Temp directory: $TMP_CRD_DIR"

          # Create final schemas directory
          SCHEMAS_DIR=$GITHUB_WORKSPACE/crdSchemas
          mkdir -p $SCHEMAS_DIR
          echo "Schemas directory: $SCHEMAS_DIR"

          # Create array to store CRD kinds and groups
          ORGANIZE_BY_GROUP=true
          declare -A CRD_GROUPS 2>/dev/null
          if [ $? -ne 0 ]; then
            # Array creation failed, signal to skip organization by group
            ORGANIZE_BY_GROUP=false
          fi

          # Extract CRDs from cluster
          NUM_OF_CRDS=0
          while read -r crd
          do
            filename=${crd%% *}
            kubectl get crds "$filename" -o yaml > "$TMP_CRD_DIR/$filename.yaml" 2>&1
            echo "Extracted CRD: $filename"

            resourceKind=$(grep "kind:" "$TMP_CRD_DIR/$filename.yaml" | awk 'NR==2{print $2}' | tr '[:upper:]' '[:lower:]')
            resourceGroup=$(grep "group:" "$TMP_CRD_DIR/$filename.yaml" | awk 'NR==1{print $2}')

            # Save name and group for later directory organization
            CRD_GROUPS["$resourceKind"]="$resourceGroup"

            let ++NUM_OF_CRDS
          done < <(kubectl get crds 2>&1 | sed -n '/NAME/,$p' | tail -n +2)
          echo numCRDs: $NUM_OF_CRDS

          # Download converter script
          curl https://raw.githubusercontent.com/yannh/kubeconform/master/scripts/openapi2jsonschema.py --output $TMP_CRD_DIR/openapi2jsonschema.py 2>/dev/null

          # Convert crds to jsonSchema
          cd $SCHEMAS_DIR
          python3 $TMP_CRD_DIR/openapi2jsonschema.py $TMP_CRD_DIR/*.yaml
          conversionResult=$?

          # Copy and rename files to support kubeval
          rm -rf $SCHEMAS_DIR/master-standalone
          mkdir -p $SCHEMAS_DIR/master-standalone
          cp $SCHEMAS_DIR/*.json $SCHEMAS_DIR/master-standalone
          find $SCHEMAS_DIR/master-standalone -name '*json' -exec bash -c ' mv -f $0 ${0/\_/-stable-}' {} \;

          # Organize schemas by group
          if [ $ORGANIZE_BY_GROUP == true ]; then
            for schema in $SCHEMAS_DIR/*.json
            do
              crdFileName=$(basename $schema .json)
              crdKind=${crdFileName%%_*}
              crdGroup=${CRD_GROUPS[$crdKind]}
              if [ -z $crdGroup ]; then
                crdGroup="uncategorized"
                echo "CRD kind $crdKind has no group, moving to $crdGroup"
              fi
              echo making directory $crdGroup
              mkdir -p $crdGroup
              mv $schema ./$crdGroup
            done
          fi

          rm -rf $TMP_CRD_DIR

      - name: Deploy to Cloudflare R2
        env:
          MC_CONFIG_DIR: "${{ steps.mcconfig.outputs.fileDir }}"
        shell: bash
        run: |
          echo $GITHUB_WORKSPACE/crdSchemas/
          mc cp --recursive $GITHUB_WORKSPACE/crdSchemas/ r2-ks/kubernetes-schema
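For context, the workflow lays schemas out as <group>/<kind>_<version>.json, which lines up with kubeconform's templated -schema-location flag; a sketch (the public hostname in front of the r2-ks/kubernetes-schema bucket is an assumption):

  kubeconform -strict \
    -schema-location default \
    -schema-location 'https://<r2-public-host>/{{ .Group }}/{{ .ResourceKind }}_{{ .ResourceAPIVersion }}.json' \
    kubernetes/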
22 .gitignore vendored
@ -1,29 +1,15 @@
-# OS generated files
 .DS_Store
 Thumbs.db
-
-# Development environments
-.direnv
-.idea/
+.private/
 .venv/
-.pytest_cache/
-
-# Infrastructure and deployment
 .terraform
 *.tfvars
-kubeconfig*
-*talosconfig.yaml
-omniconfig.yaml
-
-# Security and credentials
-.private/
 .decrypted~*
 *.agekey
 *.pub
 *.key
 *.pem
-*.secrets
+kubeconfig*
+*talosconfig.yaml
+omniconfig.yaml
 config.xml
-
-# syncthing
-**/*sync-conflict*
@ -1,4 +0,0 @@
.archive
.forgejo
.git
.taskfiles
@ -47,7 +47,7 @@ repos:
       args: [--severity=error]
       additional_dependencies: []

-  - repo: https://github.com/onedr0p/sops-pre-commit
+  - repo: https://github.com/k8s-at-home/sops-pre-commit
     rev: v2.1.1
     hooks:
       - id: forbid-secrets
@ -1,4 +0,0 @@
{
  "quoteProps": "preserve",
  "trailingComma": "none"
}
@ -1,26 +1,22 @@
 {
   "$schema": "https://docs.renovatebot.com/renovate-schema.json",
   "packageRules": [
     {
       "description": "Auto merge container digests",
       "matchDatasources": ["docker"],
       "automerge": true,
       "automergeType": "branch",
       "matchUpdateTypes": ["digest"],
-      "matchPackagePrefixes": [
-        "ghcr.io/onedr0p",
-        "ghcr.io/bjw-s",
-        "ghcr.io/bjw-s-labs"
-      ],
+      "matchPackagePrefixes": ["ghcr.io/onedr0p", "ghcr.io/bjw-s"],
       "ignoreTests": true
     },
     {
       "description": "Auto merge KPS minors and patches",
       "matchDatasources": ["helm"],
       "automerge": true,
       "matchUpdateTypes": ["minor", "patch"],
       "matchDepNames": ["kube-prometheus-stack"],
       "ignoreTests": false
     }
   ]
 }
@ -1,16 +0,0 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "commitMessageTopic": "{{depName}}",
  "commitMessageExtra": "to {{newVersion}}",
  "commitMessageSuffix": "",
  "packageRules": [
    {
      "matchDatasources": ["helm"],
      "commitMessageTopic": "chart {{depName}}"
    },
    {
      "matchDatasources": ["docker"],
      "commitMessageTopic": "image {{depName}}"
    }
  ]
}
@ -1,19 +1,37 @@
 {
   "$schema": "https://docs.renovatebot.com/renovate-schema.json",
-  "customManagers": [
-    {
-      "customType": "regex",
-      "description": ["Process custom dependencies"],
-      "fileMatch": ["(^|/)kubernetes/.+\\.ya?ml(?:\\.j2)?$"],
-      "matchStrings": [
-        // # renovate: datasource=helm depName=cilium repository=https://helm.cilium.io
-        // version: 1.15.1
-        "datasource=(?<datasource>\\S+) depName=(?<depName>\\S+)( repository=(?<registryUrl>\\S+))?\\n.+: (&\\S+\\s)?(?<currentValue>\\S+)",
-        // # renovate: datasource=github-releases depName=rancher/system-upgrade-controller
-        // https://github.com/rancher/system-upgrade-controller/releases/download/v0.13.2/crd.yaml
-        "datasource=(?<datasource>\\S+) depName=(?<depName>\\S+)\\n.+/(?<currentValue>(v|\\d)[^/]+)"
-      ],
-      "datasourceTemplate": "{{#if datasource}}{{{datasource}}}{{else}}github-releases{{/if}}"
-    }
-  ]
-}
+  "customDatasources": {
+    "grafana-dashboards": {
+      "defaultRegistryUrlTemplate": "https://grafana.com/api/dashboards/{{packageName}}",
+      "format": "json",
+      "transformTemplates": [
+        "{\"releases\":[{\"version\": $string(revision)}]}"
+      ]
+    }
+  },
+  "customManagers": [
+    {
+      "customType": "regex",
+      "description": "Process Grafana dashboards",
+      "fileMatch": [
+        "(^|/)kubernetes/.+\\.ya?ml(\\.j2)?$"
+      ],
+      "matchStrings": [
+        "depName=\"(?<depName>\\S+)\"\\n.*?gnetId: (?<packageName>\\d+)\\n.*?revision: (?<currentValue>\\d+)"
+      ],
+      "datasourceTemplate": "custom.grafana-dashboards",
+      "versioningTemplate": "regex:^(?<major>\\d+)$"
+    }
+  ],
+  "packageRules": [
+    {
+      "addLabels": ["renovate/grafana-dashboard"],
+      "commitMessageExtra": "to revision {{newVersion}}",
+      "commitMessageTopic": "dashboard {{depName}}",
+      "matchDatasources": ["grafana-dashboards", "custom.grafana-dashboards"],
+      "matchUpdateTypes": ["major"],
+      "semanticCommitScope": "grafana-dashboards",
+      "semanticCommitType": "chore"
+    }
+  ]
+}
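For reference, a YAML fragment the grafana-dashboards custom manager would match; the values are illustrative (any gnet dashboard id and revision number fit the pattern):

  # renovate: depName="node-exporter-full"
  gnetId: 1860
  revision: 37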
@ -1,38 +0,0 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "customDatasources": {
    "grafana-dashboards": {
      "defaultRegistryUrlTemplate": "https://grafana.com/api/dashboards/{{packageName}}",
      "format": "json",
      "transformTemplates": [
        "{\"releases\":[{\"version\": $string(revision)}]}"
      ]
    }
  },
  "customManagers": [
    {
      "customType": "regex",
      "description": ["Process Grafana dashboards"],
      "fileMatch": ["(^|/)kubernetes/.+\\.ya?ml(?:\\.j2)?$"],
      "matchStrings": [
        "depName=\"(?<depName>.*)\"\\n(?<indentation>\\s+)gnetId: (?<packageName>\\d+)\\n.+revision: (?<currentValue>\\d+)"
      ],
      "autoReplaceStringTemplate": "depName=\"{{{depName}}}\"\n{{{indentation}}}gnetId: {{{packageName}}}\n{{{indentation}}}revision: {{{newValue}}}",
      "datasourceTemplate": "custom.grafana-dashboards",
      "versioningTemplate": "regex:^(?<major>\\d+)$"
    }
  ],
  "packageRules": [
    {
      "addLabels": ["renovate/grafana-dashboard"],
      "automerge": true,
      "automergeType": "branch",
      "matchDatasources": ["custom.grafana-dashboards"],
      "matchUpdateTypes": ["major"],
      "semanticCommitType": "chore",
      "semanticCommitScope": "grafana-dashboards",
      "commitMessageTopic": "dashboard {{depName}}",
      "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )"
    }
  ]
}
@ -1,61 +0,0 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "packageRules": [
    {
      "description": ["Dragonfly Operator Group"],
      "groupName": "Dragonfly Operator",
      "matchPackagePatterns": ["dragonfly(?:db)?.operator"],
      "matchDatasources": ["docker", "github-releases"],
      "group": {
        "commitMessageTopic": "{{{groupName}}} group"
      },
      "separateMinorPatch": true
    },
    {
      "description": ["Flux Group"],
      "groupName": "Flux",
      "matchPackagePatterns": ["fluxcd"],
      "matchDatasources": ["docker", "github-tags"],
      "versioning": "semver",
      "group": {
        "commitMessageTopic": "{{{groupName}}} group"
      },
      "separateMinorPatch": true
    },
    {
      "description": ["Rook-Ceph Group"],
      "groupName": "Rook-Ceph",
      "matchPackagePatterns": ["rook.ceph"],
      "matchDatasources": ["helm"],
      "group": {
        "commitMessageTopic": "{{{groupName}}} group"
      },
      "separateMinorPatch": true
    },
    {
      "description": ["Talos Group"],
      "groupName": "Talos",
      "matchPackagePatterns": [
        "ghcr.io/siderolabs/talosctl",
        "ghcr.io/siderolabs/installer",
        "factory.talos.dev/installer"
      ],
      "matchDatasources": ["docker"],
      "group": {
        "commitMessageTopic": "{{{groupName}}} group"
      },
      "separateMinorPatch": true
    },
    {
      "description": ["Volsync Group"],
      "groupName": "Volsync",
      "matchPackagePatterns": ["volsync"],
      "matchDatasources": ["docker", "helm"],
      "matchUpdateTypes": ["minor", "patch"],
      "group": {
        "commitMessageTopic": "{{{groupName}}} group"
      },
      "separateMinorPatch": true
    }
  ]
}
@ -1,37 +0,0 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "packageRules": [
    {
      "matchUpdateTypes": ["major"],
      "labels": ["type/major"]
    },
    {
      "matchUpdateTypes": ["minor"],
      "labels": ["type/minor"]
    },
    {
      "matchUpdateTypes": ["patch"],
      "labels": ["type/patch"]
    },
    {
      "matchUpdateTypes": ["digest"],
      "labels": ["type/digest"]
    },
    {
      "matchDatasources": ["docker"],
      "addLabels": ["renovate/container"]
    },
    {
      "matchDatasources": ["helm"],
      "addLabels": ["renovate/helm"]
    },
    {
      "matchDatasources": ["github-releases", "github-tags"],
      "addLabels": ["renovate/github-release"]
    },
    {
      "matchManagers": ["github-actions"],
      "addLabels": ["renovate/github-action"]
    }
  ]
}
@ -1,23 +0,0 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "packageRules": [
    {
      "description": ["Loose versioning for non-semver packages"],
      "matchDatasources": ["docker"],
      "matchPackagePatterns": ["cross-seed", "plex"],
      "versioning": "loose"
    },
    {
      "description": ["Custom schedule for frequently updated packages"],
      "matchDataSources": ["docker", "helm"],
      "matchPackagePatterns": ["minio", "reloader"],
      "schedule": ["on the first day of the month"]
    },
    {
      "description": ["Custom versioning for minio"],
      "matchDatasources": ["docker"],
      "matchPackagePatterns": ["minio"],
      "versioning": "regex:^RELEASE\\.(?<major>\\d+)-(?<minor>\\d+)-(?<patch>\\d+)T.*Z$"
    }
  ]
}
15 .sops.yaml
@ -1,24 +1,15 @@
 ---
 creation_rules:
-  - # IMPORTANT: Keep this rule first
-    path_regex: kubernetes/bootstrap/talos/talsecret(\.sops)?\.ya?ml
-    input_type: yaml
-    encrypted_regex: ^(token|crt|key|id|secret|secretboxencryptionsecret|ca|bootstraptoken)$
-    age: >-
-      age1gr4js8ln65khjzjkf9gs5c32a2vrrv6jlv5asuz6hccqq8pddc4sjflprn
-  - path_regex: kubernetes/.*/talos/.*\.sops\.ya?ml$
-    age: >-
-      age1gr4js8ln65khjzjkf9gs5c32a2vrrv6jlv5asuz6hccqq8pddc4sjflprn
   - path_regex: kubernetes/.*\.sops\.ya?ml
     encrypted_regex: "^(data|stringData)$"
     # Homelab
     age: >-
-      age1gr4js8ln65khjzjkf9gs5c32a2vrrv6jlv5asuz6hccqq8pddc4sjflprn
+      age1eqlaq205y5jre9hu5hvulywa7w3d4qyxwmafneamxcn7nejesedsf4q9g6
   - path_regex: .*\.sops\.(env|ini|json|toml)
     # Homelab
     age: >-
-      age1gr4js8ln65khjzjkf9gs5c32a2vrrv6jlv5asuz6hccqq8pddc4sjflprn
+      age1eqlaq205y5jre9hu5hvulywa7w3d4qyxwmafneamxcn7nejesedsf4q9g6
   - path_regex: (ansible|terraform|talos)/.*\.sops\.ya?ml
     # Homelab
     age: >-
-      age1gr4js8ln65khjzjkf9gs5c32a2vrrv6jlv5asuz6hccqq8pddc4sjflprn
+      age1eqlaq205y5jre9hu5hvulywa7w3d4qyxwmafneamxcn7nejesedsf4q9g6
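With either key set, new secrets are encrypted in place so the matching creation rule applies; a sketch (the path is illustrative):

  sops --encrypt --in-place kubernetes/apps/example/secret.sops.yaml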
19 .taskfiles/VolSync/ListJob.tmpl.yaml Normal file
@ -0,0 +1,19 @@
---
apiVersion: batch/v1
kind: Job
metadata:
  name: "list-${rsrc}-${ts}"
  namespace: "${namespace}"
spec:
  ttlSecondsAfterFinished: 3600
  template:
    spec:
      automountServiceAccountToken: false
      restartPolicy: OnFailure
      containers:
        - name: list
          image: docker.io/restic/restic:0.16.0
          args: ["snapshots"]
          envFrom:
            - secretRef:
                name: "${rsrc}-restic-secret"
25 .taskfiles/VolSync/ReplicationDestination.tmpl.yaml Normal file
@ -0,0 +1,25 @@
---
apiVersion: volsync.backube/v1alpha1
kind: ReplicationDestination
metadata:
  name: "${rsrc}-${claim}-${ts}"
  namespace: "${namespace}"
spec:
  trigger:
    manual: restore-once
  restic:
    repository: "${rsrc}-restic-secret"
    destinationPVC: "${claim}"
    copyMethod: Direct
    storageClassName: openebs-zfs
    # IMPORTANT NOTE:
    #   Set to the last X number of snapshots to restore from
    previous: ${previous}
    # OR;
    # IMPORTANT NOTE:
    #   On bootstrap set `restoreAsOf` to the time the old cluster was destroyed.
    #   This will essentially prevent volsync from trying to restore a backup
    #   from a application that started with default data in the PVC.
    # Do not restore snapshots made after the following RFC3339 Timestamp.
    # date --rfc-3339=seconds (--utc)
    # restoreAsOf: "2022-12-10T16:00:00-05:00"
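The Taskfile below renders this template with envsubst before applying it; roughly equivalent by hand (the values are illustrative):

  rsrc=plex claim=plex-config namespace=default ts=120000 previous=2 \
    envsubst < ReplicationDestination.tmpl.yaml | kubectl apply -f -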
158
.taskfiles/VolSync/Tasks.yaml
Normal file
158
.taskfiles/VolSync/Tasks.yaml
Normal file
|
@ -0,0 +1,158 @@
|
||||||
|
---
|
||||||
|
version: "3"
|
||||||
|
|
||||||
|
x-task-vars: &task-vars
|
||||||
|
rsrc: '{{.rsrc}}'
|
||||||
|
controller: '{{.controller}}'
|
||||||
|
namespace: '{{.namespace}}'
|
||||||
|
claim: '{{.claim}}'
|
||||||
|
ts: '{{.ts}}'
|
||||||
|
kustomization: '{{.kustomization}}'
|
||||||
|
previous: '{{.previous}}'
|
||||||
|
|
||||||
|
vars:
|
||||||
|
destinationTemplate: "{{.ROOT_DIR}}/.taskfiles/VolSync/ReplicationDestination.tmpl.yaml"
|
||||||
|
wipeJobTemplate: "{{.ROOT_DIR}}/.taskfiles/VolSync/WipeJob.tmpl.yaml"
|
||||||
|
waitForJobScript: "{{.ROOT_DIR}}/.taskfiles/VolSync/wait-for-job.sh"
|
||||||
|
listJobTemplate: "{{.ROOT_DIR}}/.taskfiles/VolSync/ListJob.tmpl.yaml"
|
||||||
|
unlockJobTemplate: "{{.ROOT_DIR}}/.taskfiles/VolSync/UnlockJob.tmpl.yaml"
|
||||||
|
ts: '{{now | date "150405"}}'
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
|
||||||
|
list:
|
||||||
|
desc: List all snapshots taken by restic for a given ReplicationSource (ex. task volsync:list rsrc=plex [namespace=default])
|
||||||
|
silent: true
|
||||||
|
cmds:
|
||||||
|
- envsubst < <(cat {{.listJobTemplate}}) | kubectl apply -f -
|
||||||
|
- bash {{.waitForJobScript}} list-{{.rsrc}}-{{.ts}} {{.namespace}}
|
||||||
|
- kubectl -n {{.namespace}} wait job/list-{{.rsrc}}-{{.ts}} --for condition=complete --timeout=1m
|
||||||
|
- kubectl -n {{.namespace}} logs job/list-{{.rsrc}}-{{.ts}} --container list
|
||||||
|
- kubectl -n {{.namespace}} delete job list-{{.rsrc}}-{{.ts}}
|
||||||
|
vars:
|
||||||
|
rsrc: '{{ or .rsrc (fail "ReplicationSource `rsrc` is required") }}'
|
||||||
|
namespace: '{{.namespace | default "default"}}'
|
||||||
|
env: *task-vars
|
||||||
|
preconditions:
|
||||||
|
# - sh: test -f {{.waitForJobScript}}
|
||||||
|
- sh: test -f {{.listJobTemplate}}
|
||||||
|
|
||||||
|
unlock:
|
||||||
|
desc: Unlocks restic repository for a given ReplicationSource (ex. task volsync:unlock rsrc=plex [namespace=default])
|
||||||
|
silent: true
|
||||||
|
cmds:
|
||||||
|
- envsubst < <(cat {{.unlockJobTemplate}}) | kubectl apply -f -
|
||||||
|
# - bash {{.waitForJobScript}} unlock-{{.rsrc}}-{{.ts}} {{.namespace}}
|
||||||
|
- kubectl -n {{.namespace}} wait job/unlock-{{.rsrc}}-{{.ts}} --for condition=complete --timeout=1m
|
||||||
|
- kubectl -n {{.namespace}} logs job/unlock-{{.rsrc}}-{{.ts}} --container unlock
|
||||||
|
- kubectl -n {{.namespace}} delete job unlock-{{.rsrc}}-{{.ts}}
|
||||||
|
vars:
|
||||||
|
rsrc: '{{ or .rsrc (fail "ReplicationSource `rsrc` is required") }}'
|
||||||
|
namespace: '{{.namespace | default "default"}}'
|
||||||
|
env: *task-vars
|
||||||
|
preconditions:
|
||||||
|
# - sh: test -f {{.waitForJobScript}}
|
||||||
|
- sh: test -f {{.unlockJobTemplate}}
|
||||||
|
|
||||||
|
# To run backup jobs in parallel for all replicationsources:
|
||||||
|
# - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:snapshot rsrc=$0 namespace=$1'
|
||||||
|
#
|
||||||
|
snapshot:
|
||||||
|
desc: Trigger a Restic ReplicationSource snapshot (ex. task volsync:snapshot rsrc=plex [namespace=default])
|
||||||
|
cmds:
|
||||||
|
- kubectl -n {{.namespace}} patch replicationsources {{.rsrc}} --type merge -p '{"spec":{"trigger":{"manual":"{{.ts}}"}}}'
|
||||||
|
- bash {{.waitForJobScript}} volsync-src-{{.rsrc}} {{.namespace}}
|
||||||
|
- kubectl -n {{.namespace}} wait job/volsync-src-{{.rsrc}} --for condition=complete --timeout=120m
|
||||||
|
# TODO: Find a way to output logs
|
||||||
|
# Error from server (NotFound): jobs.batch "volsync-src-zzztest" not found
|
||||||
|
# - kubectl -n {{.namespace}} logs job/volsync-src-{{.rsrc}}
|
||||||
|
vars:
|
||||||
|
rsrc: '{{ or .rsrc (fail "ReplicationSource `rsrc` is required") }}'
|
||||||
|
namespace: '{{.namespace | default "default"}}'
|
||||||
|
env: *task-vars
|
||||||
|
preconditions:
|
||||||
|
# - sh: test -f {{.waitForJobScript}}
|
||||||
|
- sh: kubectl -n {{.namespace}} get replicationsources {{.rsrc}}
|
||||||
|
msg: "ReplicationSource '{{.rsrc}}' not found in namespace '{{.namespace}}'"
|
||||||
|
|
||||||
|
  # To run restore jobs in parallel for all replicationdestinations:
  #   kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' \
  #     | xargs --max-procs=2 -l bash -c 'task volsync:restore rsrc=$0 namespace=$1'
  restore:
    desc: Trigger a Restic ReplicationSource restore (ex. task volsync:restore rsrc=plex [namespace=default])
    cmds:
      - task: restore-suspend-app
        vars: *task-vars
      - task: restore-wipe-job
        vars: *task-vars
      - task: restore-volsync-job
        vars: *task-vars
      - task: restore-resume-app
        vars: *task-vars
    vars:
      rsrc: '{{ or .rsrc (fail "Variable `rsrc` is required") }}'
      namespace: '{{.namespace | default "default"}}'
      # 1) Query to find the Flux Kustomization associated with the ReplicationSource (rsrc)
      kustomization:
        sh: |
          kubectl -n {{.namespace}} get replicationsource {{.rsrc}} \
            -o jsonpath="{.metadata.labels.kustomize\.toolkit\.fluxcd\.io/name}"
      # 2) Query to find the Claim associated with the ReplicationSource (rsrc)
      claim:
        sh: |
          kubectl -n {{.namespace}} get replicationsource {{.rsrc}} \
            -o jsonpath="{.spec.sourcePVC}"
      # 3) Query to find the controller associated with the PersistentVolumeClaim (claim)
      controller:
        sh: |
          app=$(kubectl -n {{.namespace}} get persistentvolumeclaim {{.claim}} -o jsonpath="{.metadata.labels.app\.kubernetes\.io/name}")
          if kubectl -n {{.namespace}} get deployment.apps/$app >/dev/null 2>&1 ; then
            echo "deployment.apps/$app"
          else
            echo "statefulset.apps/$app"
          fi
      previous: "{{.previous | default 2}}"
    env: *task-vars
    preconditions:
      - sh: test -f {{.wipeJobTemplate}}
      - sh: test -f {{.destinationTemplate}}
      # - sh: test -f {{.waitForJobScript}}
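
  # Example invocations; `previous` is passed through to the ReplicationDestination
  # and selects how many snapshots to step back (it defaults to 2 above):
  #   task volsync:restore rsrc=plex
  #   task volsync:restore rsrc=plex namespace=media previous=3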
  # Suspend the Flux ks and hr
  restore-suspend-app:
    internal: true
    cmds:
      - flux -n flux-system suspend kustomization {{.kustomization}}
      - flux -n {{.namespace}} suspend helmrelease {{.rsrc}}
      - kubectl -n {{.namespace}} scale {{.controller}} --replicas 0
      - kubectl -n {{.namespace}} wait pod --for delete --selector="app.kubernetes.io/name={{.rsrc}}" --timeout=2m
    env: *task-vars

  # Wipe the PVC of all data
  restore-wipe-job:
    internal: true
    cmds:
      - envsubst < <(cat {{.wipeJobTemplate}}) | kubectl apply -f -
      - bash {{.waitForJobScript}} wipe-{{.rsrc}}-{{.claim}}-{{.ts}} {{.namespace}}
      - kubectl -n {{.namespace}} wait job/wipe-{{.rsrc}}-{{.claim}}-{{.ts}} --for condition=complete --timeout=120m
      - kubectl -n {{.namespace}} logs job/wipe-{{.rsrc}}-{{.claim}}-{{.ts}} --container wipe
      - kubectl -n {{.namespace}} delete job wipe-{{.rsrc}}-{{.claim}}-{{.ts}}
    env: *task-vars

  # Create VolSync replicationdestination CR to restore data
  restore-volsync-job:
    internal: true
    cmds:
      - envsubst < <(cat {{.destinationTemplate}}) | kubectl apply -f -
      - bash {{.waitForJobScript}} volsync-dst-{{.rsrc}}-{{.claim}}-{{.ts}} {{.namespace}}
      - kubectl -n {{.namespace}} wait job/volsync-dst-{{.rsrc}}-{{.claim}}-{{.ts}} --for condition=complete --timeout=120m
      - kubectl -n {{.namespace}} delete replicationdestination {{.rsrc}}-{{.claim}}-{{.ts}}
    env: *task-vars

  # Resume Flux ks and hr
  restore-resume-app:
    internal: true
    cmds:
      - flux -n {{.namespace}} resume helmrelease {{.rsrc}}
      - flux -n flux-system resume kustomization {{.kustomization}}
    env: *task-vars

38 .taskfiles/VolSync/UnlockJob.tmpl.yaml (Normal file)
@@ -0,0 +1,38 @@
---
apiVersion: batch/v1
kind: Job
metadata:
  name: "unlock-${rsrc}-${ts}"
  namespace: "${namespace}"
spec:
  ttlSecondsAfterFinished: 3600
  template:
    spec:
      automountServiceAccountToken: false
      restartPolicy: OnFailure
      containers:
        - name: unlock
          image: docker.io/restic/restic:0.16.0
          args: ["unlock", "--remove-all"]
          envFrom:
            - secretRef:
                name: "${rsrc}-volsync-r2-secret"
---
apiVersion: batch/v1
kind: Job
metadata:
  name: "unlock-${rsrc}-r2-${ts}"
  namespace: "${namespace}"
spec:
  ttlSecondsAfterFinished: 3600
  template:
    spec:
      automountServiceAccountToken: false
      restartPolicy: OnFailure
      containers:
        - name: unlock
          image: docker.io/restic/restic:0.16.0
          args: ["unlock", "--remove-all"]
          envFrom:
            - secretRef:
                name: "${rsrc}-volsync-secret"

25 .taskfiles/VolSync/WipeJob.tmpl.yaml (Normal file)
@@ -0,0 +1,25 @@
---
apiVersion: batch/v1
kind: Job
metadata:
  name: "wipe-${rsrc}-${claim}-${ts}"
  namespace: "${namespace}"
spec:
  ttlSecondsAfterFinished: 3600
  template:
    spec:
      automountServiceAccountToken: false
      restartPolicy: OnFailure
      containers:
        - name: wipe
          image: public.ecr.aws/docker/library/busybox:latest
          command: ["/bin/sh", "-c", "cd /config; find . -delete"]
          volumeMounts:
            - name: config
              mountPath: /config
          securityContext:
            privileged: true
      volumes:
        - name: config
          persistentVolumeClaim:
            claimName: "${claim}"

14 .taskfiles/VolSync/wait-for-job.sh (Normal file)
@@ -0,0 +1,14 @@
#!/usr/bin/env bash

JOB_NAME=$1
NAMESPACE="${2:-default}"

[[ -z "${JOB_NAME}" ]] && echo "Job name not specified" && exit 1

# Poll until the job's pod exists and reports any phase (Pending, Running, ...).
# Checking for one specific phase can spin forever if that phase is skipped
# between one-second polls, so any non-empty phase ends the wait.
while true; do
    STATUS="$(kubectl -n "${NAMESPACE}" get pod -l job-name="${JOB_NAME}" -o jsonpath='{.items[*].status.phase}')"
    if [[ -n "${STATUS}" ]]; then
        break
    fi
    sleep 1
done
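
# Usage sketch (job name and namespace are illustrative):
#   bash wait-for-job.sh volsync-src-plex default
# Once the loop exits, `kubectl wait job/<name> --for condition=complete`
# can be run without racing the pod's creation.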
@@ -2,64 +2,11 @@
 version: "3"

 tasks:
-  cleanup-pods:
-    desc: Clean up leftover Pods
-    cmds:
-      - for:
-          matrix:
-            PHASE: [Failed, Succeeded, Pending]
-        cmd: kubectl delete pods --field-selector status.phase={{.ITEM.PHASE}} -A --ignore-not-found=true
-  sync-secrets:
-    desc: Sync ExternalSecret resources
-    vars:
-      secret: '{{ .secret | default ""}}'
-      namespace: '{{.namespace | default "default"}}'
-    cmd: |
-      {{if eq .secret ""}}
-      kubectl get externalsecret.external-secrets.io --all-namespaces --no-headers -A | awk '{print $1, $2}' \
-        | xargs --max-procs=4 -l bash -c 'kubectl -n $0 annotate externalsecret.external-secrets.io $1 force-sync=$(date +%s) --overwrite'
-      {{else}}
-      kubectl -n {{.namespace}} annotate externalsecret.external-secrets.io {{.secret}} force-sync=$(date +%s) --overwrite
-      {{end}}
-    preconditions:
-      - kubectl -n {{.namespace}} get externalsecret {{.secret}}
-  mount-volume:
-    desc: Mount a PersistentVolumeClaim to a temporary pod
-    interactive: true
-    vars:
-      claim: '{{ or .claim (fail "PersistentVolumeClaim `claim` is required") }}'
-      namespace: '{{.namespace | default "default"}}'
-    cmd: |
-      kubectl run -n {{.namespace}} debug-{{.claim}} -i --tty --rm --image=null --privileged --overrides='
-        {
-          "apiVersion": "v1",
-          "spec": {
-            "containers": [
-              {
-                "name": "debug",
-                "image": "docker.io/library/alpine:latest",
-                "command": ["/bin/ash"],
-                "stdin": true,
-                "stdinOnce": true,
-                "tty": true,
-                "volumeMounts": [
-                  {
-                    "name": "config",
-                    "mountPath": "/config"
-                  }
-                ]
-              }
-            ],
-            "volumes": [
-              {
-                "name": "config",
-                "persistentVolumeClaim": {
-                  "claimName": "{{.claim}}"
-                }
-              }
-            ],
-            "restartPolicy": "Never"
-          }
-        }'
-    preconditions:
-      - kubectl -n {{.namespace}} get pvc {{.claim}}
+  hubble:
+    desc: forward the hubble relay
+    cmds:
+      - cilium hubble port-forward &
+  hubble-ui:
+    desc: port-forward hubble to 8888
+    cmds:
+      - kubectl port-forward -n kube-system svc/hubble-ui 8888:80

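With the relay forwarded by the new hubble task, the Hubble CLI can talk to it on its default local port; a minimal sketch, assuming the cilium and hubble CLIs are installed locally:

  cilium hubble port-forward &
  hubble observe --follow
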
104 .taskfiles/rook/Taskfile.yaml (Normal file)
@@ -0,0 +1,104 @@
---
version: "3"

x-task-vars: &task-vars
  node: "{{.node}}"
  ceph_disk: "{{.ceph_disk}}"
  ts: "{{.ts}}"
  jobName: "{{.jobName}}"

vars:
  waitForJobScript: "../_scripts/wait-for-k8s-job.sh"
  ts: '{{now | date "150405"}}'

tasks:
  wipe-node-aule:
    desc: Trigger a wipe of Rook-Ceph data on node "aule"
    cmds:
      - task: wipe-disk
        vars:
          node: "{{.node}}"
          ceph_disk: "/dev/disk/by-id/scsi-0HC_Volume_37460833"
      - task: wipe-data
        vars:
          node: "{{.node}}"
    vars:
      node: aule

  wipe-node-orome:
    desc: Trigger a wipe of Rook-Ceph data on node "orome"
    cmds:
      - task: wipe-disk
        vars:
          node: "{{.node}}"
          ceph_disk: "/dev/disk/by-id/scsi-0HC_Volume_37645333"
      - task: wipe-data
        vars:
          node: "{{.node}}"
    vars:
      node: orome

  wipe-node-eonwe:
    desc: Trigger a wipe of Rook-Ceph data on node "eonwe"
    cmds:
      - task: wipe-disk
        vars:
          node: "{{.node}}"
          ceph_disk: "/dev/disk/by-id/scsi-0HC_Volume_37460887"
      - task: wipe-data
        vars:
          node: "{{.node}}"
    vars:
      node: eonwe

  wipe-node-arlen:
    desc: Trigger a wipe of Rook-Ceph data on node "arlen"
    cmds:
      - task: wipe-disk
        vars:
          node: "{{.node}}"
          ceph_disk: "/dev/disk/by-id/scsi-0HC_Volume_37460897"
      - task: wipe-data
        vars:
          node: "{{.node}}"
    vars:
      node: arlen

  wipe-disk:
    desc: Wipe all remnants of rook-ceph from a given disk (ex. task rook:wipe-disk node=aule ceph_disk="/dev/nvme0n1")
    silent: true
    internal: true
    cmds:
      - envsubst < <(cat {{.wipeRookDiskJobTemplate}}) | kubectl apply -f -
      - bash {{.waitForJobScript}} {{.jobName}} default
      - kubectl -n default wait job/{{.jobName}} --for condition=complete --timeout=1m
      - kubectl -n default logs job/{{.jobName}} --container disk-wipe
      - kubectl -n default delete job {{.jobName}}
    vars:
      node: '{{ or .node (fail "`node` is required") }}'
      ceph_disk: '{{ or .ceph_disk (fail "`ceph_disk` is required") }}'
      jobName: 'wipe-disk-{{- .node -}}-{{- .ceph_disk | replace "/" "-" -}}-{{- .ts -}}'
      wipeRookDiskJobTemplate: "WipeDiskJob.tmpl.yaml"
    env: *task-vars
    preconditions:
      - sh: test -f {{.waitForJobScript}}
      - sh: test -f {{.wipeRookDiskJobTemplate}}

  wipe-data:
    desc: Wipe all remnants of rook-ceph data from a given node (ex. task rook:wipe-data node=aule)
    silent: true
    internal: true
    cmds:
      - envsubst < <(cat {{.wipeRookDataJobTemplate}}) | kubectl apply -f -
      - bash {{.waitForJobScript}} {{.jobName}} default
      - kubectl -n default wait job/{{.jobName}} --for condition=complete --timeout=1m
      - kubectl -n default logs job/{{.jobName}} --container disk-wipe
      - kubectl -n default delete job {{.jobName}}
    vars:
      node: '{{ or .node (fail "`node` is required") }}'
      jobName: "wipe-rook-data-{{- .node -}}-{{- .ts -}}"
      wipeRookDataJobTemplate: "WipeRookDataJob.tmpl.yaml"
    env: *task-vars
    preconditions:
      - sh: test -f {{.waitForJobScript}}
      - sh: test -f {{.wipeRookDataJobTemplate}}
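
Only the per-node wrappers are meant to be called directly (wipe-disk and wipe-data are internal), so a full wipe of one node is a single invocation; confirming the disk's stable by-id path on the node first is a sensible guard (hostname is illustrative):

  ssh aule ls -l /dev/disk/by-id/
  task rook:wipe-node-aule
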
26 .taskfiles/rook/WipeDiskJob.tmpl.yaml (Normal file)
@@ -0,0 +1,26 @@
---
apiVersion: batch/v1
kind: Job
metadata:
  name: "${jobName}"
  namespace: "default"
spec:
  ttlSecondsAfterFinished: 3600
  template:
    spec:
      automountServiceAccountToken: false
      restartPolicy: Never
      nodeName: ${node}
      containers:
        - name: disk-wipe
          image: ghcr.io/onedr0p/alpine:3.17.3@sha256:999384960b6114496a5e4036e945141c205d064ce23b87326bd3f8d878c5a9d4
          securityContext:
            privileged: true
          resources: {}
          command: ["/bin/sh", "-c"]
          args:
            - apk add --no-cache sgdisk util-linux parted;
              sgdisk --zap-all ${ceph_disk};
              blkdiscard ${ceph_disk};
              dd if=/dev/zero bs=1M count=10000 oflag=direct of=${ceph_disk};
              partprobe ${ceph_disk};
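
After the job completes the disk should present no partition table or filesystem signatures; a quick check from the node (device path is illustrative):

  lsblk --fs /dev/disk/by-id/scsi-0HC_Volume_37460833
  sgdisk --print /dev/disk/by-id/scsi-0HC_Volume_37460833
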
29 .taskfiles/rook/WipeRookDataJob.tmpl.yaml (Normal file)
@@ -0,0 +1,29 @@
---
apiVersion: batch/v1
kind: Job
metadata:
  name: "${jobName}"
  namespace: "default"
spec:
  ttlSecondsAfterFinished: 3600
  template:
    spec:
      automountServiceAccountToken: false
      restartPolicy: Never
      nodeName: ${node}
      containers:
        - name: disk-wipe
          image: ghcr.io/onedr0p/alpine:3.17.3@sha256:999384960b6114496a5e4036e945141c205d064ce23b87326bd3f8d878c5a9d4
          securityContext:
            privileged: true
          resources: {}
          command: ["/bin/sh", "-c"]
          args:
            - rm -rf /mnt/host_var/lib/rook
          volumeMounts:
            - mountPath: /mnt/host_var
              name: host-var
      volumes:
        - name: host-var
          hostPath:
            path: /var

19 .taskfiles/rook/pod.yaml (Normal file)
@@ -0,0 +1,19 @@
apiVersion: v1
kind: Pod
metadata:
  name: my-pod
spec:
  containers:
    - name: disk-wipe
      image: ghcr.io/onedr0p/alpine:3.17.3@sha256:999384960b6114496a5e4036e945141c205d064ce23b87326bd3f8d878c5a9d4
      securityContext:
        privileged: true
      resources: {}
      command: ["/bin/sh", "-c"]
      args:
        - apk add --no-cache sgdisk util-linux parted e2fsprogs;
          sgdisk --zap-all /dev/nvme1n1;
          blkdiscard /dev/nvme1n1;
          dd if=/dev/zero bs=1M count=10000 oflag=direct of=/dev/nvme1n1;
          sgdisk /dev/nvme1n1;
          partprobe /dev/nvme1n1;

@@ -1,148 +0,0 @@
---
# yaml-language-server: $schema=https://taskfile.dev/schema.json
version: "3"

tasks:
  bootstrap:
    desc: Bootstrap Talos
    summary: |
      Args:
        CONTROLLER: Controller node to run command against (required)
    prompt: Bootstrap Talos on the '{{.K8S_CLUSTER}}' cluster... continue?
    cmds:
      - task: bootstrap-etcd
        vars: &vars
          CONTROLLER: "{{.CONTROLLER}}"
      - task: fetch-kubeconfig
        vars: *vars
      - task: bootstrap-integrations
        vars: *vars
    requires:
      vars:
        - K8S_CLUSTER
        - CONTROLLER

  bootstrap-etcd:
    desc: Bootstrap Etcd
    cmd: until talosctl --nodes {{.CONTROLLER}} bootstrap; do sleep 10; done
    requires:
      vars:
        - CONTROLLER

  bootstrap-integrations:
    desc: Bootstrap core integrations needed for Talos
    cmds:
      - until kubectl wait --for=condition=Ready=False nodes --all --timeout=600s; do sleep 10; done
      - helmfile --kube-context {{.K8S_CLUSTER}} --file {{.K8S_CLUSTER_DIR}}/bootstrap/helmfile.yaml apply --skip-diff-on-install --suppress-diff
      - until kubectl wait --for=condition=Ready nodes --all --timeout=600s; do sleep 10; done
    requires:
      vars:
        - K8S_CLUSTER
    preconditions:
      - which helmfile
      - sh: kubectl config get-contexts {{.K8S_CLUSTER}}
        msg: "Kubectl context {{.K8S_CLUSTER}} not found"
      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/helmfile.yaml

  fetch-kubeconfig:
    desc: Fetch kubeconfig from Talos controllers
    cmd: |
      talosctl kubeconfig --nodes {{.CONTROLLER}} \
        --force --force-context-name {{.K8S_CLUSTER}} {{.K8S_CLUSTER_DIR}}
    requires:
      vars:
        - K8S_CLUSTER

  generate-clusterconfig:
    desc: Generate clusterconfig for Talos
    cmds:
      - talhelper genconfig
        --env-file {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talenv.sops.yaml
        --secret-file {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talsecret.sops.yaml
        --config-file {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml
        --out-dir {{.K8S_CLUSTER_DIR}}/bootstrap/talos/clusterconfig
    requires:
      vars:
        - K8S_CLUSTER
    preconditions:
      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talenv.sops.yaml
      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talsecret.sops.yaml
      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml

  upgrade:
    desc: Upgrade Talos version for a node
    vars:
      TALOS_VERSION:
        sh: |
          yq -r ".talosVersion" {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml
      TALOS_IMAGE:
        sh: |
          talhelper genurl installer \
            --env-file {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talenv.sops.yaml \
            --config-file {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml \
            | grep {{.NODE}} \
            | awk '{split($0,u," "); print u[2]}'
    cmds:
      - talosctl upgrade -n {{.NODE}} --image {{.TALOS_IMAGE}}
    requires:
      vars:
        - K8S_CLUSTER
        - NODE
    preconditions:
      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talenv.sops.yaml
      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml
      - msg: "Talos image could not be determined for node={{.NODE}}"
        sh: 'test -n "{{.TALOS_IMAGE}}"'

  upgrade-k8s:
    desc: Upgrade Kubernetes version for a Talos cluster
    silent: false
    vars:
      KUBERNETES_VERSION:
        sh: |
          yq -r ".kubernetesVersion" {{.K8S_CLUSTER_DIR}}/bootstrap/talos/talconfig.yaml
      TALOS_CONTROLLER:
        sh: talosctl config info --output json | jq --raw-output '.endpoints[]' | shuf -n 1
    cmds:
      - until kubectl wait --timeout=5m --for=condition=Complete jobs --all --all-namespaces; do sleep 10; done
      - talosctl upgrade-k8s -n {{.TALOS_CONTROLLER}} --to {{.KUBERNETES_VERSION}}
    requires:
      vars:
        - K8S_CLUSTER
    preconditions:
      - talosctl config info &>/dev/null
      - talosctl --nodes {{.TALOS_CONTROLLER}} get machineconfig &>/dev/null

  apply-clusterconfig:
    desc: Apply clusterconfig for a Talos cluster
    vars:
      CLUSTERCONFIG_FILES:
        sh: find {{.K8S_CLUSTER_DIR}}/bootstrap/talos/clusterconfig -type f -name '*.yaml' -printf '%f\n'
    cmds:
      - for:
          var: CLUSTERCONFIG_FILES
        task: _apply-machineconfig
        vars:
          filename: "{{.ITEM}}"
          hostname: |-
            {{ trimPrefix (printf "%s-" .K8S_CLUSTER) .ITEM | trimSuffix ".yaml" }}
          DRY_RUN: "{{ .DRY_RUN }}"
    requires:
      vars:
        - K8S_CLUSTER

  _apply-machineconfig:
    internal: true
    desc: Apply a single Talos machineConfig to a Talos node
    cmds:
      - talosctl apply-config
        --nodes "{{.hostname}}"
        --file "{{.K8S_CLUSTER_DIR}}/bootstrap/talos/clusterconfig/{{.filename}}"
        {{ if eq "true" .DRY_RUN }}--dry-run{{ end }}
    requires:
      vars:
        - K8S_CLUSTER
        - hostname
        - filename
    preconditions:
      - test -f {{.K8S_CLUSTER_DIR}}/bootstrap/talos/clusterconfig/{{.filename}}
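
For reference, a typical session with this (now removed) Taskfile, assuming it was included under a talos: namespace and that K8S_CLUSTER/K8S_CLUSTER_DIR come from the including Taskfile (all values illustrative):

  task talos:generate-clusterconfig K8S_CLUSTER=main
  task talos:apply-clusterconfig K8S_CLUSTER=main DRY_RUN=true
  task talos:bootstrap K8S_CLUSTER=main CONTROLLER=192.168.42.10
  task talos:upgrade K8S_CLUSTER=main NODE=192.168.42.11
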
@@ -1,97 +0,0 @@
---
# yaml-language-server: $schema=https://taskfile.dev/schema.json
version: '3'

# Taskfile used to manage certain VolSync tasks for a given application; limitations are as follows:
#   1. Fluxtomization, HelmRelease, PVC, ReplicationSource all have the same name (e.g. plex)
#   2. ReplicationSource and ReplicationDestination are a Restic repository
#   3. Each application only has one PVC that is being replicated

vars:
  VOLSYNC_RESOURCES_DIR: '{{.ROOT_DIR}}/.taskfiles/volsync/resources'

tasks:

  state-*:
    desc: Suspend or resume Volsync [CLUSTER=main]
    cmds:
      - flux --namespace flux-system {{.STATE}} kustomization volsync
      - flux --namespace volsync-system {{.STATE}} helmrelease volsync
      - kubectl --namespace volsync-system scale deployment volsync --replicas {{if eq .STATE "suspend"}}0{{else}}1{{end}}
    vars:
      STATE: '{{index .MATCH 0}}'
    requires:
      vars: [CLUSTER]
    preconditions:
      - '[[ "{{.STATE}}" == "suspend" || "{{.STATE}}" == "resume" ]]'
      - which flux kubectl

  unlock:
    desc: Unlock all restic source repos [CLUSTER=main]
    cmds:
      - for: { var: SOURCES, split: "\n" }
        cmd: kubectl --namespace {{splitList "," .ITEM | first}} patch --field-manager=flux-client-side-apply replicationsources {{splitList "," .ITEM | last}} --type merge --patch "{\"spec\":{\"restic\":{\"unlock\":\"{{now | unixEpoch}}\"}}}"
    vars:
      SOURCES:
        sh: kubectl get replicationsources --all-namespaces --no-headers --output=jsonpath='{range .items[*]}{.metadata.namespace},{.metadata.name}{"\n"}{end}'
    requires:
      vars: [CLUSTER]
    preconditions:
      - which kubectl

  snapshot:
    desc: Snapshot an app [CLUSTER=main] [NS=default] [APP=required]
    cmds:
      - kubectl --namespace {{.NS}} patch replicationsources {{.APP}} --type merge -p '{"spec":{"trigger":{"manual":"{{now | unixEpoch}}"}}}'
      - until kubectl --namespace {{.NS}} get job/{{.JOB}} &>/dev/null; do sleep 5; done
      - kubectl --namespace {{.NS}} wait job/{{.JOB}} --for=condition=complete --timeout=120m
    vars:
      NS: '{{.NS | default "default"}}'
      JOB: volsync-src-{{.APP}}
    requires:
      vars: [CLUSTER, APP]
    preconditions:
      - kubectl --namespace {{.NS}} get replicationsources {{.APP}}
      - which kubectl

  restore:
    desc: Restore an app [CLUSTER=main] [NS=default] [APP=required] [PREVIOUS=required]
    cmds:
      # Suspend
      - flux --namespace flux-system suspend kustomization {{.APP}}
      - flux --namespace {{.NS}} suspend helmrelease {{.APP}}
      - kubectl --namespace {{.NS}} scale {{.CONTROLLER}}/{{.APP}} --replicas 0
      - kubectl --namespace {{.NS}} wait pod --for=delete --selector="app.kubernetes.io/name={{.APP}}" --timeout=5m
      # Restore
      - minijinja-cli {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2 | kubectl apply --server-side --filename -
      - until kubectl --namespace {{.NS}} get job/volsync-dst-{{.APP}}-manual &>/dev/null; do sleep 5; done
      - kubectl --namespace {{.NS}} wait job/volsync-dst-{{.APP}}-manual --for=condition=complete --timeout=120m
      - kubectl --namespace {{.NS}} delete replicationdestination {{.APP}}-manual
      # Resume
      - flux --namespace flux-system resume kustomization {{.APP}}
      - flux --namespace {{.NS}} resume helmrelease {{.APP}}
      - flux --namespace {{.NS}} reconcile helmrelease {{.APP}} --force
      - kubectl --namespace {{.NS}} wait pod --for=condition=ready --selector="app.kubernetes.io/name={{.APP}}" --timeout=5m
    vars:
      NS: '{{.NS | default "default"}}'
      CONTROLLER:
        sh: kubectl --namespace {{.NS}} get deployment {{.APP}} &>/dev/null && echo deployment || echo statefulset
    env:
      NS: '{{.NS}}'
      APP: '{{.APP}}'
      PREVIOUS: '{{.PREVIOUS}}'
      CLAIM:
        sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.sourcePVC}"
      ACCESS_MODES:
        sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.restic.accessModes}"
      STORAGE_CLASS_NAME:
        sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.restic.storageClassName}"
      PUID:
        sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.restic.moverSecurityContext.runAsUser}"
      PGID:
        sh: kubectl --namespace {{.NS}} get replicationsources/{{.APP}} --output=jsonpath="{.spec.restic.moverSecurityContext.runAsGroup}"
    requires:
      vars: [CLUSTER, APP, PREVIOUS]
    preconditions:
      - test -f {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2
      - which flux kubectl minijinja-cli
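
The replacement tasks key everything off CLUSTER, NS and APP; a typical sequence (values illustrative):

  task volsync:snapshot CLUSTER=main NS=media APP=plex
  task volsync:unlock CLUSTER=main
  task volsync:restore CLUSTER=main NS=media APP=plex PREVIOUS=2
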
@@ -1,23 +0,0 @@
---
apiVersion: volsync.backube/v1alpha1
kind: ReplicationDestination
metadata:
  name: {{ ENV.APP }}-manual
  namespace: {{ ENV.NS }}
spec:
  trigger:
    manual: restore-once
  restic:
    repository: {{ ENV.APP }}-volsync-secret
    destinationPVC: {{ ENV.CLAIM }}
    copyMethod: Direct
    storageClassName: {{ ENV.STORAGE_CLASS_NAME }}
    accessModes: {{ ENV.ACCESS_MODES }}
    previous: {{ ENV.PREVIOUS }}
    enableFileDeletion: true
    cleanupCachePVC: true
    cleanupTempPVC: true
    moverSecurityContext:
      runAsUser: {{ ENV.PUID }}
      runAsGroup: {{ ENV.PGID }}
      fsGroup: {{ ENV.PGID }}
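
The restore task renders this template by exporting NS, APP, PREVIOUS, CLAIM and friends into the process environment; assuming minijinja-cli is configured to expose environment variables as ENV (for example via its --env flag), a by-hand render looks like this (values illustrative):

  export APP=plex NS=media PREVIOUS=2 CLAIM=plex \
    ACCESS_MODES='["ReadWriteOnce"]' STORAGE_CLASS_NAME=ceph-block PUID=1000 PGID=1000
  minijinja-cli --env replicationdestination.yaml.j2 | kubectl apply --server-side --filename -
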
3 .vscode/extensions.json (vendored)
@@ -5,7 +5,6 @@
     "redhat.vscode-yaml",
     "signageos.signageos-vscode-sops",
     "pkief.material-icon-theme",
-    "ms-vscode-remote.remote-ssh",
-    "editorconfig.editorconfig"
+    "ms-vscode-remote.remote-ssh"
   ]
 }

55 .vscode/settings.json (vendored)
@@ -1,41 +1,32 @@
 {
   "ansible.validation.lint.arguments": "-c .ansible-lint",
   "files.associations": {
     "*.json5": "jsonc",
     "**/ansible/**/*.yaml": "ansible",
     "**/ansible/**/*.sops.yaml": "yaml",
     "**/ansible/**/inventory/**/*.yaml": "yaml",
-    "**/kubernetes/**/*.sops.toml": "plaintext",
-    "*.hujson": "jsonc"
+    "**/kubernetes/**/*.sops.toml": "plaintext"
   },
   "material-icon-theme.folders.associations": {
     ".taskfiles": "utils",
     "bootstrap": "import",
     "charts": "kubernetes",
     "hack": "scripts",
     "repositories": "database",
     "vars": "other",
     // namespaces
     "cert-manager": "guard",
     "external-secrets": "keys",
     "kube-system": "kubernetes",
     "monitoring": "event",
     "networking": "connection",
-    "rook-ceph": "dump"
+    "rook-ceph": "dump",
   },
   "yaml.schemaStore.enable": true,
   "yaml.schemas": {
     "ansible": "ansible/**/*.yaml",
     "kubernetes": "kubernetes/**/*.yaml"
   },
-  "json.schemas": [
-    {
-      "fileMatch": ["*.hujson"],
-      "schema": {
-        "allowTrailingCommas": true
-      }
-    }
-  ],
   "editor.fontFamily": "FiraCode Nerd Font",
   "editor.fontLigatures": true,
   "editor.bracketPairColorization.enabled": true,
@@ -44,14 +35,12 @@
   "editor.guides.highlightActiveBracketPair": true,
   "editor.hover.delay": 1500,
   "editor.stickyScroll.enabled": false,
-  "editor.rulers": [100],
+  "editor.rulers": [
+    100
+  ],
   "explorer.autoReveal": false,
   "files.trimTrailingWhitespace": true,
   "ansible.python.interpreterPath": "/usr/bin/python3",
   "sops.defaults.ageKeyFile": "age.key",
-  "ansible.validation.lint.path": "~/projects/valinor/.venv/bin/ansible-lint",
-  "prettier.quoteProps": "preserve",
-  "[jsonc]": {
-    "editor.defaultFormatter": "esbenp.prettier-vscode"
-  }
-}
+  "ansible.validation.lint.path": "~/projects/valinor/.venv/bin/ansible-lint"
+}

Some files were not shown because too many files have changed in this diff.