Update Helm release reloader to v1.0.42 #71

Merged
jahanson merged 2 commits from renovate/reloader-1.x into main 2023-10-05 12:08:50 -05:00
15 changed files with 306 additions and 320 deletions
Showing only changes of commit 51d380366a - Show all commits

View file

@ -25,6 +25,19 @@ tasks:
vars:
node: aule
wipe-node-orome:
desc: Trigger a wipe of Rook-Ceph data on node "orome"
cmds:
- task: wipe-disk
vars:
node: "{{.node}}"
ceph_disk: "/dev/disk/by-id/scsi-0HC_Volume_37645333"
- task: wipe-data
vars:
node: "{{.node}}"
vars:
node: orome
wipe-node-eonwe:
desc: Trigger a wipe of Rook-Ceph data on node "eonwe"
cmds:

View file

@ -0,0 +1,70 @@
---
# PVC backing the speedtest jobs below; RWX so both jobs can mount it.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  storageClassName: ceph-filesystem
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5G
---
# Sequential-read benchmark: dd the test image written by the "write" job.
apiVersion: batch/v1
kind: Job
metadata:
  name: read
spec:
  template:
    metadata:
      name: read
      labels:
        app: speedtest
        job: read
    spec:
      containers:
        - name: read
          image: ubuntu:xenial
          command: ["dd", "if=/mnt/pv/test.img", "of=/dev/null", "bs=8k"]
          volumeMounts:
            - mountPath: "/mnt/pv"
              name: test-volume
      volumes:
        - name: test-volume
          persistentVolumeClaim:
            claimName: test-claim
      restartPolicy: Never
---
# Sequential-write benchmark: writes a 1 GiB test image with dsync so
# throughput reflects the storage backend, not the page cache.
apiVersion: batch/v1
kind: Job
metadata:
  name: write
spec:
  template:
    metadata:
      name: write
      labels:
        app: speedtest
        job: write
    spec:
      containers:
        - name: write
          image: ubuntu:xenial
          command:
            [
              "dd",
              "if=/dev/zero",
              "of=/mnt/pv/test.img",
              "bs=1G",
              "count=1",
              "oflag=dsync",
            ]
          volumeMounts:
            - mountPath: "/mnt/pv"
              name: test-volume
      volumes:
        - name: test-volume
          persistentVolumeClaim:
            claimName: test-claim
      restartPolicy: Never

View file

@ -0,0 +1,15 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: cluster-apps-crunchy-postgres-operator
  namespace: flux-system
spec:
  interval: 10m
  path: "./kubernetes/apps/database/crunchy-postgres/operator"
  prune: true
  sourceRef:
    kind: GitRepository
    name: valinor
  wait: true

View file

@ -0,0 +1,22 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: crunchy-postgres-operator
  namespace: database
spec:
  interval: 30m
  chart:
    spec:
      chart: pgo
      # Quoted so the chart version can never be re-typed as a number.
      version: "5.4.2"
      sourceRef:
        kind: HelmRepository
        name: crunchydata
        namespace: flux-system
      interval: 5m
  install:
    crds: CreateReplace
  upgrade:
    crds: CreateReplace

View file

@ -0,0 +1,7 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: database
resources:
  - ./helmrelease.yaml

View file

@ -0,0 +1,9 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  # Pre Flux-Kustomizations
  - ./namespace.yaml
  # Flux-Kustomizations
  - ./crunchy-postgres/ks.yaml

View file

@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: database
  labels:
    # Protect the namespace from Flux garbage collection.
    kustomize.toolkit.fluxcd.io/prune: disabled

View file

@ -0,0 +1,17 @@
---
# Long-running debug pod (sleeps forever) for ad-hoc troubleshooting.
apiVersion: v1
kind: Pod
metadata:
  name: rocky-linux
  namespace: default
spec:
  containers:
    - name: rocky
      image: rockylinux:9
      command: ["/bin/bash", "-c", "while true; do sleep 10; done"]
      resources:
        requests:
          cpu: 50m
          memory: 443M
        limits:
          cpu: 323m
          memory: 886M

View file

@ -10,7 +10,7 @@ spec:
chart:
spec:
chart: app-template
version: 1.5.1
version: 2.0.2
interval: 30m
sourceRef:
kind: HelmRepository
@ -21,11 +21,14 @@ spec:
type: statefulset
annotations:
reloader.stakater.com/auto: "true"
image:
repository: quay.io/prometheus/alertmanager
tag: main@sha256:7c060ae2a86177fbb4106fddcdd9f2cd494d4415b67ccda71a9fdf11f52e825b
controllers:
main:
containers:
main:
image:
repository: quay.io/prometheus/alertmanager
tag: main@sha256:7c060ae2a86177fbb4106fddcdd9f2cd494d4415b67ccda71a9fdf11f52e825b
pullPolicy: IfNotPresent
podAnnotations:
reloader.stakater.com/auto: "true"
@ -37,13 +40,15 @@ spec:
ingress:
main:
enabled: true
ingressClassName: nginx
className: nginx
hosts:
- host: &host alertmanager.valinor.social
paths:
- path: /
pathType: Prefix
service:
name: main
port: http
tls:
- hosts:
- *host
@ -53,14 +58,14 @@ spec:
enabled: true
type: configMap
name: alertmanager-configmap
mountPath: /etc/alertmanager
readOnly: true
globalMounts:
- path: /etc/alertmanager
secrets:
enabled: true
type: secret
name: alertmanager-secret
mountPath: /etc/secrets
readOnly: true
globalMounts:
- path: /etc/secrets
resources:
requests:

View file

@ -14,7 +14,7 @@ spec:
kind: HelmRepository
name: grafana
namespace: flux-system
version: 6.60.1
version: 6.60.2
interval: 30m
timeout: 20m
maxHistory: 2
@ -29,212 +29,7 @@ spec:
uninstall:
keepHistory: false
values:
annotations:
configmap.reloader.stakater.com/reload: grafana
secret.reloader.stakater.com/reload: grafana-secrets
replicas: 1
dashboardProviders:
dashboardproviders.yaml:
apiVersion: 1
providers:
- name: default
orgId: 1
folder: ""
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/default
datasources:
datasources.yaml:
apiVersion: 1
deleteDatasources:
- name: Loki
orgId: 1
- name: Alertmanager
orgId: 1
datasources:
- name: Prometheus
type: prometheus
access: proxy
url: http://thanos-query-frontend.monitoring.svc.cluster.local:9090
isDefault: true
- name: Loki
type: loki
access: proxy
url: http://loki-gateway.monitoring.svc.cluster.local
jsonData:
maxLines: 250
- name: Alertmanager
type: alertmanager
access: proxy
url: http://kube-prometheus-stack-alertmanager.monitoring.svc.cluster.local:9093
jsonData:
implementation: prometheus
dashboards:
default:
# Ref: https://grafana.com/grafana/dashboards/1860-node-exporter-full/
node-exporter-full:
gnetId: 1860
revision: 30
datasource: Prometheus
# Ref: https://grafana.com/grafana/dashboards/5342-ceph-pools/
ceph-pools:
gnetId: 5342
revision: 9
datasource: Prometheus
# Ref: https://grafana.com/grafana/dashboards/5336-ceph-osd-single/
ceph-osd:
gnetId: 5336
revision: 9
datasource: Prometheus
# Ref: https://grafana.com/grafana/dashboards/2842-ceph-cluster/
ceph-cluster:
gnetId: 2842
revision: 16
datasource: Prometheus
cert-manager:
url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/cert-manager/dashboards/cert-manager.json
datasource: Prometheus
external-secrets:
url: https://raw.githubusercontent.com/external-secrets/external-secrets/main/docs/snippets/dashboard.json
datasource: Prometheus
flux-cluster:
url: https://raw.githubusercontent.com/fluxcd/flux2/main/manifests/monitoring/monitoring-config/dashboards/cluster.json
datasource: Prometheus
flux-control-plane:
url: https://raw.githubusercontent.com/fluxcd/flux2/main/manifests/monitoring/monitoring-config/dashboards/control-plane.json
datasource: Prometheus
flux-logs:
url: https://raw.githubusercontent.com/fluxcd/flux2/main/manifests/monitoring/monitoring-config/dashboards/logs.json
datasource: Prometheus
kubernetes-api-server:
url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-system-api-server.json
datasource: Prometheus
kubernetes-coredns:
url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-system-coredns.json
datasource: Prometheus
kubernetes-global:
url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-global.json
datasource: Prometheus
kubernetes-namespaces:
url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-namespaces.json
datasource: Prometheus
kubernetes-nodes:
url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-nodes.json
datasource: Prometheus
kubernetes-pods:
url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-pods.json
datasource: Prometheus
ingress-nginx:
url: https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/grafana/dashboards/nginx.json
datasource: Prometheus
ingress-nginx-request-handling-performance:
url: https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/grafana/dashboards/request-handling-performance.json
datasource: Prometheus
deploymentStrategy:
type: Recreate
env:
GF_ANALYTICS_CHECK_FOR_UPDATES: false
GF_DATE_FORMATS_USE_BROWSER_LOCALE: true
GF_EXPLORE_ENABLED: true
GF_GRAFANA_NET_URL: https://grafana.net
GF_LOG_FILTERS: rendering:debug
GF_PANELS_DISABLE_SANITIZE_HTML: true
GF_SECURITY_ALLOW_EMBEDDING: true
GF_SECURITY_COOKIE_SAMESITE: grafana
GF_SERVER_ROOT_URL: "https://grafana.valinor.social"
envFromSecret: grafana-secrets
grafana.ini:
analytics:
check_for_updates: false
auth:
oauth_auto_login: true
auth.basic:
enabled: false
auth.generic_oauth:
enabled: true
name: Auth0
scopes: "openid profile email"
client_id: # Set by env vars
client_secret: # Set by env vars
auth_url: # Set by env vars
token_url: # Set by env vars
api_url: # Set by env vars
use_pkce: true
auth.generic_oauth.group_mapping:
role_attribute_path: |
contains("https://hsndev/groups"[*], 'grafana-admin') && 'Admin' || contains("https://hsndev/groups"[*], 'grafana-viewer') && 'Viewer'
org_id: 1
grafana_net:
url: https://grafana.net
log:
mode: console
paths:
data: /var/lib/grafana/data
logs: /var/log/grafana
plugins: /var/lib/grafana/plugins
provisioning: /etc/grafana/provisioning
server:
root_url: https://grafana.valinor.social
imageRenderer:
enabled: false
ingress:
enabled: true
ingressClassName: nginx
annotations:
external-dns.alpha.kubernetes.io/target: ingress.valinor.social
hosts:
- &host grafana.valinor.social
tls:
- hosts:
- *host
persistence:
enabled: false
plugins:
- natel-discrete-panel
- pr0ps-trackmap-panel
- grafana-piechart-panel
- vonage-status-panel
- grafana-worldmap-panel
- grafana-clock-panel
podAnnotations:
configmap.reloader.stakater.com/reload: grafana
secret.reloader.stakater.com/reload: grafana-secrets
rbac:
pspEnabled: false
resources:
requests:
cpu: 23m
memory: 127M
serviceMonitor:
enabled: true
sidecar:
dashboards:
enabled: true
labelValue: ""
label: grafana_dashboard
folderAnnotation: grafana_folder
searchNamespace: ALL
provider:
disableDelete: true
foldersFromFilesStructure: true
datasources:
enabled: true
labelValue: ""
searchNamespace: ALL
logLevel: INFO

View file

@ -11,7 +11,7 @@ spec:
chart:
spec:
chart: kube-prometheus-stack
version: 51.2.0
version: 51.3.0
sourceRef:
kind: HelmRepository
name: prometheus-community

View file

@ -1,4 +1,5 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
@ -9,7 +10,7 @@ spec:
chart:
spec:
chart: app-template
version: 1.5.1
version: 2.0.2
interval: 30m
sourceRef:
kind: HelmRepository
@ -17,14 +18,17 @@ spec:
namespace: flux-system
values:
image:
repository: ghcr.io/mendhak/http-https-echo
tag: "30@sha256:b6fccf52c73a5786b2e79e06e5c86d4689416d0ea41a6477af6ad89c1d81933f"
env:
HTTP_PORT: &port 8080
LOG_WITHOUT_NEWLINE: "true"
LOG_IGNORE_PATH: "/healthz"
controllers:
main:
containers:
main:
image:
repository: ghcr.io/mendhak/http-https-echo
tag: "30"
env:
HTTP_PORT: &port 8080
LOG_WITHOUT_NEWLINE: "true"
LOG_IGNORE_PATH: "/healthz"
service:
main:
@ -35,13 +39,16 @@ spec:
ingress:
main:
enabled: true
ingressClassName: "nginx"
className: "nginx"
annotations:
external-dns.alpha.kubernetes.io/target: "ingress.valinor.social"
hosts:
- host: &host "echo-server.valinor.social"
paths:
- path: /
service:
name: main
port: http
tls:
- hosts:
- *host

View file

@ -1,4 +1,5 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
@ -9,7 +10,7 @@ spec:
chart:
spec:
chart: app-template
version: 1.5.1
version: 2.0.2
interval: 30m
sourceRef:
kind: HelmRepository
@ -17,84 +18,121 @@ spec:
namespace: flux-system
values:
controller:
annotations:
reloader.stakater.com/auto: "true"
image:
repository: docker.io/1password/connect-api
tag: 1.7.2
env:
OP_BUS_PORT: "11220"
OP_BUS_PEERS: "localhost:11221"
OP_HTTP_PORT: &port 8080
OP_SESSION:
valueFrom:
secretKeyRef:
name: onepassword-connect-secret
key: onepassword-credentials.json
controllers:
main:
annotations:
reloader.stakater.com/auto: "true"
containers:
main:
image:
repository: docker.io/1password/connect-api
tag: 1.7.2
env:
OP_BUS_PORT: "11220"
OP_BUS_PEERS: "localhost:11221"
OP_HTTP_PORT: &port-connect 8080
OP_SESSION:
valueFrom:
secretKeyRef:
name: onepassword-connect-secret
key: onepassword-credentials.json
probes:
liveness:
enabled: true
custom: true
spec:
httpGet:
path: /heartbeat
port: *port-connect
initialDelaySeconds: 15
periodSeconds: 30
failureThreshold: 3
readiness:
enabled: true
custom: true
spec:
httpGet:
path: /health
port: *port-connect
initialDelaySeconds: 15
startup:
enabled: true
custom: true
spec:
httpGet:
path: /health
port: *port-connect
failureThreshold: 30
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 1
sync:
image:
repository: docker.io/1password/connect-sync
tag: 1.7.2
env:
- name: OP_SESSION
valueFrom:
secretKeyRef:
name: onepassword-connect-secret
key: onepassword-credentials.json
- name: OP_HTTP_PORT
value: &port-sync 8081
- name: OP_BUS_PORT
value: "11221"
- name: OP_BUS_PEERS
value: "localhost:11220"
probes:
readinessProbe:
httpGet:
path: /health
port: *port-sync
initialDelaySeconds: 15
livenessProbe:
httpGet:
path: /heartbeat
port: *port-sync
failureThreshold: 3
periodSeconds: 30
initialDelaySeconds: 15
volumeMounts:
- name: shared
mountPath: /home/opuser/.op/data
service:
main:
ports:
http:
port: *port
port: *port-connect
ingress:
main:
enabled: true
ingressClassName: "nginx"
className: "nginx"
annotations:
nginx.ingress.kubernetes.io/whitelist-source-range: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
hosts:
- host: &host "1pwconnect.valinor.social"
paths:
- path: /
service:
name: main
port: http
tls:
- hosts:
- *host
probes:
liveness:
enabled: true
custom: true
spec:
httpGet:
path: /heartbeat
port: *port
initialDelaySeconds: 15
periodSeconds: 30
failureThreshold: 3
readiness:
enabled: true
custom: true
spec:
httpGet:
path: /health
port: *port
initialDelaySeconds: 15
startup:
enabled: true
custom: true
spec:
httpGet:
path: /health
port: *port
failureThreshold: 30
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 1
podSecurityContext:
runAsUser: 999
runAsGroup: 999
defaultPodOptions:
securityContext:
runAsUser: 999
runAsGroup: 999
persistence:
shared:
enabled: true
type: emptyDir
mountPath: /home/opuser/.op/data
globalMounts:
- path: /home/opuser/.op/data
resources:
requests:
@ -102,35 +140,3 @@ spec:
memory: 10Mi
limits:
memory: 100Mi
sidecars:
sync:
name: sync
image: docker.io/1password/connect-sync:1.7.2@sha256:fe527ed9d81f193d8dfbba4140d61f9e8c8dceb0966b3009259087504e5ff79c
env:
- name: OP_SESSION
valueFrom:
secretKeyRef:
name: onepassword-connect-secret
key: onepassword-credentials.json
- name: OP_HTTP_PORT
value: &port 8081
- name: OP_BUS_PORT
value: "11221"
- name: OP_BUS_PEERS
value: "localhost:11220"
readinessProbe:
httpGet:
path: /health
port: *port
initialDelaySeconds: 15
livenessProbe:
httpGet:
path: /heartbeat
port: *port
failureThreshold: 3
periodSeconds: 30
initialDelaySeconds: 15
volumeMounts:
- name: shared
mountPath: /home/opuser/.op/data

View file

@ -0,0 +1,12 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrepository-source-v1beta2.json
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmRepository
metadata:
  name: crunchydata
  namespace: flux-system
spec:
  # Crunchy Data publishes charts to an OCI registry, not a classic index.
  type: oci
  interval: 30m
  url: oci://registry.developers.crunchydata.com/crunchydata
  timeout: 3m

View file

@ -8,6 +8,7 @@ resources:
- cilium.yaml
- cloudnative-pg.yaml
- crowdsec.yaml
- crunchydata.yaml
- dragonflydb.yaml
- elastic.yaml
- external-secrets.yaml