Compare commits

277 commits

Author SHA1 Message Date
db67f1cfb5 Merge pull request 'Update image ghcr.io/dragonflydb/dragonfly to v1.22.2' (#616) from renovate/ghcr.io-dragonflydb-dragonfly-1.x into main
Reviewed-on: #616
2024-09-18 10:35:55 -05:00
dd0a492f55 Merge pull request 'Update image docker.io/ollama/ollama to v0.3.11' (#619) from renovate/docker.io-ollama-ollama-0.x into main
Reviewed-on: #619
2024-09-18 10:35:45 -05:00
66f10678fc Merge pull request 'Update image quay.io/redlib/redlib to a7d6de7' (#620) from renovate/quay.io-redlib-redlib-latest into main
Reviewed-on: #620
2024-09-18 10:35:31 -05:00
544df1814f Update image quay.io/redlib/redlib to a7d6de7 2024-09-18 15:33:57 +00:00
5e1cfde4e0 Update image docker.io/ollama/ollama to v0.3.11 2024-09-18 06:03:25 +00:00
416ee6ec5d Merge pull request 'Update image quay.io/redlib/redlib to 67b0267' (#618) from renovate/quay.io-redlib-redlib-latest into main
Reviewed-on: #618
2024-09-17 14:25:10 -05:00
09f6d14e13
icmp not working for some reason. 2024-09-17 14:22:57 -05:00
bb04ed4812 Update image quay.io/redlib/redlib to 67b0267 2024-09-17 19:02:44 +00:00
96553547d7
test 2024-09-17 13:57:57 -05:00
611371a6dd
add gandalf/shadowfax with icmp and remove sting. Update hass to icmp. 2024-09-17 12:28:57 -05:00
3341360a4d Merge pull request 'Update image ghcr.io/onedr0p/qbittorrent to v4.6.7' (#617) from renovate/ghcr.io-onedr0p-qbittorrent-4.x into main
Reviewed-on: #617
2024-09-17 09:31:31 -05:00
f9bdb3ea45 Update image ghcr.io/onedr0p/qbittorrent to v4.6.7 2024-09-17 01:37:08 +00:00
ef0ef790a1 Update image ghcr.io/dragonflydb/dragonfly to v1.22.2 2024-09-16 20:32:24 +00:00
cbffaf9183
update ratios 2024-09-16 09:55:23 -05:00
3d0baf5c57
update ratios 2024-09-16 09:53:02 -05:00
ce0d6be9aa
update ratios 2024-09-16 09:38:23 -05:00
9e10841dbc Merge pull request 'Update image ghcr.io/onedr0p/radarr-develop to v5.10.3.9178' (#614) from renovate/ghcr.io-onedr0p-radarr-develop-5.x into main
Reviewed-on: #614
2024-09-15 17:47:00 -05:00
8e41655158 Merge pull request 'Update image ghcr.io/onedr0p/sonarr-develop to v4.0.9.2386' (#615) from renovate/ghcr.io-onedr0p-sonarr-develop-4.x into main
Reviewed-on: #615
2024-09-15 17:46:52 -05:00
3ff5ac97c2 Update image ghcr.io/onedr0p/sonarr-develop to v4.0.9.2386 2024-09-15 18:32:29 +00:00
8319925a7e Update image ghcr.io/onedr0p/radarr-develop to v5.10.3.9178 2024-09-15 15:07:09 +00:00
7399c39c04 Merge pull request 'Update image ghcr.io/recyclarr/recyclarr to v7.2.4' (#613) from renovate/ghcr.io-recyclarr-recyclarr-7.x into main
Reviewed-on: #613
2024-09-14 17:34:27 -05:00
b01e64f404 Update image ghcr.io/recyclarr/recyclarr to v7.2.4 2024-09-14 22:32:41 +00:00
44a7d70864
move coder to its own namespace. 2024-09-14 17:09:38 -05:00
59ab555ad3
add coder helm repo 2024-09-14 13:34:09 -05:00
826161535e
correct namespace 2024-09-14 13:30:04 -05:00
106ef8ff12
deploy coder 2024-09-14 13:29:12 -05:00
a7ab50f161
add coder 2024-09-14 13:27:58 -05:00
d1fcc8f1a2
update deprecated usage 2024-09-14 08:16:33 -05:00
4ab042f9f1
fix envrc 2024-09-14 08:13:39 -05:00
b46dfdd73c
update recyclarr profiles 2024-09-14 08:13:24 -05:00
3c73df68b3 Merge pull request 'Update image ghcr.io/autobrr/autobrr to v1.46.1' (#609) from renovate/ghcr.io-autobrr-autobrr-1.x into main
Reviewed-on: #609
2024-09-13 21:12:57 -05:00
b23d2c25f6 Merge pull request 'Update image kube-prometheus-stack to v62.7.0' (#611) from renovate/kube-prometheus-stack-62.x into main
Reviewed-on: #611
2024-09-13 21:11:38 -05:00
0c2443d064 Merge pull request 'Update image ghcr.io/onedr0p/kubanetics to v2024.9.1' (#608) from renovate/ghcr.io-onedr0p-kubanetics-2024.x into main
Reviewed-on: #608
2024-09-13 21:11:26 -05:00
53ec245e74 Merge pull request 'Update image docker.io/excalidraw/excalidraw to fae6678' (#610) from renovate/docker.io-excalidraw-excalidraw-latest into main
Reviewed-on: #610
2024-09-13 21:11:01 -05:00
1ac81bfc7f Merge pull request 'Update image ghcr.io/jorenn92/maintainerr to v2.1.2' (#612) from renovate/ghcr.io-jorenn92-maintainerr-2.x into main
Reviewed-on: #612
2024-09-13 21:10:44 -05:00
25bfad64cb Update image ghcr.io/jorenn92/maintainerr to v2.1.2 2024-09-13 08:02:40 +00:00
cee7c438e0
correct path 2024-09-12 16:46:12 -05:00
c0ec508f86
deploy plex -- this time for real :) 2024-09-12 16:44:18 -05:00
fc1fa644aa
add plex 2024-09-12 16:42:30 -05:00
08d7d50ed9 Update image kube-prometheus-stack to v62.7.0 2024-09-12 17:34:03 +00:00
66c444717f Update image ghcr.io/autobrr/autobrr to v1.46.1 2024-09-12 17:05:14 +00:00
d903517e2a Update image docker.io/excalidraw/excalidraw to fae6678 2024-09-12 15:32:33 +00:00
d6859bf689
moved media storage to shadowfax 2024-09-12 07:44:39 -05:00
d93afbcd92
undeploy :(
too much management, using 1pass for everything secrets related instead.
2024-09-11 22:49:47 -05:00
ffada4bcab Update image ghcr.io/onedr0p/kubanetics to v2024.9.1 2024-09-12 02:04:07 +00:00
6826e5d5d7
beware 2024-09-11 15:58:44 -05:00
168094c027 Merge pull request 'Update chart external-dns to 1.15.0' (#606) from renovate/external-dns-1.x into main
Reviewed-on: #606
2024-09-11 15:04:08 -05:00
fa502b33db Merge pull request 'Update image public.ecr.aws/hashicorp/vault to v1.17.5' (#607) from renovate/public.ecr.aws-hashicorp-vault-1.x into main
Reviewed-on: #607
2024-09-11 14:59:18 -05:00
55cf6ed705
WHY is this missing again in the statefulset 2024-09-11 14:47:43 -05:00
d3e601701a Update image public.ecr.aws/hashicorp/vault to v1.17.5 2024-09-11 19:33:13 +00:00
34ab7c09de
update registry 2024-09-11 14:30:49 -05:00
e5346e2ec6 Update chart external-dns to 1.15.0 2024-09-11 16:33:38 +00:00
3472f9689a
debug 2024-09-11 01:05:19 -05:00
ce1f7c5b08
update cluster address 2024-09-11 01:00:54 -05:00
db2f65711c
fix 2024-09-11 00:54:27 -05:00
4a48893d7a
IN! 2024-09-11 00:50:07 -05:00
450ac27a67
out! 2024-09-11 00:48:14 -05:00
159d198407
re-enable probes 2024-09-11 00:43:51 -05:00
15b67972c4
add auto unseal, replicas 3 2024-09-11 00:16:07 -05:00
6995f60582
auto unseal 2024-09-10 23:15:26 -05:00
723435aa15
update size 2024-09-10 22:52:32 -05:00
ca2a5db255
object not array 2024-09-10 22:49:28 -05:00
f110b83a23
autojoin & debug 2024-09-10 22:47:15 -05:00
806b86b233
default 2024-09-10 22:30:53 -05:00
738f039155
retryjoin 2024-09-10 22:15:27 -05:00
beb97dafee
leave default 2024-09-10 21:35:16 -05:00
24c1c9462d
debug 2024-09-10 21:11:57 -05:00
c26261865e
revert 2024-09-10 20:58:46 -05:00
0253ac813d
debug 2024-09-10 20:57:33 -05:00
357f836592
move ports back to 8200 2024-09-10 20:52:52 -05:00
a8de7ab7b0
debug 2024-09-10 20:45:05 -05:00
1872415ea0
correct service name 2024-09-10 20:33:12 -05:00
4e224c70af
uninstall until install is correctly installed, then rollback 2024-09-10 20:24:16 -05:00
dff47edb2f
no template 2024-09-10 20:16:33 -05:00
04cbd1d372
vault creates files in dirs other than the 2 specified. Lets see which. 2024-09-10 20:03:00 -05:00
7c6a2a4202
add vault 2024-09-10 20:00:42 -05:00
91032819a6 Merge pull request 'Update image docker.io/ollama/ollama to v0.3.10' (#596) from renovate/docker.io-ollama-ollama-0.x into main
Reviewed-on: #596
2024-09-10 13:19:53 -05:00
b006d61320 Merge pull request 'Update chart external-secrets to 0.10.3' (#602) from renovate/external-secrets-0.x into main
Reviewed-on: #602
2024-09-10 13:19:30 -05:00
4ad48507be Merge pull request 'Update image docker to v27.2.1' (#603) from renovate/docker-27.x into main
Reviewed-on: #603
2024-09-10 13:18:29 -05:00
d1e634dc0b Merge pull request 'Update image docker.io/cloudflare/cloudflared to v2024.9.1' (#604) from renovate/docker.io-cloudflare-cloudflared-2024.x into main
Reviewed-on: #604
2024-09-10 13:15:38 -05:00
51ea447c3b Update image docker.io/cloudflare/cloudflared to v2024.9.1 2024-09-10 18:02:23 +00:00
e3407ffcad Update image docker to v27.2.1 2024-09-09 23:32:31 +00:00
b9ff13b949 Update chart external-secrets to 0.10.3 2024-09-09 15:32:36 +00:00
495b2c34e7 Merge pull request 'Update image ghcr.io/jorenn92/maintainerr to v2.1.1' (#601) from renovate/ghcr.io-jorenn92-maintainerr-2.x into main
Reviewed-on: #601
2024-09-09 07:21:21 -05:00
3c67c1a8c3 Update image ghcr.io/jorenn92/maintainerr to v2.1.1 2024-09-09 07:32:36 +00:00
abddb24f66 Merge pull request 'Update image ghcr.io/onedr0p/radarr-develop to v5.10.2.9164' (#597) from renovate/ghcr.io-onedr0p-radarr-develop-5.x into main
Reviewed-on: #597
2024-09-08 10:57:11 -05:00
143aeac199 Merge pull request 'Update image ghcr.io/onedr0p/prowlarr-develop to v1.24.0.4721' (#598) from renovate/ghcr.io-onedr0p-prowlarr-develop-1.x into main
Reviewed-on: #598
2024-09-08 10:56:14 -05:00
d3310b7f38 Merge pull request 'Update image kube-prometheus-stack to v62.6.0' (#599) from renovate/kube-prometheus-stack-62.x into main
Reviewed-on: #599
2024-09-08 10:54:46 -05:00
15a9eae30f Merge pull request 'Update image ghcr.io/dragonflydb/dragonfly to v1.22.1' (#600) from renovate/ghcr.io-dragonflydb-dragonfly-1.x into main
Reviewed-on: #600
2024-09-08 10:52:59 -05:00
d3490f9ddc Update image ghcr.io/dragonflydb/dragonfly to v1.22.1 2024-09-08 10:03:28 +00:00
165ff2a9b0 Update image kube-prometheus-stack to v62.6.0 2024-09-08 09:02:40 +00:00
e1e6f693d9 Update image ghcr.io/onedr0p/prowlarr-develop to v1.24.0.4721 2024-09-08 09:02:35 +00:00
bc69e26911 Update image ghcr.io/onedr0p/radarr-develop to v5.10.2.9164 2024-09-08 08:32:36 +00:00
e8743cd04c Update image docker.io/ollama/ollama to v0.3.10 2024-09-08 08:32:33 +00:00
7fc0a26923 Merge pull request 'Update image ghcr.io/open-webui/open-webui to v0.3.21' (#594) from renovate/ghcr.io-open-webui-open-webui-0.x into main
Reviewed-on: #594
2024-09-07 20:03:03 -05:00
7fa4cb2fd5 Update image ghcr.io/open-webui/open-webui to v0.3.21 2024-09-08 00:32:41 +00:00
b91b5cef47 Merge pull request 'Update image kube-prometheus-stack to v62.5.1' (#595) from renovate/kube-prometheus-stack-62.x into main
Reviewed-on: #595
2024-09-07 12:24:28 -05:00
11417e8c0f Update image kube-prometheus-stack to v62.5.1 2024-09-07 16:02:34 +00:00
32bbd58eb4 Revert Open-WebUI v0.3.20 --> v0.3.19
revert Merge pull request 'Update image ghcr.io/open-webui/open-webui to v0.3.20' (#592) from renovate/ghcr.io-open-webui-open-webui-0.x into main

Reviewed-on: #592
2024-09-07 05:29:25 -05:00
1ae8b2083a
do not specify image for csi 2024-09-07 05:19:05 -05:00
dad46cae84 Merge pull request 'Update image kube-prometheus-stack to v62.5.0' (#593) from renovate/kube-prometheus-stack-62.x into main
Reviewed-on: #593
2024-09-07 05:16:02 -05:00
045d2f3095 Merge pull request 'Update image ghcr.io/open-webui/open-webui to v0.3.20' (#592) from renovate/ghcr.io-open-webui-open-webui-0.x into main
Reviewed-on: #592
2024-09-07 05:15:47 -05:00
126bd94cc3 Update kubernetes/flux/repositories/helm/bjw-s.yaml 2024-09-07 02:20:30 -05:00
4dbcc5517c Update image kube-prometheus-stack to v62.5.0 2024-09-07 07:02:43 +00:00
6644ff9954 Update image ghcr.io/open-webui/open-webui to v0.3.20 2024-09-07 04:32:32 +00:00
254fe8aa5f Merge pull request 'Update image ghcr.io/open-webui/open-webui to v0.3.19' (#591) from renovate/ghcr.io-open-webui-open-webui-0.x into main
Reviewed-on: #591
2024-09-06 22:36:05 -05:00
454274fbc8 Merge pull request 'Update image docker.io/ollama/ollama to v0.3.9' (#590) from renovate/docker.io-ollama-ollama-0.x into main
Reviewed-on: #590
2024-09-06 22:35:57 -05:00
f5775487f4 Merge pull request 'Update image ghcr.io/onedr0p/kubanetics to v2024.9.0' (#589) from renovate/ghcr.io-onedr0p-kubanetics-2024.x into main
Reviewed-on: #589
2024-09-06 22:35:50 -05:00
fdf918517d Update image ghcr.io/open-webui/open-webui to v0.3.19 2024-09-07 03:35:20 +00:00
6bb70e4cfd Update image docker.io/ollama/ollama to v0.3.9 2024-09-07 03:35:17 +00:00
e75727896d
shorten interval until it's back 2024-09-06 22:26:55 -05:00
c8141ae442
convert to oci 2024-09-06 22:23:28 -05:00
e22c48b8fb
temp update 2024-09-06 22:21:26 -05:00
7aadf8d0a5
wrong storageclass 2024-09-06 22:17:01 -05:00
dec0f4c86c
add ai workloads 2024-09-06 22:14:38 -05:00
6cbf60a728
copy pasta -- wrong directory 2024-09-06 22:12:50 -05:00
19e17535be
add nvidia rule 2024-09-06 22:09:23 -05:00
af75461454
dragonfly dep change 2024-09-06 21:54:15 -05:00
733f05eccf
cluster name update 2024-09-06 21:52:28 -05:00
aae1f28c84
Add nvidia device plugin 2024-09-06 21:49:49 -05:00
5775937c46
RWX 2024-09-06 21:29:36 -05:00
71cc11f56c Update image ghcr.io/onedr0p/kubanetics to v2024.9.0 2024-09-07 00:05:02 +00:00
e2d20689a9 Merge pull request 'Update image ghcr.io/cross-seed/cross-seed to v6.0.0-34' (#575) from renovate/ghcr.io-cross-seed-cross-seed-6.x into main
Reviewed-on: #575
2024-09-05 23:14:16 -05:00
4eb6f072d5 Merge pull request 'Update image ghcr.io/onedr0p/sonarr-develop to v4.0.9.2342' (#576) from renovate/ghcr.io-onedr0p-sonarr-develop-4.x into main
Reviewed-on: #576
2024-09-05 23:08:03 -05:00
a18143fdfc Merge pull request 'Update image ghcr.io/jorenn92/maintainerr to v2.1.0' (#578) from renovate/ghcr.io-jorenn92-maintainerr-2.x into main
Reviewed-on: #578
2024-09-05 23:06:07 -05:00
21e25b4134 Merge pull request 'Update image ghcr.io/onedr0p/radarr-develop to v5.10.1.9125' (#579) from renovate/ghcr.io-onedr0p-radarr-develop-5.x into main
Reviewed-on: #579
2024-09-05 23:05:23 -05:00
9e43fe3fdd Merge pull request 'Update chart external-secrets to 0.10.2' (#581) from renovate/external-secrets-0.x into main
Reviewed-on: #581
2024-09-05 23:05:12 -05:00
cc3643a323 Merge pull request 'Update chart app-template to 3.4.0' (#586) from renovate/app-template-3.x into main
Reviewed-on: #586
2024-09-05 23:04:59 -05:00
ebbc7a1e83 Merge pull request 'Update image kube-prometheus-stack to v62.4.0' (#587) from renovate/kube-prometheus-stack-62.x into main
Reviewed-on: #587
2024-09-05 23:04:35 -05:00
ea3baef4a9 Update image kube-prometheus-stack to v62.4.0 2024-09-06 04:03:27 +00:00
740e725f4e Update chart app-template to 3.4.0 2024-09-06 04:03:21 +00:00
939b78eb1d Merge pull request 'Update chart snapshot-controller to 3.0.6' (#582) from renovate/patch-external-snapshotter into main
Reviewed-on: #582
2024-09-05 23:03:08 -05:00
581e72d35b Merge pull request 'Update Rook Ceph group to v1.15.1 (patch)' (#583) from renovate/patch-rook-ceph into main
Reviewed-on: #583
2024-09-05 23:02:08 -05:00
ee559917f8 Merge pull request 'Update image ghcr.io/recyclarr/recyclarr to v7.2.3' (#584) from renovate/ghcr.io-recyclarr-recyclarr-7.x into main
Reviewed-on: #584
2024-09-05 23:01:24 -05:00
f9617c0df3 Merge pull request 'Update image spegel to v0.0.24' (#585) from renovate/spegel-0.x into main
Reviewed-on: #585
2024-09-05 22:59:29 -05:00
ac5e7485e8 Merge pull request 'Update image ghcr.io/onedr0p/prowlarr-develop to v1.23.1.4708' (#577) from renovate/ghcr.io-onedr0p-prowlarr-develop-1.x into main
Reviewed-on: #577
2024-09-05 22:58:34 -05:00
d9382c2373
add gatus 2024-09-05 22:55:58 -05:00
e4bfce1c60
update schema location 2024-09-05 22:51:36 -05:00
7d879d7a5b
scale up 2024-09-05 22:48:28 -05:00
092669cef9
add helm repo to flux for emqx 2024-09-05 22:46:52 -05:00
eb74e0d027
add prometheus operator crds for managed crd updates 2024-09-05 22:24:51 -05:00
d766c6ae21
adding quite a few config options for renovate 2024-09-05 18:08:01 -05:00
443b9d99bd
adding renovate groups 2024-09-05 17:23:14 -05:00
e6021cba90
update prettier config && format renovate 2024-09-05 16:21:42 -05:00
f1085d5f39
correct filename 2024-09-05 16:06:06 -05:00
f98c4196d4
deploy dragonfly and operator more declaritively
also easier to update
2024-09-05 16:04:37 -05:00
a99c18b3ae Update spegel Docker tag to v0.0.24 2024-09-05 12:03:27 +00:00
d691bb8de7 Update ghcr.io/recyclarr/recyclarr Docker tag to v7.2.3 2024-09-05 11:05:24 +00:00
9dd9bbf1de Update Rook Ceph group to v1.15.1 2024-09-05 11:05:11 +00:00
eb05484b71 Update ghcr.io/onedr0p/sonarr-develop Docker tag to v4.0.9.2342 2024-09-05 10:48:14 +00:00
57a707684d Update Helm release snapshot-controller to v3.0.6 2024-09-05 10:48:02 +00:00
f799abc2a8 Update Helm release external-secrets to v0.10.2 2024-09-05 10:47:57 +00:00
5ac8a712ba
add forgejo ci-runners 2024-09-05 05:43:09 -05:00
84b5f5f139
add searxng 2024-09-05 05:41:21 -05:00
9e126bd52e
add recyclarr 2024-09-05 05:40:23 -05:00
aa35771649
add it-tools and excalidraw 2024-09-05 05:05:08 -05:00
49928191b8
add autobrr and omegabrr 2024-09-05 05:01:24 -05:00
9ee9ad6a4e
update ks deps 2024-09-05 04:55:11 -05:00
ff038bcf37
update repo 2024-09-05 04:52:45 -05:00
19ca96b78e
deploy overseerr 2024-09-05 04:52:01 -05:00
9c77dc55cd
add overseerr and updated ks deps 2024-09-05 04:51:39 -05:00
74d8d5b6d9
add redlib 2024-09-05 04:44:16 -05:00
e21aa0faa9
correct repo 2024-09-05 04:39:58 -05:00
acaf0c47d3
add tautulli 2024-09-05 04:39:41 -05:00
f0d975a1ac
add unpackerr 2024-09-05 04:36:42 -05:00
8346a566bd
add prowlarr 2024-09-05 03:55:32 -05:00
b81d0113ad
add atuin 2024-09-05 03:50:47 -05:00
749fd68860
re-encrypt. 2024-09-05 03:48:31 -05:00
ae0be25860
update repo 2024-09-05 03:44:53 -05:00
db03b996a9
adding qb 2024-09-05 03:41:16 -05:00
02a039b199
add sabnzbd 2024-09-05 03:35:08 -05:00
f846672628
deploy sonarr 2024-09-05 03:29:29 -05:00
c0d67a970c
test 2024-09-05 03:27:26 -05:00
3453328f87
add sonarr 2024-09-05 03:21:54 -05:00
83d2db71b3
more nodes! 2024-09-05 03:18:26 -05:00
2871b96407
wrong snapshotclass, removing to fallback to default 2024-09-05 02:35:57 -05:00
431640d7fe
update local storage class 2024-09-05 02:33:06 -05:00
0d825891da
add radarr 2024-09-05 01:57:44 -05:00
e3c635fdc5
update clustername 2024-09-05 01:41:34 -05:00
3251d8240b
adding database workloads
crunchy postgres
dragonfly
emqx
2024-09-05 01:33:23 -05:00
d439c2084c
remove zfs engine 2024-09-05 01:21:18 -05:00
2651c3efff
add openebs localpv 2024-09-05 01:11:07 -05:00
2f17e3f3bd
add extra mounts to all workers for openebs local 2024-09-05 01:03:54 -05:00
fb9ca1f9b0
add kubelet mounts
for openebs local -- this provides low latency PV storage for databases
and the like.
2024-09-05 00:36:11 -05:00
ad7fc04320
not working quite right, need to rewrite talos tasks. 2024-09-05 00:00:06 -05:00
0c6deac2c6 Merge pull request 'moving to the shire' (#580) from theshire into main
Reviewed-on: jahanson/homelab#580
2024-09-04 13:40:55 -05:00
d9ff973a55
moving to the shire
Expanding from 1 node to 6 + 2 VMs with GPUs
2024-09-04 13:35:14 -05:00
a8edf29bcb Update ghcr.io/onedr0p/radarr-develop Docker tag to v5.10.1.9125 2024-09-02 15:32:03 +00:00
cfa37e2abd Update ghcr.io/jorenn92/maintainerr Docker tag to v2.1.0 2024-09-02 12:02:05 +00:00
60cbc8a66c Update ghcr.io/onedr0p/prowlarr-develop Docker tag to v1.23.1.4708 2024-09-02 07:32:28 +00:00
2e2da1768f
add coredns 2024-09-02 00:15:46 -05:00
bce0eb418b
update mount 2024-09-01 22:36:01 -05:00
d192d02fbb
remove socketlb 2024-09-01 21:21:07 -05:00
ff56d9dc0d
ntp --> cloudflare 2024-09-01 21:16:41 -05:00
20671fc186
move off of synology 2024-09-01 21:16:07 -05:00
898483ce18
update cilium values 2024-09-01 21:03:17 -05:00
09f310115f Update ghcr.io/cross-seed/cross-seed Docker tag to v6.0.0-34 2024-09-02 00:02:00 +00:00
d04e641038 Update ghcr.io/onedr0p/sabnzbd:4.3.3 Docker digest to 4ad7373 2024-09-01 23:31:57 +00:00
4805fffc38 Update ghcr.io/onedr0p/qbittorrent:4.6.6 Docker digest to 2fd0eba 2024-09-01 23:14:06 +00:00
2115c02c35 Update ghcr.io/onedr0p/prowlarr-develop:1.21.2.4649 Docker digest to 4482eda 2024-09-01 23:13:29 +00:00
07aac639bd Update ghcr.io/onedr0p/sabnzbd:4.3.3 Docker digest to d227dba 2024-08-31 00:09:53 +00:00
c71fefa958 Update ghcr.io/onedr0p/qbittorrent:4.6.6 Docker digest to 78bb1da 2024-08-31 00:04:39 +00:00
534cb0b7f3 Update ghcr.io/onedr0p/prowlarr-develop:1.21.2.4649 Docker digest to 122ee7c 2024-08-31 00:04:02 +00:00
608e4242f3 Merge pull request 'Update Helm release victoria-metrics-k8s-stack to v0.25.8' (#560) from renovate/victoria-metrics-k8s-stack-0.x into main
Reviewed-on: jahanson/homelab#560
2024-08-30 19:01:53 -05:00
904f677095 Merge pull request 'Update ghcr.io/twin/gatus Docker tag to v5.12.1' (#562) from renovate/ghcr.io-twin-gatus-5.x into main
Reviewed-on: jahanson/homelab#562
2024-08-30 18:58:11 -05:00
1a4b97d104 Merge pull request 'Update Helm release external-secrets to v0.10.2' (#566) from renovate/external-secrets-0.x into main
Reviewed-on: jahanson/homelab#566
2024-08-30 18:57:46 -05:00
2bc82c3376 Merge pull request 'Update ghcr.io/kiwigrid/k8s-sidecar Docker tag to v1.27.6' (#572) from renovate/ghcr.io-kiwigrid-k8s-sidecar-1.x into main
Reviewed-on: jahanson/homelab#572
2024-08-30 18:56:33 -05:00
0639fe6f7a Merge pull request 'Update public.ecr.aws/emqx/emqx Docker tag to v5.8.0' (#567) from renovate/public.ecr.aws-emqx-emqx-5.x into main
Reviewed-on: jahanson/homelab#567
2024-08-30 18:56:21 -05:00
1d7a19573c Merge pull request 'Update Helm release app-template to v3.4.0' (#564) from renovate/app-template-3.x into main
Reviewed-on: jahanson/homelab#564
2024-08-30 18:50:28 -05:00
b6f54c5b9a Merge pull request 'Update ghcr.io/cross-seed/cross-seed Docker tag to v6.0.0-32' (#568) from renovate/ghcr.io-cross-seed-cross-seed-6.x into main
Reviewed-on: jahanson/homelab#568
2024-08-30 18:40:42 -05:00
6c29889443 Merge pull request 'Update docker.io/excalidraw/excalidraw:latest Docker digest to 2e35b32' (#569) from renovate/docker.io-excalidraw-excalidraw-latest into main
Reviewed-on: jahanson/homelab#569
2024-08-30 18:40:12 -05:00
a4c2af9903 Merge pull request 'Update docker Docker tag to v27.2.0' (#570) from renovate/docker-27.x into main
Reviewed-on: jahanson/homelab#570
2024-08-30 18:40:04 -05:00
6878fd39ac Merge pull request 'Update ghcr.io/buroa/qbtools Docker tag to v0.16.10' (#571) from renovate/ghcr.io-buroa-qbtools-0.x into main
Reviewed-on: jahanson/homelab#571
2024-08-30 18:39:53 -05:00
f4b6dc2a8e
all the way up 2024-08-30 17:12:04 -05:00
959bb7cee7 Update ghcr.io/buroa/qbtools Docker tag to v0.16.10 2024-08-30 20:01:52 +00:00
193fbec4e2 Update Helm release victoria-metrics-k8s-stack to v0.25.8 2024-08-30 14:01:53 +00:00
e6705d15a7 Update ghcr.io/kiwigrid/k8s-sidecar Docker tag to v1.27.6 2024-08-30 05:32:07 +00:00
78029ea910
no delete for now 2024-08-29 06:29:05 -05:00
88e98fb833
bond needs static 2024-08-29 06:29:04 -05:00
69a47db688 Update ghcr.io/twin/gatus Docker tag to v5.12.1 2024-08-29 01:01:53 +00:00
64b8ce71d8 Update docker Docker tag to v27.2.0 2024-08-28 23:31:56 +00:00
eb02ba4634 Update ghcr.io/cross-seed/cross-seed Docker tag to v6.0.0-32 2024-08-28 23:31:52 +00:00
53569920bb Update docker.io/excalidraw/excalidraw:latest Docker digest to 2e35b32 2024-08-28 23:01:49 +00:00
0e727d7b1b Update Helm release external-secrets to v0.10.2 2024-08-28 16:31:51 +00:00
b7dc417177 Update public.ecr.aws/emqx/emqx Docker tag to v5.8.0 2024-08-28 14:01:52 +00:00
6ef4308e32 Merge pull request 'Update ghcr.io/cross-seed/cross-seed Docker tag to v6.0.0-30' (#559) from renovate/ghcr.io-cross-seed-cross-seed-6.x into main
Reviewed-on: jahanson/homelab#559
2024-08-28 02:03:53 -05:00
602c11dfa7 Merge pull request 'Update ghcr.io/onedr0p/sonarr-develop Docker tag to v4.0.9.2278' (#563) from renovate/ghcr.io-onedr0p-sonarr-develop-4.x into main
Reviewed-on: jahanson/homelab#563
2024-08-28 02:03:34 -05:00
ac137f34e7 Merge pull request 'Update docker.io/ollama/ollama Docker tag to v0.3.8' (#561) from renovate/ollama into main
Reviewed-on: jahanson/homelab#561
2024-08-28 02:03:20 -05:00
22057aae93 Update docker.io/ollama/ollama Docker tag to v0.3.8 2024-08-28 01:01:29 +00:00
51bb105937 Merge pull request 'Update ghcr.io/open-webui/open-webui Docker tag to v0.3.16' (#565) from renovate/ghcr.io-open-webui-open-webui-0.x into main
Reviewed-on: jahanson/homelab#565
2024-08-27 12:05:54 -05:00
62ad3fbc4b Update ghcr.io/open-webui/open-webui Docker tag to v0.3.16 2024-08-27 17:02:39 +00:00
997233bec6 Update Helm release app-template to v3.4.0 2024-08-27 12:31:38 +00:00
5d64fcc03c Update ghcr.io/onedr0p/sonarr-develop Docker tag to v4.0.9.2278 2024-08-27 05:31:33 +00:00
053a3fbff0 Update ghcr.io/cross-seed/cross-seed Docker tag to v6.0.0-30 2024-08-26 17:01:28 +00:00
92d5d2976e Merge pull request 'Update ghcr.io/onedr0p/sonarr-develop Docker tag to v4.0.9.2257' (#558) from renovate/ghcr.io-onedr0p-sonarr-develop-4.x into main
Reviewed-on: jahanson/homelab#558
2024-08-25 23:18:30 -05:00
68119b054c Merge pull request 'Update ghcr.io/cross-seed/cross-seed Docker tag to v6.0.0-29' (#557) from renovate/ghcr.io-cross-seed-cross-seed-6.x into main
Reviewed-on: jahanson/homelab#557
2024-08-25 23:18:06 -05:00
eb5d4f104c Merge pull request 'Update ghcr.io/autobrr/autobrr Docker tag to v1.45.0' (#556) from renovate/ghcr.io-autobrr-autobrr-1.x into main
Reviewed-on: jahanson/homelab#556
2024-08-25 23:16:59 -05:00
cdc5581d70 Update ghcr.io/onedr0p/sonarr-develop Docker tag to v4.0.9.2257 2024-08-26 01:31:38 +00:00
eeea43e3a2 Update ghcr.io/cross-seed/cross-seed Docker tag to v6.0.0-29 2024-08-25 22:01:34 +00:00
e2c786ee10 Update ghcr.io/autobrr/autobrr Docker tag to v1.45.0 2024-08-25 21:01:36 +00:00
4d55562e4d
update to semver 2024-08-25 15:05:30 -05:00
519169e5a5 Merge pull request 'Update ghcr.io/recyclarr/recyclarr:7.2.2 Docker digest to 149eacf' (#555) from renovate/ghcr.io-recyclarr-recyclarr-7.2.2 into main
Reviewed-on: jahanson/homelab#555
2024-08-25 11:03:04 -05:00
d1f5525420 Update ghcr.io/recyclarr/recyclarr:7.2.2 Docker digest to 149eacf 2024-08-25 16:02:34 +00:00
7e9be2cfc7 Merge pull request 'Update ghcr.io/recyclarr/recyclarr Docker tag to v7.2.2' (#554) from renovate/ghcr.io-recyclarr-recyclarr-7.x into main
Reviewed-on: jahanson/homelab#554
2024-08-25 09:33:55 -05:00
40204291bd Update ghcr.io/recyclarr/recyclarr Docker tag to v7.2.2 2024-08-25 14:31:36 +00:00
fb3d5c55f4 Merge pull request 'Update ghcr.io/onedr0p/radarr-develop Docker tag to v5.10.0.9090' (#553) from renovate/ghcr.io-onedr0p-radarr-develop-5.x into main
Reviewed-on: jahanson/homelab#553
2024-08-25 07:19:57 -05:00
50ec476372 Update ghcr.io/onedr0p/radarr-develop Docker tag to v5.10.0.9090 2024-08-25 11:09:35 +00:00
b905ed5d0b
undeploy for now 2024-08-24 21:37:10 -05:00
f6581a53e5
dep until it's placed on another service. 2024-08-24 12:16:19 -05:00
a89f1de395 Merge pull request 'Update ghcr.io/autobrr/omegabrr Docker tag to v1.14.0' (#552) from renovate/ghcr.io-autobrr-omegabrr-1.x into main
Reviewed-on: jahanson/homelab#552
2024-08-24 10:18:47 -05:00
4f3e5da071 Update ghcr.io/autobrr/omegabrr Docker tag to v1.14.0 2024-08-24 13:01:12 +00:00
6d6659a6fb Merge pull request 'Update Helm release victoria-metrics-k8s-stack to v0.25.3' (#548) from renovate/victoria-metrics-k8s-stack-0.x into main
Reviewed-on: jahanson/homelab#548
2024-08-24 00:37:12 -05:00
1cbbe84cd0 Merge pull request 'Update code.forgejo.org/forgejo/runner Docker tag to v3.5.1' (#551) from renovate/code.forgejo.org-forgejo-runner-3.x into main
Reviewed-on: jahanson/homelab#551
2024-08-23 16:31:42 -05:00
21210cab43 Update code.forgejo.org/forgejo/runner Docker tag to v3.5.1 2024-08-23 21:31:08 +00:00
2d3c9f4652 Merge pull request 'Update docker.io/excalidraw/excalidraw:latest Docker digest to 4ac2a8c' (#550) from renovate/docker.io-excalidraw-excalidraw-latest into main
Reviewed-on: jahanson/homelab#550
2024-08-23 14:35:42 -05:00
a151d3d658 Update docker.io/excalidraw/excalidraw:latest Docker digest to 4ac2a8c 2024-08-23 19:31:12 +00:00
ee4ceb505d
update to minimum of 30s 2024-08-23 10:34:21 -05:00
29c6ebf86f Merge pull request 'Update ghcr.io/cross-seed/cross-seed Docker tag to v6.0.0-28' (#549) from renovate/ghcr.io-cross-seed-cross-seed-6.x into main
Reviewed-on: jahanson/homelab#549
2024-08-23 10:32:03 -05:00
251ce90154 Update ghcr.io/cross-seed/cross-seed Docker tag to v6.0.0-28 2024-08-23 15:31:08 +00:00
afa49ce87d
loose versioning for cross-seed 2024-08-23 10:02:09 -05:00
0c6f9c2136
update cross-seed, remove old code 2024-08-23 09:37:21 -05:00
072163eaa7
Move protonvpn variant to archive.
Still works great, just don't need it anymore.
2024-08-23 09:17:45 -05:00
926583acc4
config rearrange 2024-08-23 09:04:06 -05:00
9bfbc9ceab
well, lets give this a go 2024-08-23 08:25:43 -05:00
aa0af4aade Update Helm release victoria-metrics-k8s-stack to v0.25.3 2024-08-23 13:01:12 +00:00
ea08873634 Merge pull request 'Update docker.io/cloudflare/cloudflared Docker tag to v2024.8.3' (#546) from renovate/docker.io-cloudflare-cloudflared-2024.x into main
Reviewed-on: jahanson/homelab#546
2024-08-22 09:32:21 -05:00
0ef05b912c Update docker.io/cloudflare/cloudflared Docker tag to v2024.8.3 2024-08-22 14:31:04 +00:00
4b92888e41 Merge pull request 'Update docker.dragonflydb.io/dragonflydb/operator Docker tag to v1.1.7' (#545) from renovate/docker.dragonflydb.io-dragonflydb-operator-1.x into main
Reviewed-on: jahanson/homelab#545
2024-08-22 08:00:09 -05:00
37499fa72b Update docker.dragonflydb.io/dragonflydb/operator Docker tag to v1.1.7 2024-08-22 12:01:09 +00:00
16e61a4fb4 Merge pull request 'Update pgo Docker tag to v5.6.1' (#542) from renovate/pgo-5.x into main
Reviewed-on: jahanson/homelab#542
2024-08-22 05:33:55 -05:00
4f8c537458 Merge pull request 'Update prometheus-node-exporter Docker tag to v4.39.0' (#544) from renovate/prometheus-node-exporter-4.x into main
Reviewed-on: jahanson/homelab#544
2024-08-22 05:32:46 -05:00
2bb8531dbd Update prometheus-node-exporter Docker tag to v4.39.0 2024-08-22 01:32:09 +00:00
6d17eef027 Update pgo Docker tag to v5.6.1 2024-08-22 00:03:08 +00:00
853a0762a4 Merge pull request 'Update ghcr.io/qdm12/gluetun:latest Docker digest to a7f494e' (#543) from renovate/ghcr.io-qdm12-gluetun-latest into main
Reviewed-on: jahanson/homelab#543
2024-08-21 19:01:41 -05:00
ed7eae27a7 Update ghcr.io/qdm12/gluetun:latest Docker digest to a7f494e 2024-08-22 00:01:12 +00:00
4d5399bf47 Merge pull request 'revert pgo 5.6.1 --> 5.6.0' (#539) from revert-pgo-5.6.1 into main
Reviewed-on: jahanson/homelab#539
2024-08-21 16:26:46 -05:00
33b3aaef29 revert bf1e6aab05
revert Merge pull request 'Update pgo Docker tag to v5.6.1' (#537) from renovate/pgo-5.x into main

Reviewed-on: jahanson/homelab#537
2024-08-21 16:25:49 -05:00
408 changed files with 2128 additions and 12014 deletions

@@ -1,9 +0,0 @@
---
skip_list:
- yaml[line-length]
- var-naming
warn_list:
- command-instead-of-shell
- deprecated-command-syntax
- experimental
- no-changed-when
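
Since this was the repository-root .ansible-lint (now removed), linting picked it up automatically; a minimal usage sketch, run from the repo root:

# ansible-lint reads .ansible-lint from the working directory, so the
# skip_list/warn_list above applied without any extra flags.
ansible-lint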

@@ -1,52 +0,0 @@
---
# yaml-language-server: $schema=https://taskfile.dev/schema.json
version: "3"

vars:
  PYTHON_BIN: python3

env:
  PATH: "{{.ROOT_DIR}}/.venv/bin:$PATH"
  VIRTUAL_ENV: "{{.ROOT_DIR}}/.venv"
  ANSIBLE_COLLECTIONS_PATH: "{{.ROOT_DIR}}/.venv/galaxy"
  ANSIBLE_ROLES_PATH: "{{.ROOT_DIR}}/.venv/galaxy/ansible_roles"
  ANSIBLE_VARS_ENABLED: "host_group_vars,community.sops.sops"

tasks:
  deps:
    desc: Set up Ansible dependencies for the environment
    cmds:
      - task: .venv

  run:
    desc: Run an Ansible playbook for configuring a cluster
    summary: |
      Args:
        cluster: Cluster to run command against (required)
        playbook: Playbook to run (required)
    prompt: Run Ansible playbook '{{.playbook}}' against the '{{.cluster}}' cluster... continue?
    deps: ["deps"]
    cmd: |
      .venv/bin/ansible-playbook \
        --inventory {{.ANSIBLE_DIR}}/{{.cluster}}/inventory/hosts.yaml \
        {{.ANSIBLE_DIR}}/{{.cluster}}/playbooks/{{.playbook}}.yaml {{.CLI_ARGS}}
    preconditions:
      - { msg: "Argument (cluster) is required", sh: "test -n {{.cluster}}" }
      - { msg: "Argument (playbook) is required", sh: "test -n {{.playbook}}" }
      - { msg: "Venv not found", sh: "test -d {{.ROOT_DIR}}/.venv" }
      - { msg: "Inventory not found", sh: "test -f {{.ANSIBLE_DIR}}/{{.cluster}}/inventory/hosts.yaml" }
      - { msg: "Playbook not found", sh: "test -f {{.ANSIBLE_DIR}}/{{.cluster}}/playbooks/{{.playbook}}.yaml" }

  .venv:
    internal: true
    cmds:
      - true && {{.PYTHON_BIN}} -m venv {{.ROOT_DIR}}/.venv
      - .venv/bin/python3 -m pip install --upgrade pip setuptools wheel
      - .venv/bin/python3 -m pip install --upgrade --requirement {{.ANSIBLE_DIR}}/requirements.txt
      - .venv/bin/ansible-galaxy install --role-file "{{.ANSIBLE_DIR}}/requirements.yaml" --force
    sources:
      - "{{.ANSIBLE_DIR}}/requirements.txt"
      - "{{.ANSIBLE_DIR}}/requirements.yaml"
    generates:
      - "{{.ROOT_DIR}}/.venv/pyvenv.cfg"

@@ -1,104 +0,0 @@
---
version: "3"

x-task-vars: &task-vars
  node: "{{.node}}"
  ceph_disk: "{{.ceph_disk}}"
  ts: "{{.ts}}"
  jobName: "{{.jobName}}"

vars:
  waitForJobScript: "../_scripts/wait-for-k8s-job.sh"
  ts: '{{now | date "150405"}}'

tasks:
  wipe-node-aule:
    desc: Trigger a wipe of Rook-Ceph data on node "aule"
    cmds:
      - task: wipe-disk
        vars:
          node: "{{.node}}"
          ceph_disk: "/dev/disk/by-id/scsi-0HC_Volume_37460833"
      - task: wipe-data
        vars:
          node: "{{.node}}"
    vars:
      node: aule

  wipe-node-orome:
    desc: Trigger a wipe of Rook-Ceph data on node "orome"
    cmds:
      - task: wipe-disk
        vars:
          node: "{{.node}}"
          ceph_disk: "/dev/disk/by-id/scsi-0HC_Volume_37645333"
      - task: wipe-data
        vars:
          node: "{{.node}}"
    vars:
      node: orome

  wipe-node-eonwe:
    desc: Trigger a wipe of Rook-Ceph data on node "eonwe"
    cmds:
      - task: wipe-disk
        vars:
          node: "{{.node}}"
          ceph_disk: "/dev/disk/by-id/scsi-0HC_Volume_37460887"
      - task: wipe-data
        vars:
          node: "{{.node}}"
    vars:
      node: eonwe

  wipe-node-arlen:
    desc: Trigger a wipe of Rook-Ceph data on node "arlen"
    cmds:
      - task: wipe-disk
        vars:
          node: "{{.node}}"
          ceph_disk: "/dev/disk/by-id/scsi-0HC_Volume_37460897"
      - task: wipe-data
        vars:
          node: "{{.node}}"
    vars:
      node: arlen

  wipe-disk:
    desc: Wipe all remnants of rook-ceph from a given disk (ex. task rook:wipe-disk node=aule ceph_disk="/dev/nvme0n1")
    silent: true
    internal: true
    cmds:
      - envsubst < <(cat {{.wipeRookDiskJobTemplate}}) | kubectl apply -f -
      - bash {{.waitForJobScript}} {{.wipeCephDiskJobName}} default
      - kubectl -n default wait job/{{.wipeCephDiskJobName}} --for condition=complete --timeout=1m
      - kubectl -n default logs job/{{.wipeCephDiskJobName}} --container list
      - kubectl -n default delete job {{.wipeCephDiskJobName}}
    vars:
      node: '{{ or .node (fail "`node` is required") }}'
      ceph_disk: '{{ or .ceph_disk (fail "`ceph_disk` is required") }}'
      jobName: 'wipe-disk-{{- .node -}}-{{- .ceph_disk | replace "/" "-" -}}-{{- .ts -}}'
      wipeRookDiskJobTemplate: "WipeDiskJob.tmpl.yaml"
    env: *task-vars
    preconditions:
      - sh: test -f {{.waitForJobScript}}
      - sh: test -f {{.wipeRookDiskJobTemplate}}

  wipe-data:
    desc: Wipe all remnants of rook-ceph from a given disk (ex. task rook:wipe-data node=aule)
    silent: true
    internal: true
    cmds:
      - envsubst < <(cat {{.wipeRookDataJobTemplate}}) | kubectl apply -f -
      - bash {{.waitForJobScript}} {{.wipeRookDataJobName}} default
      - kubectl -n default wait job/{{.wipeRookDataJobName}} --for condition=complete --timeout=1m
      - kubectl -n default logs job/{{.wipeRookDataJobName}} --container list
      - kubectl -n default delete job {{.wipeRookDataJobName}}
    vars:
      node: '{{ or .node (fail "`node` is required") }}'
      jobName: "wipe-rook-data-{{- .node -}}-{{- .ts -}}"
      wipeRookDataJobTemplate: "WipeRookDataJob.tmpl.yaml"
    env: *task-vars
    preconditions:
      - sh: test -f {{.waitForJobScript}}
      - sh: test -f {{.wipeRookDataJobTemplate}}
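
Roughly, wipe-disk renders the WipeDiskJob template below with envsubst and waits for the Job to finish; a hand-run equivalent might look like this (node, disk, and timestamp values are illustrative):

export node=aule ceph_disk=/dev/nvme0n1 ts=150405
export jobName="wipe-disk-${node}-$(echo "${ceph_disk}" | tr / -)-${ts}"   # "/" is not valid in a Job name
envsubst < WipeDiskJob.tmpl.yaml | kubectl apply -f -
kubectl -n default wait job/"${jobName}" --for condition=complete --timeout=1m
kubectl -n default logs job/"${jobName}" --container list
kubectl -n default delete job "${jobName}"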

@@ -1,26 +0,0 @@
---
apiVersion: batch/v1
kind: Job
metadata:
  name: "${jobName}"
  namespace: "default"
spec:
  ttlSecondsAfterFinished: 3600
  template:
    spec:
      automountServiceAccountToken: false
      restartPolicy: Never
      nodeName: ${node}
      containers:
        - name: disk-wipe
          image: docker.io/library/alpine:3.20.0
          securityContext:
            privileged: true
          resources: {}
          command: ["/bin/sh", "-c"]
          args:
            - apk add --no-cache sgdisk util-linux parted;
              sgdisk --zap-all ${ceph_disk};
              blkdiscard ${ceph_disk};
              dd if=/dev/zero bs=1M count=10000 oflag=direct of=${ceph_disk};
              partprobe ${ceph_disk};

@@ -1,29 +0,0 @@
---
apiVersion: batch/v1
kind: Job
metadata:
  name: "${jobName}"
  namespace: "default"
spec:
  ttlSecondsAfterFinished: 3600
  template:
    spec:
      automountServiceAccountToken: false
      restartPolicy: Never
      nodeName: ${node}
      containers:
        - name: disk-wipe
          image: docker.io/library/alpine:3.20.0
          securityContext:
            privileged: true
          resources: {}
          command: ["/bin/sh", "-c"]
          args:
            - rm -rf /mnt/host_var/lib/rook
          volumeMounts:
            - mountPath: /mnt/host_var
              name: host-var
      volumes:
        - name: host-var
          hostPath:
            path: /var

@@ -1,19 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: my-pod
spec:
  containers:
    - name: disk-wipe
      image: docker.io/library/alpine:3.20.0
      securityContext:
        privileged: true
      resources: {}
      command: ["/bin/sh", "-c"]
      args:
        - apk add --no-cache sgdisk util-linux parted e2fsprogs;
          sgdisk --zap-all /dev/nvme1n1;
          blkdiscard /dev/nvme1n1;
          dd if=/dev/zero bs=1M count=10000 oflag=direct of=/dev/nvme1n1;
          sgdisk /dev/nvme1n1
          partprobe /dev/nvme1n1;
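
This last manifest looks like a scratch Pod kept for one-off manual wipes rather than part of the task flow above; a hedged way it would have been run (the manifest filename is illustrative):

kubectl apply -f wipe-pod.yaml
kubectl logs -f my-pod
kubectl delete pod my-pod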

@@ -1,116 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2beta2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2beta2
kind: HelmRelease
metadata:
  name: jellyfin
  namespace: default
spec:
  interval: 30m
  chart:
    spec:
      chart: app-template
      version: 3.1.0
      sourceRef:
        kind: HelmRepository
        name: bjw-s
        namespace: flux-system
  install:
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      retries: 3
      strategy: rollback
  values:
    controllers:
      jellyfin:
        type: statefulset
        annotations:
          reloader.stakater.com/auto: "true"
        containers:
          app:
            image:
              repository: jellyfin/jellyfin
              tag: 10.8.13
            env:
              NVIDIA_VISIBLE_DEVICES: "all"
              NVIDIA_DRIVER_CAPABILITIES: "compute,video,utility"
              DOTNET_SYSTEM_IO_DISABLEFILELOCKING: "true"
              JELLYFIN_FFmpeg__probesize: 50000000
              JELLYFIN_FFmpeg__analyzeduration: 50000000
              JELLYFIN_PublishedServerUrl: jelly.hsn.dev
              TZ: America/Chicago
            probes:
              liveness: &probes
                enabled: true
                custom: true
                spec:
                  httpGet:
                    path: /health
                    port: &port 8096
                  initialDelaySeconds: 0
                  periodSeconds: 10
                  timeoutSeconds: 1
                  failureThreshold: 3
              readiness: *probes
              startup:
                enabled: false
            resources:
              requests:
                nvidia.com/gpu: 1 # requesting 1 GPU
                cpu: 100m
                memory: 512Mi
              limits:
                nvidia.com/gpu: 1
                memory: 4Gi
        pod:
          runtimeClassName: nvidia
          enableServiceLinks: false
          nodeSelector:
            nvidia.com/gpu.present: "true"
          securityContext:
            runAsUser: 568
            runAsGroup: 568
            fsGroup: 568
            fsGroupChangePolicy: OnRootMismatch
            supplementalGroups: [44, 105, 10000]
    service:
      app:
        controller: jellyfin
        ports:
          http:
            port: *port
    ingress:
      app:
        enabled: true
        className: external-nginx
        annotations:
          external-dns.alpha.kubernetes.io/cloudflare-proxied: "true"
          external-dns.alpha.kubernetes.io/target: external.hsn.dev
        hosts:
          - host: &host "jelly.hsn.dev"
            paths:
              - path: /
                service:
                  identifier: app
                  port: http
        tls:
          - hosts:
              - *host
    persistence:
      config:
        existingClaim: jellyfin
        enabled: true
      transcode:
        type: emptyDir
        globalMounts:
          - path: /transcode
      media:
        enabled: true
        type: nfs
        server: 10.1.1.12
        path: /mnt/users/Media
        globalMounts:
          - path: /media
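
The nvidia runtimeClassName, nodeSelector, and nvidia.com/gpu requests above assume the NVIDIA device plugin (see the "Add nvidia device plugin" commit) is advertising GPUs on labelled nodes; a hedged sanity check, with the node name as a placeholder:

kubectl get nodes -l nvidia.com/gpu.present=true
kubectl get node <gpu-node> -o jsonpath='{.status.allocatable.nvidia\.com/gpu}'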

@@ -1,8 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
- ./helmrelease.yaml
- ../../../../templates/volsync

@@ -1,23 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: &app jellyfin
  namespace: flux-system
spec:
  dependsOn:
    - name: external-secrets-stores
  path: ./kubernetes/apps/default/jellyfin/app
  prune: true
  sourceRef:
    kind: GitRepository
    name: homelab
  wait: false
  interval: 30m
  retryInterval: 1m
  timeout: 5m
  postBuild:
    substitute:
      APP: *app
      VOLSYNC_CAPACITY: 10Gi
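
A hedged way to preview what Flux would render for this Kustomization, including the APP and VOLSYNC_CAPACITY values substituted into the volsync template (paths taken from the manifest above; flags per the flux CLI):

flux build kustomization jellyfin \
  --path ./kubernetes/apps/default/jellyfin/app \
  --kustomization-file ./kubernetes/apps/default/jellyfin/ks.yaml \
  --dry-run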

@@ -1,26 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: home-assistant
spec:
  secretStoreRef:
    kind: ClusterSecretStore
    name: onepassword-connect
  target:
    name: home-assistant-secret
    creationPolicy: Owner
    template:
      engineVersion: v2
      data:
        HASS_ELEVATION: "{{ .hass_elevation }}"
        HASS_LATITUDE: "{{ .hass_latitude }}"
        HASS_LONGITUDE: "{{ .hass_longitude }}"
  dataFrom:
    - extract:
        key: home-assistant
      rewrite:
        - regexp:
            source: "(.*)"
            target: "hass_$1"

@@ -1,90 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2beta2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2beta2
kind: HelmRelease
metadata:
  name: home-assistant
spec:
  interval: 30m
  chart:
    spec:
      chart: app-template
      version: 3.1.0
      sourceRef:
        kind: HelmRepository
        name: bjw-s
        namespace: flux-system
  install:
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      strategy: rollback
      retries: 3
  values:
    controllers:
      home-assistant:
        annotations:
          reloader.stakater.com/auto: "true"
        pod:
          annotations:
            k8s.v1.cni.cncf.io/networks: |
              [{
                "name":"multus-iot",
                "namespace": "kube-system",
                "ips": ["10.1.3.151/24"]
              }]
          securityContext:
            runAsUser: 568
            runAsGroup: 568
            runAsNonRoot: true
            fsGroup: 568
            fsGroupChangePolicy: OnRootMismatch
        containers:
          app:
            image:
              repository: ghcr.io/home-assistant/home-assistant
              tag: 2024.5.5
            env:
              TZ: America/Chicago
              HASS_HTTP_TRUSTED_PROXY_1: 10.244.0.0/16
            envFrom:
              - secretRef:
                  name: home-assistant-secret
            resources:
              requests:
                cpu: 10m
              limits:
                memory: 1Gi
    service:
      app:
        controller: home-assistant
        ports:
          http:
            port: 8123
    ingress:
      app:
        className: internal-nginx
        hosts:
          - host: &host hass.jahanson.tech
            paths:
              - path: /
                service:
                  identifier: app
                  port: http
        tls:
          - hosts: [*host]
    persistence:
      config:
        existingClaim: home-assistant
      logs:
        type: emptyDir
        globalMounts:
          - path: /config/logs
      tts:
        type: emptyDir
        globalMounts:
          - path: /config/tts
      tmp:
        type: emptyDir

@@ -1,8 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./externalsecret.yaml
- ./helmrelease.yaml
- ../../../../templates/volsync

@@ -1,29 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: &app home-assistant
  namespace: flux-system
spec:
  targetNamespace: home-automation
  commonMetadata:
    labels:
      app.kubernetes.io/name: *app
  dependsOn:
    - name: external-secrets-stores
    - name: openebs-system
    - name: volsync
  path: ./kubernetes/apps/home-automation/home-assistant/app
  prune: true
  sourceRef:
    kind: GitRepository
    name: homelab
  wait: false
  interval: 30m
  retryInterval: 1m
  timeout: 5m
  postBuild:
    substitute:
      APP: *app
      VOLSYNC_CAPACITY: 5Gi

@@ -1,9 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
# Pre Flux-Kustomizations
- ./namespace.yaml
# Flux-Kustomizations
- ./mosquitto/ks.yaml

@@ -1,107 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2beta2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2beta2
kind: HelmRelease
metadata:
  name: &app matter-server
spec:
  interval: 15m
  chart:
    spec:
      chart: app-template
      version: 3.2.1
      interval: 15m
      sourceRef:
        kind: HelmRepository
        name: bjw-s
        namespace: flux-system
  maxHistory: 3
  install:
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      strategy: rollback
      retries: 3
  values:
    controllers:
      matter-server:
        type: statefulset
        annotations:
          reloader.stakater.com/auto: "true"
        pod:
          annotations:
            k8s.v1.cni.cncf.io/networks: |
              [{
                "name":"multus-iot",
                "namespace": "kube-system",
                "ips": ["10.1.3.152/24"]
              }]
          securityContext:
            runAsUser: 568
            runAsGroup: 568
            runAsNonRoot: true
            fsGroup: 568
            fsGroupChangePolicy: OnRootMismatch
        containers:
          app:
            image:
              repository: ghcr.io/home-assistant-libs/python-matter-server
              tag: 6.0.1
              pullPolicy: IfNotPresent
            env:
              TZ: "America/Chicago"
              MATTER_SERVER__INSTANCE_NAME: Matter-Server
              MATTER_SERVER__PORT: &port 5580
              MATTER_SERVER__APPLICATION_URL: &host matter.jahanson.tech
              MATTER_SERVER__LOG_LEVEL: info
            probes:
              liveness:
                enabled: true
              readiness:
                enabled: true
              startup:
                enabled: true
                spec:
                  failureThreshold: 30
                  periodSeconds: 5
            resources:
              requests:
                memory: "100M"
              limits:
                memory: "500M"
    service:
      app:
        controller: *app
        type: LoadBalancer
        annotations:
          io.cilium/lb-ipam-ips: "10.1.1.37"
        ports:
          api:
            enabled: true
            primary: true
            protocol: TCP
            port: *port
        externalTrafficPolicy: Cluster
    persistence:
      config:
        enabled: true
        existingClaim: matter-server
        advancedMounts:
          matter-server:
            app:
              - path: "/data"
    ingress:
      app:
        className: internal-nginx
        hosts:
          - host: *host
            paths:
              - path: /
                service:
                  identifier: app
                  port: http
        tls:
          - hosts: [*host]

@@ -1,7 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./helmrelease.yaml
- ../../../../templates/volsync

@@ -1,28 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: &app matter-server
  namespace: flux-system
spec:
  targetNamespace: home-automation
  commonMetadata:
    labels:
      app.kubernetes.io/name: *app
  dependsOn:
    - name: openebs-system
    - name: volsync
  path: ./kubernetes/apps/home-automation/matter-server/app
  prune: true
  sourceRef:
    kind: GitRepository
    name: homelab
  wait: false
  interval: 30m
  retryInterval: 1m
  timeout: 5m
  postBuild:
    substitute:
      APP: *app
      VOLSYNC_CAPACITY: 1Gi

@@ -1,9 +0,0 @@
per_listener_settings false
listener 1883
allow_anonymous false
persistence true
persistence_location /data
autosave_interval 1800
connection_messages false
autosave_interval 60
password_file /mosquitto/external_config/mosquitto_pwd

@@ -1,27 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: mosquitto
spec:
  secretStoreRef:
    kind: ClusterSecretStore
    name: onepassword-connect
  target:
    name: mosquitto-secret
    creationPolicy: Owner
    template:
      engineVersion: v2
      data:
        mosquitto_pwd: |
          {{ .mosquitto_username }}:{{ .mosquitto_password }}
          {{ .mosquitto_zwave_username }}:{{ .mosquitto_zwave_password }}
          {{ .mosquitto_home_assistant_username }}:{{ .mosquitto_home_assistant_password }}
  dataFrom:
    - extract:
        key: mosquitto
      rewrite:
        - regexp:
            source: "(.*)"
            target: "mosquitto_$1"

@@ -1,105 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2beta2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: &app mosquitto
spec:
  interval: 30m
  chart:
    spec:
      chart: app-template
      version: 3.2.1
      interval: 30m
      sourceRef:
        kind: HelmRepository
        name: bjw-s
        namespace: flux-system
  values:
    controllers:
      mosquitto:
        annotations:
          reloader.stakater.com/auto: "true"
        pod:
          securityContext:
            runAsUser: 568
            runAsGroup: 568
            fsGroup: 568
            fsGroupChangePolicy: OnRootMismatch
        initContainers:
          init-config:
            image:
              repository: public.ecr.aws/docker/library/eclipse-mosquitto
              tag: 2.0.18
            command:
              - "/bin/sh"
              - "-c"
            args:
              - cp /tmp/secret/* /mosquitto/external_config/;
                mosquitto_passwd -U /mosquitto/external_config/mosquitto_pwd;
                chmod 0600 /mosquitto/external_config/mosquitto_pwd;
        containers:
          app:
            image:
              repository: public.ecr.aws/docker/library/eclipse-mosquitto
              tag: 2.0.18
            probes:
              liveness:
                enabled: true
              readiness:
                enabled: true
              startup:
                enabled: true
                spec:
                  failureThreshold: 30
                  periodSeconds: 5
            resources:
              requests:
                cpu: 5m
                memory: 10M
              limits:
                memory: 10M
    service:
      app:
        controller: mosquitto
        type: LoadBalancer
        annotations:
          external-dns.alpha.kubernetes.io/hostname: "mqtt.jahanson.tech"
          io.cilium/lb-ipam-ips: "10.1.1.36"
        externalTrafficPolicy: Local
        ports:
          mqtt:
            enabled: true
            port: 1883
    persistence:
      data:
        existingClaim: *app
        advancedMounts:
          mosquitto:
            app:
              - path: /data
      mosquitto-configfile:
        type: configMap
        name: mosquitto-configmap
        advancedMounts:
          mosquitto:
            app:
              - path: /mosquitto/config/mosquitto.conf
                subPath: mosquitto.conf
      mosquitto-secret:
        type: secret
        name: mosquitto-secret
        advancedMounts:
          mosquitto:
            init-config:
              - path: /tmp/secret
      mosquitto-externalconfig:
        type: emptyDir
        globalMounts:
          - path: /mosquitto/external_config
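
In the init-config flow above, mosquitto_passwd -U rewrites the plaintext user:password lines rendered by the ExternalSecret into hashed entries, matching the password_file path set in mosquitto.conf. A hedged client-side check once the broker is reachable (credentials are placeholders):

mosquitto_sub -h mqtt.jahanson.tech -p 1883 -u some-user -P 'some-password' -t 'test/#' -v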

@@ -1,14 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ./helmrelease.yaml
  - ./externalsecret.yaml
  - ../../../../templates/volsync
configMapGenerator:
  - name: mosquitto-configmap
    files:
      - config/mosquitto.conf
generatorOptions:
  disableNameSuffixHash: true

@@ -1,28 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/kustomization-kustomize-v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: &appname mosquitto
  namespace: flux-system
spec:
  targetNamespace: home-automation
  commonMetadata:
    labels:
      app.kubernetes.io/name: *appname
  interval: 10m
  path: "./kubernetes/apps/home-automation/mosquitto/app"
  prune: true
  sourceRef:
    kind: GitRepository
    name: homelab
  wait: true
  dependsOn:
    - name: openebs
    - name: volsync
    - name: external-secrets-stores
  postBuild:
    substitute:
      APP: *appname
      VOLSYNC_CLAIM: mosquitto-data
      VOLSYNC_CAPACITY: 512Mi

@@ -1,8 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: home-automation
  labels:
    kustomize.toolkit.fluxcd.io/prune: disabled
    volsync.backube/privileged-movers: "true"

@@ -1,588 +0,0 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.14.0
creationTimestamp: null
name: ciliumbgppeeringpolicies.cilium.io
spec:
group: cilium.io
names:
categories:
- cilium
- ciliumbgp
kind: CiliumBGPPeeringPolicy
listKind: CiliumBGPPeeringPolicyList
plural: ciliumbgppeeringpolicies
shortNames:
- bgpp
singular: ciliumbgppeeringpolicy
scope: Cluster
versions:
- additionalPrinterColumns:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v2alpha1
schema:
openAPIV3Schema:
description: CiliumBGPPeeringPolicy is a Kubernetes third-party resource for
instructing Cilium's BGP control plane to create virtual BGP routers.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Spec is a human readable description of a BGP peering policy
properties:
nodeSelector:
description: "NodeSelector selects a group of nodes where this BGP
Peering Policy applies. \n If empty / nil this policy applies to
all nodes."
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that
contains values, a key, and an operator that relates the key
and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to
a set of values. Valid operators are In, NotIn, Exists
and DoesNotExist.
enum:
- In
- NotIn
- Exists
- DoesNotExist
type: string
values:
description: values is an array of string values. If the
operator is In or NotIn, the values array must be non-empty.
If the operator is Exists or DoesNotExist, the values
array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
description: MatchLabelsValue represents the value from the
MatchLabels {key,value} pair.
maxLength: 63
pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$
type: string
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator
is "In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
virtualRouters:
description: A list of CiliumBGPVirtualRouter(s) which instructs the
BGP control plane how to instantiate virtual BGP routers.
items:
description: CiliumBGPVirtualRouter defines a discrete BGP virtual
router configuration.
properties:
exportPodCIDR:
default: false
description: ExportPodCIDR determines whether to export the
Node's private CIDR block to the configured neighbors.
type: boolean
localASN:
description: LocalASN is the ASN of this virtual router. Supports
extended 32bit ASNs
format: int64
maximum: 4294967295
minimum: 0
type: integer
neighbors:
description: Neighbors is a list of neighboring BGP peers for
this virtual router
items:
description: CiliumBGPNeighbor is a neighboring peer for use
in a CiliumBGPVirtualRouter configuration.
properties:
advertisedPathAttributes:
description: AdvertisedPathAttributes can be used to apply
additional path attributes to selected routes when advertising
them to the peer. If empty / nil, no additional path
attributes are advertised.
items:
description: CiliumBGPPathAttributes can be used to
apply additional path attributes to matched routes
when advertising them to a BGP peer.
properties:
communities:
description: Communities defines a set of community
values advertised in the supported BGP Communities
path attributes. If nil / not set, no BGP Communities
path attribute will be advertised.
properties:
large:
description: Large holds a list of the BGP Large
Communities Attribute (RFC 8092) values.
items:
description: BGPLargeCommunity type represents
a value of the BGP Large Communities Attribute
(RFC 8092), as three 4-byte decimal numbers
separated by colons.
pattern: ^([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5]):([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5]):([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5])$
type: string
type: array
standard:
description: Standard holds a list of "standard"
32-bit BGP Communities Attribute (RFC 1997)
values defined as numeric values.
items:
description: BGPStandardCommunity type represents
a value of the "standard" 32-bit BGP Communities
Attribute (RFC 1997) as a 4-byte decimal
number or two 2-byte decimal numbers separated
by a colon (<0-65535>:<0-65535>). For example,
no-export community value is 65553:65281.
pattern: ^([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5])$|^([0-9]|[1-9][0-9]{1,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5]):([0-9]|[1-9][0-9]{1,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
type: string
type: array
wellKnown:
description: WellKnown holds a list "standard"
32-bit BGP Communities Attribute (RFC 1997)
values defined as well-known string aliases
to their numeric values.
items:
description: "BGPWellKnownCommunity type represents
a value of the \"standard\" 32-bit BGP Communities
Attribute (RFC 1997) as a well-known string
alias to its numeric value. Allowed values
and their mapping to the numeric values:
\n internet = 0x00000000
(0:0) planned-shut = 0xffff0000
(65535:0) accept-own = 0xffff0001
(65535:1) route-filter-translated-v4 = 0xffff0002
(65535:2) route-filter-v4 = 0xffff0003
(65535:3) route-filter-translated-v6 = 0xffff0004
(65535:4) route-filter-v6 = 0xffff0005
(65535:5) llgr-stale = 0xffff0006
(65535:6) no-llgr = 0xffff0007
(65535:7) blackhole = 0xffff029a
(65535:666) no-export =
0xffffff01\t(65535:65281) no-advertise =
0xffffff02 (65535:65282) no-export-subconfed
\ = 0xffffff03 (65535:65283) no-peer
\ = 0xffffff04 (65535:65284)"
enum:
- internet
- planned-shut
- accept-own
- route-filter-translated-v4
- route-filter-v4
- route-filter-translated-v6
- route-filter-v6
- llgr-stale
- no-llgr
- blackhole
- no-export
- no-advertise
- no-export-subconfed
- no-peer
type: string
type: array
type: object
localPreference:
description: LocalPreference defines the preference
value advertised in the BGP Local Preference path
attribute. As Local Preference is only valid for
iBGP peers, this value will be ignored for eBGP
peers (no Local Preference path attribute will
be advertised). If nil / not set, the default
Local Preference of 100 will be advertised in
the Local Preference path attribute for iBGP peers.
format: int64
maximum: 4294967295
minimum: 0
type: integer
selector:
description: Selector selects a group of objects
of the SelectorType resulting into routes that
will be announced with the configured Attributes.
If nil / not set, all objects of the SelectorType
are selected.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are
ANDed.
items:
description: A label selector requirement
is a selector that contains values, a key,
and an operator that relates the key and
values.
properties:
key:
description: key is the label key that
the selector applies to.
type: string
operator:
description: operator represents a key's
relationship to a set of values. Valid
operators are In, NotIn, Exists and
DoesNotExist.
enum:
- In
- NotIn
- Exists
- DoesNotExist
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty.
If the operator is Exists or DoesNotExist,
the values array must be empty. This
array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
description: MatchLabelsValue represents the
value from the MatchLabels {key,value} pair.
maxLength: 63
pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is
"In", and the values array contains only "value".
The requirements are ANDed.
type: object
type: object
selectorType:
description: 'SelectorType defines the object type
on which the Selector applies: - For "PodCIDR"
the Selector matches k8s CiliumNode resources
(path attributes apply to routes announced for
PodCIDRs of selected CiliumNodes. Only affects
routes of cluster scope / Kubernetes IPAM CIDRs,
not Multi-Pool IPAM CIDRs). - For "CiliumLoadBalancerIPPool"
the Selector matches CiliumLoadBalancerIPPool
custom resources (path attributes apply to routes
announced for selected CiliumLoadBalancerIPPools).
- For "CiliumPodIPPool" the Selector matches CiliumPodIPPool
custom resources (path attributes apply to routes
announced for allocated CIDRs of selected CiliumPodIPPools).'
enum:
- PodCIDR
- CiliumLoadBalancerIPPool
- CiliumPodIPPool
type: string
required:
- selectorType
type: object
type: array
authSecretRef:
description: AuthSecretRef is the name of the secret to
use to fetch a TCP authentication password for this
peer.
type: string
connectRetryTimeSeconds:
default: 120
description: ConnectRetryTimeSeconds defines the initial
value for the BGP ConnectRetryTimer (RFC 4271, Section
8).
format: int32
maximum: 2147483647
minimum: 1
type: integer
eBGPMultihopTTL:
default: 1
description: EBGPMultihopTTL controls the multi-hop feature
for eBGP peers. Its value defines the Time To Live (TTL)
value used in BGP packets sent to the neighbor. The
value 1 implies that eBGP multi-hop feature is disabled
(only a single hop is allowed). This field is ignored
for iBGP peers.
format: int32
maximum: 255
minimum: 1
type: integer
families:
description: "Families, if provided, defines a set of
AFI/SAFIs the speaker will negotiate with its peer.
\n If this slice is not provided, the default families
of IPv6 and IPv4 will be used."
items:
description: CiliumBGPFamily represents a AFI/SAFI address
family pair.
properties:
afi:
description: Afi is the Address Family Identifier
(AFI) of the family.
enum:
- ipv4
- ipv6
- l2vpn
- ls
- opaque
type: string
safi:
description: Safi is the Subsequent Address Family
Identifier (SAFI) of the family.
enum:
- unicast
- multicast
- mpls_label
- encapsulation
- vpls
- evpn
- ls
- sr_policy
- mup
- mpls_vpn
- mpls_vpn_multicast
- route_target_constraints
- flowspec_unicast
- flowspec_vpn
- key_value
type: string
required:
- afi
- safi
type: object
type: array
gracefulRestart:
description: GracefulRestart defines graceful restart
parameters which are negotiated with this neighbor.
If empty / nil, the graceful restart capability is disabled.
properties:
enabled:
description: Enabled flag, when set enables graceful
restart capability.
type: boolean
restartTimeSeconds:
default: 120
description: RestartTimeSeconds is the estimated time
it will take for the BGP session to be re-established
with the peer after a restart. After this period, the
peer will remove stale routes. This is described in
RFC 4724, Section 4.2.
format: int32
maximum: 4095
minimum: 1
type: integer
required:
- enabled
type: object
holdTimeSeconds:
default: 90
description: HoldTimeSeconds defines the initial value
for the BGP HoldTimer (RFC 4271, Section 4.2). Updating
this value will cause a session reset.
format: int32
maximum: 65535
minimum: 3
type: integer
keepAliveTimeSeconds:
default: 30
description: KeepaliveTimeSeconds defines the initial
value for the BGP KeepaliveTimer (RFC 4271, Section
8). It can not be larger than HoldTimeSeconds. Updating
this value will cause a session reset.
format: int32
maximum: 65535
minimum: 1
type: integer
peerASN:
description: PeerASN is the ASN of the peer BGP router.
Supports extended 32-bit ASNs.
format: int64
maximum: 4294967295
minimum: 0
type: integer
peerAddress:
description: PeerAddress is the IP address of the peer.
This must be in CIDR notation and use a /32 to express
a single host.
format: cidr
type: string
peerPort:
default: 179
description: PeerPort is the TCP port of the peer. 1-65535
is the range of valid port numbers that can be specified.
If unset, defaults to 179.
format: int32
maximum: 65535
minimum: 1
type: integer
required:
- peerASN
- peerAddress
type: object
minItems: 1
type: array
podIPPoolSelector:
description: "PodIPPoolSelector selects CiliumPodIPPools based
on labels. The virtual router will announce allocated CIDRs
of matching CiliumPodIPPools. \n If empty / nil no CiliumPodIPPools
will be announced."
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector
that contains values, a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are In, NotIn,
Exists and DoesNotExist.
enum:
- In
- NotIn
- Exists
- DoesNotExist
type: string
values:
description: values is an array of string values.
If the operator is In or NotIn, the values array
must be non-empty. If the operator is Exists or
DoesNotExist, the values array must be empty. This
array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
description: MatchLabelsValue represents the value from
the MatchLabels {key,value} pair.
maxLength: 63
pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$
type: string
description: matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels map is equivalent
to an element of matchExpressions, whose key field is
"key", the operator is "In", and the values array contains
only "value". The requirements are ANDed.
type: object
type: object
serviceSelector:
description: "ServiceSelector selects a group of load balancer
services which this virtual router will announce. The loadBalancerClass
for a service must be nil or specify a class supported by
Cilium, e.g. \"io.cilium/bgp-control-plane\". Refer to the
following document for additional details regarding load balancer
classes: \n https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class
\n If empty / nil no services will be announced."
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector
that contains values, a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are In, NotIn,
Exists and DoesNotExist.
enum:
- In
- NotIn
- Exists
- DoesNotExist
type: string
values:
description: values is an array of string values.
If the operator is In or NotIn, the values array
must be non-empty. If the operator is Exists or
DoesNotExist, the values array must be empty. This
array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
description: MatchLabelsValue represents the value from
the MatchLabels {key,value} pair.
maxLength: 63
pattern: ^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$
type: string
description: matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels map is equivalent
to an element of matchExpressions, whose key field is
"key", the operator is "In", and the values array contains
only "value". The requirements are ANDed.
type: object
type: object
required:
- localASN
- neighbors
type: object
minItems: 1
type: array
required:
- virtualRouters
type: object
required:
- metadata
type: object
served: true
storage: true
subresources: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
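For reference, a minimal sketch of how the path-attribute fields described in the schema above fit together on a neighbor. The resource name, ASNs, peer address, and community values are hypothetical, and the layout follows my reading of the v2alpha1 API rather than anything shown in this diff:

apiVersion: cilium.io/v2alpha1
kind: CiliumBGPPeeringPolicy
metadata:
  name: example-path-attributes   # hypothetical
spec:
  virtualRouters:
    - localASN: 64512
      neighbors:
        - peerAddress: "10.1.1.1/32"
          peerASN: 64512                 # same ASN as localASN -> iBGP, so localPreference is honored
          advertisedPathAttributes:
            - selectorType: CiliumLoadBalancerIPPool   # attributes apply to routes announced for selected IP pools
              localPreference: 150
              communities:
                standard: ["64512:100"]  # hypothetical community value
                wellKnown: ["no-export"]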


@ -1,36 +0,0 @@
---
apiVersion: cilium.io/v2alpha1
kind: CiliumBGPPeeringPolicy
# comments courtesy of JJGadgets
# MAKE SURE CRDs ARE INSTALLED IN CLUSTER VIA cilium-config ConfigMap OR Cilium HelmRelease/values.yaml (bgpControlPlane.enabled: true), BEFORE THIS IS APPLIED!
# "CiliumBGPPeeringPolicy" Custom Resource will replace the old MetalLB BGP's "bgp-config" ConfigMap
# "CiliumBGPPeeringPolicy" is used with `bgpControlPlane.enabled: true` which uses GoBGP, NOT the old `bgp.enabled: true` which uses MetalLB
metadata:
name: bgp-loadbalancer-ip-main
spec:
nodeSelector:
matchLabels:
kubernetes.io/os: "linux" # match all Linux nodes, change this to match more granularly if more than 1 PeeringPolicy is to be used throughout cluster
virtualRouters:
- localASN: 64512
exportPodCIDR: false
serviceSelector: # replaces address-pools: instead of defining the range of IPs that can be assigned to LoadBalancer services, services must match the selectors below for their LB IPs to be announced
matchExpressions:
- {
key: thisFakeSelector,
operator: NotIn,
values: ["will-match-and-announce-all-services"],
}
neighbors:
- peerAddress: "10.1.1.1/32" # unlike bgp-config ConfigMap, peerAddress needs to be in CIDR notation
peerASN: 64512
---
# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumloadbalancerippool_v2alpha1.json
apiVersion: "cilium.io/v2alpha1"
kind: CiliumLoadBalancerIPPool
metadata:
name: main-pool
spec:
cidrs:
- cidr: 10.45.0.0/24
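As an illustration of the selector trick above: because the NotIn matcher is crafted to match every Service, any LoadBalancer Service whose loadBalancerClass is unset (or set to a class Cilium supports, e.g. "io.cilium/bgp-control-plane") receives an address from main-pool and is announced to the peer. A minimal sketch with a hypothetical name and label:

apiVersion: v1
kind: Service
metadata:
  name: example-app        # hypothetical
spec:
  type: LoadBalancer       # loadBalancerClass left unset so Cilium's LB-IPAM handles it
  selector:
    app: example-app       # hypothetical
  ports:
    - port: 80
      protocol: TCP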


@ -1,78 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json
apiVersion: helm.toolkit.fluxcd.io/v2beta2
kind: HelmRelease
metadata:
name: cilium
namespace: kube-system
spec:
interval: 30m
chart:
spec:
chart: cilium
version: 1.15.3
sourceRef:
kind: HelmRepository
name: cilium
namespace: flux-system
maxHistory: 2
install:
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
uninstall:
keepHistory: false
values:
cluster:
name: homelab
id: 1
hubble:
relay:
enabled: true
ui:
enabled: true
metrics:
enableOpenMetrics: true
prometheus:
enabled: true
operator:
prometheus:
enabled: true
ipam:
mode: kubernetes
kubeProxyReplacement: true
k8sServiceHost: 127.0.0.1
k8sServicePort: 7445
rollOutCiliumPods: true
cgroup:
automount:
enabled: false
hostRoot: /sys/fs/cgroup
bgp:
enabled: false
announce:
loadbalancerIP: true
podCIDR: false
bgpControlPlane:
enabled: true
securityContext:
capabilities:
ciliumAgent:
- CHOWN
- KILL
- NET_ADMIN
- NET_RAW
- IPC_LOCK
- SYS_ADMIN
- SYS_RESOURCE
- DAC_OVERRIDE
- FOWNER
- SETGID
- SETUID
cleanCiliumState:
- NET_ADMIN
- SYS_ADMIN
- SYS_RESOURCE


@ -1,23 +0,0 @@
# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumclusterwidenetworkpolicy_v2.json
---
apiVersion: cilium.io/v2
kind: CiliumClusterwideNetworkPolicy
metadata:
name: allow-ssh
spec:
description: ""
nodeSelector:
matchLabels:
# node-access: ssh
node-role.kubernetes.io/control-plane: "true"
ingress:
- fromEntities:
- cluster
- toPorts:
- ports:
- port: "22"
protocol: TCP
- icmps:
- fields:
- type: 8
family: IPv4


@ -1,27 +0,0 @@
# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumclusterwidenetworkpolicy_v2.json
---
apiVersion: cilium.io/v2
kind: CiliumClusterwideNetworkPolicy
metadata:
name: api-server
spec:
nodeSelector:
# apply to master nodes
matchLabels:
node-role.kubernetes.io/control-plane: 'true'
ingress:
# load balancer -> api server
- fromCIDR:
- 167.235.217.82/32
toPorts:
- ports:
- port: '6443'
protocol: TCP
egress:
# api server -> kubelet
- toEntities:
- remote-node
toPorts:
- ports:
- port: '10250'
protocol: TCP


@ -1,41 +0,0 @@
# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumclusterwidenetworkpolicy_v2.json
---
apiVersion: cilium.io/v2
kind: CiliumClusterwideNetworkPolicy
metadata:
name: cilium-health
specs:
- endpointSelector:
# apply to health endpoints
matchLabels:
reserved:health: ''
ingress:
# cilium agent -> cilium agent
- fromEntities:
- host
- remote-node
toPorts:
- ports:
- port: '4240'
protocol: TCP
- nodeSelector:
# apply to all nodes
matchLabels: {}
ingress:
# cilium agent -> cilium agent
- fromEntities:
- health
- remote-node
toPorts:
- ports:
- port: '4240'
protocol: TCP
egress:
# cilium agent -> cilium agent
- toEntities:
- health
- remote-node
toPorts:
- ports:
- port: '4240'
protocol: TCP


@ -1,26 +0,0 @@
# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumclusterwidenetworkpolicy_v2.json
---
apiVersion: cilium.io/v2
kind: CiliumClusterwideNetworkPolicy
metadata:
name: cilium-vxlan
spec:
nodeSelector:
# apply to all nodes
matchLabels: {}
ingress:
# node -> vxlan
- fromEntities:
- remote-node
toPorts:
- ports:
- port: '8472'
protocol: UDP
egress:
# node -> vxlan
- toEntities:
- remote-node
toPorts:
- ports:
- port: '8472'
protocol: UDP


@ -1,65 +0,0 @@
# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumnetworkpolicy_v2.json
---
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
name: core-dns
namespace: kube-system
specs:
- nodeSelector:
# apply to master nodes
matchLabels:
node-role.kubernetes.io/control-plane: 'true'
ingress:
# core dns -> api server
- fromEndpoints:
- matchLabels:
io.cilium.k8s.policy.serviceaccount: coredns
toPorts:
- ports:
- port: '6443'
protocol: TCP
- nodeSelector:
# apply to all nodes
matchLabels: {}
egress:
# kubelet -> core dns probes
- toEndpoints:
- matchLabels:
io.cilium.k8s.policy.serviceaccount: coredns
toPorts:
- ports:
- port: '8080'
protocol: TCP
- port: '8181'
protocol: TCP
- endpointSelector:
# apply to core dns pods
matchLabels:
io.cilium.k8s.policy.serviceaccount: coredns
ingress:
# kubelet -> core dns probes
- fromEntities:
- host
toPorts:
- ports:
- port: '8080'
protocol: TCP
- port: '8181'
protocol: TCP
egress:
# core dns -> api server
- toEntities:
- kube-apiserver
toPorts:
- ports:
- port: '6443'
protocol: TCP
# core dns -> upstream DNS
- toCIDR:
- 185.12.64.1/32
- 185.12.64.2/32
toPorts:
- ports:
- port: '53'
protocol: UDP


@ -1,27 +0,0 @@
# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumclusterwidenetworkpolicy_v2.json
---
apiVersion: cilium.io/v2
kind: CiliumClusterwideNetworkPolicy
metadata:
name: etcd
spec:
nodeSelector:
# apply to master nodes
matchLabels:
node-role.kubernetes.io/control-plane: 'true'
ingress:
# etcd peer -> etcd peer
- fromEntities:
- remote-node
toPorts:
- ports:
- port: '2380'
protocol: TCP
egress:
# etcd peer -> etcd peer
- toEntities:
- remote-node
toPorts:
- ports:
- port: '2380'
protocol: TCP


@ -1,15 +0,0 @@
# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumclusterwidenetworkpolicy_v2.json
---
apiVersion: "cilium.io/v2"
kind: CiliumClusterwideNetworkPolicy
metadata:
name: allow-specific-traffic
spec:
endpointSelector: {}
ingress:
- fromEntities:
- host
toPorts:
- ports:
- port: '6443'
protocol: TCP


@ -1,50 +0,0 @@
# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumnetworkpolicy_v2.json
---
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
name: hubble-relay
namespace: kube-system
specs:
- nodeSelector:
# apply to all nodes
matchLabels: {}
ingress:
# hubble relay -> hubble agent
- fromEndpoints:
- matchLabels:
io.cilium.k8s.policy.serviceaccount: hubble-relay
toPorts:
- ports:
- port: '4244'
protocol: TCP
egress:
# kubelet -> hubble relay probes
- toEndpoints:
- matchLabels:
io.cilium.k8s.policy.serviceaccount: hubble-relay
toPorts:
- ports:
- port: '4245'
protocol: TCP
- endpointSelector:
# apply to hubble relay pods
matchLabels:
io.cilium.k8s.policy.serviceaccount: hubble-relay
ingress:
# kubelet -> hubble relay probes
- fromEntities:
- host
toPorts:
- ports:
- port: '4245'
protocol: TCP
egress:
# hubble relay -> hubble agent
- toEntities:
- host
- remote-node
toPorts:
- ports:
- port: '4244'
protocol: TCP


@ -1,75 +0,0 @@
# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumnetworkpolicy_v2.json
---
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
name: hubble-ui
namespace: kube-system
specs:
- nodeSelector:
# apply to master nodes
matchLabels:
node-role.kubernetes.io/control-plane: ''
ingress:
# hubble ui -> api server
- fromEndpoints:
- matchLabels:
io.cilium.k8s.policy.serviceaccount: hubble-ui
toPorts:
- ports:
- port: '6443'
protocol: TCP
- endpointSelector:
# apply to core dns endpoints
matchLabels:
io.cilium.k8s.policy.serviceaccount: coredns
ingress:
# hubble ui -> core dns
- fromEndpoints:
- matchLabels:
io.cilium.k8s.policy.serviceaccount: hubble-ui
toPorts:
- ports:
- port: '53'
protocol: UDP
- endpointSelector:
# apply to hubble relay endpoints
matchLabels:
io.cilium.k8s.policy.serviceaccount: hubble-relay
ingress:
# hubble ui -> hubble relay
- fromEndpoints:
- matchLabels:
io.cilium.k8s.policy.serviceaccount: hubble-ui
toPorts:
- ports:
- port: '4245'
protocol: TCP
- endpointSelector:
# apply to hubble ui endpoints
matchLabels:
io.cilium.k8s.policy.serviceaccount: hubble-ui
egress:
# hubble ui -> api server
- toEntities:
- kube-apiserver
toPorts:
- ports:
- port: '6443'
protocol: TCP
# hubble ui -> hubble relay
- toEndpoints:
- matchLabels:
io.cilium.k8s.policy.serviceaccount: hubble-relay
toPorts:
- ports:
- port: '4245'
protocol: TCP
# hubble ui -> core dns
- toEndpoints:
- matchLabels:
io.cilium.k8s.policy.serviceaccount: coredns
toPorts:
- ports:
- port: '53'
protocol: UDP


@ -1,28 +0,0 @@
# yaml-language-server: $schema=https://ks.hsn.dev/cilium.io/ciliumclusterwidenetworkpolicy_v2.json
---
apiVersion: cilium.io/v2
kind: CiliumClusterwideNetworkPolicy
metadata:
name: kubelet
spec:
nodeSelector:
# apply to all nodes
matchLabels: {}
ingress:
# api server -> kubelet
- fromEntities:
- kube-apiserver
toPorts:
- ports:
- port: '10250'
protocol: TCP
egress:
# kubelet -> load balancer
- toCIDR:
- 167.235.217.82/32
toEntities:
- host
toPorts:
- ports:
- port: '6443'
protocol: TCP


@ -1,16 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kube-system
resources:
- ./allow-ssh.yaml
- ./apiserver.yaml
- ./cilium-health.yaml
- ./cilium-vxlan.yaml
- ./core-dns.yaml
- ./etcd.yaml
- ./hubble-relay.yaml
- ./hubble-ui.yaml
- ./kubelet.yaml


@ -1,17 +0,0 @@
---
spegel:
containerdSock: /run/containerd/containerd.sock
containerdRegistryConfigPath: /etc/cri/conf.d/hosts
registries:
- https://docker.io
- https://ghcr.io
- https://quay.io
- https://mcr.microsoft.com
- https://public.ecr.aws
- https://gcr.io
- https://registry.k8s.io
- https://k8s.gcr.io
- https://lscr.io
service:
registry:
hostPort: 29999


@ -1,109 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2beta2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: &app zfs-scrub
spec:
interval: 30m
chart:
spec:
chart: app-template
version: 3.2.1
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
install:
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
strategy: rollback
values:
controllers:
kubanetics:
type: cronjob
cronjob:
schedule: "@weekly"
parallelism: 1 # set this to the total number of nodes to scrub every node in parallel
containers:
app:
image:
repository: ghcr.io/aarnaud/talos-debug-tools
tag: latest-6.6.29
command: ["/bin/bash", "-c"]
args:
- |
# Scrub the ZFS pool
chroot /host /usr/local/sbin/zpool scrub nahar
probes:
liveness:
enabled: false
readiness:
enabled: false
startup:
enabled: false
resources:
requests:
cpu: 25m
limits:
memory: 128Mi
securityContext:
privileged: true
pod:
hostNetwork: true
hostPID: true
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app.kubernetes.io/name: *app
persistence:
netfs:
type: hostPath
hostPath: /sys
hostPathType: Directory
globalMounts:
- path: /sys
readOnly: true
dev:
type: hostPath
hostPath: /dev
hostPathType: Directory
globalMounts:
- path: /dev
modules:
type: hostPath
hostPath: /lib/modules
hostPathType: ""
globalMounts:
- path: /lib/modules
udev:
type: hostPath
hostPath: /run/udev
hostPathType: ""
globalMounts:
- path: /run/udev
localtime:
type: hostPath
hostPath: /etc/localtime
hostPathType: ""
globalMounts:
- path: /etc/localtime
host:
type: hostPath
hostPath: /
hostPathType: Directory
globalMounts:
- path: /host
efivars:
type: hostPath
hostPath: /sys/firmware/efi/efivars
hostPathType: ""
globalMounts:
- path: /sys/firmware/efi/efivars


@ -1,20 +0,0 @@
#!/usr/bin/env bash
KUBELET_BIN="/usr/local/bin/kubelet"
KUBELET_PID="$(pgrep -f $KUBELET_BIN)"
ZPOOL="nahar"
if [ -z "${KUBELET_PID}" ]; then
echo "kubelet not found"
exit 1
fi
# Enter namespaces and run commands
nsrun() {
nsenter \
--mount="/host/proc/${KUBELET_PID}/ns/mnt" \
--net="/host/proc/${KUBELET_PID}/ns/net" \
-- bash -c "$1"
}
# Scrub filesystems
nsrun "zpool scrub ${ZPOOL}"


@ -1,16 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: immich-app-config
labels:
app.kubernetes.io/name: immich
data:
LOG_LEVEL: verbose
DB_VECTOR_EXTENSION: pgvector
NODE_ENV: production
REDIS_HOSTNAME: dragonfly.database.svc.cluster.local
REDIS_PORT: "6379"
IMMICH_WEB_URL: http://immich-web.media.svc.cluster.local:3000
IMMICH_SERVER_URL: http://immich-server.media.svc.cluster.local:3001
IMMICH_MACHINE_LEARNING_URL: http://immich-machine-learning.media.svc.cluster.local:3003


@ -1,19 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: immich
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: immich-secret
template:
engineVersion: v2
data:
DATABASE_URI: "postgresql://{{ .DATABASE_USER }}:{{ .DATABASE_PASSWORD }}@immich-primary-real.media.svc:{{ .DATABASE_PORT }}/{{ .DATABASE_NAME }}"
dataFrom:
- extract:
key: immich
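With the values the PushSecret further down publishes to the immich 1Password item (user and database immich, port 5432 via the primary Service), the rendered DATABASE_URI comes out roughly as follows, password elided:

postgresql://immich:<password>@immich-primary-real.media.svc:5432/immich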


@ -1,21 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: immich-postgres-gatus-ep
labels:
gatus.io/enabled: "true"
data:
config.yaml: |
endpoints:
- name: immich-postgres
group: infrastructure
url: tcp://immich-primary-real.media.svc.cluster.local:5432
interval: 1m
ui:
hide-url: true
hide-hostname: true
conditions:
- "[CONNECTED] == true"
alerts:
- type: pushover

View file

@ -1,97 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2beta2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2beta2
kind: HelmRelease
metadata:
name: &name immich
namespace: default
spec:
interval: 30m
chart:
spec:
chart: app-template
version: 3.1.0
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
install:
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
strategy: rollback
values:
controllers:
immich-server:
type: statefulset
annotations:
reloader.stakater.com/auto: "true"
containers:
app:
image:
repository: ghcr.io/immich-app/immich-server
tag: v1.105.1
command: /bin/sh
args:
- ./start-server.sh
probes:
startup:
enabled: true
spec:
failureThreshold: 30
periodSeconds: 5
liveness:
enabled: true
readiness:
enabled: true
resources:
requests:
cpu: 100m
memory: 512Mi
limits:
memory: 4Gi
env:
TZ: America/Chicago
DB_URL:
valueFrom:
secretKeyRef:
name: immich-secret
key: DATABASE_URI
envFrom:
- configMapRef:
name: immich-app-config
service:
app:
controller: immich-server
ports:
http:
port: 3001
ingress:
app:
enabled: true
className: external-nginx
annotations:
external-dns.alpha.kubernetes.io/cloudflare-proxied: "true"
external-dns.alpha.kubernetes.io/target: external.hsn.dev
nginx.ingress.kubernetes.io/proxy-body-size: "0"
hosts:
- host: &host "im.hsn.dev"
paths:
- path: /
service:
identifier: app
port: http
tls:
- hosts:
- *host
persistence:
media:
enabled: true
type: nfs
server: 10.1.1.13
path: /eru/media/immich
globalMounts:
- path: /usr/src/app/upload


@ -1,27 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./configmap.yaml
- ./externalsecret.yaml
- ./gatus.yaml
- ./helmrelease.yaml
- ./machine-learning
- ./microservices
- ./postgresCluster.yaml
- ./pushsecret.yaml
- ./service.yaml
configMapGenerator:
- name: immich-database-init-sql
files:
- init.sql=./resources/init.sql
labels:
- pairs:
app.kubernetes.io/name: immich
app.kubernetes.io/instance: immich
app.kubernetes.io/part-of: immich
generatorOptions:
disableNameSuffixHash: true
annotations:
kustomize.toolkit.fluxcd.io/substitute: disabled


@ -1,82 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2beta2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2beta2
kind: HelmRelease
metadata:
name: immich-machine-learning
spec:
interval: 15m
chart:
spec:
chart: app-template
version: 3.1.0
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
interval: 15m
install:
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
strategy: rollback
values:
controllers:
immich-machine-learning:
annotations:
reloader.stakater.com/auto: "true"
strategy: Recreate
pod:
nodeSelector:
nvidia.com/gpu.present: "true"
runtimeClassName: nvidia
containers:
app:
image:
repository: ghcr.io/immich-app/immich-machine-learning
tag: v1.105.1
resources:
requests:
cpu: 15m
memory: 250Mi
limits:
memory: 4000Mi
probes:
startup:
enabled: true
spec:
failureThreshold: 30
periodSeconds: 5
liveness:
enabled: true
readiness:
enabled: true
envFrom:
- configMapRef:
name: immich-app-config
env:
DB_URL:
valueFrom:
secretKeyRef:
name: immich-secret
key: DATABASE_URI
service:
app:
controller: immich-machine-learning
ports:
http:
port: 3003
persistence:
media:
enabled: true
type: nfs
server: 10.1.1.13
path: /eru/media/immich
globalMounts:
- path: /usr/src/app/upload
cache:
enabled: true
type: emptyDir


@ -1,11 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
labels:
- pairs:
app.kubernetes.io/name: immich-machine-learning
app.kubernetes.io/instance: immich-machine-learning
app.kubernetes.io/part-of: immich
resources:
- ./helmrelease.yaml


@ -1,80 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2beta2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2beta2
kind: HelmRelease
metadata:
name: immich-microservices
spec:
interval: 15m
chart:
spec:
chart: app-template
version: 3.1.0
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
interval: 15m
install:
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
strategy: rollback
values:
controllers:
immich-microservices:
strategy: Recreate
annotations:
reloader.stakater.com/auto: "true"
pod:
nodeSelector:
nvidia.com/gpu.present: "true"
runtimeClassName: nvidia
containers:
app:
image:
repository: ghcr.io/immich-app/immich-server
tag: v1.105.1
command: /bin/sh
args:
- ./start-microservices.sh
resources:
requests:
cpu: 100m
memory: 250Mi
limits:
memory: 4000Mi
probes:
startup:
enabled: true
spec:
failureThreshold: 30
periodSeconds: 5
liveness:
enabled: true
readiness:
enabled: true
envFrom:
- configMapRef:
name: immich-app-config
env:
DB_URL:
valueFrom:
secretKeyRef:
name: immich-secret
key: DATABASE_URI
service:
app:
controller: immich-microservices
enabled: false
persistence:
media:
enabled: true
type: nfs
server: 10.1.1.13
path: /eru/media/immich
globalMounts:
- path: /usr/src/app/upload


@ -1,11 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/SchemaStore/schemastore/master/src/schemas/json/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
labels:
- pairs:
app.kubernetes.io/name: immich-microservices
app.kubernetes.io/instance: immich-microservices
app.kubernetes.io/part-of: immich
resources:
- ./helmrelease.yaml


@ -1,94 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/postgres-operator.crunchydata.com/postgrescluster_v1beta1.json
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
name: &name "${APP}"
spec:
postgresVersion: 16
dataSource:
pgbackrest:
stanza: db
configuration:
- secret:
name: pgo-s3-creds
global:
repo1-path: "/${APP}/repo1"
repo1-s3-uri-style: path
repo:
name: repo1
s3:
bucket: "crunchy-postgres"
endpoint: "s3.hsn.dev"
region: "us-east-1"
monitoring:
pgmonitor:
exporter:
# https://github.com/CrunchyData/postgres-operator-examples/blob/main/helm/install/values.yaml
image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi8-0.15.0-3
patroni:
dynamicConfiguration:
synchronous_mode: true
postgresql:
synchronous_commit: "on"
pg_hba:
- hostnossl all all 10.244.0.0/16 md5
- hostssl all all all md5
databaseInitSQL:
name: immich-database-init-sql
key: init.sql
instances:
- name: postgres
metadata:
labels:
app.kubernetes.io/name: pgo-${APP}
replicas: 1
dataVolumeClaimSpec:
storageClassName: openebs-zfs
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
topologySpreadConstraints:
- maxSkew: 1
topologyKey: "kubernetes.io/hostname"
whenUnsatisfiable: "DoNotSchedule"
labelSelector:
matchLabels:
postgres-operator.crunchydata.com/cluster: ${APP}
postgres-operator.crunchydata.com/data: postgres
users:
- name: "immich"
databases:
- "immich"
options: "SUPERUSER"
password:
type: AlphaNumeric
backups:
pgbackrest:
configuration:
- secret:
name: pgo-s3-creds
global:
archive-push-queue-max: 4GiB
repo1-retention-full: "14"
repo1-retention-full-type: time
repo1-path: "/${APP}/repo1"
repo1-s3-uri-style: path
manual:
repoName: repo1
options:
- --type=full
metadata:
labels:
app.kubernetes.io/name: pgo-${APP}-backup
repos:
- name: repo1
schedules:
full: "0 1 * * 0"
differential: "0 1 * * 1-6"
s3:
bucket: "crunchy-postgres"
endpoint: "s3.hsn.dev"
region: "us-east-1"

View file

@ -1,40 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/external-secrets.io/pushsecret_v1alpha1.json
apiVersion: external-secrets.io/v1alpha1
kind: PushSecret
metadata:
name: immich
spec:
refreshInterval: 1h
secretStoreRefs:
- name: onepassword-connect
kind: ClusterSecretStore
selector:
secret:
name: immich-pguser-immich
data:
- match:
secretKey: dbname
remoteRef:
remoteKey: immich
property: DATABASE_NAME
- match:
secretKey: host
remoteRef:
remoteKey: immich
property: DATABASE_HOST
- match:
secretKey: user
remoteRef:
remoteKey: immich
property: DATABASE_USER
- match:
secretKey: password
remoteRef:
remoteKey: immich
property: DATABASE_PASSWORD
- match:
secretKey: port
remoteRef:
remoteKey: immich
property: DATABASE_PORT


@ -1,4 +0,0 @@
\c immich
CREATE EXTENSION vector;
CREATE EXTENSION cube;
CREATE EXTENSION earthdistance;


@ -1,20 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
labels:
postgres-operator.crunchydata.com/cluster: immich
postgres-operator.crunchydata.com/role: primary
name: immich-primary-real
namespace: media
spec:
internalTrafficPolicy: Cluster
ports:
- name: postgres
port: 5432
protocol: TCP
targetPort: postgres
selector:
postgres-operator.crunchydata.com/cluster: immich
postgres-operator.crunchydata.com/role: master
type: ClusterIP


@ -1,30 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: &app immich
namespace: flux-system
spec:
targetNamespace: media
commonMetadata:
labels:
app.kubernetes.io/name: *app
dependsOn:
- name: crunchy-postgres-operator
- name: external-secrets-stores
- name: dragonfly
path: ./kubernetes/apps/media/immich/app
prune: true
sourceRef:
kind: GitRepository
name: homelab
wait: false
interval: 30m
retryInterval: 1m
timeout: 5m
postBuild:
substitute:
APP: *app
DB_NAME: immich
DB_USER: immich


@ -1,9 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
# Pre Flux-Kustomizations
- ./namespace.yaml
# Flux-Kustomizations
- ./immich/ks.yaml


@ -1,58 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2beta2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: alertmanager-silencer
spec:
interval: 30m
chart:
spec:
chart: app-template
version: 3.3.0
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
install:
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
strategy: rollback
dependsOn:
- name: kube-prometheus-stack
namespace: observability
values:
controllers:
alertmanager-silencer:
type: cronjob
cronjob:
schedule: "@daily"
containers:
app:
image:
repository: ghcr.io/onedr0p/kubanetics
tag: 2024.7.1@sha256:020ec6f00b9cdc0ee247d2fd34d3951ac32718326bb90c38e947eed9d555de6c
env:
SCRIPT_NAME: alertmanager-silencer.sh
ALERTMANAGER_URL: http://alertmanager-operated.observability.svc.cluster.local:9093
MATCHERS_0: alertname=NodeCPUHighUsage job=node-exporter
MATCHERS_1: alertname=CPUThrottlingHigh container=gc
MATCHERS_2: alertname=CPUThrottlingHigh container=worker
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities: { drop: ["ALL"] }
resources:
requests:
cpu: 25m
limits:
memory: 128Mi
pod:
securityContext:
runAsUser: 568
runAsGroup: 568
runAsNonRoot: true


@ -1,21 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: &app alertmanager-silencer
namespace: flux-system
spec:
targetNamespace: observability
commonMetadata:
labels:
app.kubernetes.io/name: *app
path: ./kubernetes/apps/observability/alertmanager-silencer/app
prune: true
sourceRef:
kind: GitRepository
name: homelab
wait: false
interval: 30m
retryInterval: 1m
timeout: 5m


@ -1,61 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: grafana-secret
namespace: observability
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: grafana-secret
creationPolicy: Owner
template:
engineVersion: v2
data:
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET: "{{ .authentik_grafana_oauth_client_secret }}"
GF_DATE_FORMATS_USE_BROWSER_LOCALE: "true"
GF_SERVER_ROOT_URL: https://grafana.hsn.dev
GF_DATABASE_NAME: "{{ .grafana_GF_DATABASE_NAME }}"
GF_DATABASE_HOST: "postgres-primary-real.database.svc"
GF_DATABASE_USER: "{{ .grafana_GF_DATABASE_USER }}"
GF_DATABASE_PASSWORD: "{{ .grafana_GF_DATABASE_PASSWORD }}"
GF_DATABASE_SSL_MODE: "require"
GF_DATABASE_TYPE: postgres
GF_ANALYTICS_CHECK_FOR_UPDATES: "false"
GF_ANALYTICS_CHECK_FOR_PLUGIN_UPDATES: "false"
GF_ANALYTICS_REPORTING_ENABLED: "false"
GF_AUTH_ANONYMOUS_ENABLED: "false"
GF_AUTH_BASIC_ENABLED: "false"
GF_AUTH_GENERIC_OAUTH_ENABLED: "true"
GF_AUTH_GENERIC_OAUTH_API_URL: https://auth.hsn.dev/application/o/userinfo/
GF_AUTH_GENERIC_OAUTH_AUTH_URL: https://auth.hsn.dev/application/o/authorize/
GF_AUTH_GENERIC_OAUTH_TOKEN_URL: https://auth.hsn.dev/application/o/token/
GF_AUTH_GENERIC_OAUTH_CLIENT_ID: CoV7ae1HxuNzwCbVPf3U7TfYMX2rVqC5T9RAUo5M
GF_AUTH_GENERIC_OAUTH_EMPTY_SCOPES: "false"
GF_AUTH_GENERIC_OAUTH_ROLE_ATTRIBUTE_PATH: "contains(groups[*], 'Grafana Admins') && 'Admin' || contains(groups[*], 'Grafana Editors') && 'Editor' || 'Viewer'"
GF_AUTH_GENERIC_OAUTH_SCOPES: openid profile email groups
GF_AUTH_OAUTH_AUTO_LOGIN: "true"
GF_EXPLORE_ENABLED: "true"
GF_FEATURE_TOGGLES_ENABLE: publicDashboards
GF_LOG_MODE: console
GF_NEWS_NEWS_FEED_ENABLED: "false"
GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS: natel-discrete-panel,pr0ps-trackmap-panel,panodata-map-panel
GF_SECURITY_COOKIE_SAMESITE: grafana
GF_SECURITY_ANGULAR_SUPPORT_ENABLED: "true"
dataFrom:
- extract:
key: Authentik
rewrite:
- regexp:
source: "(.*)"
target: "authentik_$1"
- extract:
key: grafana
rewrite:
- regexp:
source: "(.*)"
target: "grafana_$1"


@ -1,401 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/helm.toolkit.fluxcd.io/helmrelease_v2beta2.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: grafana
spec:
interval: 30m
chart:
spec:
chart: grafana
version: 8.3.7
sourceRef:
kind: HelmRepository
name: grafana
namespace: flux-system
install:
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
uninstall:
keepHistory: false
dependsOn:
- name: kube-prometheus-stack
namespace: observability
- name: loki
namespace: observability
values:
replicas: 1
envFromSecret: grafana-secret
dashboardProviders:
dashboardproviders.yaml:
apiVersion: 1
providers:
- name: default
orgId: 1
folder: ""
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/default-folder
- name: ceph
orgId: 1
folder: Ceph
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/ceph-folder
- name: crunchy-postgres
orgId: 1
folder: Crunchy-postgres
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/crunchy-postgres-folder
- name: flux
orgId: 1
folder: Flux
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/flux-folder
- name: kubernetes
orgId: 1
folder: Kubernetes
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/kubernetes-folder
- name: nginx
orgId: 1
folder: Nginx
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/nginx-folder
- name: prometheus
orgId: 1
folder: Prometheus
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/prometheus-folder
- name: thanos
orgId: 1
folder: Thanos
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/thanos-folder
- name: unifi
orgId: 1
folder: Unifi
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/unifi-folder
datasources:
datasources.yaml:
apiVersion: 1
deleteDatasources:
- { name: Alertmanager, orgId: 1 }
- { name: Loki, orgId: 1 }
- { name: Prometheus, orgId: 1 }
datasources:
- name: Prometheus
type: prometheus
uid: prometheus
access: proxy
url: http://thanos-query-frontend.observability.svc.cluster.local:10902
jsonData:
prometheusType: Thanos
timeInterval: 1m
isDefault: true
- name: Loki
type: loki
uid: loki
access: proxy
url: http://loki-gateway.observability.svc.cluster.local
jsonData:
maxLines: 250
- name: Alertmanager
type: alertmanager
uid: alertmanager
access: proxy
url: http://alertmanager-operated.observability.svc.cluster.local:9093
jsonData:
implementation: prometheus
dashboards:
default:
cloudflared:
# renovate: depName="Cloudflare Tunnels (cloudflared)"
gnetId: 17457
revision: 6
datasource:
- { name: DS_PROMETHEUS, value: Prometheus }
external-dns:
# renovate: depName="External-dns"
gnetId: 15038
revision: 3
datasource: Prometheus
minio:
# renovate: depName="MinIO Dashboard"
gnetId: 13502
revision: 25
datasource:
- { name: DS_PROMETHEUS, value: Prometheus }
node-exporter-full:
# renovate: depName="Node Exporter Full"
gnetId: 1860
revision: 33
datasource: Prometheus
postgres:
# renovate: depName="PostgreSQL Database"
gnetId: 9628
revision: 7
datasource:
- { name: DS_PROMETHEUS, value: Prometheus }
smartctl-exporter:
# renovate: depName="smartctl_exporter"
gnetId: 20204
revision: 1
datasource:
- { name: DS_PROMETHEUS, value: Prometheus }
spegel:
# renovate: depName="Spegel"
gnetId: 18089
revision: 1
datasource:
- { name: DS_PROMETHEUS, value: Prometheus }
unpackerr:
# renovate: depName="Unpackerr"
gnetId: 18817
revision: 1
datasource:
- { name: DS_PROMETHEUS, value: Prometheus }
zfs:
# renovate: depName="ZFS"
gnetId: 7845
revision: 4
datasource: Prometheus
dragonflydb:
url: https://raw.githubusercontent.com/dragonflydb/dragonfly/main/tools/local/monitoring/grafana/provisioning/dashboards/dashboard.json
datasource:
- { name: DS_PROMETHEUS, value: Prometheus }
cert-manager:
url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/cert-manager/dashboards/cert-manager.json
datasource: Prometheus
external-secrets:
url: https://raw.githubusercontent.com/external-secrets/external-secrets/main/docs/snippets/dashboard.json
datasource: Prometheus
node-feature-discovery:
url: https://raw.githubusercontent.com/kubernetes-sigs/node-feature-discovery/master/examples/grafana-dashboard.json
datasource: Prometheus
crunchy-postgres:
pgbackrest:
url: https://raw.githubusercontent.com/CrunchyData/pgmonitor/development/grafana/containers/pgbackrest.json
datasource:
- { name: DS_PROMETHEUS, value: Prometheus }
pods:
url: https://raw.githubusercontent.com/CrunchyData/pgmonitor/development/grafana/containers/pod_details.json
datasource:
- { name: DS_PROMETHEUS, value: Prometheus }
postgresql:
url: https://raw.githubusercontent.com/CrunchyData/pgmonitor/development/grafana/containers/postgresql_details.json
datasource:
- { name: DS_PROMETHEUS, value: Prometheus }
postgresql-overview:
url: https://raw.githubusercontent.com/CrunchyData/pgmonitor/development/grafana/containers/postgresql_overview.json
datasource:
- { name: DS_PROMETHEUS, value: Prometheus }
postgresql-health:
url: https://raw.githubusercontent.com/CrunchyData/pgmonitor/development/grafana/containers/postgresql_service_health.json
datasource:
- { name: DS_PROMETHEUS, value: Prometheus }
postgresql-alerts:
url: https://raw.githubusercontent.com/CrunchyData/pgmonitor/development/grafana/containers/prometheus_alerts.json
datasource:
- { name: DS_PROMETHEUS, value: Prometheus }
query-stats:
url: https://raw.githubusercontent.com/CrunchyData/pgmonitor/development/grafana/containers/query_statistics.json
datasource:
- { name: DS_PROMETHEUS, value: Prometheus }
ceph:
ceph-cluster:
# renovate: depName="Ceph Cluster"
gnetId: 2842
revision: 17
datasource: Prometheus
ceph-osd:
# renovate: depName="Ceph - OSD (Single)"
gnetId: 5336
revision: 9
datasource: Prometheus
ceph-pools:
# renovate: depName="Ceph - Pools"
gnetId: 5342
revision: 9
datasource: Prometheus
flux:
flux-cluster:
url: https://raw.githubusercontent.com/fluxcd/flux2-monitoring-example/main/monitoring/configs/dashboards/cluster.json
datasource: Prometheus
flux-control-plane:
url: https://raw.githubusercontent.com/fluxcd/flux2-monitoring-example/main/monitoring/configs/dashboards/control-plane.json
datasource: Prometheus
kubernetes:
kubernetes-api-server:
# renovate: depName="Kubernetes / System / API Server"
gnetId: 15761
revision: 16
datasource: Prometheus
kubernetes-coredns:
# renovate: depName="Kubernetes / System / CoreDNS"
gnetId: 15762
revision: 17
datasource: Prometheus
kubernetes-global:
# renovate: depName="Kubernetes / Views / Global"
gnetId: 15757
revision: 37
datasource: Prometheus
kubernetes-namespaces:
# renovate: depName="Kubernetes / Views / Namespaces"
gnetId: 15758
revision: 34
datasource: Prometheus
kubernetes-nodes:
# renovate: depName="Kubernetes / Views / Nodes"
gnetId: 15759
revision: 29
datasource: Prometheus
kubernetes-pods:
# renovate: depName="Kubernetes / Views / Pods"
gnetId: 15760
revision: 21
datasource: Prometheus
kubernetes-volumes:
# renovate: depName="K8s / Storage / Volumes / Cluster"
gnetId: 11454
revision: 14
datasource: Prometheus
nginx:
nginx:
url: https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/grafana/dashboards/nginx.json
datasource: Prometheus
nginx-request-handling-performance:
url: https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/grafana/dashboards/request-handling-performance.json
datasource: Prometheus
prometheus:
prometheus:
# renovate: depName="Prometheus"
gnetId: 19105
revision: 3
datasource: Prometheus
thanos:
thanos-bucket-replicate:
url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/thanos/dashboards/bucket-replicate.json
datasource: Prometheus
thanos-compact:
url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/thanos/dashboards/compact.json
datasource: Prometheus
thanos-overview:
url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/thanos/dashboards/overview.json
datasource: Prometheus
thanos-query:
url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/thanos/dashboards/query.json
datasource: Prometheus
thanos-query-frontend:
url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/thanos/dashboards/query-frontend.json
datasource: Prometheus
thanos-receive:
url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/thanos/dashboards/receive.json
datasource: Prometheus
thanos-rule:
url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/thanos/dashboards/rule.json
datasource: Prometheus
thanos-sidecar:
url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/thanos/dashboards/sidecar.json
datasource: Prometheus
thanos-store:
url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/thanos/dashboards/store.json
datasource: Prometheus
unifi:
unifi-insights:
# renovate: depName="UniFi-Poller: Client Insights - Prometheus"
gnetId: 11315
revision: 9
datasource: Prometheus
unifi-network-sites:
# renovate: depName="UniFi-Poller: Network Sites - Prometheus"
gnetId: 11311
revision: 5
datasource: Prometheus
unifi-uap:
# renovate: depName="UniFi-Poller: UAP Insights - Prometheus"
gnetId: 11314
revision: 10
datasource: Prometheus
unifi-usw:
# renovate: depName="UniFi-Poller: USW Insights - Prometheus"
gnetId: 11312
revision: 9
datasource: Prometheus
sidecar:
dashboards:
enabled: true
searchNamespace: ALL
labelValue: ""
label: grafana_dashboard
folderAnnotation: grafana_folder
provider:
disableDelete: true
foldersFromFilesStructure: true
datasources:
enabled: true
searchNamespace: ALL
labelValue: ""
plugins:
- grafana-clock-panel
- grafana-piechart-panel
- grafana-worldmap-panel
- natel-discrete-panel
- pr0ps-trackmap-panel
- vonage-status-panel
serviceMonitor:
enabled: true
ingress:
enabled: true
ingressClassName: external-nginx
annotations:
external-dns.alpha.kubernetes.io/cloudflare-proxied: "true"
external-dns.alpha.kubernetes.io/target: external.hsn.dev
hosts:
- &host grafana.hsn.dev
tls:
- hosts:
- *host
persistence:
enabled: false
testFramework:
enabled: false
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app.kubernetes.io/name: grafana


@ -1,29 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: &app grafana
namespace: flux-system
spec:
targetNamespace: observability
commonMetadata:
labels:
app.kubernetes.io/name: *app
dependsOn:
- name: crunchy-postgres-operator
- name: external-secrets-stores
path: ./kubernetes/apps/observability/grafana/app
prune: true
sourceRef:
kind: GitRepository
name: homelab
wait: false
interval: 30m
retryInterval: 1m
timeout: 5m
postBuild:
substitute:
APP: *app
DB_NAME: grafana
DB_USER: grafana


@ -1,22 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: alertmanager
spec:
refreshInterval: 5m
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: alertmanager-secret
template:
templateFrom:
- configMap:
name: alertmanager-config-tpl
items:
- key: alertmanager.yaml
dataFrom:
- extract:
key: pushover


@ -1,190 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/helm.toolkit.fluxcd.io/helmrelease_v2beta2.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: kube-prometheus-stack
spec:
interval: 30m
timeout: 15m
chart:
spec:
chart: kube-prometheus-stack
version: 61.6.0
sourceRef:
kind: HelmRepository
name: prometheus-community
namespace: flux-system
install:
crds: CreateReplace
remediation:
retries: 3
upgrade:
cleanupOnFail: true
crds: CreateReplace
remediation:
strategy: rollback
retries: 3
values:
crds:
enabled: true
cleanPrometheusOperatorObjectNames: true
alertmanager:
ingress:
enabled: true
pathType: Prefix
ingressClassName: internal-nginx
hosts:
- &host alertmanager.jahanson.tech
tls:
- hosts:
- *host
alertmanagerSpec:
replicas: 1
useExistingSecret: true
configSecret: alertmanager-secret
storage:
volumeClaimTemplate:
spec:
storageClassName: openebs-hostpath
resources:
requests:
storage: 1Gi
kubelet:
enabled: true
serviceMonitor:
metricRelabelings:
# Drop high cardinality labels
- action: labeldrop
regex: (uid)
- action: labeldrop
regex: (id|name)
- action: drop
sourceLabels: ["__name__"]
regex: (rest_client_request_duration_seconds_bucket|rest_client_request_duration_seconds_sum|rest_client_request_duration_seconds_count)
kubeApiServer:
enabled: true
serviceMonitor:
metricRelabelings:
# Drop high cardinality labels
- action: drop
sourceLabels: ["__name__"]
regex: (apiserver|etcd|rest_client)_request(|_sli|_slo)_duration_seconds_bucket
- action: drop
sourceLabels: ["__name__"]
regex: (apiserver_response_sizes_bucket|apiserver_watch_events_sizes_bucket)
kubeControllerManager:
enabled: true
endpoints: &cp
- 10.1.1.61
kubeEtcd:
enabled: true
endpoints: *cp
kubeScheduler:
enabled: true
endpoints: *cp
kubeProxy:
enabled: false
prometheus:
ingress:
enabled: true
ingressClassName: internal-nginx
pathType: Prefix
hosts:
- &host prometheus.jahanson.tech
tls:
- hosts:
- *host
thanosService:
enabled: true
thanosServiceMonitor:
enabled: true
# thanosServiceExternal:
# enabled: true
# type: LoadBalancer
# annotations:
# external-dns.alpha.kubernetes.io/hostname: thanos.jahanson.tech
# io.cilium/lb-ipam-ips: 10.45.0.6
# externalTrafficPolicy: Cluster
prometheusSpec:
podMetadata:
annotations:
secret.reloader.stakater.com/reload: &secret thanos-objstore-config
replicas: 1
replicaExternalLabelName: __replica__
scrapeInterval: 1m # Must match interval in Grafana Helm chart
ruleSelectorNilUsesHelmValues: false
serviceMonitorSelectorNilUsesHelmValues: false
podMonitorSelectorNilUsesHelmValues: false
probeSelectorNilUsesHelmValues: false
scrapeConfigSelectorNilUsesHelmValues: false
enableAdminAPI: true
walCompression: true
enableFeatures:
- auto-gomemlimit
- memory-snapshot-on-shutdown
- new-service-discovery-manager
image:
registry: quay.io
repository: prometheus/prometheus
tag: v2.51.0-dedupelabels
thanos:
image: quay.io/thanos/thanos:${THANOS_VERSION}
version: "${THANOS_VERSION#v}"
objectStorageConfig:
existingSecret:
name: *secret
key: config
retention: 2d
retentionSize: 15GB
externalLabels:
cluster: main
storageSpec:
volumeClaimTemplate:
spec:
storageClassName: openebs-hostpath
resources:
requests:
storage: 20Gi
nodeExporter:
enabled: true
prometheus-node-exporter:
fullnameOverride: node-exporter
prometheus:
monitor:
enabled: true
relabelings:
- action: replace
regex: (.*)
replacement: $1
sourceLabels:
- __meta_kubernetes_pod_node_name
targetLabel: kubernetes_node
kubeStateMetrics:
enabled: true
kube-state-metrics:
fullnameOverride: kube-state-metrics
metricLabelsAllowlist:
- pods=[*]
- deployments=[*]
- persistentvolumeclaims=[*]
prometheus:
monitor:
enabled: true
relabelings:
- action: replace
regex: (.*)
replacement: $1
sourceLabels:
- __meta_kubernetes_pod_node_name
targetLabel: kubernetes_node
grafana:
enabled: false
forceDeployDashboards: true
sidecar:
dashboards:
annotations:
grafana_folder: Kubernetes
multicluster:
etcd:
enabled: true
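The Thanos sidecar image and version in the Prometheus spec above are filled in by Flux's post-build variable substitution, which supports shell-style prefix stripping. Assuming THANOS_VERSION were supplied as v0.35.1 (a hypothetical value defined elsewhere in the repo), the rendered values would be:

thanos:
  image: quay.io/thanos/thanos:v0.35.1
  version: "0.35.1"   # ${THANOS_VERSION#v} drops the leading "v"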


@ -1,16 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./externalsecret.yaml
- ./helmrelease.yaml
- ./prometheusrules
# - ./scrapeconfigs
- ./podmonitors
configMapGenerator:
- name: alertmanager-config-tpl
files:
- alertmanager.yaml=./resources/alertmanager.yaml
generatorOptions:
disableNameSuffixHash: true


@ -1,34 +0,0 @@
# yaml-language-server: $schema=https://ks.hsn.dev/monitoring.coreos.com/podmonitor_v1.json
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: crunchy-postgres-exporter
spec:
selector:
matchLabels:
postgres-operator.crunchydata.com/crunchy-postgres-exporter: 'true'
namespaceSelector:
matchNames:
- database
- media
podMetricsEndpoints:
- port: "exporter"
relabelings:
- sourceLabels: [__meta_kubernetes_pod_container_port_number]
action: keep
regex: "9187"
- sourceLabels: [__meta_kubernetes_namespace]
targetLabel: kubernetes_namespace
- sourceLabels: [__meta_kubernetes_pod_name]
targetLabel: pod
- sourceLabels: [__meta_kubernetes_namespace, __meta_kubernetes_pod_label_postgres_operator_crunchydata_com_cluster]
separator: ":"
targetLabel: pg_cluster
replacement: "$1$2"
- sourceLabels: [__meta_kubernetes_pod_ip]
targetLabel: ip
- sourceLabels: [__meta_kubernetes_pod_label_postgres_operator_crunchydata_com_instance]
targetLabel: deployment
- sourceLabels: [__meta_kubernetes_pod_label_postgres_operator_crunchydata_com_role]
targetLabel: role
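As a worked example of the pg_cluster relabeling above: the two source labels are joined with the ":" separator before the (default) regex is applied, so an exporter pod in the media namespace that belongs to the immich cluster deployed earlier in this diff ends up with:

pg_cluster="media:immich"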


@ -1,7 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./crunchy-postgres.yaml
- ./dragonflydb.yaml


@ -1,6 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./prometheusrule.yaml


@ -1,37 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/monitoring.coreos.com/prometheusrule_v1.json
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: miscellaneous-rules
labels:
prometheus: k8s
role: alert-rules
spec:
groups:
- name: dockerhub
rules:
- alert: BootstrapRateLimitRisk
annotations:
summary: Kubernetes cluster at risk of being rate limited by dockerhub on bootstrap
expr: count(time() - container_last_seen{image=~"(docker.io).*",container!=""} < 30) > 100
for: 15m
labels:
severity: critical
- name: oom
rules:
- alert: OOMKilled
annotations:
summary: Container {{ $labels.container }} in pod {{ $labels.namespace }}/{{ $labels.pod }} has been OOMKilled {{ $value }} times in the last 10 minutes.
expr: (kube_pod_container_status_restarts_total - kube_pod_container_status_restarts_total offset 10m >= 1) and ignoring (reason) min_over_time(kube_pod_container_status_last_terminated_reason{reason="OOMKilled"}[10m]) == 1
labels:
severity: critical
- name: zfs
rules:
- alert: ZfsUnexpectedPoolState
annotations:
summary: ZFS pool {{$labels.zpool}} on {{$labels.instance}} is in an unexpected state {{$labels.state}}
expr: node_zfs_zpool_state{state!="online"} > 0
for: 15m
labels:
severity: critical


@ -1,68 +0,0 @@
---
global:
resolve_timeout: 5m
route:
group_by: ["alertname", "job"]
group_interval: 10m
group_wait: 1m
receiver: pushover
repeat_interval: 12h
routes:
- receiver: heartbeat
group_interval: 5m
group_wait: 0s
matchers:
- alertname =~ "Watchdog"
repeat_interval: 5m
- receiver: "null"
matchers:
- alertname =~ "InfoInhibitor"
- receiver: pushover
continue: true
matchers:
- severity = "critical"
inhibit_rules:
- equal: ["alertname", "namespace"]
source_matchers:
- severity = "critical"
target_matchers:
- severity = "warning"
receivers:
- name: heartbeat
webhook_configs:
- send_resolved: true
url: "{{ .alertmanager_heartbeat_url }}"
- name: "null"
- name: pushover
pushover_configs:
- html: true
# Compooters are hard
message: |-
{{ "{{-" }} range .Alerts {{ "}}" }}
{{ "{{-" }} if ne .Annotations.description "" {{ "}}" }}
{{ "{{" }} .Annotations.description {{ "}}" }}
{{ "{{-" }} else if ne .Annotations.summary "" {{ "}}" }}
{{ "{{" }} .Annotations.summary {{ "}}" }}
{{ "{{-" }} else if ne .Annotations.message "" {{ "}}" }}
{{ "{{" }} .Annotations.message {{ "}}" }}
{{ "{{-" }} else {{ "}}" }}
Alert description not available
{{ "{{-" }} end {{ "}}" }}
{{ "{{-" }} if gt (len .Labels.SortedPairs) 0 {{ "}}" }}
<small>
{{ "{{-" }} range .Labels.SortedPairs {{ "}}" }}
<b>{{ "{{" }} .Name {{ "}}" }}:</b> {{ "{{" }} .Value {{ "}}" }}
{{ "{{-" }} end {{ "}}" }}
</small>
{{ "{{-" }} end {{ "}}" }}
{{ "{{-" }} end {{ "}}" }}
priority: |-
{{ "{{" }} if eq .Status "firing" {{ "}}" }}1{{ "{{" }} else {{ "}}" }}0{{ "{{" }} end {{ "}}" }}
send_resolved: true
sound: gamelan
title: >-
{{ "{{" }} .CommonLabels.alertname {{ "}}" }}
[{{ "{{" }} .Status | toUpper {{ "}}" }}{{ "{{" }} if eq .Status "firing" {{ "}}" }}:{{ "{{" }} .Alerts.Firing | len {{ "}}" }}{{ "{{" }} end {{ "}}" }}]
token: "{{ .alertmanager_token }}"
url_title: View in Alertmanager
user_key: "{{ .userkey_jahanson }}"
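The doubled braces in this file exist because it is rendered twice: the Kustomization above packs it into the alertmanager-config-tpl ConfigMap, and (presumably via the ExternalSecret's template engine) placeholders such as {{ .alertmanager_token }} are filled in on the first pass. Any Go-template directive that Alertmanager itself should evaluate is therefore escaped as {{ "{{" }} ... {{ "}}" }}. A minimal sketch of the round trip:

    template source:      {{ "{{" }} .CommonLabels.alertname {{ "}}" }}
    after the first pass: {{ .CommonLabels.alertname }}   (what Alertmanager receives)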


@ -1,7 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./node-exporter.yaml
- ./zfs-exporter.yaml


@ -1,11 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/monitoring.coreos.com/scrapeconfig_v1alpha1.json
apiVersion: monitoring.coreos.com/v1alpha1
kind: ScrapeConfig
metadata:
name: node-exporter
spec:
staticConfigs:
- targets:
- 10.1.1.1:9100
metricsPath: /metrics


@ -1,11 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/monitoring.coreos.com/scrapeconfig_v1alpha1.json
apiVersion: monitoring.coreos.com/v1alpha1
kind: ScrapeConfig
metadata:
name: zfs-exporter
spec:
staticConfigs:
- targets:
- 10.1.1.13:9134
metricsPath: /metrics


@ -1,29 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: &app kube-prometheus-stack
namespace: flux-system
spec:
targetNamespace: observability
commonMetadata:
labels:
app.kubernetes.io/name: *app
dependsOn:
- name: external-secrets-stores
- name: openebs
- name: volsync
path: ./kubernetes/apps/observability/kube-prometheus-stack/app
prune: true
sourceRef:
kind: GitRepository
name: homelab
wait: false
interval: 30m
retryInterval: 1m
timeout: 15m
postBuild:
substitute:
# renovate: datasource=docker depName=quay.io/thanos/thanos
THANOS_VERSION: v0.34.1


@ -1,28 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: loki
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: loki-secret
creationPolicy: Owner
template:
engineVersion: v2
data:
S3_HOST: s3.hsn.dev
S3_BUCKET: "{{ .minio_thanos_bucket_name }}"
S3_ACCESS_KEY: "{{ .minio_loki_access_key }}"
S3_SECRET_KEY: "{{ .minio_loki_secret_key }}"
S3_REGION: us-east-1
dataFrom:
- extract:
key: minio
rewrite:
- regexp:
source: "(.*)"
target: "minio_$1"


@ -1,138 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/helm.toolkit.fluxcd.io/helmrelease_v2beta2.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: loki
spec:
interval: 30m
timeout: 15m
chart:
spec:
chart: loki
version: 6.7.3
sourceRef:
kind: HelmRepository
name: grafana
namespace: flux-system
install:
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
strategy: uninstall
retries: 3
valuesFrom:
- targetPath: loki.storage.bucketNames.chunks
kind: Secret
name: loki-secret
valuesKey: S3_BUCKET
- targetPath: loki.storage.s3.endpoint
kind: Secret
name: loki-secret
valuesKey: S3_HOST
- targetPath: loki.storage.s3.region
kind: Secret
name: loki-secret
valuesKey: S3_REGION
- targetPath: loki.storage.s3.accessKeyId
kind: Secret
name: loki-secret
valuesKey: S3_ACCESS_KEY
- targetPath: loki.storage.s3.secretAccessKey
kind: Secret
name: loki-secret
valuesKey: S3_SECRET_KEY
values:
deploymentMode: SimpleScalable
loki:
podAnnotations:
secret.reloader.stakater.com/reload: loki-secret
ingester:
chunk_encoding: snappy
storage:
type: s3
s3:
s3ForcePathStyle: true
insecure: true
schemaConfig:
configs:
- from: "2024-04-01"
store: tsdb
object_store: s3
schema: v13
index:
prefix: loki_index_
period: 24h
structuredConfig:
auth_enabled: false
server:
log_level: info
http_listen_port: 3100
grpc_listen_port: 9095
grpc_server_max_recv_msg_size: 8388608
grpc_server_max_send_msg_size: 8388608
limits_config:
ingestion_burst_size_mb: 128
ingestion_rate_mb: 64
max_query_parallelism: 100
per_stream_rate_limit: 64M
per_stream_rate_limit_burst: 128M
reject_old_samples: true
reject_old_samples_max_age: 168h
retention_period: 30d
shard_streams:
enabled: true
split_queries_by_interval: 1h
query_scheduler:
max_outstanding_requests_per_tenant: 4096
frontend:
max_outstanding_per_tenant: 4096
ruler:
enable_api: true
enable_alertmanager_v2: true
alertmanager_url: http://alertmanager-operated.observability.svc.cluster.local:9093
storage:
type: local
local:
directory: /rules
rule_path: /rules/fake
analytics:
reporting_enabled: false
backend:
replicas: 1
persistence:
size: 20Gi
storageClass: openebs-hostpath
gateway:
replicas: 1
image:
registry: ghcr.io
ingress:
enabled: true
ingressClassName: internal-nginx
hosts:
- host: &host loki.jahanson.tech
paths:
- path: /
pathType: Prefix
tls:
- hosts: [*host]
read:
replicas: 1
write:
replicas: 1
persistence:
size: 20Gi
storageClass: openebs-hostpath
sidecar:
image:
repository: ghcr.io/kiwigrid/k8s-sidecar
rules:
searchNamespace: ALL
folder: /rules/fake
lokiCanary:
enabled: false
test:
enabled: false
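Each valuesFrom entry splices a key from loki-secret into the chart values at the given targetPath before Helm renders. For example, the S3_HOST entry is roughly equivalent to having written the following directly in values (endpoint taken from the ExternalSecret above):

    loki:
      storage:
        s3:
          endpoint: s3.hsn.dev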


@ -1,7 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./externalsecret.yaml
- ./helmrelease.yaml


@ -1,25 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: &app loki
namespace: flux-system
spec:
targetNamespace: observability
commonMetadata:
labels:
app.kubernetes.io/name: *app
dependsOn:
- name: external-secrets-stores
- name: openebs
- name: vector
path: ./kubernetes/apps/observability/loki/app
prune: true
sourceRef:
kind: GitRepository
name: homelab
wait: false
interval: 30m
retryInterval: 1m
timeout: 15m


@ -1,28 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: thanos
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: thanos-secret
creationPolicy: Owner
template:
engineVersion: v2
data:
S3_HOST: s3.hsn.dev
S3_BUCKET: "{{ .minio_thanos_bucket_name }}"
S3_ACCESS_KEY: "{{ .minio_thanos_access_key }}"
S3_SECRET_KEY: "{{ .minio_thanos_secret_key }}"
S3_REGION: us-east-1
dataFrom:
- extract:
key: Minio
rewrite:
- regexp:
source: "(.*)"
target: "minio_$1"


@ -1,120 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/helm.toolkit.fluxcd.io/helmrelease_v2beta2.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: thanos
spec:
interval: 30m
timeout: 15m
chart:
spec:
chart: thanos
version: 1.17.2
sourceRef:
kind: HelmRepository
name: stevehipwell
namespace: flux-system
install:
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
strategy: rollback
retries: 3
valuesFrom:
- targetPath: objstoreConfig.value.config.bucket
kind: Secret
name: thanos-secret
valuesKey: S3_BUCKET
- targetPath: objstoreConfig.value.config.endpoint
kind: Secret
name: thanos-secret
valuesKey: S3_HOST
- targetPath: objstoreConfig.value.config.region
kind: Secret
name: thanos-secret
valuesKey: S3_REGION
- targetPath: objstoreConfig.value.config.access_key
kind: Secret
name: thanos-secret
valuesKey: S3_ACCESS_KEY
- targetPath: objstoreConfig.value.config.secret_key
kind: Secret
name: thanos-secret
valuesKey: S3_SECRET_KEY
values:
objstoreConfig:
value:
type: s3
config:
insecure: false
additionalEndpoints:
- dnssrv+_grpc._tcp.kube-prometheus-stack-thanos-discovery.observability.svc.cluster.local
additionalReplicaLabels: ["__replica__"]
serviceMonitor:
enabled: true
compact:
enabled: true
extraArgs:
- --compact.concurrency=4
- --delete-delay=30m
- --retention.resolution-raw=14d
- --retention.resolution-5m=30d
- --retention.resolution-1h=60d
persistence: &persistence
enabled: true
storageClass: openebs-hostpath
size: 10Gi
query:
replicas: 1
extraArgs: ["--alert.query-url=https://thanos.jahanson.tech"]
queryFrontend:
enabled: true
replicas: 1
extraEnv: &extraEnv
- name: THANOS_CACHE_CONFIG
valueFrom:
configMapKeyRef:
name: &configMap thanos-cache-configmap
key: cache.yaml
extraArgs: ["--query-range.response-cache-config=$(THANOS_CACHE_CONFIG)"]
ingress:
enabled: true
ingressClassName: internal-nginx
hosts:
- &host thanos.jahanson.tech
tls:
- hosts: [*host]
podAnnotations: &podAnnotations
configmap.reloader.stakater.com/reload: *configMap
rule:
enabled: true
replicas: 1
extraArgs: ["--web.prefix-header=X-Forwarded-Prefix"]
alertmanagersConfig:
value: |-
alertmanagers:
- api_version: v2
static_configs:
- dnssrv+_http-web._tcp.alertmanager-operated.observability.svc.cluster.local
rules:
value: |-
groups:
- name: PrometheusWatcher
rules:
- alert: PrometheusDown
annotations:
summary: A Prometheus has disappeared from Prometheus target discovery
expr: absent(up{job="kube-prometheus-stack-prometheus"})
for: 5m
labels:
severity: critical
persistence: *persistence
storeGateway:
replicas: 1
extraEnv: *extraEnv
extraArgs: ["--index-cache.config=$(THANOS_CACHE_CONFIG)"]
persistence: *persistence
podAnnotations: *podAnnotations


@ -1,13 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./helmrelease.yaml
- ./externalsecret.yaml
configMapGenerator:
- name: thanos-cache-configmap
files:
- cache.yaml=./resources/cache.yml
generatorOptions:
disableNameSuffixHash: true


@ -1,5 +0,0 @@
---
type: REDIS
config:
addr: dragonfly.database.svc.cluster.local:6379
db: 1
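This small file is the Thanos cache backend. The configMapGenerator above packs it into thanos-cache-configmap under the key cache.yaml; the HelmRelease then exposes it to query-frontend and store-gateway via the THANOS_CACHE_CONFIG env var and passes it as --query-range.response-cache-config=$(THANOS_CACHE_CONFIG) and --index-cache.config=$(THANOS_CACHE_CONFIG). A sketch of the flow:

    cache.yml --configMapGenerator--> ConfigMap thanos-cache-configmap (key: cache.yaml)
              --configMapKeyRef-----> env THANOS_CACHE_CONFIG
              --extraArgs-----------> Thanos caches query responses and index entries in Dragonfly (db 1)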


@ -1,25 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: &app thanos
namespace: flux-system
spec:
targetNamespace: observability
commonMetadata:
labels:
app.kubernetes.io/name: *app
dependsOn:
- name: external-secrets-stores
- name: openebs
- name: dragonfly-operator
path: ./kubernetes/apps/observability/thanos/app
prune: true
sourceRef:
kind: GitRepository
name: homelab
wait: false
interval: 30m
retryInterval: 1m
timeout: 15m


@ -1,103 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2beta2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: vector-agent
spec:
interval: 30m
timeout: 15m
chart:
spec:
chart: app-template
version: 3.3.0
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
install:
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
strategy: rollback
dependsOn:
- name: vector-aggregator
namespace: observability
values:
controllers:
vector:
type: daemonset
strategy: RollingUpdate
annotations:
reloader.stakater.com/auto: "true"
containers:
app:
image:
repository: docker.io/timberio/vector
tag: 0.40.0-alpine@sha256:7a81fdd62e056321055a9e4bdec4073d752ecf68f4c192e676b85001721523c2
env:
PROCFS_ROOT: /host/proc
SYSFS_ROOT: /host/sys
VECTOR_SELF_NODE_NAME:
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
VECTOR_SELF_POD_NAME:
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
VECTOR_SELF_POD_NAMESPACE:
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
args: ["--config", "/etc/vector/vector.yaml"]
securityContext:
privileged: true
serviceAccount:
create: true
name: vector-agent
persistence:
config:
enabled: true
type: configMap
name: vector-agent-configmap
globalMounts:
- path: /etc/vector/vector.yaml
subPath: vector.yaml
readOnly: true
data:
type: emptyDir
globalMounts:
- path: /vector-data-dir
procfs:
type: hostPath
hostPath: /proc
hostPathType: Directory
globalMounts:
- path: /host/proc
readOnly: true
sysfs:
type: hostPath
hostPath: /sys
hostPathType: Directory
globalMounts:
- path: /host/sys
readOnly: true
var-lib:
type: hostPath
hostPath: /var/lib
hostPathType: Directory
globalMounts:
- readOnly: true
var-log:
type: hostPath
hostPath: /var/log
hostPathType: Directory
globalMounts:
- readOnly: true


@ -1,13 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./helmrelease.yaml
- ./rbac.yaml
configMapGenerator:
- name: vector-agent-configmap
files:
- vector.yaml=./resources/vector.yaml
generatorOptions:
disableNameSuffixHash: true


@ -1,22 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: vector-agent
rules:
- apiGroups: [""]
resources: ["namespaces", "nodes", "pods"]
verbs: ["list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: vector-agent
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: vector-agent
subjects:
- kind: ServiceAccount
name: vector-agent
namespace: observability


@ -1,25 +0,0 @@
---
data_dir: /vector-data-dir
sources:
kubernetes_source:
type: kubernetes_logs
use_apiserver_cache: true
pod_annotation_fields:
container_image: container_image
container_name: container_name
pod_labels: pod_labels
pod_name: pod_name
pod_annotations: ""
namespace_annotation_fields:
namespace_labels: ""
node_annotation_fields:
node_labels: ""
sinks:
kubernetes:
type: vector
compression: true
version: "2"
address: vector-aggregator.observability.svc.cluster.local:6010
inputs: ["kubernetes_source"]


@ -1,20 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: vector-aggregator
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: vector-aggregator-secret
template:
engineVersion: v2
data:
GEOIPUPDATE_ACCOUNT_ID: "{{ .account_id }}"
GEOIPUPDATE_LICENSE_KEY: "{{ .vector_license_key }}"
dataFrom:
- extract:
key: maxmind


@ -1,91 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2beta2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: &app vector-aggregator
spec:
interval: 30m
timeout: 15m
chart:
spec:
chart: app-template
version: 3.3.0
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
install:
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
strategy: rollback
values:
controllers:
vector-aggregator:
replicas: 1
strategy: RollingUpdate
annotations:
reloader.stakater.com/auto: "true"
initContainers:
init-geoip:
image:
repository: ghcr.io/maxmind/geoipupdate
tag: v7.0.1@sha256:80c57598a9ff552953e499cefc589cfe7b563d64262742ea42f2014251b557b0
env:
GEOIPUPDATE_EDITION_IDS: GeoLite2-City
GEOIPUPDATE_FREQUENCY: "0"
GEOIPUPDATE_VERBOSE: "1"
envFrom:
- secretRef:
name: vector-aggregator-secret
containers:
app:
image:
repository: docker.io/timberio/vector
tag: 0.40.0-alpine@sha256:7a81fdd62e056321055a9e4bdec4073d752ecf68f4c192e676b85001721523c2
args: ["--config", "/etc/vector/vector.yaml"]
pod:
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app.kubernetes.io/name: *app
service:
app:
controller: vector-aggregator
type: LoadBalancer
annotations:
external-dns.alpha.kubernetes.io/hostname: vector.jahanson.tech
io.cilium/lb-ipam-ips: 10.1.1.33
ports:
http:
port: 8686
journald:
port: 6000
kubernetes:
port: 6010
vyos:
port: 6020
persistence:
config:
enabled: true
type: configMap
name: vector-aggregator-configmap
globalMounts:
- path: /etc/vector/vector.yaml
subPath: vector.yaml
readOnly: true
data:
type: emptyDir
globalMounts:
- path: /vector-data-dir
geoip:
type: emptyDir
globalMounts:
- path: /usr/share/GeoIP


@ -1,13 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./externalsecret.yaml
- ./helmrelease.yaml
configMapGenerator:
- name: vector-aggregator-configmap
files:
- vector.yaml=./resources/vector.yaml
generatorOptions:
disableNameSuffixHash: true


@ -1,132 +0,0 @@
---
data_dir: /vector-data-dir
api:
enabled: true
address: 0.0.0.0:8686
enrichment_tables:
geoip_table:
type: geoip
path: /usr/share/GeoIP/GeoLite2-City.mmdb
#
# Sources
#
sources:
journald_source:
type: vector
version: "2"
address: 0.0.0.0:6000
kubernetes_source:
type: vector
version: "2"
address: 0.0.0.0:6010
vyos_source:
type: syslog
address: 0.0.0.0:6020
mode: tcp
#
# Transforms
#
transforms:
kubernetes_remap:
type: remap
inputs: ["kubernetes_source"]
source: |
# Standardize 'app' index
.custom_app_name = .pod_labels."app.kubernetes.io/name" || .pod_labels.app || .pod_labels."k8s-app" || "unknown"
# Drop pod_labels
del(.pod_labels)
# [63950.153039] [wan-local-default-D]IN=eth4 OUT= MAC=xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx SRC=xxx.xxx.xxx.xxx DST=xxx.xxx.xxx.xxx LEN=40 TOS=0x00 PREC=0x00 TTL=240 ID=60610 PROTO=TCP SPT=53451 DPT=2002 WINDOW=1024 RES=0x00 SYN URGP=0
vyos_firewall_route:
type: route
inputs: ["vyos_source"]
route:
firewall: |
.facility == "kern" && match!(.message, r'^\[(.*?)\].(.*)')
vyos_firewall_remap:
type: remap
inputs: ["vyos_firewall_route.firewall"]
source: |
# Parse firewall rule message
split_message, split_err = parse_regex(.message, r'^\[.*\].\[(?P<rule>.*?)\](?P<fields>.*)')
if split_err != null {
abort
}
# Extract separate fields from message
split_message.fields, split_err = strip_whitespace(split_message.fields)
if split_err != null {
abort
}
.message, parse_err = parse_key_value(split_message.fields, whitespace: "strict")
if parse_err != null {
abort
}
# Add more information about the triggered rule
.message.RULE, parse_err = parse_regex(split_message.rule, r'^ipv4-(?P<from_zone>\w+)-(?P<to_zone>\w+)-(?P<id>\w+)-(?P<action>\w+)$')
if parse_err != null {
abort
}
vyos_firewall_wan_route:
type: route
inputs: ["vyos_firewall_remap"]
route:
from_wan: .message.RULE.from_zone == "wan"
vyos_firewall_geoip_remap:
type: remap
inputs: ["vyos_firewall_wan_route.from_wan"]
source: |
.geoip = get_enrichment_table_record!(
"geoip_table", {
"ip": .message.SRC
}
)
#
# Sinks
#
sinks:
journald:
inputs: ["journald_source"]
type: loki
endpoint: http://loki-gateway.observability.svc.cluster.local
encoding: { codec: json }
out_of_order_action: accept
remove_label_fields: true
remove_timestamp: true
labels:
hostname: '{{ host }}'
kubernetes:
inputs: ["kubernetes_remap"]
type: loki
endpoint: http://loki-gateway.observability.svc.cluster.local
encoding: { codec: json }
out_of_order_action: accept
remove_label_fields: true
remove_timestamp: true
labels:
app: '{{ custom_app_name }}'
namespace: '{{ kubernetes.pod_namespace }}'
node: '{{ kubernetes.pod_node_name }}'
vyos:
inputs: ["vyos_source", "vyos_firewall_geoip_remap"]
type: loki
endpoint: http://loki-gateway.observability.svc.cluster.local
encoding: { codec: json }
out_of_order_action: accept
remove_label_fields: true
remove_timestamp: true
labels:
hostname: '{{ host }}'
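Tracing one hypothetical VyOS firewall message through the transforms above (rule tag, addresses, and facility are assumed for illustration):

    input (facility=kern):
      [12345.678] [ipv4-wan-local-10-D]IN=eth4 OUT= SRC=203.0.113.7 DST=192.0.2.10 PROTO=TCP SPT=53451 DPT=2002

    vyos_firewall_route       -> message matches ^\[(.*?)\], routed to the firewall output
    vyos_firewall_remap       -> .message.RULE = { from_zone: wan, to_zone: local, id: 10, action: D }
                                 .message.IN = eth4, .message.SRC = 203.0.113.7, .message.DPT = 2002, ...
    vyos_firewall_wan_route   -> from_zone == "wan", routed to from_wan
    vyos_firewall_geoip_remap -> .geoip = GeoLite2-City record looked up for 203.0.113.7
    vyos sink                 -> shipped to Loki with the hostname label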


@ -1,7 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./agent
- ./aggregator


@ -1,9 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
# Pre Flux-Kustomizations
- ./namespace.yaml
# Flux-Kustomizations
- ./openebs/ks.yaml


@ -1,8 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: openebs-system
annotations:
kustomize.toolkit.fluxcd.io/prune: disabled
volsync.backube/privileged-movers: "true"


@ -1,7 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./storageclass.yaml
- ./volumesnapshotclass.yaml


@ -1,16 +0,0 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: openebs-zfs
annotations:
storageclass.kubevirt.io/is-default-virt-class: "true"
storageclass.kubernetes.io/is-default-class: "true"
provisioner: zfs.csi.openebs.io
parameters:
recordsize: "128k"
compression: "off"
dedup: "off"
fstype: "zfs"
poolname: "nahar"
allowVolumeExpansion: true


@ -1,10 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/snapshot.storage.k8s.io/volumesnapshotclass_v1.json
kind: VolumeSnapshotClass
apiVersion: snapshot.storage.k8s.io/v1
metadata:
name: openebs-zfs
annotations:
snapshot.storage.kubernetes.io/is-default-class: "true"
driver: zfs.csi.openebs.io
deletionPolicy: Delete


@ -1,21 +0,0 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: &app openebs
namespace: flux-system
spec:
targetNamespace: openebs-system
commonMetadata:
labels:
app.kubernetes.io/name: *app
path: ./kubernetes/apps/openebs-system/openebs/app
prune: true
sourceRef:
kind: GitRepository
name: homelab
wait: false
interval: 30m
retryInterval: 1m
timeout: 5m


@ -1,26 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: rook-ceph-dashboard-password
stringData:
password: ENC[AES256_GCM,data:WWTt7SN6ssndLahsOA1gujEeGAM=,iv:YbHGNN+11wA/MLq9vFVM6v4mhPO58JmwXBDj0Qs7+Wk=,tag:5Xn0tqpiIiEt8ZWZHRTM3w==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1eqlaq205y5jre9hu5hvulywa7w3d4qyxwmafneamxcn7nejesedsf4q9g6
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAzb2ZpaDd0azNHNTJoUTB6
VVpKbm94ZEprSHplb2UrQnkzTzdGUEFjcGxBCnhxR1BwNmFIOExtMW5GRkVJWTl5
blQzSmZ0Tm5CWTk3N25nUUM0dFpKUTQKLS0tIEgwSHNlVXNRdHZvcE10VzExU0hE
L0dGK1lFd0ZSQ0lTcEdMNTBkSDJ6WWsKQuiJmRSLbvmgenlu4F2/CQYCCbZTtS/K
nz7NsY2om+mWMvPSvLAp1pOHDAdFW79ggQAiCyslDi9iOkaD8MOnxQ==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2024-01-16T23:22:39Z"
mac: ENC[AES256_GCM,data:djsWoz/MuUhEKsM03+iaGV/dZUjRAGkiBEz4hROi+rfNWeHLJG2/xXPSKYYgT3h7JOZGh2Gnz7NXiB7TuixlWrAfT2BUBzd+2o9/hzg3xQzLAjApSfZdyap6oafatKxZAR/JHBSw7s0saVNnop9d/DZK4c1Fb1qNKoTrnWqqrF8=,iv:oitjHdZl07CaoBtNtX/sOPLHu7AS/R4YE4TKBJKrUBw=,tag:Br8mBH+mATEwsLzSZmoVYg==,type:str]
pgp: []
encrypted_regex: ^(data|stringData)$
version: 3.8.1


@ -1,6 +0,0 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./helmrelease.yaml


@ -0,0 +1,27 @@
---
# yaml-language-server: $schema=https://ks.hsn.dev/external-secrets.io/externalsecret_v1beta1.json
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: vault
namespace: security
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: vault-secret
creationPolicy: Owner
data:
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
key: vault
property: AWS_SECRET_ACCESS_KEY
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
key: vault
property: AWS_ACCESS_KEY_ID
- secretKey: VAULT_AWSKMS_SEAL_KEY_ID
remoteRef:
key: vault
property: VAULT_AWSKMS_SEAL_KEY_ID


@ -0,0 +1,141 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: vault
spec:
interval: 30m
chart:
spec:
chart: vault
version: 0.28.1
sourceRef:
kind: HelmRepository
name: hashicorp
namespace: flux-system
install:
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
strategy: uninstall
values:
server:
image:
repository: public.ecr.aws/hashicorp/vault
tag: "1.17.5"
logLevel: "info"
logFormat: "json"
ingress:
enabled: true
ingressClassName: internal-nginx
hosts:
- host: &host "vault.jahanson.tech"
paths: []
tls:
- hosts:
- *host
service:
type: "ClusterIP"
port: &port 8200
targetPort: *port
# off until it's online for the first time
readinessProbe:
enabled: true
path: "/v1/sys/health?standbyok=true&sealedcode=204&uninitcode=204"
livenessProbe:
enabled: true
path: "/v1/sys/health?standbyok=true"
initialDelaySeconds: 60
# If you need to use an http path instead of the default exec
# path: /v1/sys/health?standbyok=true
# Port number on which readinessProbe will be checked.
port: *port
extraEnvironmentVars:
# This is required because these variables lose their values when the pod is upgraded, in my experience.
# Probably a Flux thing.
VAULT_CLUSTER_ADDR: http://$(HOSTNAME).vault-internal:8201
extraSecretEnvironmentVars:
- envName: AWS_SECRET_ACCESS_KEY
secretName: vault-secret
secretKey: AWS_SECRET_ACCESS_KEY
- envName: AWS_ACCESS_KEY_ID
secretName: vault-secret
secretKey: AWS_ACCESS_KEY_ID
- envName: VAULT_AWSKMS_SEAL_KEY_ID
secretName: vault-secret
secretKey: VAULT_AWSKMS_SEAL_KEY_ID
# These are defaults but explicitly set here for clarity.
dataStorage:
size: 4Gi
mountPath: /vault/data
storageClass: ceph-block
auditStorage:
enabled: true
size: 10Gi
mountPath: /vault/audit
storageClass: ceph-block
# We want high availability. If standalone is true it sets the storage backend to file
# and the max replicas can only be 1.
standalone:
enabled: false
ha:
enabled: true
# maxUnavailable will default to (n/2)-1 where n is the number of replicas
# so if you have 6 replicas, maxUnavailable will be 2 unless you set it specifically.
replicas: 3
config: ""
raft:
enabled: true
config: |
ui = true
listener "tcp" {
tls_disable = 1
address = "[::]:8200"
cluster_address = "[::]:8201"
# For prometheus!
telemetry {
unauthenticated_metrics_access = "true"
}
}
storage "raft" {
path = "/vault/data"
retry_join {
auto_join = "provider=k8s label_selector=\"app.kubernetes.io/name=vault,component=server\" namespace=\"security\""
auto_join_scheme = "http"
}
}
seal "awskms" {
region = "us-east-2"
}
service_registration "kubernetes" {}
statefulSet:
securityContext:
pod:
runAsUser: 568
runAsGroup: 568
runAsNonRoot: true
fsGroup: 568
fsGroupChangePolicy: OnRootMismatch
supplementalGroups: [10000]
container:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: false
capabilities:
drop:
- "ALL"
ui:
enabled: true
publishNotReadyAddresses: true
# The service should only contain selectors for the active Vault pod
activeVaultPodOnly: true
serviceType: "LoadBalancer"
externalPort: *port
targetPort: *port
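The seal "awskms" stanza above only pins the region; the KMS key id and AWS credentials arrive through the env vars injected from vault-secret (VAULT_AWSKMS_SEAL_KEY_ID, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY), which Vault's awskms seal reads at startup. The effective configuration is therefore equivalent to this sketch, with a hypothetical key ARN:

    seal "awskms" {
      region     = "us-east-2"
      kms_key_id = "arn:aws:kms:us-east-2:111111111111:key/example-key-id"
    }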

Some files were not shown because too many files have changed in this diff.