Kubernetes bootstrap

Joseph Hanson 2023-08-13 17:13:53 +00:00
parent d25d93469d
commit 15ec3fde97
192 changed files with 19105 additions and 0 deletions

.sops.yaml (new file, 15 lines added)

@ -0,0 +1,15 @@
---
creation_rules:
- path_regex: kubernetes/.*\.sops\.ya?ml
encrypted_regex: "^(data|stringData)$"
# Valinor
age: >-
age1f5pr5rss0n8z4d6r8kky9umr5g48tt03reuyjushw5kayrpsxvgsw80gsj
- path_regex: .*\.sops\.(env|ini|json|toml)
# Valinor
age: >-
age1f5pr5rss0n8z4d6r8kky9umr5g48tt03reuyjushw5kayrpsxvgsw80gsj
- path_regex: (ansible|terraform)/.*\.sops\.ya?ml
# Valinor
age: >-
age1f5pr5rss0n8z4d6r8kky9umr5g48tt03reuyjushw5kayrpsxvgsw80gsj
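
Any file matching these rules is encrypted for the Valinor age recipient by running sops --encrypt --in-place against it; because of encrypted_regex, only the data/stringData values are ciphered while metadata stays readable. A minimal sketch of a matching secret before encryption (path and values are hypothetical, not part of this commit):
# kubernetes/valinor/apps/example/app/secret.sops.yaml (hypothetical)
apiVersion: v1
kind: Secret
metadata:
  name: example-credentials
  namespace: default
stringData:
  password: changeme  # sops encrypts this value; keys outside data/stringData remain plaintext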

File diff suppressed because it is too large.


@ -0,0 +1,650 @@
# Copyright 2023 The Cockroach Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: cockroach-operator
name: cockroach-operator-sa
namespace: database
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: cockroach-operator-role
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- mutatingwebhookconfigurations
verbs:
- get
- patch
- update
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- patch
- update
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
resources:
- statefulsets/finalizers
verbs:
- get
- list
- watch
- apiGroups:
- apps
resources:
- statefulsets/scale
verbs:
- get
- update
- watch
- apiGroups:
- apps
resources:
- statefulsets/status
verbs:
- get
- patch
- update
- apiGroups:
- batch
resources:
- jobs
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- batch
resources:
- jobs/finalizers
verbs:
- get
- list
- watch
- apiGroups:
- batch
resources:
- jobs/status
verbs:
- get
- apiGroups:
- certificates.k8s.io
resources:
- certificatesigningrequests
verbs:
- create
- delete
- get
- list
- patch
- watch
- apiGroups:
- certificates.k8s.io
resources:
- certificatesigningrequests/approval
verbs:
- update
- apiGroups:
- certificates.k8s.io
resources:
- certificatesigningrequests/status
verbs:
- get
- patch
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- configmaps/status
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- list
- update
- apiGroups:
- ""
resources:
- pods
verbs:
- delete
- deletecollection
- get
- list
- apiGroups:
- ""
resources:
- pods/exec
verbs:
- create
- apiGroups:
- ""
resources:
- pods/log
verbs:
- get
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- services/finalizers
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services/status
verbs:
- get
- patch
- update
- apiGroups:
- crdb.cockroachlabs.com
resources:
- crdbclusters
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- crdb.cockroachlabs.com
resources:
- crdbclusters/finalizers
verbs:
- update
- apiGroups:
- crdb.cockroachlabs.com
resources:
- crdbclusters/status
verbs:
- get
- patch
- update
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/finalizers
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- get
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- policy
resources:
- poddisruptionbudgets/finalizers
verbs:
- get
- list
- watch
- apiGroups:
- policy
resources:
- poddisruptionbudgets/status
verbs:
- get
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
verbs:
- create
- get
- list
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- roles
verbs:
- create
- get
- list
- watch
- apiGroups:
- security.openshift.io
resources:
- securitycontextconstraints
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cockroach-operator-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cockroach-operator-role
subjects:
- kind: ServiceAccount
name: cockroach-operator-sa
namespace: database
---
apiVersion: v1
kind: Service
metadata:
labels:
control-plane: cockroach-operator
name: cockroach-operator-webhook-service
namespace: database
spec:
ports:
- port: 443
targetPort: 9443
selector:
app: cockroach-operator
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: cockroach-operator
name: cockroach-operator-manager
namespace: database
spec:
replicas: 1
selector:
matchLabels:
app: cockroach-operator
template:
metadata:
labels:
app: cockroach-operator
spec:
containers:
- args:
- -zap-log-level
- info
env:
- name: RELATED_IMAGE_COCKROACH_v20_1_4
value: cockroachdb/cockroach:v20.1.4
- name: RELATED_IMAGE_COCKROACH_v20_1_5
value: cockroachdb/cockroach:v20.1.5
- name: RELATED_IMAGE_COCKROACH_v20_1_8
value: cockroachdb/cockroach:v20.1.8
- name: RELATED_IMAGE_COCKROACH_v20_1_11
value: cockroachdb/cockroach:v20.1.11
- name: RELATED_IMAGE_COCKROACH_v20_1_12
value: cockroachdb/cockroach:v20.1.12
- name: RELATED_IMAGE_COCKROACH_v20_1_13
value: cockroachdb/cockroach:v20.1.13
- name: RELATED_IMAGE_COCKROACH_v20_1_15
value: cockroachdb/cockroach:v20.1.15
- name: RELATED_IMAGE_COCKROACH_v20_1_16
value: cockroachdb/cockroach:v20.1.16
- name: RELATED_IMAGE_COCKROACH_v20_1_17
value: cockroachdb/cockroach:v20.1.17
- name: RELATED_IMAGE_COCKROACH_v20_2_0
value: cockroachdb/cockroach:v20.2.0
- name: RELATED_IMAGE_COCKROACH_v20_2_1
value: cockroachdb/cockroach:v20.2.1
- name: RELATED_IMAGE_COCKROACH_v20_2_2
value: cockroachdb/cockroach:v20.2.2
- name: RELATED_IMAGE_COCKROACH_v20_2_3
value: cockroachdb/cockroach:v20.2.3
- name: RELATED_IMAGE_COCKROACH_v20_2_4
value: cockroachdb/cockroach:v20.2.4
- name: RELATED_IMAGE_COCKROACH_v20_2_5
value: cockroachdb/cockroach:v20.2.5
- name: RELATED_IMAGE_COCKROACH_v20_2_6
value: cockroachdb/cockroach:v20.2.6
- name: RELATED_IMAGE_COCKROACH_v20_2_8
value: cockroachdb/cockroach:v20.2.8
- name: RELATED_IMAGE_COCKROACH_v20_2_9
value: cockroachdb/cockroach:v20.2.9
- name: RELATED_IMAGE_COCKROACH_v20_2_10
value: cockroachdb/cockroach:v20.2.10
- name: RELATED_IMAGE_COCKROACH_v20_2_11
value: cockroachdb/cockroach:v20.2.11
- name: RELATED_IMAGE_COCKROACH_v20_2_12
value: cockroachdb/cockroach:v20.2.12
- name: RELATED_IMAGE_COCKROACH_v20_2_13
value: cockroachdb/cockroach:v20.2.13
- name: RELATED_IMAGE_COCKROACH_v20_2_14
value: cockroachdb/cockroach:v20.2.14
- name: RELATED_IMAGE_COCKROACH_v20_2_15
value: cockroachdb/cockroach:v20.2.15
- name: RELATED_IMAGE_COCKROACH_v20_2_16
value: cockroachdb/cockroach:v20.2.16
- name: RELATED_IMAGE_COCKROACH_v20_2_17
value: cockroachdb/cockroach:v20.2.17
- name: RELATED_IMAGE_COCKROACH_v20_2_18
value: cockroachdb/cockroach:v20.2.18
- name: RELATED_IMAGE_COCKROACH_v20_2_19
value: cockroachdb/cockroach:v20.2.19
- name: RELATED_IMAGE_COCKROACH_v21_1_0
value: cockroachdb/cockroach:v21.1.0
- name: RELATED_IMAGE_COCKROACH_v21_1_1
value: cockroachdb/cockroach:v21.1.1
- name: RELATED_IMAGE_COCKROACH_v21_1_2
value: cockroachdb/cockroach:v21.1.2
- name: RELATED_IMAGE_COCKROACH_v21_1_3
value: cockroachdb/cockroach:v21.1.3
- name: RELATED_IMAGE_COCKROACH_v21_1_4
value: cockroachdb/cockroach:v21.1.4
- name: RELATED_IMAGE_COCKROACH_v21_1_5
value: cockroachdb/cockroach:v21.1.5
- name: RELATED_IMAGE_COCKROACH_v21_1_6
value: cockroachdb/cockroach:v21.1.6
- name: RELATED_IMAGE_COCKROACH_v21_1_7
value: cockroachdb/cockroach:v21.1.7
- name: RELATED_IMAGE_COCKROACH_v21_1_9
value: cockroachdb/cockroach:v21.1.9
- name: RELATED_IMAGE_COCKROACH_v21_1_10
value: cockroachdb/cockroach:v21.1.10
- name: RELATED_IMAGE_COCKROACH_v21_1_11
value: cockroachdb/cockroach:v21.1.11
- name: RELATED_IMAGE_COCKROACH_v21_1_12
value: cockroachdb/cockroach:v21.1.12
- name: RELATED_IMAGE_COCKROACH_v21_1_13
value: cockroachdb/cockroach:v21.1.13
- name: RELATED_IMAGE_COCKROACH_v21_1_14
value: cockroachdb/cockroach:v21.1.14
- name: RELATED_IMAGE_COCKROACH_v21_1_15
value: cockroachdb/cockroach:v21.1.15
- name: RELATED_IMAGE_COCKROACH_v21_1_16
value: cockroachdb/cockroach:v21.1.16
- name: RELATED_IMAGE_COCKROACH_v21_1_17
value: cockroachdb/cockroach:v21.1.17
- name: RELATED_IMAGE_COCKROACH_v21_1_18
value: cockroachdb/cockroach:v21.1.18
- name: RELATED_IMAGE_COCKROACH_v21_1_19
value: cockroachdb/cockroach:v21.1.19
- name: RELATED_IMAGE_COCKROACH_v21_1_20
value: cockroachdb/cockroach:v21.1.20
- name: RELATED_IMAGE_COCKROACH_v21_1_21
value: cockroachdb/cockroach:v21.1.21
- name: RELATED_IMAGE_COCKROACH_v21_2_0
value: cockroachdb/cockroach:v21.2.0
- name: RELATED_IMAGE_COCKROACH_v21_2_1
value: cockroachdb/cockroach:v21.2.1
- name: RELATED_IMAGE_COCKROACH_v21_2_2
value: cockroachdb/cockroach:v21.2.2
- name: RELATED_IMAGE_COCKROACH_v21_2_3
value: cockroachdb/cockroach:v21.2.3
- name: RELATED_IMAGE_COCKROACH_v21_2_4
value: cockroachdb/cockroach:v21.2.4
- name: RELATED_IMAGE_COCKROACH_v21_2_5
value: cockroachdb/cockroach:v21.2.5
- name: RELATED_IMAGE_COCKROACH_v21_2_7
value: cockroachdb/cockroach:v21.2.7
- name: RELATED_IMAGE_COCKROACH_v21_2_8
value: cockroachdb/cockroach:v21.2.8
- name: RELATED_IMAGE_COCKROACH_v21_2_9
value: cockroachdb/cockroach:v21.2.9
- name: RELATED_IMAGE_COCKROACH_v21_2_10
value: cockroachdb/cockroach:v21.2.10
- name: RELATED_IMAGE_COCKROACH_v21_2_11
value: cockroachdb/cockroach:v21.2.11
- name: RELATED_IMAGE_COCKROACH_v21_2_12
value: cockroachdb/cockroach:v21.2.12
- name: RELATED_IMAGE_COCKROACH_v21_2_13
value: cockroachdb/cockroach:v21.2.13
- name: RELATED_IMAGE_COCKROACH_v21_2_14
value: cockroachdb/cockroach:v21.2.14
- name: RELATED_IMAGE_COCKROACH_v21_2_15
value: cockroachdb/cockroach:v21.2.15
- name: RELATED_IMAGE_COCKROACH_v21_2_16
value: cockroachdb/cockroach:v21.2.16
- name: RELATED_IMAGE_COCKROACH_v21_2_17
value: cockroachdb/cockroach:v21.2.17
- name: RELATED_IMAGE_COCKROACH_v22_1_0
value: cockroachdb/cockroach:v22.1.0
- name: RELATED_IMAGE_COCKROACH_v22_1_1
value: cockroachdb/cockroach:v22.1.1
- name: RELATED_IMAGE_COCKROACH_v22_1_2
value: cockroachdb/cockroach:v22.1.2
- name: RELATED_IMAGE_COCKROACH_v22_1_3
value: cockroachdb/cockroach:v22.1.3
- name: RELATED_IMAGE_COCKROACH_v22_1_4
value: cockroachdb/cockroach:v22.1.4
- name: RELATED_IMAGE_COCKROACH_v22_1_5
value: cockroachdb/cockroach:v22.1.5
- name: RELATED_IMAGE_COCKROACH_v22_1_7
value: cockroachdb/cockroach:v22.1.7
- name: RELATED_IMAGE_COCKROACH_v22_1_8
value: cockroachdb/cockroach:v22.1.8
- name: RELATED_IMAGE_COCKROACH_v22_1_10
value: cockroachdb/cockroach:v22.1.10
- name: RELATED_IMAGE_COCKROACH_v22_1_11
value: cockroachdb/cockroach:v22.1.11
- name: RELATED_IMAGE_COCKROACH_v22_1_12
value: cockroachdb/cockroach:v22.1.12
- name: RELATED_IMAGE_COCKROACH_v22_1_13
value: cockroachdb/cockroach:v22.1.13
- name: RELATED_IMAGE_COCKROACH_v22_1_14
value: cockroachdb/cockroach:v22.1.14
- name: RELATED_IMAGE_COCKROACH_v22_1_15
value: cockroachdb/cockroach:v22.1.15
- name: RELATED_IMAGE_COCKROACH_v22_1_16
value: cockroachdb/cockroach:v22.1.16
- name: RELATED_IMAGE_COCKROACH_v22_1_18
value: cockroachdb/cockroach:v22.1.18
- name: RELATED_IMAGE_COCKROACH_v22_1_20
value: cockroachdb/cockroach:v22.1.20
- name: RELATED_IMAGE_COCKROACH_v22_2_0
value: cockroachdb/cockroach:v22.2.0
- name: RELATED_IMAGE_COCKROACH_v22_2_1
value: cockroachdb/cockroach:v22.2.1
- name: RELATED_IMAGE_COCKROACH_v22_2_2
value: cockroachdb/cockroach:v22.2.2
- name: RELATED_IMAGE_COCKROACH_v22_2_3
value: cockroachdb/cockroach:v22.2.3
- name: RELATED_IMAGE_COCKROACH_v22_2_4
value: cockroachdb/cockroach:v22.2.4
- name: RELATED_IMAGE_COCKROACH_v22_2_5
value: cockroachdb/cockroach:v22.2.5
- name: RELATED_IMAGE_COCKROACH_v22_2_6
value: cockroachdb/cockroach:v22.2.6
- name: RELATED_IMAGE_COCKROACH_v22_2_7
value: cockroachdb/cockroach:v22.2.7
- name: RELATED_IMAGE_COCKROACH_v22_2_8
value: cockroachdb/cockroach:v22.2.8
- name: RELATED_IMAGE_COCKROACH_v22_2_9
value: cockroachdb/cockroach:v22.2.9
- name: RELATED_IMAGE_COCKROACH_v22_2_10
value: cockroachdb/cockroach:v22.2.10
- name: RELATED_IMAGE_COCKROACH_v23_1_0
value: cockroachdb/cockroach:v23.1.0
- name: RELATED_IMAGE_COCKROACH_v23_1_1
value: cockroachdb/cockroach:v23.1.1
- name: RELATED_IMAGE_COCKROACH_v23_1_2
value: cockroachdb/cockroach:v23.1.2
- name: RELATED_IMAGE_COCKROACH_v23_1_3
value: cockroachdb/cockroach:v23.1.3
- name: RELATED_IMAGE_COCKROACH_v23_1_4
value: cockroachdb/cockroach:v23.1.4
- name: OPERATOR_NAME
value: cockroachdb
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: cockroachdb/cockroach-operator:v2.11.0@sha256:be00a73a683e860bbcdca3998168f1f512fe300cd9c2e2bcaa88dab8a3afe85b
imagePullPolicy: IfNotPresent
name: cockroach-operator
resources:
requests:
cpu: 10m
memory: 32Mi
serviceAccountName: cockroach-operator-sa
---
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
creationTimestamp: null
name: cockroach-operator-mutating-webhook-configuration
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: cockroach-operator-webhook-service
namespace: database
path: /mutate-crdb-cockroachlabs-com-v1alpha1-crdbcluster
failurePolicy: Fail
name: mcrdbcluster.kb.io
rules:
- apiGroups:
- crdb.cockroachlabs.com
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- crdbclusters
sideEffects: None
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
creationTimestamp: null
name: cockroach-operator-validating-webhook-configuration
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: cockroach-operator-webhook-service
namespace: database
path: /validate-crdb-cockroachlabs-com-v1alpha1-crdbcluster
failurePolicy: Fail
name: vcrdbcluster.kb.io
rules:
- apiGroups:
- crdb.cockroachlabs.com
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- crdbclusters
sideEffects: None
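
The RBAC rules and webhooks above exist so the operator can reconcile CrdbCluster objects in the crdb.cockroachlabs.com/v1alpha1 group. A minimal sketch of such a cluster (node count and storage size are illustrative; no CrdbCluster is defined in this commit):
apiVersion: crdb.cockroachlabs.com/v1alpha1
kind: CrdbCluster
metadata:
  name: cockroachdb
  namespace: database
spec:
  nodes: 3
  dataStore:
    pvc:
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 10Gi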


@ -0,0 +1,7 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: database
resources:
- ./crdb-crd.yaml
- ./crdb-operator.yaml


@ -0,0 +1,14 @@
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-database-cockroachdb
namespace: flux-system
spec:
interval: 30m
path: "./kubernetes/valinor/apps/database/cockroachdb/app"
prune: false
sourceRef:
kind: GitRepository
name: valinor
wait: false # no flux ks dependents


@ -0,0 +1,46 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: cert-manager
namespace: cert-manager
spec:
interval: 30m
chart:
spec:
chart: cert-manager
version: v1.12.3
sourceRef:
kind: HelmRepository
name: jetstack
namespace: flux-system
interval: 30m
install:
crds: CreateReplace
upgrade:
crds: CreateReplace
values:
installCRDs: true
webhook:
enabled: true
extraArgs:
- --dns01-recursive-nameservers=1.1.1.1:53,9.9.9.9:53
- --dns01-recursive-nameservers-only
- --enable-certificate-owner-ref
replicaCount: 1
podDnsPolicy: "None"
podDnsConfig:
nameservers:
- "1.1.1.1"
- "9.9.9.9"
prometheus:
enabled: true
servicemonitor:
enabled: true
prometheusInstance: monitoring


@ -0,0 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: cert-manager
resources:
- ./helmrelease.yaml


@ -0,0 +1,22 @@
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: dnsimple-api-token
namespace: cert-manager
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: dnsimple-api-token
creationPolicy: Owner
data:
- secretKey: api-token
remoteRef:
key: DNSimple
property: cert-manager
- secretKey: letsencrypt-email
remoteRef:
key: DNSimple
property: letsencrypt-email


@ -0,0 +1,34 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: dnsimple-issuer
namespace: cert-manager
spec:
interval: 30m
chart:
spec:
chart: cert-manager-webhook-dnsimple
version: 0.0.6
interval: 30m
sourceRef:
kind: HelmRepository
name: jahanson
namespace: flux-system
values:
controller:
annotations:
reloader.stakater.com/auto: "true"
dnsimple:
token:
valueFrom:
secretKeyRef:
name: dnsimple-api-token
key: api-token
clusterIssuer:
email:
valueFrom:
secretKeyRef:
name: dnsimple-api-token
key: letsencrypt-email


@ -0,0 +1,21 @@
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-production
spec:
acme:
email: "joe@veri.dev"
preferredChain: ""
privateKeySecretRef:
name: letsencrypt-production
server: https://acme-v02.api.letsencrypt.org/directory
solvers:
- dns01:
webhook:
config:
tokenSecretRef:
key: api-token
name: dnsimple-api-token
solverName: dnsimple
groupName: acme.jahanson.com
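
Workloads request certificates from this issuer either through ingress-shim annotations or an explicit Certificate. A minimal sketch referencing the ClusterIssuer above (resource name and hostname are hypothetical):
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: example-tls
  namespace: default
spec:
  secretName: example-tls
  issuerRef:
    kind: ClusterIssuer
    name: letsencrypt-production
  dnsNames:
    - example.valinor.social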


@ -0,0 +1,20 @@
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
preferredChain: ""
privateKeySecretRef:
name: letsencrypt-staging
server: https://acme-staging-v02.api.letsencrypt.org/directory
solvers:
- dns01:
webhook:
config:
tokenSecretRef:
key: api-token
name: dnsimple-api-token
solverName: dnsimple
groupName: acme.jahanson.com


@ -0,0 +1,9 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: cert-manager
resources:
- ./externalsecret.yaml
- ./issuer-letsencrypt-prod.yaml
- ./issuer-letsencrypt-staging.yaml
- ./helmrelease.yaml


@ -0,0 +1,30 @@
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-cert-manager
namespace: flux-system
spec:
interval: 10m
path: "./kubernetes/valinor/apps/cert-manager/cert-manager/app"
prune: true
sourceRef:
kind: GitRepository
name: valinor
wait: true
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-cert-manager-issuers
namespace: flux-system
spec:
interval: 10m
path: "./kubernetes/valinor/apps/cert-manager/cert-manager/issuers"
prune: true
sourceRef:
kind: GitRepository
name: valinor
wait: true
dependsOn:
- name: cluster-apps-cert-manager


@ -0,0 +1,8 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
# Pre Flux-Kustomizations
- ./namespace.yaml
# Flux-Kustomizations
- ./cert-manager/ks.yaml


@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: cert-manager
labels:
kustomize.toolkit.fluxcd.io/prune: disabled


@ -0,0 +1,35 @@
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: cloudnative-pg
namespace: fediverse
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: cloudnative-pg-secret
creationPolicy: Owner
template:
engineVersion: v2
metadata:
labels:
cnpg.io/reload: "true"
data:
- secretKey: username
remoteRef:
key: cloudnative-pg
property: POSTGRES_SUPER_USER
- secretKey: password
remoteRef:
key: cloudnative-pg
property: POSTGRES_SUPER_PASS
- secretKey: aws-access-key-id
remoteRef:
key: cloudnative-pg
property: AWS_ACCESS_KEY_ID
- secretKey: aws-secret-access-key
remoteRef:
key: cloudnative-pg
property: AWS_SECRET_ACCESS_KEY


@ -0,0 +1,33 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: cloudnative-pg
namespace: fediverse
spec:
interval: 30m
chart:
spec:
chart: cloudnative-pg
version: 0.18.2
sourceRef:
kind: HelmRepository
name: cloudnative-pg
namespace: flux-system
maxHistory: 2
install:
createNamespace: true
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
uninstall:
keepHistory: false
values:
crds:
create: true
config:
data:
INHERITED_ANNOTATIONS: kyverno.io/ignore


@ -0,0 +1,17 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: fediverse
resources:
- ./externalsecret.yaml
- ./helmrelease.yaml
configMapGenerator:
- name: cloudnative-pg-dashboard
files:
- cloudnative-pg-dashboard.json=https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/docs/src/samples/monitoring/grafana-dashboard.json
generatorOptions:
disableNameSuffixHash: true
annotations:
kustomize.toolkit.fluxcd.io/substitute: disabled
labels:
grafana_dashboard: "true"


@ -0,0 +1,42 @@
---
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: postgres
namespace: fediverse
annotations:
kyverno.io/ignore: "true"
spec:
instances: 3
imageName: ghcr.io/cloudnative-pg/postgresql:14.8-6
inheritedMetadata:
labels:
kube-image-keeper.enix.io/image-caching-policy: ignore
primaryUpdateStrategy: unsupervised
storage:
size: 60Gi
storageClass: ceph-block
superuserSecret:
name: cloudnative-pg-secret
postgresql:
parameters:
max_connections: "600"
shared_buffers: 512MB
monitoring:
enablePodMonitor: true
backup:
retentionPolicy: 30d
barmanObjectStore:
wal:
compression: bzip2
maxParallel: 8
destinationPath: s3://valinor-cnpg/
endpointURL: https://${SECRET_CLOUDFLARE_ACCOUNT_ID}.r2.cloudflarestorage.com
serverName: postgres-v3
s3Credentials:
accessKeyId:
name: cloudnative-pg-secret
key: aws-access-key-id
secretAccessKey:
name: cloudnative-pg-secret
key: aws-secret-access-key
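
Besides the weekly ScheduledBackup defined alongside this cluster, an ad-hoc backup to the same barmanObjectStore can be requested with a Backup resource; a minimal sketch (resource name is hypothetical):
apiVersion: postgresql.cnpg.io/v1
kind: Backup
metadata:
  name: postgres-manual
  namespace: fediverse
spec:
  cluster:
    name: postgres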


@ -0,0 +1,9 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: fediverse
resources:
- ./cluster.yaml
- ./scheduledbackup.yaml
- ./prometheusrule.yaml
# - ./service.yaml


@ -0,0 +1,67 @@
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: cloudnative-pg-rules
namespace: fediverse
labels:
prometheus: k8s
role: alert-rules
spec:
groups:
- name: cloudnative-pg.rules
rules:
- alert: LongRunningTransaction
annotations:
description: Pod {{ $labels.pod }} is taking more than 5 minutes (300 seconds) for a query.
summary: A query is taking longer than 5 minutes.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
- alert: BackendsWaiting
annotations:
description: Pod {{ $labels.pod }} has been waiting for longer than 5 minutes
summary: A backend has been waiting for longer than 5 minutes
expr: |-
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
- alert: PGDatabase
annotations:
description: Over 150,000,000 transactions from frozen xid on pod {{ $labels.pod }}
summary: Number of transactions from the frozen XID to the current one
expr: |-
cnpg_pg_database_xid_age > 150000000
for: 1m
labels:
severity: warning
- alert: PGReplication
annotations:
description: Standby is lagging behind by over 300 seconds (5 minutes)
summary: The standby is lagging behind the primary
expr: |-
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
- alert: LastFailedArchiveTime
annotations:
description: Archiving failed for {{ $labels.pod }}
summary: Checks the last time WAL archiving failed; the metric is -1 when archiving has never failed.
expr: |-
delta(cnpg_pg_stat_archiver_last_failed_time[5m]) > 0
for: 1m
labels:
severity: warning
- alert: DatabaseDeadlockConflicts
annotations:
description: There are over 10 deadlock conflicts in {{ $labels.pod }}
summary: Checks the number of database conflicts
expr: |-
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning


@ -0,0 +1,12 @@
---
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: postgres
namespace: fediverse
spec:
schedule: "@weekly"
immediate: true
backupOwnerReference: self
cluster:
name: postgres


@ -0,0 +1,35 @@
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-cloudnative-pg
namespace: flux-system
spec:
path: ./kubernetes/valinor/apps/database/cloudnative-pg/app
prune: true
sourceRef:
kind: GitRepository
name: valinor
wait: true
interval: 30m
retryInterval: 1m
timeout: 5m
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-cloudnative-pg-cluster
namespace: flux-system
spec:
dependsOn:
- name: cluster-apps-cloudnative-pg
- name: cluster-apps-external-secrets-stores
path: ./kubernetes/valinor/apps/database/cloudnative-pg/cluster
prune: true
sourceRef:
kind: GitRepository
name: valinor
wait: true
interval: 30m
retryInterval: 1m
timeout: 5m


@ -0,0 +1,59 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: dragonfly-valinor
namespace: fediverse
spec:
interval: 30m
chart:
spec:
chart: dragonfly
version: v1.7.1
interval: 30m
sourceRef:
kind: HelmRepository
name: dragonflydb
namespace: flux-system
values:
replicaCount: 1
controller:
annotations:
reloader.stakater.com/auto: "true"
podSecurityContext:
runAsUser: 568
runAsGroup: 568
fsGroup: 568
fsGroupChangePolicy: "OnRootMismatch"
supplementalGroups:
- 65539
storage:
enabled: true
requests: 128Mi # Set as desired
resources:
requests:
cpu: 1000m
memory: 6Gi
limits:
memory: 8Gi
extraArgs:
- --dbfilename=dump
- --save_schedule=*:* # HH:MM glob format
serviceMonitor:
enabled: true
prometheusRule:
enabled: true
spec:
- alert: DragonflyMissing
expr: absent(dragonfly_uptime_in_seconds) == 1
for: 0m
labels:
severity: critical
annotations:
summary: Dragonfly is missing
description: "Dragonfly is missing"


@ -0,0 +1,16 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: fediverse
resources:
- ./helmrelease.yaml
configMapGenerator:
- name: dragonflydb-dashboard
files:
- dragonflydb-dashboard.json=https://raw.githubusercontent.com/dragonflydb/dragonfly/main/tools/local/monitoring/grafana/provisioning/dashboards/dashboard.json
generatorOptions:
disableNameSuffixHash: true
annotations:
kustomize.toolkit.fluxcd.io/substitute: disabled
labels:
grafana_dashboard: "true"


@ -0,0 +1,14 @@
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-database-dragonflydb
namespace: flux-system
spec:
interval: 30m
path: "./kubernetes/valinor/apps/database/dragonflydb/app"
prune: false
sourceRef:
kind: GitRepository
name: valinor
wait: false # no flux ks dependents


@ -0,0 +1,9 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
# Pre Flux-Kustomizations
- ./namespace.yaml
# Flux-Kustomizations
- ./cloudnative-pg/ks.yaml
- ./dragonflydb/ks.yaml


@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: database
labels:
kustomize.toolkit.fluxcd.io/prune: disabled


@ -0,0 +1,17 @@
apiVersion: v1
kind: Pod
metadata:
name: rocky-linux
namespace: default
spec:
containers:
- name: rocky
image: rockylinux:9
command: ["/bin/bash", "-c", "while true; do sleep 10; done"]
resources:
requests:
cpu: 50m
memory: 443M
limits:
cpu: 323m
memory: 886M


@ -0,0 +1,42 @@
apiVersion: v1
kind: Pod
metadata:
name: ubuntu-server
namespace: default
spec:
# serviceAccount: tailscale
containers:
- name: ubuntu
image: ubuntu:latest@sha256:0bced47fffa3361afa981854fcabcd4577cd43cebbb808cea2b1f33a3dd7f508
command: ["/bin/bash", "-c", "while true; do sleep 10; done"]
resources:
requests:
cpu: 50m
memory: 443M
limits:
cpu: 323m
memory: 886M
# - name: tailscale
# imagePullPolicy: Always
# image: "ghcr.io/tailscale/tailscale:v1.42.0"
# env:
# - name: TS_KUBE_SECRET
# value: "tailscale-state"
# - name: TS_USERSPACE
# value: "false"
# - name: TS_EXTRA_ARGS
# value: "--accept-routes"
# envFrom:
# - secretRef:
# name: tailscale-auth
# resources:
# requests:
# cpu: 50m
# memory: 50Mi
# limits:
# cpu: 100m
# memory: 128Mi
# securityContext:
# capabilities:
# add:
# - NET_ADMIN


@ -0,0 +1,8 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
# Pre Flux-Kustomizations
- ./namespace.yaml
# Flux-Kustomizations
- ./operator/ks.yaml


@ -0,0 +1,8 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: elastic
labels:
goldilocks.fairwinds.com/enabled: "true"
kustomize.toolkit.fluxcd.io/prune: disabled


@ -0,0 +1,25 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: eck-operator
namespace: elastic
spec:
releaseName: eck-operator
interval: 1h
chart:
spec:
chart: eck-operator
version: 2.9.0
sourceRef:
kind: HelmRepository
name: elastic
namespace: flux-system
interval: 1h
install:
createNamespace: true
upgrade:
crds: CreateReplace
values:
config:
logVerbosity: "-1"


@ -0,0 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: elastic
resources:
- ./helmrelease.yaml


@ -0,0 +1,21 @@
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-elastic-operator
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
path: ./kubernetes/valinor/apps/elastic/operator/app
prune: false
sourceRef:
kind: GitRepository
name: valinor
healthChecks:
- apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
name: eck-operator
namespace: elastic
interval: 30m
retryInterval: 1m
timeout: 3m


@ -0,0 +1,33 @@
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: elk
namespace: fediverse
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: elk-secret
creationPolicy: Owner
template:
engineVersion: v2
data:
NUXT_CLOUDFLARE_API_TOKEN: "{{ .cloudflare_kv_storage_apikey }}"
NUXT_CLOUDFLARE_ACCOUNT_ID: "{{ .cloudflare_account_id }}"
NUXT_CLOUDFLARE_NAMESPACE_ID: "{{ .cloudflare_kv_storage_namespace }}"
NUXT_STORAGE_DRIVER: "cloudflare"
data:
- secretKey: cloudflare_kv_storage_apikey
remoteRef:
key: cloudflare
property: elk_kv_storage_apikey
- secretKey: cloudflare_account_id
remoteRef:
key: cloudflare
property: account_id
- secretKey: cloudflare_kv_storage_namespace
remoteRef:
key: cloudflare
property: elk_kv_storage_namespace


@ -0,0 +1,75 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: elk
namespace: fediverse
spec:
interval: 30m
chart:
spec:
chart: app-template
version: 1.5.1
interval: 30m
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
values:
controller:
annotations:
reloader.stakater.com/auto: "true"
image:
repository: ghcr.io/elk-zone/elk
tag: v0.10.0@sha256:8cbc1d4627de0f9c81e1fa08bcbff251d42b43ea36f443e748eda25cdef1ee23
env:
NUXT_PUBLIC_DEFAULT_SERVER: "valinor.social"
# envFrom:
# - secretRef:
# name: elk-secret
podSecurityContext:
runAsUser: 911
runAsGroup: 911
fsGroup: 911
fsGroupChangePolicy: "OnRootMismatch"
supplementalGroups:
- 65539
service:
main:
ports:
http:
port: 5314
ingress:
main:
enabled: true
ingressClassName: "nginx"
annotations:
external-dns.alpha.kubernetes.io/target: ingress.valinor.social
nginx.ingress.kubernetes.io/ssl-redirect: "true"
hosts:
- host: &host "elk.valinor.social"
paths:
- path: /
tls:
- hosts:
- *host
persistence:
config:
enabled: true
existingClaim: elk-config
mountPath: /elk/data
resources:
requests:
cpu: 15m
memory: 64M
limits:
cpu: 46m
memory: 154M


@ -0,0 +1,8 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: fediverse
resources:
- ./helmrelease.yaml
- ./externalsecret.yaml
- ./pvc.yaml


@ -0,0 +1,15 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: elk-config
namespace: fediverse
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 256Mi
storageClassName: ceph-block


@ -0,0 +1,18 @@
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-elk
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
interval: 10m
path: "./kubernetes/valinor/apps/fediverse/elk/app"
prune: true
sourceRef:
kind: GitRepository
name: valinor
wait: true
dependsOn:
- name: cluster-apps-external-secrets-stores


@ -0,0 +1,9 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
# Pre Flux-Kustomizations
- ./namespace.yaml
# Flux-Kustomizations
- ./elk/ks.yaml
- ./mastodon/ks.yaml


@ -0,0 +1,15 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: fediverse
configMapGenerator:
# Ref: https://grafana.com/grafana/dashboards/17492
- name: mastodon-dashboard
files:
- mastodon-stats_rev5.json
generatorOptions:
disableNameSuffixHash: true
annotations:
kustomize.toolkit.fluxcd.io/substitute: disabled
labels:
grafana_dashboard: "true"

File diff suppressed because it is too large.


@ -0,0 +1,5 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./dashboard


@ -0,0 +1,41 @@
---
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
name: mastodon
namespace: fediverse
spec:
version: 7.17.9
http:
tls:
selfSignedCertificate:
disabled: true
nodeSets:
- name: default
count: 2
config:
node.store.allow_mmap: false
xpack.security.authc:
anonymous:
username: anonymous
roles: superuser
authz_exception: false
podTemplate:
spec:
containers:
- name: elasticsearch
resources:
requests:
cpu: 62m
memory: 512Mi
limits:
memory: 8Gi
volumeClaimTemplates:
- metadata:
name: elasticsearch-data # Do not change this name unless you set up a volume mount for the data path.
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 50Gi
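
With TLS disabled and anonymous superuser access enabled above, Mastodon can reach this cluster over plain HTTP without credentials. ECK exposes it through a service named <cluster>-es-http, so the wiring would look roughly like this (these values normally come from the mastodon-secret ExternalSecret; shown only as a sketch):
ES_ENABLED: "true"
ES_HOST: "mastodon-es-http"
ES_PORT: "9200"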


@ -0,0 +1,86 @@
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: mastodon
namespace: fediverse
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: mastodon-secret
creationPolicy: Owner
template:
engineVersion: v2
data:
DB_SSLMODE: "require"
LOCAL_DOMAIN: "valinor.social"
SINGLE_USER_MODE: "false"
SECRET_KEY_BASE: "{{ .mastodon_secret_key_base }}"
OTP_SECRET: "{{ .mastodon_otp_secret }}"
VAPID_PRIVATE_KEY: "{{ .mastodon_vapid_private_key }}"
VAPID_PUBLIC_KEY: "{{ .mastodon_vapid_public_key }}"
DB_HOST: "{{ .mastodon_db_host }}"
DB_USER: "{{ .mastodon_db_user }}"
DB_PORT: "{{ .mastodon_db_port }}"
DB_PASS: "{{ .mastodon_db_pass }}"
REDIS_URL: "{{ .mastodon_redis_url }}"
S3_ENABLED: "true"
S3_PROTOCOL: "https"
S3_ENDPOINT: "{{ .s3_valinor_endpoint }}"
S3_HOSTNAME: "{{ .s3_valinor_hostname }}"
S3_BUCKET: "{{ .s3_valinor_bucket }}"
S3_ALIAS_HOST: "{{ .mastodon_s3_alias_host }}"
S3_PERMISSION: "private"
AWS_ACCESS_KEY_ID: "{{ .s3_valinor_access_key }}"
AWS_SECRET_ACCESS_KEY: "{{ .s3_valinor_secret_key }}"
SMTP_SERVER: "smtp.mailgun.org"
SMTP_PORT: "587"
SMTP_LOGIN: "{{ .mailgun_smtp_user }}"
SMTP_PASSWORD: "{{ .mailgun_smtp_password }}"
SMTP_AUTH_METHOD: "plain"
SMTP_OPENSSL_VERIFY_MODE: "peer"
SMTP_ENABLE_STARTTLS: "auto"
SMTP_FROM_ADDRESS: "Mastodon <notifications@valinor.social>"
DB_POOL: "25"
DEEPL_PLAN: "free"
DEEPL_API_KEY: "{{ .deepl_api_key }}"
ES_ENABLED: "{{ .mastodon_es_enabled }}"
ES_HOST: "{{ .mastodon_es_host }}"
ES_PORT: "{{ .mastodon_es_port }}"
STATSD_ADDR: "statsd-exporter.fediverse.svc.cluster.local:9125"
CP_DB_PORT: "{{ .mastodon_cp_db_port }}"
CP_SIDEKIQ_LOW_VOLUME: "{{ .mastodon_db_name_cp_sidekiq_low_volume }}"
CP_SIDEKIQ_HIGH_PRIORITY: "{{ .mastodon_db_name_cp_sidekiq_high_priority }}"
CP_SIDEKIQ_INGRESS: "{{ .mastodon_db_name_cp_sidekiq_ingress }}"
CP_SIDEKIQ_PULL: "{{ .mastodon_db_name_cp_sidekiq_pull }}"
CP_MASTODON_WEB: "{{ .mastodon_db_name_cp_mastodon_web }}"
CP_MASTODON_STREAMING: "{{ .mastodon_db_name_cp_mastodon_streaming }}"
dataFrom:
- extract:
key: s3
rewrite:
- regexp:
source: "(.*)"
target: "s3_$1"
- extract:
key: mastodon
rewrite:
- regexp:
source: "(.*)"
target: "mastodon_$1"
data:
- secretKey: mailgun_smtp_user
remoteRef:
key: mailgun
property: mastodon_smtp_user
- secretKey: mailgun_smtp_password
remoteRef:
key: mailgun
property: mastodon_smtp_password
- secretKey: deepl_api_key
remoteRef:
key: deepl
property: api_key
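
The dataFrom rewrite rules prefix every field extracted from the "s3" and "mastodon" 1Password items with s3_ and mastodon_ respectively, which is what the template above references. Two illustrative mappings (field names follow from the template above):
# item "s3", field "valinor_endpoint"  ->  template variable .s3_valinor_endpoint
# item "mastodon", field "db_host"     ->  template variable .mastodon_db_host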


@ -0,0 +1,31 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: mastodon-ingress
namespace: fediverse
annotations:
nginx.org/websocket-services: "mastodon-streaming"
spec:
ingressClassName: "nginx"
tls:
- hosts:
- &host valinor.social
rules:
- host: *host
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: mastodon-web
port:
number: 3000
- path: /api/v1/streaming
pathType: Prefix
backend:
service:
name: mastodon-streaming
port:
number: 4000


@ -0,0 +1,8 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: fediverse
resources:
- ./ingress.yaml
- ./externalsecret.yaml
- ./elasticsearch.yaml


@ -0,0 +1,51 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app mastodon-sidekiq-high-priority
namespace: fediverse
labels:
app: mastodon
component: sidekiq
spec:
interval: 30m
chart:
spec:
chart: app-template
version: 1.5.1
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
values:
controller:
replicas: 1 # Cannot be more than one replica unless you split out the scheduler queue.
strategy: Recreate # We don't want the old pod to keep running while the new one is being created.
annotations:
reloader.stakater.com/auto: "true"
labels:
app: mastodon
component: sidekiq
image:
repository: ghcr.io/mastodon/mastodon
tag: v4.1.6@sha256:51405bec6529860c88640bbca3d30bf3a9a82456a3fdae122dc52f4f941808ff
command: ["bundle", "exec", "sidekiq", "-c", "25", "-q", "default"]
envFrom:
- secretRef:
name: mastodon-secret
env:
DB_NAME: "$(CP_SIDEKIQ_HIGH_PRIORITY)"
DB_PORT: "${CP_DB_PORT}"
PREPARED_STATEMENTS: "false" # necessary for pgbouncer
service:
main:
enabled: false
# nodeSelector:
# doks.digitalocean.com/node-pool: pool-valinor
resources:
requests:
cpu: 1000m
memory: 250M
limits:
cpu: 3000m
memory: 972M


@ -0,0 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: fediverse
resources:
- ./helmrelease.yaml


@ -0,0 +1,63 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app mastodon-sidekiq-low-volume
namespace: fediverse
labels:
app: mastodon
component: sidekiq
spec:
interval: 30m
chart:
spec:
chart: app-template
version: 1.5.1
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
values:
controller:
replicas: 1
strategy: RollingUpdate
annotations:
reloader.stakater.com/auto: "true"
labels:
app: mastodon
component: sidekiq
image:
repository: ghcr.io/mastodon/mastodon
tag: v4.1.6@sha256:51405bec6529860c88640bbca3d30bf3a9a82456a3fdae122dc52f4f941808ff
command:
[
"bundle",
"exec",
"sidekiq",
"-c",
"25",
"-q",
"push",
"-q",
"mailers",
"-q",
"scheduler",
]
envFrom:
- secretRef:
name: mastodon-secret
env:
DB_NAME: "$(CP_SIDEKIQ_LOW_VOLUME)"
DB_PORT: "${CP_DB_PORT}"
PREPARED_STATEMENTS: "false" # necessary for pgbouncer
service:
main:
enabled: false
# nodeSelector:
# doks.digitalocean.com/node-pool: pool-valinor
resources:
requests:
cpu: 500m
memory: 250M
limits:
memory: 972M


@ -0,0 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: fediverse
resources:
- ./helmrelease.yaml


@ -0,0 +1,51 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app mastodon-sidekiq-remote-ingress
namespace: fediverse
labels:
app: mastodon
component: sidekiq
spec:
interval: 30m
chart:
spec:
chart: app-template
version: 1.5.1
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
values:
controller:
replicas: 1
# My default nodes have 2 CPUs, so it seems easier to scale nodes in the panel
# rather than messing with tolerations and affinities. The only reason I can
# think of to scale nodes is to scale sidekiq.
strategy: RollingUpdate
annotations:
reloader.stakater.com/auto: "true"
labels:
app: mastodon
component: sidekiq
image:
repository: ghcr.io/mastodon/mastodon
tag: v4.1.6@sha256:51405bec6529860c88640bbca3d30bf3a9a82456a3fdae122dc52f4f941808ff
command: ["bundle", "exec", "sidekiq", "-c", "25", "-q", "ingress"]
envFrom:
- secretRef:
name: mastodon-secret
env:
DB_NAME: "$(CP_SIDEKIQ_INGRESS)"
DB_PORT: "${CP_DB_PORT}"
PREPARED_STATEMENTS: "false" # necessary for pgbouncer
service:
main:
enabled: false
# nodeSelector:
# doks.digitalocean.com/node-pool: pool-sidekiq
resources:
requests:
cpu: 2000m
memory: 800Mi


@ -0,0 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: fediverse
resources:
- ./helmrelease.yaml


@ -0,0 +1,48 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app mastodon-sidekiq-remote-pull
namespace: fediverse
labels:
app: mastodon
component: sidekiq
spec:
interval: 30m
chart:
spec:
chart: app-template
version: 1.5.1
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
values:
controller:
replicas: 1
strategy: RollingUpdate
annotations:
reloader.stakater.com/auto: "true"
labels:
app: mastodon
component: sidekiq
image:
repository: ghcr.io/mastodon/mastodon
tag: v4.1.6@sha256:51405bec6529860c88640bbca3d30bf3a9a82456a3fdae122dc52f4f941808ff
command: ["bundle", "exec", "sidekiq", "-c", "25", "-q", "pull"]
envFrom:
- secretRef:
name: mastodon-secret
env:
DB_NAME: "$(CP_SIDEKIQ_PULL)"
DB_PORT: "${CP_DB_PORT}"
PREPARED_STATEMENTS: "false" # necessary for pgbouncer
service:
main:
enabled: false
# nodeSelector:
# doks.digitalocean.com/node-pool: pool-sidekiq
resources:
requests:
cpu: 2000m
memory: 800Mi


@ -0,0 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: fediverse
resources:
- ./helmrelease.yaml


@ -0,0 +1,75 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: &app mastodon-streaming
namespace: fediverse
labels:
app: mastodon
component: streaming
spec:
interval: 30m
chart:
spec:
chart: app-template
version: 1.5.1
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
values:
controller:
replicas: 2
strategy: Recreate
annotations:
reloader.stakater.com/auto: "true"
labels:
app: mastodon
component: streaming
image:
repository: ghcr.io/mastodon/mastodon
tag: v4.1.6@sha256:51405bec6529860c88640bbca3d30bf3a9a82456a3fdae122dc52f4f941808ff
command: [
"node",
"./streaming"
]
envFrom:
- secretRef:
name: mastodon-secret
env:
NODE_TLS_REJECT_UNAUTHORIZED: "0"
DB_SSLMODE: "no-verify"
DB_NAME: "$(CP_MASTODON_STREAMING)"
DB_PORT: "$(DB_PORT)"
PREPARED_STATEMENTS: "false" # necessary for pgbouncer
livenessProbe:
httpGet:
path: /api/v1/streaming/health
port: streaming
readinessProbe:
httpGet:
path: /api/v1/streaming/health
port: streaming
resources:
requests:
cpu: 15m
memory: 50Mi
# limits:
# cpu: 500m
# memory: 250Mi
service:
main:
ports:
http:
enabled: false
streaming:
port: 4000
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values: ["mastodon-streaming"]
topologyKey: kubernetes.io/hostname


@ -0,0 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: fediverse
resources:
- ./helmrelease.yaml


@ -0,0 +1,77 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: mastodon-web
namespace: fediverse
labels:
app: mastodon
component: web
spec:
interval: 30m
chart:
spec:
chart: app-template
version: 1.5.1
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
values:
controller:
replicas: 2
strategy: Recreate
annotations:
reloader.stakater.com/auto: "true"
labels:
app: mastodon
component: web
image:
repository: ghcr.io/mastodon/mastodon
tag: v4.1.6@sha256:51405bec6529860c88640bbca3d30bf3a9a82456a3fdae122dc52f4f941808ff
command:
- bundle
- exec
- puma
- -C
- config/puma.rb
envFrom:
- secretRef:
name: mastodon-secret
env:
DB_NAME: "$(CP_MASTODON_WEB)"
DB_PORT: "$(DB_PORT)"
PREPARED_STATEMENTS: "false" # necessary for pgbouncer
resources:
requests:
cpu: 15m
memory: 500Mi
# limits:
# memory: 1Gi
service:
main:
ports:
http:
port: 3000
livenessProbe:
tcpSocket:
port: http
readinessProbe:
httpGet:
path: /health
port: http
startupProbe:
httpGet:
path: /health
port: http
failureThreshold: 30
periodSeconds: 5
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values: ["mastodon-web"]
topologyKey: kubernetes.io/hostname


@ -0,0 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: fediverse
resources:
- ./helmrelease.yaml


@ -0,0 +1,56 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: statsd-exporter
namespace: fediverse
spec:
interval: 30m
chart:
spec:
chart: app-template
version: 1.5.1
interval: 30m
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
values:
controller:
annotations:
reloader.stakater.com/auto: "true"
command: ["/bin/sh", "-c", "statsd_exporter --statsd.mapping-config=/config/mastodon-mapping.yaml"]
image:
repository: docker.io/prom/statsd-exporter
tag: v0.24.0@sha256:61d866e93b56c7d5c69ae5ba5ce4f8a16a98f4b13985ad3385bd8e0b2371126e
service:
main:
ports:
http:
port: 9125
enabled: true
primary: true
protocol: TCP
api:
enabled: true
port: 9102
persistence:
config:
enabled: true
type: configMap
name: statsd-configmap
subPath: mastodon-mapping.yaml
mountPath: /config/mastodon-mapping.yaml
readOnly: true
resources:
requests:
cpu: 15m
memory: 64M
limits:
cpu: 46m
memory: 270M


@ -0,0 +1,12 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: fediverse
resources:
- ./helmrelease.yaml
configMapGenerator:
- name: statsd-configmap
files:
- mappings/mastodon-mapping.yaml
generatorOptions:
disableNameSuffixHash: true


@ -0,0 +1,95 @@
## Prometheus Statsd Exporter mapping for Mastodon 4.0+
##
## Version 1.0, November 2022
##
## Documentation: https://ipng.ch/s/articles/2022/11/27/mastodon-3.html
mappings:
## Web collector
- match: Mastodon\.production\.web\.(.+)\.(.+)\.(.+)\.status\.(.+)
match_type: regex
name: "mastodon_controller_status"
labels:
controller: $1
action: $2
format: $3
status: $4
mastodon: "web"
- match: Mastodon\.production\.web\.(.+)\.(.+)\.(.+)\.db_time
match_type: regex
name: "mastodon_controller_db_time"
labels:
controller: $1
action: $2
format: $3
mastodon: "web"
- match: Mastodon\.production\.web\.(.+)\.(.+)\.(.+)\.view_time
match_type: regex
name: "mastodon_controller_view_time"
labels:
controller: $1
action: $2
format: $3
mastodon: "web"
- match: Mastodon\.production\.web\.(.+)\.(.+)\.(.+)\.total_duration
match_type: regex
name: "mastodon_controller_duration"
labels:
controller: $1
action: $2
format: $3
mastodon: "web"
## Database collector
- match: Mastodon\.production\.db\.tables\.(.+)\.queries\.(.+)\.duration
match_type: regex
name: "mastodon_db_operation"
labels:
table: "$1"
operation: "$2"
mastodon: "db"
## Cache collector
- match: Mastodon\.production\.cache\.(.+)\.duration
match_type: regex
name: "mastodon_cache_duration"
labels:
operation: "$1"
mastodon: "cache"
## Sidekiq collector
- match: Mastodon\.production\.sidekiq\.(.+)\.processing_time
match_type: regex
name: "mastodon_sidekiq_worker_processing_time"
labels:
worker: "$1"
mastodon: "sidekiq"
- match: Mastodon\.production\.sidekiq\.(.+)\.success
match_type: regex
name: "mastodon_sidekiq_worker_success_total"
labels:
worker: "$1"
mastodon: "sidekiq"
- match: Mastodon\.production\.sidekiq\.(.+)\.failure
match_type: regex
name: "mastodon_sidekiq_worker_failure_total"
labels:
worker: "$1"
mastodon: "sidekiq"
- match: Mastodon\.production\.sidekiq\.queues\.(.+)\.enqueued
match_type: regex
name: "mastodon_sidekiq_queue_enqueued"
labels:
queue: "$1"
mastodon: "sidekiq"
- match: Mastodon\.production\.sidekiq\.queues\.(.+)\.latency
match_type: regex
name: "mastodon_sidekiq_queue_latency"
labels:
queue: "$1"
mastodon: "sidekiq"
- match: Mastodon\.production\.sidekiq\.(.+)
match_type: regex
name: "mastodon_sidekiq_$1"
labels:
mastodon: "sidekiq"


@ -0,0 +1,27 @@
apiVersion: v1
kind: Pod
metadata:
name: mastodon-tootctl
namespace: fediverse
spec:
containers:
- name: mastodon-tootctl
image: tootsuite/mastodon:latest@sha256:51405bec6529860c88640bbca3d30bf3a9a82456a3fdae122dc52f4f941808ff
command: ["/bin/bash", "-c", "--"]
args: ["while true; do sleep 30; done;"]
envFrom:
- secretRef:
name: mastodon-secret
env:
- name: DB_NAME
value: "mastodon"
- name: DB_PORT
value: "5432"
resources:
limits:
cpu: "1"
memory: "2Gi"
requests:
cpu: "50m"
memory: "256Mi"


@ -0,0 +1,166 @@
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-mastodon
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
interval: 10m
path: "./kubernetes/valinor/apps/fediverse/mastodon/app"
prune: true
sourceRef:
kind: GitRepository
name: valinor
wait: true
dependsOn:
- name: cluster-apps-external-secrets-stores
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-mastodon-web
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
interval: 10m
path: "./kubernetes/valinor/apps/fediverse/mastodon/app/mastodon-web"
prune: true
sourceRef:
kind: GitRepository
name: valinor
wait: true
dependsOn:
- name: cluster-apps-external-secrets-stores
- name: cluster-apps-mastodon
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-mastodon-streaming
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
interval: 10m
path: "./kubernetes/valinor/apps/fediverse/mastodon/app/mastodon-streaming"
prune: true
sourceRef:
kind: GitRepository
name: valinor
wait: true
dependsOn:
- name: cluster-apps-external-secrets-stores
- name: cluster-apps-mastodon
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-mastodon-statsd
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
interval: 10m
path: "./kubernetes/valinor/apps/fediverse/mastodon/app/statsd"
prune: true
sourceRef:
kind: GitRepository
name: valinor
wait: true
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-mastodon-addons
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
interval: 10m
path: "./kubernetes/valinor/apps/fediverse/mastodon/add-ons"
prune: true
sourceRef:
kind: GitRepository
name: valinor
wait: true
dependsOn:
- name: cluster-apps-mastodon
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-mastodon-sidekiq-high-priority
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
interval: 10m
path: "./kubernetes/valinor/apps/fediverse/mastodon/app/mastodon-sidekiq/local/high-priority"
prune: true
sourceRef:
kind: GitRepository
name: valinor
wait: false
dependsOn:
- name: cluster-apps-external-secrets-stores
- name: cluster-apps-mastodon
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-mastodon-sidekiq-low-volume
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
interval: 10m
path: "./kubernetes/valinor/apps/fediverse/mastodon/app/mastodon-sidekiq/local/low-volume"
prune: true
sourceRef:
kind: GitRepository
name: valinor
wait: false
dependsOn:
- name: cluster-apps-external-secrets-stores
- name: cluster-apps-mastodon
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-mastodon-sidekiq-ingress
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
interval: 10m
path: "./kubernetes/valinor/apps/fediverse/mastodon/app/mastodon-sidekiq/remote/ingress"
prune: true
sourceRef:
kind: GitRepository
name: valinor
wait: false
dependsOn:
- name: cluster-apps-external-secrets-stores
- name: cluster-apps-mastodon
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-mastodon-sidekiq-pull
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
interval: 10m
path: "./kubernetes/valinor/apps/fediverse/mastodon/app/mastodon-sidekiq/remote/pull"
prune: true
sourceRef:
kind: GitRepository
name: valinor
wait: false
dependsOn:
- name: cluster-apps-external-secrets-stores
- name: cluster-apps-mastodon


@ -0,0 +1,8 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: fediverse
labels:
kustomize.toolkit.fluxcd.io/prune: disabled
goldilocks.fairwinds.com/enabled: "true"


@ -0,0 +1,32 @@
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-flux-webhooks
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
interval: 10m
path: ./kubernetes/valinor/apps/flux-system/add-ons/webhooks
prune: true
sourceRef:
kind: GitRepository
name: valinor
wait: true
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-flux-monitoring
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
interval: 10m
path: ./kubernetes/valinor/apps/flux-system/add-ons/monitoring
prune: true
sourceRef:
kind: GitRepository
name: valinor
wait: true


@ -0,0 +1,7 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: flux-system
resources:
- ./podmonitor.yaml
- ./prometheusrule.yaml


@ -0,0 +1,31 @@
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: flux-system
namespace: flux-system
labels:
app.kubernetes.io/part-of: flux
app.kubernetes.io/component: monitoring
spec:
namespaceSelector:
matchNames:
- flux-system
selector:
matchExpressions:
- key: app
operator: In
values:
- helm-controller
- source-controller
- kustomize-controller
- notification-controller
- image-automation-controller
- image-reflector-controller
podMetricsEndpoints:
- port: http-prom
relabelings:
# https://github.com/prometheus-operator/prometheus-operator/issues/4816
- sourceLabels: [__meta_kubernetes_pod_phase]
action: keep
regex: Running


@ -0,0 +1,31 @@
---
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: flux-rules
namespace: flux-system
spec:
groups:
- name: flux.rules
rules:
- alert: FluxComponentAbsent
annotations:
summary: Flux component has disappeared from Prometheus target discovery.
expr: |
absent(up{job=~".*flux-system.*"} == 1)
for: 15m
labels:
severity: critical
- alert: FluxReconciliationFailure
annotations:
summary: >-
{{ $labels.kind }} {{ $labels.namespace }}/{{ $labels.name }} reconciliation
has been failing for more than 15 minutes.
expr: |
max(gotk_reconcile_condition{status="False",type="Ready"}) by (namespace, name, kind)
+
on(namespace, name, kind) (max(gotk_reconcile_condition{status="Deleted"})
by (namespace, name, kind)) * 2 == 1
for: 15m
labels:
severity: critical


@ -0,0 +1,18 @@
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: github-webhook-token
namespace: flux-system
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: github-webhook-token
creationPolicy: Owner
data:
- secretKey: token
remoteRef:
key: flux
property: github_webhook_token


@ -0,0 +1,24 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: webhook-receiver
namespace: flux-system
annotations:
external-dns.alpha.kubernetes.io/target: ingress.valinor.social
spec:
ingressClassName: "nginx"
rules:
- host: &host "flux-receiver-valinor.valinor.social"
http:
paths:
- path: /hook/
pathType: Prefix
backend:
service:
name: webhook-receiver
port:
number: 80
tls:
- hosts:
- *host


@ -0,0 +1,7 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./externalsecret.yaml
- ./ingress.yaml
- ./receiver.yaml


@ -0,0 +1,28 @@
---
apiVersion: notification.toolkit.fluxcd.io/v1beta2
kind: Receiver
metadata:
name: github-receiver
namespace: flux-system
spec:
type: github
events:
- "ping"
- "push"
secretRef:
name: github-webhook-token
resources:
- apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
name: "valinor"
namespace: "flux-system"
- apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
name: "cluster"
namespace: "flux-system"
- apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
name: "cluster-apps"
namespace: "flux-system"

@ -0,0 +1,5 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./github

@ -0,0 +1,8 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
# Pre Flux-Kustomizations
- ./namespace.yaml
# Flux-Kustomizations
- ./add-ons/ks.yaml

@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: flux-system
labels:
kustomize.toolkit.fluxcd.io/prune: disabled

@ -0,0 +1,20 @@
---
apiVersion: cilium.io/v2alpha1
kind: CiliumL2AnnouncementPolicy
metadata:
name: policy
spec:
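  # Respond to ARP/NDP for Service LoadBalancer IPs on interfaces matching ^enp.* from Linux nodes.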
loadBalancerIPs: true
interfaces:
- ^enp.*
nodeSelector:
matchLabels:
kubernetes.io/os: linux
---
apiVersion: cilium.io/v2alpha1
kind: CiliumLoadBalancerIPPool
metadata:
name: pool
spec:
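  # Address range Cilium may assign to Services of type LoadBalancer.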
cidrs:
- cidr: 10.2.42.0/24

@ -0,0 +1,59 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: cilium
namespace: kube-system
spec:
chart:
spec:
chart: cilium
interval: 30m
sourceRef:
kind: HelmRepository
name: cilium
namespace: flux-system
version: 1.14.0
interval: 30m
valuesFrom:
- kind: ConfigMap
name: cilium-values
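  # Base chart values come from the cilium-values ConfigMap; the inline values below are merged on top.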
maxHistory: 2
install:
createNamespace: true
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
uninstall:
keepHistory: false
values:
hubble:
ui:
ingress:
enabled: true
annotations:
nginx.ingress.kubernetes.io/whitelist-source-range: 10.0.0.0/8,172.16.0.0/12,192.168.0.0/16
className: "nginx"
hosts:
- &host hubble-valinor.valinor.social
tls:
- hosts:
- *host
metrics:
serviceMonitor:
enabled: true
relay:
prometheus:
serviceMonitor:
enabled: true
prometheus:
serviceMonitor:
enabled: true
operator:
prometheus:
serviceMonitor:
enabled: true

@ -0,0 +1,7 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kube-system
resources:
- ./helmrelease.yaml
- ./cilium-l2.yaml

@ -0,0 +1,14 @@
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-cilium
namespace: flux-system
spec:
interval: 10m
path: "./kubernetes/apps/kube-system/cilium/app"
prune: true
sourceRef:
kind: GitRepository
name: valinor
wait: true

@ -0,0 +1,9 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
# Pre Flux-Kustomizations
- ./namespace.yaml
# Flux-Kustomizations
- ./metrics-server/ks.yaml
- ./cilium/ks.yaml

@ -0,0 +1,20 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: metrics-server
namespace: kube-system
spec:
interval: 30m
chart:
spec:
chart: metrics-server
version: 3.11.0
sourceRef:
kind: HelmRepository
name: kubernetes-sigs-metrics-server
namespace: flux-system
interval: 30m
values:
metrics:
enabled: true

@ -0,0 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kube-system
resources:
- ./helmrelease.yaml

@ -0,0 +1,16 @@
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-metrics-server
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
interval: 10m
path: "./kubernetes/valinor/apps/kube-system/metrics-server/app"
prune: true
sourceRef:
kind: GitRepository
name: valinor
wait: true

@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: kube-system
labels:
kustomize.toolkit.fluxcd.io/prune: disabled

@ -0,0 +1,9 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
# Pre Flux-Kustomizations
- ./namespace.yaml
# Flux-Kustomizations
- ./kyverno/ks.yaml

@ -0,0 +1,79 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: kyverno
namespace: kyverno
spec:
interval: 30m
chart:
spec:
chart: kyverno
version: 3.0.5
sourceRef:
kind: HelmRepository
name: kyverno
namespace: flux-system
maxHistory: 2
install:
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
uninstall:
keepHistory: false
values:
crds:
install: true
grafana:
enabled: true
annotations:
grafana_folder: System
backgroundController:
serviceMonitor:
enabled: true
rbac:
clusterRole:
extraResources:
- apiGroups:
- ""
resources:
- pods
verbs:
- create
- update
- patch
- delete
- get
- list
cleanupController:
serviceMonitor:
enabled: true
reportsController:
serviceMonitor:
enabled: true
admissionController:
replicas: 3
serviceMonitor:
enabled: true
rbac:
clusterRole:
extraResources:
- apiGroups:
- ""
resources:
- pods
verbs:
- create
- update
- delete
topologySpreadConstraints:
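        # Spread the three webhook replicas across distinct nodes rather than co-locating them.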
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app.kubernetes.io/instance: kyverno
app.kubernetes.io/component: kyverno

@ -0,0 +1,7 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kyverno
resources:
- ./helmrelease.yaml

@ -0,0 +1,34 @@
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-kyverno
namespace: flux-system
spec:
path: ./kubernetes/apps/kyverno/kyverno/app
prune: true
sourceRef:
kind: GitRepository
name: valinor
wait: true
interval: 30m
retryInterval: 1m
timeout: 5m
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-cluster-policies
namespace: flux-system
spec:
dependsOn:
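    # Apply the cluster policies only after the Kyverno controllers and CRDs are in place.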
- name: cluster-apps-kyverno
path: ./kubernetes/apps/kyverno/kyverno/policies
prune: true
sourceRef:
kind: GitRepository
name: valinor
wait: false
interval: 30m
retryInterval: 1m
timeout: 5m

@ -0,0 +1,6 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./remove-cpu-limits.yaml

@ -0,0 +1,43 @@
---
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: remove-cpu-limit
annotations:
policies.kyverno.io/title: Remove CPU limits
policies.kyverno.io/category: Best Practices
policies.kyverno.io/severity: medium
policies.kyverno.io/subject: Pod
policies.kyverno.io/description: >-
This policy removes CPU limits from all Pods.
pod-policies.kyverno.io/autogen-controllers: none
spec:
generateExistingOnPolicyUpdate: true
rules:
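    # Both rules iterate the pod's container lists and drop resources.limits.cpu with a JSON 6902 remove patch.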
- name: remove-containers-cpu-limits
match:
any:
- resources:
kinds: ["Pod"]
mutate:
foreach:
- list: "request.object.spec.containers"
patchesJson6902: |-
- path: /spec/containers/{{elementIndex}}/resources/limits/cpu
op: remove
- name: delete-initcontainers-cpu-limits
match:
any:
- resources:
kinds: ["Pod"]
preconditions:
all:
- key: "{{ request.object.spec.initContainers[] || `[]` | length(@) }}"
operator: GreaterThanOrEquals
value: 1
mutate:
foreach:
- list: "request.object.spec.initContainers"
patchesJson6902: |-
- path: /spec/initContainers/{{elementIndex}}/resources/limits/cpu
op: remove

@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: kyverno
labels:
kustomize.toolkit.fluxcd.io/prune: disabled

@ -0,0 +1,58 @@
receivers:
- name: "null"
- name: "pushover"
pushover_configs:
- html: true
token_file: /etc/secrets/pushover_api_token
user_key_file: /etc/secrets/pushover_api_userkey
send_resolved: true
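        # Firing alerts go out as Pushover priority 1 (high); resolved notifications as 0 (normal).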
priority: |-
{{ if eq .Status "firing" }}1{{ else }}0{{ end }}
url_title: View in Alert Manager
title: |-
[{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }}
message: |-
{{- range .Alerts }}
{{- if ne .Labels.severity "" }}
<b>Severity:</b> <i>{{ .Labels.severity }}</i>
{{- else }}
<b>Severity:</b> <i>N/A</i>
{{- end }}
{{- if ne .Annotations.description "" }}
<b>Description:</b> <i>{{ .Annotations.description }}</i>
{{- else if ne .Annotations.summary "" }}
<b>Summary:</b> <i>{{ .Annotations.summary }}</i>
{{- else if ne .Annotations.message "" }}
<b>Message:</b> <i>{{ .Annotations.message }}</i>
{{- else }}
<b>Description:</b> <i>N/A</i>
{{- end }}
{{- if gt (len .Labels.SortedPairs) 0 }}
<b>Details:</b>
{{- range .Labels.SortedPairs }}
• <b>{{ .Name }}:</b> <i>{{ .Value }}</i>
{{- end }}
{{- end }}
{{- end }}
route:
group_by: ["alertname", "job"]
group_wait: 30s
group_interval: 5m
repeat_interval: 6h
receiver: "pushover"
routes:
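    # Silence the InfoInhibitor/Watchdog meta-alerts; everything else falls through to the default Pushover receiver.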
- receiver: "null"
matchers:
- alertname =~ "InfoInhibitor|Watchdog"
- receiver: "pushover"
matchers:
- severity = critical
continue: true
inhibit_rules:
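  # While a critical alert fires, suppress warning alerts sharing the same alertname and namespace.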
- source_matchers:
- severity = "critical"
target_matchers:
- severity = "warning"
equal: ["alertname", "namespace"]

@ -0,0 +1,22 @@
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: alertmanager-secret
namespace: monitoring
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: alertmanager-secret
creationPolicy: Owner
data:
- secretKey: pushover_api_token
remoteRef:
key: Pushover
property: alertmanager_token
- secretKey: pushover_api_userkey
remoteRef:
key: Pushover
property: userkey_jahanson

@ -0,0 +1,75 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: alertmanager
namespace: monitoring
spec:
interval: 30m
chart:
spec:
chart: app-template
version: 1.5.1
interval: 30m
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
values:
controller:
type: statefulset
annotations:
reloader.stakater.com/auto: "true"
image:
repository: quay.io/prometheus/alertmanager
tag: main@sha256:9ec2c0c85673a6fefb650bf77d2204984f0d77a25c156f353edd650c32221dbf
podAnnotations:
reloader.stakater.com/auto: "true"
service:
main:
ports:
http:
port: 9093
ingress:
main:
enabled: true
ingressClassName: nginx
hosts:
- host: &host alertmanager.valinor.social
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- *host
persistence:
config:
enabled: true
type: configMap
name: alertmanager-configmap
mountPath: /etc/alertmanager
readOnly: true
secrets:
enabled: true
type: secret
name: alertmanager-secret
mountPath: /etc/secrets
readOnly: true
resources:
requests:
cpu: 11m
memory: 50M
limits:
memory: 99M
volumeClaimTemplates:
- name: storage
mountPath: /alertmanager
accessMode: ReadWriteOnce
size: 50Mi

View file

@ -0,0 +1,15 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: monitoring
resources:
- ./externalsecret.yaml
- ./helmrelease.yaml
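# Render config/alertmanager.yml into a ConfigMap with a stable name (no hash suffix) so the
# HelmRelease can mount it; Flux post-build substitution is disabled for the generated object.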
configMapGenerator:
- name: alertmanager-configmap
files:
- config/alertmanager.yml
generatorOptions:
disableNameSuffixHash: true
annotations:
kustomize.toolkit.fluxcd.io/substitute: disabled

@ -0,0 +1,16 @@
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: cluster-apps-alertmanager
namespace: flux-system
spec:
interval: 10m
path: "./kubernetes/valinor/apps/monitoring/alertmanager/app"
prune: true
sourceRef:
kind: GitRepository
name: valinor
wait: true
dependsOn:
- name: cluster-apps-external-secrets-stores

@ -0,0 +1,28 @@
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: grafana-secrets
namespace: monitoring
spec:
secretStoreRef:
kind: ClusterSecretStore
name: onepassword-connect
target:
name: grafana-secrets
creationPolicy: Owner
template:
engineVersion: v2
data:
GF_AUTH_GENERIC_OAUTH_CLIENT_ID: "{{ .auth0_generic_client_id }}"
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET: "{{ .auth0_generic_client_secret }}"
GF_AUTH_GENERIC_OAUTH_AUTH_URL: "{{ .auth0_hsn_domain }}/authorize"
GF_AUTH_GENERIC_OAUTH_TOKEN_URL: "{{ .auth0_hsn_domain }}/oauth/token"
GF_AUTH_GENERIC_OAUTH_API_URL: "{{ .auth0_hsn_domain }}/userinfo"
dataFrom:
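    # Pull every field from the auth0 item and prefix it with auth0_ so the template above can reference it.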
- extract:
key: auth0
rewrite:
- regexp:
source: "(.*)"
target: "auth0_$1"

@ -0,0 +1,239 @@
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: grafana
namespace: monitoring
spec:
chart:
spec:
chart: grafana
interval: 30m
sourceRef:
kind: HelmRepository
name: grafana
namespace: flux-system
version: 6.58.7
interval: 30m
timeout: 20m
maxHistory: 2
install:
createNamespace: true
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
uninstall:
keepHistory: false
values:
replicas: 1
dashboardProviders:
dashboardproviders.yaml:
apiVersion: 1
providers:
- name: default
orgId: 1
folder: ""
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards/default
datasources:
datasources.yaml:
apiVersion: 1
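        # Remove any previously provisioned Loki/Alertmanager datasources before defining them below.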
deleteDatasources:
- name: Loki
orgId: 1
- name: Alertmanager
orgId: 1
datasources:
- name: Prometheus
type: prometheus
access: proxy
url: http://thanos-query-frontend.monitoring.svc.cluster.local:9090
isDefault: true
- name: Loki
type: loki
access: proxy
url: http://loki-gateway.monitoring.svc.cluster.local
jsonData:
maxLines: 250
- name: Alertmanager
type: alertmanager
access: proxy
url: http://kube-prometheus-stack-alertmanager.monitoring.svc.cluster.local:9093
jsonData:
implementation: prometheus
dashboards:
default:
# Ref: https://grafana.com/grafana/dashboards/1860-node-exporter-full/
node-exporter-full:
gnetId: 1860
revision: 30
datasource: Prometheus
# Ref: https://grafana.com/grafana/dashboards/5342-ceph-pools/
ceph-pools:
gnetId: 5342
revision: 9
datasource: Prometheus
# Ref: https://grafana.com/grafana/dashboards/5336-ceph-osd-single/
ceph-osd:
gnetId: 5336
revision: 9
datasource: Prometheus
# Ref: https://grafana.com/grafana/dashboards/2842-ceph-cluster/
ceph-cluster:
gnetId: 2842
revision: 16
datasource: Prometheus
cert-manager:
url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/cert-manager/dashboards/cert-manager.json
datasource: Prometheus
external-secrets:
url: https://raw.githubusercontent.com/external-secrets/external-secrets/main/docs/snippets/dashboard.json
datasource: Prometheus
flux-cluster:
url: https://raw.githubusercontent.com/fluxcd/flux2/main/manifests/monitoring/monitoring-config/dashboards/cluster.json
datasource: Prometheus
flux-control-plane:
url: https://raw.githubusercontent.com/fluxcd/flux2/main/manifests/monitoring/monitoring-config/dashboards/control-plane.json
datasource: Prometheus
flux-logs:
url: https://raw.githubusercontent.com/fluxcd/flux2/main/manifests/monitoring/monitoring-config/dashboards/logs.json
datasource: Prometheus
kubernetes-api-server:
url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-system-api-server.json
datasource: Prometheus
kubernetes-coredns:
url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-system-coredns.json
datasource: Prometheus
kubernetes-global:
url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-global.json
datasource: Prometheus
kubernetes-namespaces:
url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-namespaces.json
datasource: Prometheus
kubernetes-nodes:
url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-nodes.json
datasource: Prometheus
kubernetes-pods:
url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-pods.json
datasource: Prometheus
ingress-nginx:
url: https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/grafana/dashboards/nginx.json
datasource: Prometheus
ingress-nginx-request-handling-performance:
url: https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/grafana/dashboards/request-handling-performance.json
datasource: Prometheus
deploymentStrategy:
type: Recreate
env:
GF_ANALYTICS_CHECK_FOR_UPDATES: false
GF_DATE_FORMATS_USE_BROWSER_LOCALE: true
GF_EXPLORE_ENABLED: true
GF_GRAFANA_NET_URL: https://grafana.net
GF_LOG_FILTERS: rendering:debug
GF_PANELS_DISABLE_SANITIZE_HTML: true
GF_SECURITY_ALLOW_EMBEDDING: true
GF_SECURITY_COOKIE_SAMESITE: grafana
GF_SERVER_ROOT_URL: "https://grafana.valinor.social"
envFromSecret: grafana-secrets
grafana.ini:
analytics:
check_for_updates: false
auth:
oauth_auto_login: true
auth.basic:
enabled: false
auth.generic_oauth:
enabled: true
name: Auth0
scopes: "openid profile email"
client_id: # Set by env vars
client_secret: # Set by env vars
auth_url: # Set by env vars
token_url: # Set by env vars
api_url: # Set by env vars
use_pkce: true
auth.generic_oauth.group_mapping:
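        # Map the Auth0 groups claim to Grafana roles: members of grafana-admin become Admin, members of grafana-viewer become Viewer.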
role_attribute_path: |
contains("https://hsndev/groups"[*], 'grafana-admin') && 'Admin' || contains("https://hsndev/groups"[*], 'grafana-viewer') && 'Viewer'
org_id: 1
grafana_net:
url: https://grafana.net
log:
mode: console
paths:
data: /var/lib/grafana/data
logs: /var/log/grafana
plugins: /var/lib/grafana/plugins
provisioning: /etc/grafana/provisioning
server:
root_url: https://grafana.valinor.social
imageRenderer:
enabled: true
ingress:
enabled: true
ingressClassName: nginx
annotations:
external-dns.alpha.kubernetes.io/target: ingress.valinor.social
hosts:
- &host grafana.valinor.social
tls:
- hosts:
- *host
persistence:
enabled: false
plugins:
- natel-discrete-panel
- pr0ps-trackmap-panel
- grafana-piechart-panel
- vonage-status-panel
- grafana-worldmap-panel
- grafana-clock-panel
podAnnotations:
configmap.reloader.stakater.com/reload: grafana
secret.reloader.stakater.com/reload: grafana-secrets
rbac:
pspEnabled: false
resources:
requests:
cpu: 23m
memory: 127M
serviceMonitor:
enabled: true
sidecar:
dashboards:
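        # Discover dashboards from ConfigMaps labelled grafana_dashboard in any namespace, foldered by the grafana_folder annotation.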
enabled: true
labelValue: ""
label: grafana_dashboard
folderAnnotation: grafana_folder
searchNamespace: ALL
provider:
disableDelete: true
foldersFromFilesStructure: true
datasources:
enabled: true
labelValue: ""
searchNamespace: ALL
logLevel: INFO
