theshire/.archive/vault/app/helmrelease.yaml
Joseph Hanson d93afbcd92
undeploy :(
too much management, using 1pass for everything secrets related instead.
2024-09-11 22:49:47 -05:00

---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta2.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: vault
spec:
  interval: 30m
  chart:
    spec:
      chart: vault
      version: 0.28.1
      sourceRef:
        kind: HelmRepository
        name: hashicorp
        namespace: flux-system
  install:
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      retries: 3
      strategy: uninstall
  values:
    server:
      image:
        repository: public.ecr.aws/hashicorp/vault
        tag: "1.17.5"
      logLevel: "info"
      logFormat: "json"
      ingress:
        enabled: true
        ingressClassName: internal-nginx
        hosts:
          - host: &host "vault.jahanson.tech"
            paths: []
        tls:
          - hosts:
              - *host
      service:
        type: "ClusterIP"
        port: &port 8200
        targetPort: *port
      # off until it's online for the first time
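      # sealedcode=204 and uninitcode=204 make /v1/sys/health return a success status
      # even while Vault is sealed or uninitialized, so the readiness probe can stay
      # enabled without marking pods unready before the first unseal.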
      readinessProbe:
        enabled: true
        path: "/v1/sys/health?standbyok=true&sealedcode=204&uninitcode=204"
      livenessProbe:
        enabled: true
        path: "/v1/sys/health?standbyok=true"
        initialDelaySeconds: 60
        # If you need to use a http path instead of the default exec
        # path: /v1/sys/health?standbyok=true
        # Port number on which the livenessProbe will be checked.
        port: *port
      extraEnvironmentVars:
        # This is required because they will lose their values when the pod is upgraded in my experience.
        # Probably a Flux thing.
        VAULT_CLUSTER_ADDR: http://$(HOSTNAME).vault-internal:8201
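        # vault-internal is the headless service the chart creates for pod-to-pod traffic;
        # 8201 is Vault's cluster port and matches cluster_address in the raft config below.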
      extraSecretEnvironmentVars:
        - envName: AWS_SECRET_ACCESS_KEY
          secretName: vault-secret
          secretKey: AWS_SECRET_ACCESS_KEY
        - envName: AWS_ACCESS_KEY_ID
          secretName: vault-secret
          secretKey: AWS_ACCESS_KEY_ID
        - envName: VAULT_AWSKMS_SEAL_KEY_ID
          secretName: vault-secret
          secretKey: VAULT_AWSKMS_SEAL_KEY_ID
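      # These reference a pre-existing Secret named "vault-secret" in this namespace; the
      # chart only consumes it. Roughly this shape (a sketch, values are placeholders):
      #   apiVersion: v1
      #   kind: Secret
      #   metadata:
      #     name: vault-secret
      #   stringData:
      #     AWS_ACCESS_KEY_ID: <iam access key id>
      #     AWS_SECRET_ACCESS_KEY: <iam secret access key>
      #     VAULT_AWSKMS_SEAL_KEY_ID: <kms key id or alias>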
      # These are defaults but explicitly set here for clarity.
      dataStorage:
        size: 4Gi
        mountPath: /vault/data
        storageClass: ceph-block
      auditStorage:
        enabled: true
        size: 10Gi
        mountPath: /vault/audit
        storageClass: ceph-block
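      # The audit PVC only provides storage; an audit device still has to be enabled after
      # init, e.g. `vault audit enable file file_path=/vault/audit/vault.log` (path is illustrative).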
      # We want high availability. If standalone is true, the chart uses the file storage
      # backend and replicas is capped at 1.
      standalone:
        enabled: false
      ha:
        enabled: true
        # maxUnavailable will default to (n/2)-1 where n is the number of replicas,
        # so if you have 6 replicas, maxUnavailable will be 2 unless you set it specifically.
        replicas: 3
        config: ""
        raft:
          enabled: true
          config: |
            ui = true
            listener "tcp" {
              tls_disable = 1
              address = "[::]:8200"
              cluster_address = "[::]:8201"
              # For prometheus!
              telemetry {
                unauthenticated_metrics_access = "true"
              }
            }
            storage "raft" {
              path = "/vault/data"
              retry_join {
                auto_join = "provider=k8s label_selector=\"app.kubernetes.io/name=vault,component=server\" namespace=\"security\""
                auto_join_scheme = "http"
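                # "http" here matches tls_disable = 1 on the listener above.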
              }
            }
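            # kms_key_id is supplied via the VAULT_AWSKMS_SEAL_KEY_ID env var (see
            # extraSecretEnvironmentVars above) rather than inline here.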
seal "awskms" {
region = "us-east-2"
}
service_registration "kubernetes" {}
statefulSet:
securityContext:
pod:
runAsUser: 568
runAsGroup: 568
runAsNonRoot: true
fsGroup: 568
fsGroupChangePolicy: OnRootMismatch
supplementalGroups: [10000]
container:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: false
capabilities:
drop:
- "ALL"
ui:
enabled: true
publishNotReadyAddresses: true
# The service should only contain selectors for active Vault pod
activeVaultPodOnly: true
serviceType: "LoadBalancer"
externalPort: *port
targetPort: *port