# theshire/.archive/vault/app/helmrelease.yaml
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: vault
spec:
  interval: 30m
  chart:
    spec:
      chart: vault
      # Quoted so chart versions never risk numeric coercion.
      version: "0.28.1"
      sourceRef:
        kind: HelmRepository
        name: hashicorp
        namespace: flux-system
  install:
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      retries: 3
      strategy: uninstall
  values:
    server:
      image:
        repository: public.ecr.aws/hashicorp/vault
        tag: "1.17.5"
      logLevel: "info"
      logFormat: "json"
      ingress:
        enabled: true
        ingressClassName: internal-nginx
        hosts:
          - host: &host "vault.jahanson.tech"
            paths: []
        tls:
          - hosts:
              - *host
      service:
        type: "ClusterIP"
        port: &port 8200
        targetPort: *port
      # off until it's online for the first time
      readinessProbe:
        enabled: true
        path: "/v1/sys/health?standbyok=true&sealedcode=204&uninitcode=204"
      livenessProbe:
        enabled: true
        path: "/v1/sys/health?standbyok=true"
        initialDelaySeconds: 60
        # If you need to use a http path instead of the default exec
        # path: /v1/sys/health?standbyok=true
        # Port number on which readinessProbe will be checked.
        port: *port
      extraEnvironmentVars:
        # This is required because they will lose their values when the pod is upgraded in my experience.
        # Probably a Flux thing.
        VAULT_CLUSTER_ADDR: "http://$(HOSTNAME).vault-internal:8201"
      extraSecretEnvironmentVars:
        - envName: AWS_SECRET_ACCESS_KEY
          secretName: vault-secret
          secretKey: AWS_SECRET_ACCESS_KEY
        - envName: AWS_ACCESS_KEY_ID
          secretName: vault-secret
          secretKey: AWS_ACCESS_KEY_ID
        - envName: VAULT_AWSKMS_SEAL_KEY_ID
          secretName: vault-secret
          secretKey: VAULT_AWSKMS_SEAL_KEY_ID
      # These are defaults but explicitly set here for clarity.
      dataStorage:
        size: 4Gi
        mountPath: /vault/data
        storageClass: ceph-block
      auditStorage:
        enabled: true
        size: 10Gi
        mountPath: /vault/audit
        storageClass: ceph-block
      # We want high availability. If standalone is true it sets the storage backend to file
      # and the max replicas can only be 1.
      standalone:
        enabled: false
      ha:
        enabled: true
        # maxUnavailable will default to (n/2)-1 where n is the number of replicas
        # so if you have 6 replicas, maxUnavailable will be 2 unless you set it specifically.
        replicas: 3
        config: ""
        raft:
          enabled: true
          config: |
            ui = true
            listener "tcp" {
              tls_disable = 1
              address = "[::]:8200"
              cluster_address = "[::]:8201"
              # For prometheus!
              telemetry {
                unauthenticated_metrics_access = "true"
              }
            }
            storage "raft" {
              path = "/vault/data"
              retry_join {
                auto_join = "provider=k8s label_selector=\"app.kubernetes.io/name=vault,component=server\" namespace=\"security\""
                auto_join_scheme = "http"
              }
            }
            seal "awskms" {
              region = "us-east-2"
            }
            service_registration "kubernetes" {}
      statefulSet:
        securityContext:
          pod:
            runAsUser: 568
            runAsGroup: 568
            runAsNonRoot: true
            fsGroup: 568
            fsGroupChangePolicy: OnRootMismatch
            supplementalGroups: [10000]
          container:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: false
            capabilities:
              drop:
                - "ALL"
    ui:
      enabled: true
      publishNotReadyAddresses: true
      # The service should only contain selectors for active Vault pod
      activeVaultPodOnly: true
      serviceType: "LoadBalancer"
      externalPort: *port
      targetPort: *port