Merge branch 'main' into renovate/cert-manager-1.x

Joseph Hanson 2023-10-02 13:32:34 +00:00
commit c22347f6cb
29 changed files with 407 additions and 327 deletions

@@ -0,0 +1,14 @@
#!/usr/bin/env bash
# Block until the pod spawned by a Job exists and reports a phase.
# Usage: wait-for-k8s-job.sh <job-name> [namespace]
JOB_NAME=$1
NAMESPACE="${2:-default}"
[[ -z "${JOB_NAME}" ]] && echo "Job name not specified" && exit 1
while true; do
  STATUS="$(kubectl -n "${NAMESPACE}" get pod -l job-name="${JOB_NAME}" -o jsonpath='{.items[*].status.phase}')"
  # Break once any phase is reported; testing only for "Pending" would spin
  # forever when the pod has already moved on to Running or Succeeded.
  if [[ -n "${STATUS}" ]]; then
    break
  fi
  sleep 1
done

@@ -0,0 +1,47 @@
---
version: "3"

tasks:
  gr-sync:
    desc: Sync all Flux GitRepositories
    cmds:
      - |
        kubectl get gitrepositories --all-namespaces --no-headers | awk '{print $1, $2}' \
          | xargs -P 4 -L 1 bash -c \
            'kubectl -n $0 annotate gitrepository/$1 reconcile.fluxcd.io/requestedAt="$(date +%s)" --field-manager=flux-client-side-apply --overwrite'
  ks-sync:
    desc: Sync all Flux Kustomizations
    cmds:
      - |
        kubectl get kustomization --all-namespaces --no-headers | awk '{print $1, $2}' \
          | xargs -P 4 -L 1 bash -c \
            'kubectl -n $0 annotate kustomization/$1 reconcile.fluxcd.io/requestedAt="$(date +%s)" --field-manager=flux-client-side-apply --overwrite'
  hr-sync:
    desc: Sync all Flux HelmReleases
    cmds:
      - |
        kubectl get helmreleases --all-namespaces --no-headers | awk '{print $1, $2}' \
          | xargs -P 4 -L 1 bash -c \
            'kubectl -n $0 annotate helmrelease/$1 reconcile.fluxcd.io/requestedAt="$(date +%s)" --overwrite'
  tf-sync:
    desc: Sync Flux Terraforms
    cmds:
      - |
        kubectl get terraforms --all-namespaces --no-headers | awk '{print $1, $2}' \
          | xargs -P 4 -L 1 bash -c \
            'kubectl -n $0 annotate terraform/$1 reconcile.fluxcd.io/requestedAt="$(date +%s)" --overwrite'
  hr-suspend:
    desc: Suspend all Flux HelmReleases
    cmds:
      - |
        flux get helmrelease --all-namespaces --no-header | awk '{print $1, $2}' \
          | xargs -L 1 bash -c 'flux -n $0 suspend helmrelease $1'
  hr-resume:
    desc: Resume all Flux HelmReleases
    cmds:
      - |
        flux get helmrelease --all-namespaces --no-header | awk '{print $1, $2}' \
          | xargs -L 1 bash -c 'flux -n $0 resume helmrelease $1'
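# Usage note: with the `flux` include added to the root Taskfile in this commit,
# e.g. `task flux:hr-sync` force-reconciles every HelmRelease by re-annotating it,
# while `task flux:hr-suspend` / `task flux:hr-resume` pause and resume them in bulk.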

@@ -0,0 +1,91 @@
---
version: "3"

x-task-vars: &task-vars
  node: "{{.node}}"
  ceph_disk: "{{.ceph_disk}}"
  ts: "{{.ts}}"
  jobName: "{{.jobName}}"

vars:
  waitForJobScript: "../_scripts/wait-for-k8s-job.sh"
  ts: '{{now | date "150405"}}'

tasks:
  wipe-node-aule:
    desc: Trigger a wipe of Rook-Ceph data on node "aule"
    cmds:
      - task: wipe-disk
        vars:
          node: "{{.node}}"
          ceph_disk: "/dev/disk/by-id/scsi-0HC_Volume_37460833"
      - task: wipe-data
        vars:
          node: "{{.node}}"
    vars:
      node: aule
  wipe-node-eonwe:
    desc: Trigger a wipe of Rook-Ceph data on node "eonwe"
    cmds:
      - task: wipe-disk
        vars:
          node: "{{.node}}"
          ceph_disk: "/dev/disk/by-id/scsi-0HC_Volume_37460887"
      - task: wipe-data
        vars:
          node: "{{.node}}"
    vars:
      node: eonwe
  wipe-node-arlen:
    desc: Trigger a wipe of Rook-Ceph data on node "arlen"
    cmds:
      - task: wipe-disk
        vars:
          node: "{{.node}}"
          ceph_disk: "/dev/disk/by-id/scsi-0HC_Volume_37460897"
      - task: wipe-data
        vars:
          node: "{{.node}}"
    vars:
      node: arlen
  wipe-disk:
    desc: Wipe all remnants of rook-ceph from a given disk (ex. task rook:wipe-disk node=aule ceph_disk="/dev/nvme0n1")
    silent: true
    internal: true
    cmds:
      - envsubst < <(cat {{.wipeRookDiskJobTemplate}}) | kubectl apply -f -
      - bash {{.waitForJobScript}} {{.jobName}} default
      - kubectl -n default wait job/{{.jobName}} --for condition=complete --timeout=1m
      - kubectl -n default logs job/{{.jobName}} --container list
      - kubectl -n default delete job {{.jobName}}
    vars:
      node: '{{ or .node (fail "`node` is required") }}'
      ceph_disk: '{{ or .ceph_disk (fail "`ceph_disk` is required") }}'
      jobName: 'wipe-disk-{{- .node -}}-{{- .ceph_disk | replace "/" "-" -}}-{{- .ts -}}'
      wipeRookDiskJobTemplate: "WipeDiskJob.tmpl.yaml"
    env: *task-vars
    preconditions:
      - sh: test -f {{.waitForJobScript}}
      - sh: test -f {{.wipeRookDiskJobTemplate}}
  wipe-data:
    desc: Wipe all remnants of rook-ceph data from a given node (ex. task rook:wipe-data node=aule)
    silent: true
    internal: true
    cmds:
      - envsubst < <(cat {{.wipeRookDataJobTemplate}}) | kubectl apply -f -
      - bash {{.waitForJobScript}} {{.jobName}} default
      - kubectl -n default wait job/{{.jobName}} --for condition=complete --timeout=1m
      - kubectl -n default logs job/{{.jobName}} --container list
      - kubectl -n default delete job {{.jobName}}
    vars:
      node: '{{ or .node (fail "`node` is required") }}'
      jobName: "wipe-rook-data-{{- .node -}}-{{- .ts -}}"
      wipeRookDataJobTemplate: "WipeRookDataJob.tmpl.yaml"
    env: *task-vars
    preconditions:
      - sh: test -f {{.waitForJobScript}}
      - sh: test -f {{.wipeRookDataJobTemplate}}
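# Usage note: with the `rook` include added to the root Taskfile in this commit,
# `task rook:wipe-node-aule` (or -eonwe / -arlen) chains wipe-disk and wipe-data:
# each renders the matching Job template below via envsubst, waits for the pod
# with wait-for-k8s-job.sh, then tails the logs and deletes the Job.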

@@ -0,0 +1,26 @@
---
apiVersion: batch/v1
kind: Job
metadata:
  name: "${jobName}"
  namespace: "default"
spec:
  ttlSecondsAfterFinished: 3600
  template:
    spec:
      automountServiceAccountToken: false
      restartPolicy: Never
      nodeName: ${node}
      containers:
        - name: disk-wipe
          image: ghcr.io/onedr0p/alpine:3.17.3@sha256:999384960b6114496a5e4036e945141c205d064ce23b87326bd3f8d878c5a9d4
          securityContext:
            privileged: true
          resources: {}
          command: ["/bin/sh", "-c"]
          args:
            - apk add --no-cache sgdisk util-linux parted;
              sgdisk --zap-all ${ceph_disk};
              blkdiscard ${ceph_disk};
              dd if=/dev/zero bs=1M count=10000 oflag=direct of=${ceph_disk};
              partprobe ${ceph_disk};
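# ${jobName}, ${node} and ${ceph_disk} are envsubst placeholders, populated from
# the &task-vars env block that the wipe-disk task exports before piping this
# template into `kubectl apply -f -`.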

@@ -0,0 +1,29 @@
---
apiVersion: batch/v1
kind: Job
metadata:
  name: "${jobName}"
  namespace: "default"
spec:
  ttlSecondsAfterFinished: 3600
  template:
    spec:
      automountServiceAccountToken: false
      restartPolicy: Never
      nodeName: ${node}
      containers:
        - name: disk-wipe
          image: ghcr.io/onedr0p/alpine:3.17.3@sha256:999384960b6114496a5e4036e945141c205d064ce23b87326bd3f8d878c5a9d4
          securityContext:
            privileged: true
          resources: {}
          command: ["/bin/sh", "-c"]
          args:
            - rm -rf /mnt/host_var/lib/rook
          volumeMounts:
            - mountPath: /mnt/host_var
              name: host-var
      volumes:
        - name: host-var
          hostPath:
            path: /var
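# The hostPath mount maps the node's /var to /mnt/host_var, so the `rm -rf`
# above clears /var/lib/rook (Rook's on-host state directory) on that node.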

@@ -21,6 +21,12 @@ env:
 includes:
   volsync: .taskfiles/VolSync/Tasks.yaml
   precommit: .taskfiles/PreCommit/Tasks.yaml
+  rook:
+    taskfile: ".taskfiles/rook"
+    dir: .taskfiles/rook
+  flux:
+    dir: .taskfiles/flux
+    taskfile: .taskfiles/flux
 
 tasks:

@@ -10,7 +10,7 @@ spec:
   chart:
     spec:
       chart: cert-manager-webhook-dnsimple
-      version: 0.0.6
+      version: 0.0.7
       interval: 30m
       sourceRef:
         kind: HelmRepository

@@ -1,20 +0,0 @@
---
apiVersion: cilium.io/v2alpha1
kind: CiliumL2AnnouncementPolicy
metadata:
  name: policy
spec:
  loadBalancerIPs: true
  interfaces:
    - ^eth1$
  nodeSelector:
    matchLabels:
      kubernetes.io/os: linux
---
apiVersion: cilium.io/v2alpha1
kind: CiliumLoadBalancerIPPool
metadata:
  name: pool
spec:
  cidrs:
    - cidr: 10.2.42.0/24

@@ -1,114 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: cilium
  namespace: kube-system
spec:
  interval: 30m
  chart:
    spec:
      chart: cilium
      version: 1.14.2
      sourceRef:
        kind: HelmRepository
        name: cilium
        namespace: flux-system
  maxHistory: 2
  install:
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      retries: 3
  uninstall:
    keepHistory: false
  values:
    autoDirectNodeRoutes: true
    bpf:
      masquerade: true
    bgp:
      enabled: false
    cluster:
      name: kubernetes
      id: 1
    containerRuntime:
      integration: containerd
      socketPath: /var/run/k3s/containerd/containerd.sock
    endpointRoutes:
      enabled: true
    hubble:
      enabled: true
      metrics:
        enabled:
          - dns:query
          - drop
          - tcp
          - flow
          - port-distribution
          - icmp
          - http
        serviceMonitor:
          enabled: true
        dashboards:
          enabled: true
          annotations:
            grafana_folder: Cilium
      relay:
        enabled: true
        rollOutPods: true
        prometheus:
          serviceMonitor:
            enabled: true
      ui:
        enabled: true
        rollOutPods: true
        ingress:
          enabled: true
          className: nginx
          hosts:
            - &host hubble.valinor.social
          tls:
            - hosts:
                - *host
    ipam:
      mode: kubernetes
    ipv4NativeRoutingCIDR: 10.32.0.0/16
    k8sServiceHost: 10.2.0.6
    k8sServicePort: 6443
    kubeProxyReplacement: true
    kubeProxyReplacementHealthzBindAddr: 0.0.0.0:10256
    l2announcements:
      enabled: true
      leaseDuration: 120s
      leaseRenewDeadline: 60s
      leaseRetryPeriod: 1s
    loadBalancer:
      algorithm: maglev
      mode: dsr
    localRedirectPolicy: true
    operator:
      rollOutPods: true
      prometheus:
        enabled: true
        serviceMonitor:
          enabled: true
      dashboards:
        enabled: true
        annotations:
          grafana_folder: Cilium
    prometheus:
      enabled: true
      serviceMonitor:
        enabled: true
        trustCRDsExist: true
    dashboards:
      enabled: true
      annotations:
        grafana_folder: Cilium
    rollOutCiliumPods: true
    securityContext:
      privileged: true
    tunnel: disabled

@@ -1,7 +0,0 @@
---
nameReference:
  - kind: ConfigMap
    version: v1
    fieldSpecs:
      - path: spec/valuesFrom/name
        kind: HelmRelease

@@ -1,109 +0,0 @@
autoDirectNodeRoutes: true
bandwidthManager:
  enabled: true
  bbr: true
bpf:
  masquerade: true
bgp:
  enabled: false
cluster:
  name: valinor
  id: 1
containerRuntime:
  integration: containerd
endpointRoutes:
  enabled: true
cgroup:
  autoMount:
    enabled: false
  hostRoot: /sys/fs/cgroup
hubble:
  enabled: true
  metrics:
    enabled:
      - dns:query
      - drop
      - tcp
      - flow
      - port-distribution
      - icmp
      - http
    serviceMonitor:
      enabled: true
    dashboards:
      enabled: true
      annotations:
        grafana_folder: Cilium
  relay:
    enabled: true
    rollOutPods: true
    prometheus:
      serviceMonitor:
        enabled: true
  ui:
    enabled: true
    rollOutPods: true
    ingress:
      enabled: true
      className: internal
      hosts:
        - &host hubble.hsn.dev
      tls:
        - hosts:
            - *host
ipam:
  mode: kubernetes
ipv4NativeRoutingCIDR: 10.32.0.0/16
k8sServiceHost: localhost
k8sServicePort: 7445
kubeProxyReplacement: true
kubeProxyReplacementHealthzBindAddr: 0.0.0.0:10256
l2announcements:
  enabled: true
  leaseDuration: 120s
  leaseRenewDeadline: 60s
  leaseRetryPeriod: 1s
loadBalancer:
  algorithm: maglev
  mode: dsr
localRedirectPolicy: true
operator:
  rollOutPods: true
  prometheus:
    enabled: true
    serviceMonitor:
      enabled: true
  dashboards:
    enabled: true
    annotations:
      grafana_folder: Cilium
prometheus:
  enabled: true
  serviceMonitor:
    enabled: true
    trustCRDsExist: true
dashboards:
  enabled: true
  annotations:
    grafana_folder: Cilium
rollOutCiliumPods: true
securityContext:
  privileged: true
  capabilities:
    ciliumAgent:
      - CHOWN
      - KILL
      - NET_ADMIN
      - NET_RAW
      - IPC_LOCK
      - SYS_ADMIN
      - SYS_RESOURCE
      - DAC_OVERRIDE
      - FOWNER
      - SETGID
      - SETUID
    cleanCiliumState:
      - NET_ADMIN
      - SYS_ADMIN
      - SYS_RESOURCE
tunnel: disabled

@@ -0,0 +1,22 @@
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: hcloud
  namespace: kube-system
spec:
  secretStoreRef:
    kind: ClusterSecretStore
    name: onepassword-connect
  target:
    name: hcloud
    creationPolicy: Owner
  data:
    - secretKey: token
      remoteRef:
        key: hetzner
        property: cloud-api-token
    - secretKey: network
      remoteRef:
        key: hetzner
        property: cloud-network-name

@ -0,0 +1,24 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: hccm
namespace: kube-system
spec:
interval: 30m
chart:
spec:
chart: hcloud-cloud-controller-manager
version: v1.18.0
sourceRef:
kind: HelmRepository
name: hetzner
namespace: flux-system
interval: 30m
values:
metrics:
enabled: true
networking:
enabled: true
clusterCIDR: 10.244.0.0/16
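# With networking.enabled, HCCM programs Hetzner Cloud network routes for pod
# traffic; clusterCIDR here must match the cluster's pod CIDR (10.244.0.0/16).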

@@ -1,15 +1,8 @@
----
 # yaml-language-server: $schema=https://json.schemastore.org/kustomization.json
+---
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 namespace: kube-system
 resources:
   - ./externalsecret.yaml
   - ./helmrelease.yaml
-  - ./cilium-l2.yaml
-configMapGenerator:
-  - name: cilium-values
-    files:
-      - values.yaml=./values.yaml
-configurations:
-  - kustomizeconfig.yaml

@@ -2,11 +2,13 @@
 apiVersion: kustomize.toolkit.fluxcd.io/v1
 kind: Kustomization
 metadata:
-  name: cluster-apps-cilium
+  name: cluster-apps-hetzner-hccm
   namespace: flux-system
+  labels:
+    substitution.flux.home.arpa/enabled: "true"
 spec:
   interval: 10m
-  path: "./kubernetes/apps/kube-system/cilium/app"
+  path: "./kubernetes/apps/kube-system/hccm/app"
   prune: true
   sourceRef:
     kind: GitRepository

@@ -1,3 +1,4 @@
+# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json
 ---
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
@@ -6,4 +7,4 @@ resources:
   - ./namespace.yaml
   # Flux-Kustomizations
   - ./metrics-server/ks.yaml
-  - ./cilium/ks.yaml
+  - ./hccm/ks.yaml

@@ -1,3 +1,4 @@
+# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json
 ---
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization

@@ -184,7 +184,7 @@ spec:
         root_url: https://grafana.valinor.social
     imageRenderer:
-      enabled: true
+      enabled: false
     ingress:
       enabled: true

@@ -203,7 +203,7 @@ spec:
         thanos:
           image: quay.io/thanos/thanos:v0.32.3
           objectStorageConfig:
-            name: thanos-objstore-secret
+            name: thanos-s3-secret
             key: objstore.yml
         retention: 2d
         retentionSize: 15GB
@@ -222,8 +222,3 @@ spec:
             resources:
               requests:
                 storage: 20Gi
-  valuesFrom:
-    - targetPath: objstoreConfig.config.bucket
-      kind: ConfigMap
-      name: thanos-bucket-v1
-      valuesKey: BUCKET_NAME

@@ -0,0 +1,31 @@
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: thanos
  namespace: monitoring
spec:
  secretStoreRef:
    kind: ClusterSecretStore
    name: onepassword-connect
  target:
    name: thanos-s3-secret
    creationPolicy: Owner
    template:
      engineVersion: v2
      data:
        objstore.yml: |-
          type: s3
          config:
            access_key: {{ .minio_thanos_access_key }}
            bucket: thanos
            endpoint: {{ .minio_s3_host }}
            region: us-east-1
            secret_key: {{ .minio_thanos_secret_key }}
  dataFrom:
    - extract:
        key: minio
      rewrite:
        - regexp:
            source: "(.*)"
            target: "minio_$1"

@@ -35,10 +35,7 @@ spec:
       registry: quay.io
       repository: thanos/thanos
       tag: v0.32.3
-    objstoreConfig:
-      type: s3
-      config:
-        insecure: true
+    existingObjstoreSecret: thanos-s3-secret
     queryFrontend:
       enabled: true
       replicaCount: 3
@@ -71,14 +68,14 @@ spec:
       persistence:
         enabled: true
         storageClass: ceph-block
-        size: 100Gi
+        size: 20Gi
     storegateway:
       enabled: true
       replicaCount: 3
       persistence:
         enabled: true
         storageClass: ceph-block
-        size: 20Gi
+        size: 10Gi
     ruler:
       enabled: true
       replicaCount: 3
@@ -99,29 +96,8 @@ spec:
       persistence:
         enabled: true
         storageClass: ceph-block
-        size: 20Gi
+        size: 5Gi
     metrics:
       enabled: true
       serviceMonitor:
         enabled: true
-  valuesFrom:
-    - targetPath: objstoreConfig.config.bucket
-      kind: ConfigMap
-      name: thanos-bucket-v1
-      valuesKey: BUCKET_NAME
-    - targetPath: objstoreConfig.config.endpoint
-      kind: ConfigMap
-      name: thanos-bucket-v1
-      valuesKey: BUCKET_HOST
-    - targetPath: objstoreConfig.config.region
-      kind: ConfigMap
-      name: thanos-bucket-v1
-      valuesKey: BUCKET_REGION
-    - targetPath: objstoreConfig.config.access_key
-      kind: Secret
-      name: thanos-bucket-v1
-      valuesKey: AWS_ACCESS_KEY_ID
-    - targetPath: objstoreConfig.config.secret_key
-      kind: Secret
-      name: thanos-bucket-v1
-      valuesKey: AWS_SECRET_ACCESS_KEY

@@ -4,7 +4,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 namespace: monitoring
 resources:
-  - ./objectbucketclaim.yaml
+  - ./externalsecret.yaml
   - ./helmrelease.yaml
 configMapGenerator:
   - name: thanos-bucket-replicate-dashboard

@@ -1,9 +0,0 @@
---
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
  name: thanos-bucket-v1
  namespace: monitoring
spec:
  bucketName: thanos-v1
  storageClassName: ceph-bucket

@@ -18,11 +18,11 @@ spec:
     controller:
       replicaCount: 3
-      hostPort:
-        enabled: true
-        ports:
-          http: 81
-          https: 444
+      # hostPort:
+      #   enabled: true
+      #   ports:
+      #     http: 81
+      #     https: 444
       updateStrategy:
         type: Recreate
@@ -31,8 +31,12 @@ spec:
         enabled: true
         type: LoadBalancer
         annotations:
           external-dns.alpha.kubernetes.io/hostname: "ingress.valinor.social"
-          io.cilium/lb-ipam-ips: "10.2.42.1"
+          load-balancer.hetzner.cloud/location: fsn1
+          load-balancer.hetzner.cloud/protocol: tcp
+          load-balancer.hetzner.cloud/name: valinor-nginx
+          load-balancer.hetzner.cloud/use-private-ip: "true"
+          load-balancer.hetzner.cloud/uses-proxyprotocol: "true"
         externalTrafficPolicy: Local
         publishService:

@@ -53,15 +53,15 @@ spec:
         config:
           osdsPerDevice: "1"
         nodes:
-          - name: "valinor-1"
+          - name: "aule"
             devices:
-              - name: /dev/disk/by-id/scsi-0HC_Volume_37231496
-          - name: "valinor-2"
+              - name: /dev/disk/by-id/scsi-0HC_Volume_37460833
+          - name: "eonwe"
             devices:
-              - name: /dev/disk/by-id/scsi-0HC_Volume_37231521
-          - name: "valinor-3"
+              - name: /dev/disk/by-id/scsi-0HC_Volume_37460887
+          - name: "arlen"
             devices:
-              - name: /dev/disk/by-id/scsi-0HC_Volume_37231596
+              - name: /dev/disk/by-id/scsi-0HC_Volume_37460897
     ingress:
       ingressClassName: "nginx"
@@ -101,3 +101,68 @@ spec:
             csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
             csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
             csi.storage.k8s.io/fstype: ext4
+    cephFileSystems:
+      - name: ceph-filesystem
+        spec:
+          metadataPool:
+            replicated:
+              size: 3
+          dataPools:
+            - failureDomain: host
+              replicated:
+                size: 3
+          metadataServer:
+            activeCount: 1
+            activeStandby: true
+            resources:
+              requests:
+                cpu: "35m"
+                memory: "64M"
+              limits:
+                memory: "144M"
+        storageClass:
+          enabled: true
+          isDefault: false
+          name: ceph-filesystem
+          reclaimPolicy: Delete
+          allowVolumeExpansion: true
+          mountOptions: []
+          parameters:
+            csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
+            csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+            csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
+            csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+            csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
+            csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+            csi.storage.k8s.io/fstype: ext4
+    cephObjectStores:
+      - name: ceph-objectstore
+        spec:
+          metadataPool:
+            failureDomain: host
+            replicated:
+              size: 3
+          dataPool:
+            failureDomain: host
+            erasureCoded:
+              dataChunks: 2
+              codingChunks: 1
+          preservePoolsOnDelete: true
+          gateway:
+            port: 80
+            resources:
+              requests:
+                cpu: 100m
+                memory: 128M
+              limits:
+                memory: 2Gi
+            instances: 1
+          healthCheck:
+            bucket:
+              interval: 60s
+        storageClass:
+          enabled: true
+          name: ceph-bucket
+          reclaimPolicy: Delete
+          parameters:
+            region: us-east-1
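# The object store's erasure-coded data pool (2 data + 1 coding chunks) survives
# the loss of any one chunk at 1.5x storage overhead, versus 3x for the
# replicated (size: 3) metadata pools.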

@@ -23,7 +23,7 @@ spec:
     image:
       repository: docker.io/1password/connect-api
-      tag: 1.7.2@sha256:6aa94cf713f99c0fa58c12ffdd1b160404b4c13a7f501a73a791aa84b608c5a1
+      tag: 1.7.2
     env:
       OP_BUS_PORT: "11220"

@@ -0,0 +1,10 @@
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmRepository
metadata:
  name: hetzner
  namespace: flux-system
spec:
  interval: 30m
  url: https://charts.hetzner.cloud
  timeout: 3m

@@ -1,3 +1,4 @@
+# yaml-language-server: $schema=https://json.schemastore.org/kustomization.json
 ---
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
@@ -12,6 +13,7 @@ resources:
   - external-secrets.yaml
   - fairwinds.yaml
   - grafana.yaml
+  - hetzner.yaml
   - ingress-nginx.yaml
   - jahanson.yaml
   - jetstack.yaml

@@ -2,11 +2,11 @@
 # shellcheck disable=2312
 pushd integrations >/dev/null 2>&1 || exit 1
-rm -rf cni/charts
-envsubst < ../../kubernetes/apps/kube-system/cilium/app/values.yaml > cni/values.yaml
-kustomize build --enable-helm cni | kubectl apply -f -
-rm cni/values.yaml
-rm -rf cni/charts
+#rm -rf cni/charts
+#envsubst < ../../kubernetes/apps/kube-system/cilium/app/values.yaml > cni/values.yaml
+#kustomize build --enable-helm cni | kubectl apply -f -
+#rm cni/values.yaml
+#rm -rf cni/charts
 rm -rf kubelet-csr-approver/charts
 envsubst < ../../kubernetes/apps/system/kubelet-csr-approver/app/values.yaml > kubelet-csr-approver/values.yaml