# theshire/kubernetes/apps/rook-ceph/rook-ceph/cluster/helmrelease.yaml
---
# yaml-language-server: $schema=https://ks.hsn.dev/helm.toolkit.fluxcd.io/helmrelease_v2beta2.json
apiVersion: helm.toolkit.fluxcd.io/v2beta2
kind: HelmRelease
metadata:
  name: rook-ceph-cluster
spec:
  interval: 30m
  timeout: 15m
  chart:
    spec:
      chart: rook-ceph-cluster
      version: v1.14.2
      sourceRef:
        kind: HelmRepository
        name: rook-ceph
        namespace: flux-system
  install:
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      retries: 3
  uninstall:
    keepHistory: false
  dependsOn:
    - name: rook-ceph-operator
      namespace: rook-ceph
    - name: snapshot-controller
      namespace: volsync-system
  values:
    monitoring:
      enabled: true
      createPrometheusRules: true
    ingress:
      dashboard:
        ingressClassName: internal-nginx
        host:
          name: &host rook.jahanson.tech
          path: /
        tls:
          - hosts:
              - *host
    toolbox:
      enabled: true
    # Extra ceph.conf settings appended to the cluster config.
    configOverride: |
      [global]
      bdev_enable_discard = true
      bdev_async_discard = true
      osd_class_update_on_start = false
    cephClusterSpec:
      network:
        provider: host
        connections:
          requireMsgr2: true
      crashCollector:
        disable: false
      dashboard:
        enabled: true
        urlPrefix: /
        ssl: false
      storage:
        useAllNodes: true
        useAllDevices: false
        deviceFilter: "xvdb|nvme1n1|nvme0n1"
      placement:
        # Pin mgr (and, via the alias below, mon) daemons to control-plane nodes.
        mgr: &placement
          nodeAffinity:
            requiredDuringSchedulingIgnoredDuringExecution:
              nodeSelectorTerms:
                - matchExpressions:
                    - key: node-role.kubernetes.io/control-plane
                      operator: Exists
          tolerations:  # allow mgr to run on control plane nodes
            - key: node-role.kubernetes.io/control-plane
              operator: Exists
              effect: NoSchedule
        mon: *placement
      resources:
        mgr:
          requests:
            cpu: 500m
            memory: 512Mi
          limits:
            cpu: 2000m
            memory: 2Gi
        mon:
          requests:
            cpu: 500m
            memory: 1Gi
          limits:
            cpu: 4000m
            memory: 4Gi
        osd:
          requests:
            cpu: 500m
            memory: 4Gi
          limits:
            cpu: 4000m
            memory: 8Gi
    cephBlockPools:
      - name: ceph-blockpool
        spec:
          failureDomain: host
          replicated:
            size: 3
        storageClass:
          enabled: true
          name: ceph-block
          isDefault: true
          reclaimPolicy: Delete
          allowVolumeExpansion: true
          parameters:
            imageFormat: "2"
            imageFeatures: layering
            csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
            csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
            csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
            csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
            csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
            csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
            csi.storage.k8s.io/fstype: ext4
    cephBlockPoolsVolumeSnapshotClass:
      enabled: true
      name: csi-ceph-blockpool
      isDefault: false
      deletionPolicy: Delete
    cephFileSystems:
      - name: ceph-filesystem
        spec:
          metadataPool:
            replicated:
              size: 3
          dataPools:
            - failureDomain: host
              replicated:
                size: 3
              name: data0
          metadataServer:
            activeCount: 1
            activeStandby: true
            resources:
              requests:
                cpu: 1000m
                memory: 4Gi
              limits:
                memory: 4Gi
        storageClass:
          enabled: true
          isDefault: false
          name: ceph-filesystem
          pool: data0
          reclaimPolicy: Delete
          allowVolumeExpansion: true
          parameters:
            csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
            csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
            csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
            csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
            csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
            csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
            csi.storage.k8s.io/fstype: ext4
    cephFileSystemVolumeSnapshotClass:
      enabled: true
      name: csi-ceph-filesystem
      isDefault: false
      deletionPolicy: Delete
    cephObjectStores:
      - name: ceph-objectstore
        spec:
          metadataPool:
            failureDomain: host
            replicated:
              size: 3
          dataPool:
            failureDomain: host
            erasureCoded:
              dataChunks: 2
              codingChunks: 1
          preservePoolsOnDelete: true
          gateway:
            port: 80
            resources:
              requests:
                cpu: 1000m
                memory: 1Gi
              limits:
                memory: 2Gi
            instances: 2
          healthCheck:
            bucket:
              interval: 60s
        storageClass:
          enabled: true
          name: ceph-bucket
          reclaimPolicy: Delete
          parameters:
            region: us-east-1