# NOTE: This repository was archived on 2024-02-11 (read-only — no pushes, issues, or pull requests).
# Source: valinor/kubernetes/apps/rook-ceph/rook-ceph/cluster/helmrelease.yaml (179 lines, 5 KiB, YAML)
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/fluxcd-community/flux2-schemas/main/helmrelease-helm-v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: rook-ceph-cluster
  namespace: rook-ceph
spec:
  interval: 30m
  chart:
    spec:
      chart: rook-ceph-cluster
      version: v1.12.4
      sourceRef:
        kind: HelmRepository
        name: rook-ceph
        namespace: flux-system
  maxHistory: 2
  install:
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      retries: 3
  uninstall:
    keepHistory: false
  values:
    toolbox:
      enabled: true
    monitoring:
      enabled: true
      createPrometheusRules: true
    # Appended verbatim to ceph.conf: issue discards (TRIM) from BlueStore to
    # the underlying block devices.
    configOverride: |
      [global]
      bdev_enable_discard = true
      bdev_async_discard = true
    cephClusterSpec:
      network:
        provider: host
      crashCollector:
        disable: false
      dashboard:
        enabled: true
        urlPrefix: /
      storage:
        # Only the explicitly listed nodes/devices below join the cluster.
        useAllNodes: false
        useAllDevices: false
        config:
          osdsPerDevice: "1"
        nodes:
          - name: "aule"
            devices:
              - name: /dev/disk/by-id/scsi-0HC_Volume_37460833
          - name: "eonwe"
            devices:
              - name: /dev/disk/by-id/scsi-0HC_Volume_37460887
          - name: "arlen"
            devices:
              - name: /dev/disk/by-id/scsi-0HC_Volume_37460897
          - name: "orome"
            devices:
              - name: /dev/disk/by-id/scsi-0HC_Volume_37645333
      resources:
        mgr:
          limits:
            cpu: "1000m"
            memory: "1Gi"
          requests:
            cpu: "1000m"
            memory: "1Gi"
    ingress:
      # NOTE(review): the rook-ceph-cluster chart documents ingressClassName
      # under ingress.dashboard — confirm this key is picked up at this level.
      ingressClassName: "nginx"
      dashboard:
        annotations:
          nginx.ingress.kubernetes.io/whitelist-source-range: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
          # Ceph dashboard serves TLS itself, so NGINX must proxy with HTTPS.
          nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
        host:
          name: &host rook.valinor.social
          path: "/"
        tls:
          - hosts:
              - *host
    cephBlockPoolsVolumeSnapshotClass:
      enabled: false
    cephBlockPools:
      - name: ceph-blockpool
        spec:
          failureDomain: host
          replicated:
            size: 3
        storageClass:
          enabled: true
          name: ceph-block
          # Default StorageClass for the cluster.
          isDefault: true
          reclaimPolicy: Delete
          allowVolumeExpansion: true
          parameters:
            imageFormat: "2"
            imageFeatures: layering
            csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
            csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
            csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
            csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
            csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
            csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
            csi.storage.k8s.io/fstype: ext4
    cephFileSystems:
      - name: ceph-filesystem
        spec:
          metadataPool:
            replicated:
              size: 3
          dataPools:
            - failureDomain: host
              replicated:
                size: 3
          metadataServer:
            activeCount: 1
            activeStandby: true
            resources:
              requests:
                cpu: "35m"
                memory: "64M"
              limits:
                memory: "144M"
        storageClass:
          enabled: true
          isDefault: false
          name: ceph-filesystem
          reclaimPolicy: Delete
          allowVolumeExpansion: true
          mountOptions: []
          parameters:
            csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
            csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
            csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
            csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
            csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
            csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
            csi.storage.k8s.io/fstype: ext4
    cephObjectStores:
      - name: ceph-objectstore
        spec:
          metadataPool:
            failureDomain: host
            replicated:
              size: 3
          dataPool:
            failureDomain: host
            # EC 2+1: tolerates loss of one host while storing 1.5x raw data.
            erasureCoded:
              dataChunks: 2
              codingChunks: 1
          preservePoolsOnDelete: true
          gateway:
            port: 80
            resources:
              requests:
                cpu: 100m
                memory: 128M
              limits:
                memory: 2Gi
            instances: 1
          healthCheck:
            bucket:
              interval: 60s
        storageClass:
          enabled: true
          name: ceph-bucket
          reclaimPolicy: Delete
          parameters:
            region: us-east-1