Adding hdds to rook-ceph

Joseph Hanson 2023-12-28 14:01:08 -06:00
parent 27de5b2df2
commit 02a8e2b4a5
2 changed files with 109 additions and 3 deletions

@@ -43,6 +43,7 @@ spec:
    count: 5
  network:
    provider: host
  # placement:
  #   mon:
  #     nodeAffinity:
@@ -53,9 +54,6 @@ spec:
  #             operator: In
  #             values:
  #               - control-plane
  crashCollector:
    disable: false
  dashboard:
@@ -72,12 +70,15 @@ spec:
      - name: "nienna"
        devices:
          - name: /dev/disk/by-id/nvme-SAMSUNG_MZVLB1T0HALR-00000_S3W6NA0M610693
          - name: /dev/disk/by-id/ata-ST16000NM001J-2TW113_ZR5E7NQR
      - name: "orome"
        devices:
          - name: /dev/disk/by-id/nvme-SAMSUNG_MZVLB1T0HBLR-00000_S4GJNX0R613503
          - name: /dev/disk/by-id/ata-ST16000NM001J-2TW113_ZR6021Z3
      - name: "nessa"
        devices:
          - name: /dev/disk/by-id/nvme-SAMSUNG_MZVL21T0HCLR-00B00_S676NU0W641201
          - name: /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi2
  resources:
    mgr:
      limits:
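The new devices are referenced by /dev/disk/by-id paths, so each OSD maps to a disk by its serial number rather than a kernel name like /dev/sda, which can change between boots. A quick way to look up the stable identifier for a newly installed drive (a sketch, run on the node itself; /dev/sda is just an illustrative device):

    # list serial-based identifiers, skipping per-partition symlinks
    ls -l /dev/disk/by-id/ | grep -v -- -part

    # or resolve a specific kernel device to its by-id symlinks
    udevadm info --query=symlink --name=/dev/sda | tr ' ' '\n' | grep '^disk/by-id'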

@@ -0,0 +1,105 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: disk-wipe-nessa
spec:
  restartPolicy: Never
  nodeName: nessa
  containers:
    - name: disk-wipe
      image: ghcr.io/onedr0p/alpine:3.19.0@sha256:51632d23e56ae28a34f8e90df6fe8d02730b5462697ae52e8b01ad6484497819
      securityContext:
        privileged: true
      resources: {}
      env:
        - name: CEPH_DISK
          value: "/dev/sda"
      command:
        [
          "/bin/sh",
          "-c"
        ]
      args:
        - apk add --no-cache sgdisk util-linux parted;
          sgdisk --zap-all $CEPH_DISK;
          dd if=/dev/zero bs=1M count=1000 oflag=direct of=$CEPH_DISK;
          partprobe $CEPH_DISK;
      volumeMounts:
        - mountPath: /mnt/host_var
          name: host-var
  volumes:
    - name: host-var
      hostPath:
        path: /var
---
apiVersion: v1
kind: Pod
metadata:
  name: disk-wipe-nienna
spec:
  restartPolicy: Never
  nodeName: nienna
  containers:
    - name: disk-wipe
      image: ghcr.io/onedr0p/alpine:3.19.0@sha256:51632d23e56ae28a34f8e90df6fe8d02730b5462697ae52e8b01ad6484497819
      securityContext:
        privileged: true
      resources: {}
      env:
        - name: CEPH_DISK
          value: "/dev/sda"
      command:
        [
          "/bin/sh",
          "-c"
        ]
      args:
        - apk add --no-cache sgdisk util-linux parted;
          sgdisk --zap-all $CEPH_DISK;
          dd if=/dev/zero bs=1M count=1000 oflag=direct of=$CEPH_DISK;
          partprobe $CEPH_DISK;
      volumeMounts:
        - mountPath: /mnt/host_var
          name: host-var
  volumes:
    - name: host-var
      hostPath:
        path: /var
---
apiVersion: v1
kind: Pod
metadata:
  name: disk-wipe-orome
spec:
  restartPolicy: Never
  nodeName: orome
  containers:
    - name: disk-wipe
      image: ghcr.io/onedr0p/alpine:3.19.0@sha256:51632d23e56ae28a34f8e90df6fe8d02730b5462697ae52e8b01ad6484497819
      securityContext:
        privileged: true
      resources: {}
      env:
        - name: CEPH_DISK
          value: "/dev/sda"
      command:
        [
          "/bin/sh",
          "-c"
        ]
      args:
        - apk add --no-cache sgdisk util-linux parted;
          sgdisk --zap-all $CEPH_DISK;
          dd if=/dev/zero bs=1M count=1000 oflag=direct of=$CEPH_DISK;
          partprobe $CEPH_DISK;
      volumeMounts:
        - mountPath: /mnt/host_var
          name: host-var
  volumes:
    - name: host-var
      hostPath:
        path: /var
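Each wipe pod pins itself to its node with nodeName, installs sgdisk, util-linux, and parted, zaps the GPT and MBR structures on $CEPH_DISK, zeroes the first 1000 MiB to clear any leftover filesystem or Ceph signatures, and re-reads the partition table with partprobe. The /var hostPath mount gives the pod access to the node's /var in case stale Rook state under /var/lib/rook also needs removing, though the command above does not touch it.

A rough usage sketch; the manifest filename is an assumption, and kubectl wait with a jsonpath condition needs kubectl v1.23 or newer:

    kubectl apply -f disk-wipe.yaml
    kubectl wait --for=jsonpath='{.status.phase}'=Succeeded --timeout=10m \
      pod/disk-wipe-nessa pod/disk-wipe-nienna pod/disk-wipe-orome
    kubectl logs disk-wipe-nessa   # dd prints its transfer summary on success
    kubectl delete -f disk-wipe.yaml

Once the disks are blank, the Rook operator can claim the device paths listed in the cluster spec and create OSDs on them.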