# theshire/kubernetes/tools/wipe-rook-slow.yaml
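#
# One-shot privileged pods that destructively wipe the Ceph OSD disk
# ($CEPH_DISK, here /dev/sda) on each node, typically before rebuilding a
# Rook cluster. Each pod is pinned to its node via spec.nodeName and runs
# the same script:
#   1. apk add sgdisk util-linux parted  - install the wipe tooling
#   2. sgdisk --zap-all                  - destroy GPT/MBR partition structures
#   3. dd if=/dev/zero ... count=1000    - zero the first 1000 MiB (the "slow" part)
#   4. partprobe                         - ask the kernel to reread the partition table
#
# WARNING: applying this manifest irreversibly erases data on the target disks.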

---
apiVersion: v1
kind: Pod
metadata:
  name: disk-wipe-slow-nessa
spec:
  restartPolicy: Never
  nodeName: nessa
  containers:
    - name: disk-wipe
      image: ghcr.io/onedr0p/alpine:3.19.0@sha256:51632d23e56ae28a34f8e90df6fe8d02730b5462697ae52e8b01ad6484497819
      securityContext:
        privileged: true
      resources: {}
      env:
        - name: CEPH_DISK
          value: "/dev/sda"
      command:
        [
          "/bin/sh",
          "-c"
        ]
      args:
        - apk add --no-cache sgdisk util-linux parted;
          sgdisk --zap-all $CEPH_DISK;
          dd if=/dev/zero bs=1M count=1000 oflag=direct of=$CEPH_DISK;
          partprobe $CEPH_DISK;
      volumeMounts:
        - mountPath: /mnt/host_var
          name: host-var
  volumes:
    - name: host-var
      hostPath:
        path: /var
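# Identical wipe, pinned to node "nienna".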
---
apiVersion: v1
kind: Pod
metadata:
  name: disk-wipe-slow-nienna
spec:
  restartPolicy: Never
  nodeName: nienna
  containers:
    - name: disk-wipe
      image: ghcr.io/onedr0p/alpine:3.19.0@sha256:51632d23e56ae28a34f8e90df6fe8d02730b5462697ae52e8b01ad6484497819
      securityContext:
        privileged: true
      resources: {}
      env:
        - name: CEPH_DISK
          value: "/dev/sda"
      command:
        [
          "/bin/sh",
          "-c"
        ]
      args:
        - apk add --no-cache sgdisk util-linux parted;
          sgdisk --zap-all $CEPH_DISK;
          dd if=/dev/zero bs=1M count=1000 oflag=direct of=$CEPH_DISK;
          partprobe $CEPH_DISK;
      volumeMounts:
        - mountPath: /mnt/host_var
          name: host-var
  volumes:
    - name: host-var
      hostPath:
        path: /var
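# Identical wipe, pinned to node "orome".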
---
apiVersion: v1
kind: Pod
metadata:
  name: disk-wipe-slow-orome
spec:
  restartPolicy: Never
  nodeName: orome
  containers:
    - name: disk-wipe
      image: ghcr.io/onedr0p/alpine:3.19.0@sha256:51632d23e56ae28a34f8e90df6fe8d02730b5462697ae52e8b01ad6484497819
      securityContext:
        privileged: true
      resources: {}
      env:
        - name: CEPH_DISK
          value: "/dev/sda"
      command:
        [
          "/bin/sh",
          "-c"
        ]
      args:
        - apk add --no-cache sgdisk util-linux parted;
          sgdisk --zap-all $CEPH_DISK;
          dd if=/dev/zero bs=1M count=1000 oflag=direct of=$CEPH_DISK;
          partprobe $CEPH_DISK;
      volumeMounts:
        - mountPath: /mnt/host_var
          name: host-var
  volumes:
    - name: host-var
      hostPath:
        path: /var
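# A typical run might look like this (assuming the repo-relative path above;
# pods land in the current namespace unless one is set with -n):
#
#   kubectl apply -f kubernetes/tools/wipe-rook-slow.yaml
#   kubectl get pods -w                    # wait for all three pods to reach Completed
#   kubectl logs disk-wipe-slow-nessa      # inspect output from one node's wipe
#   kubectl delete -f kubernetes/tools/wipe-rook-slow.yaml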