# One-shot privileged pods, pinned one per Talos node, that wipe the Ceph OSD
# disk ($CEPH_DISK): zap the partition tables, discard all blocks, zero the
# first 1000 MiB, then force the kernel to re-read the partition table.
---
apiVersion: v1
kind: Pod
metadata:
  name: disk-wipe-nessa
  namespace: rook-ceph
spec:
  restartPolicy: Never
  nodeName: talos-nz9-4fz
  containers:
    - name: disk-wipe
      image: ghcr.io/onedr0p/alpine:3.19.1@sha256:b3d3b9e47e3b5736d045a5e9f2f1d1bbf5c1c38b8c0c296d4609079cfaf9a4bd
      securityContext:
        privileged: true
      resources: {}
      env:
        - name: CEPH_DISK
          value: "/dev/xvdb"
      command: [ "/bin/sh", "-c" ]
      args:
        - apk add --no-cache sgdisk util-linux parted;
          sgdisk --zap-all $CEPH_DISK;
          blkdiscard $CEPH_DISK;
          dd if=/dev/zero bs=1M count=1000 oflag=direct of=$CEPH_DISK;
          partprobe $CEPH_DISK;
      volumeMounts:
        - mountPath: /mnt/host_var
          name: host-var
  volumes:
    - name: host-var
      hostPath:
        path: /var
---
apiVersion: v1
kind: Pod
metadata:
  name: disk-wipe-nienna
  namespace: rook-ceph
spec:
  restartPolicy: Never
  nodeName: talos-9c9-dj0
  containers:
    - name: disk-wipe
      image: ghcr.io/onedr0p/alpine:3.19.1@sha256:b3d3b9e47e3b5736d045a5e9f2f1d1bbf5c1c38b8c0c296d4609079cfaf9a4bd
      securityContext:
        privileged: true
      resources: {}
      env:
        - name: CEPH_DISK
          value: "/dev/xvdb"
      command: [ "/bin/sh", "-c" ]
      args:
        - apk add --no-cache sgdisk util-linux parted;
          sgdisk --zap-all $CEPH_DISK;
          blkdiscard $CEPH_DISK;
          dd if=/dev/zero bs=1M count=1000 oflag=direct of=$CEPH_DISK;
          partprobe $CEPH_DISK;
      volumeMounts:
        - mountPath: /mnt/host_var
          name: host-var
  volumes:
    - name: host-var
      hostPath:
        path: /var
---
apiVersion: v1
kind: Pod
metadata:
  name: disk-wipe-orome
  namespace: rook-ceph
spec:
  restartPolicy: Never
  nodeName: talos-dz9-5ys
  containers:
    - name: disk-wipe
      image: ghcr.io/onedr0p/alpine:3.19.1@sha256:b3d3b9e47e3b5736d045a5e9f2f1d1bbf5c1c38b8c0c296d4609079cfaf9a4bd
      securityContext:
        privileged: true
      resources: {}
      env:
        - name: CEPH_DISK
          value: "/dev/xvdb"
      command: [ "/bin/sh", "-c" ]
      args:
        - apk add --no-cache sgdisk util-linux parted;
          sgdisk --zap-all $CEPH_DISK;
          blkdiscard $CEPH_DISK;
          dd if=/dev/zero bs=1M count=1000 oflag=direct of=$CEPH_DISK;
          partprobe $CEPH_DISK;
      volumeMounts:
        - mountPath: /mnt/host_var
          name: host-var
  volumes:
    - name: host-var
      hostPath:
        path: /var