theshire/kubernetes/bootstrap/talos/talconfig.yaml

---
# yaml-language-server: $schema=https://ks.hsn.dev/talhelper-schema.json
clusterName: homelab
talosVersion: v1.8.0-alpha.1
kubernetesVersion: 1.30.2
endpoint: "https://10.1.1.57:6443"
additionalApiServerCertSans:
  - 10.1.1.57
additionalMachineCertSans:
  - 10.1.1.57
nodes:
  - hostname: shadowfax
    disableSearchDomain: true
    ipAddress: 10.1.1.61
    controlPlane: true
    installDiskSelector:
      busPath: /pci0000:20/0000:20:01.2/0000:2c:00.0/nvme/nvme4/nvme4n1
    machineDisks:
      - device: /dev/disk/by-id/nvme-SOLIDIGM_SSDPFKNU020TZ_PHEH3142017H2P0C
        partitions:
          - mountpoint: /var/mnt/nvme1
    networkInterfaces:
      - interface: enp37s0f1
        dhcp: true
      - interface: enp37s0f0
        dhcp: false
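    # NVIDIA driver modules; these are shipped by the nonfree-kmod-nvidia system extension selected below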
    kernelModules:
      - name: nvidia
      - name: nvidia_uvm
      - name: nvidia_drm
      - name: nvidia_modeset
    schematic:
      customization:
        systemExtensions:
          officialExtensions:
            - siderolabs/amd-ucode
            - siderolabs/nonfree-kmod-nvidia
            - siderolabs/nvidia-container-toolkit
            # Need talos 1.8 for nvidia and zfs to coexist
            # https://github.com/siderolabs/extensions/issues/380
            - siderolabs/zfs
    patches:
      - |-
        machine:
          sysctls:
            net.core.bpf_jit_harden: 1
            vm.nr_hugepages: "1024"
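      # Bind-mounts the nvme1 partition defined in machineDisks into the kubelet;
      # the &kubelet_extra_mounts anchor presumably exists so future node entries can reuse this patch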
      - &kubelet_extra_mounts |-
        machine:
          kubelet:
            extraMounts:
              - destination: /var/mnt/nvme1
                type: bind
                source: /var/mnt/nvme1
                options:
                  - rbind
                  - rshared
                  - rw
controlPlane:
  patches:
    # Disable search domain everywhere
    - |-
      machine:
        network:
          disableSearchDomain: true
    # Force nameserver
    - |-
      machine:
        network:
          nameservers:
            - 10.1.1.1
    # Configure NTP
    - |-
      machine:
        time:
          disabled: false
          servers:
            - 10.1.1.1
    # Enable KubePrism
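    # (KubePrism gives every node a local, load-balanced Kubernetes API endpoint on the port below)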
    - |-
      machine:
        features:
          kubePrism:
            enabled: true
            port: 7445
    # Cluster configuration
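    # (kube-proxy and the built-in CNI are disabled; a CNI is expected to be deployed separately)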
    - |-
      cluster:
        allowSchedulingOnMasters: true
        proxy:
          disabled: true
        network:
          cni:
            name: none
        controllerManager:
          extraArgs:
            bind-address: 0.0.0.0
        etcd:
          extraArgs:
            listen-metrics-urls: http://0.0.0.0:2381
        scheduler:
          extraArgs:
            bind-address: 0.0.0.0
    # ETCD configuration
    - |-
      cluster:
        etcd:
          advertisedSubnets:
            - 10.1.1.0/24
    # Disable default API server admission plugins.
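    # (written as a JSON patch op rather than a merge patch)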
    - |-
      - op: remove
        path: /cluster/apiServer/admissionControl
    # Enable K8s Talos API Access
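    # (lets workloads in the system-upgrade namespace reach the Talos API as os:admin, presumably for a system-upgrade controller)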
    - |-
      machine:
        features:
          kubernetesTalosAPIAccess:
            enabled: true
            allowedRoles:
              - os:admin
            allowedKubernetesNamespaces:
              - system-upgrade
    # Kubelet configuration
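    # (rotate-server-certificates means kubelet serving-certificate CSRs need something in-cluster to approve them)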
    - |-
      machine:
        kubelet:
          defaultRuntimeSeccompProfileEnabled: true
          extraArgs:
            rotate-server-certificates: "true"
          extraConfig:
            maxPods: 150
          nodeIP:
            validSubnets:
              - 10.1.1.0/24
          extraMounts:
            - destination: /var/openebs/keys
              type: bind
              source: /var/openebs/keys
              options:
                - bind
                - rshared
                - rw
            - destination: /var/openebs/local
              type: bind
              source: /var/openebs/local
              options:
                - bind
                - rshared
                - rw
    # Custom sysctls
    - |-
      machine:
        sysctls:
          fs.inotify.max_queued_events: "65536"
          fs.inotify.max_user_instances: "8192"
          fs.inotify.max_user_watches: "524288"
          net.core.rmem_max: "2500000"
          net.core.wmem_max: "2500000"
    # Configure nfs mount options
    - |
      machine:
        files:
          - op: overwrite
            path: /etc/nfsmount.conf
            permissions: 0o644
            content: |
              [ NFSMount_Global_Options ]
              nfsvers=4.1
              hard=True
              noatime=True
              nodiratime=True
              rsize=131072
              wsize=131072
              nconnect=8
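
# Assumed usage: Talos machine configs are rendered from this file with `talhelper genconfig`.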