Cluster is not k3s anymore.

Joseph Hanson 2024-03-04 09:05:47 -06:00
parent ddca67b512
commit 203a3967eb
27 changed files with 0 additions and 993 deletions

View file

@@ -1,10 +0,0 @@
#!/bin/bash
cilium install \
  --helm-set=ipam.mode=kubernetes \
  --helm-set=kubeProxyReplacement=true \
  --helm-set=k8sServiceHost=167.235.217.82 \
  --helm-set=policyAuditMode=true \
  --helm-set=hostFirewall.enabled=true \
  --helm-set=extraConfig.allow-localhost=policy \
  --helm-set=hubble.relay.enabled=true \
  --helm-set=hubble.ui.enabled=true

View file

@@ -1,8 +0,0 @@
#shellcheck disable=SC2148,SC2155
export SOPS_AGE_KEY_FILE="$(expand_path ../../age.key)"
export VIRTUAL_ENV="$(expand_path ../../.venv)"
export ANSIBLE_COLLECTIONS_PATH=$(expand_path ../../.venv/galaxy)
export ANSIBLE_ROLES_PATH=$(expand_path ../../.venv/galaxy/ansible_roles)
export ANSIBLE_VARS_ENABLED="host_group_vars,community.sops.sops"
export ANSIBLE_INVENTORY=$(expand_path ./inventory/hosts.yaml)
PATH_add "$(expand_path ../../.venv/bin)"

View file

@@ -1,28 +0,0 @@
---
# renovate: datasource=github-releases depName=k3s-io/k3s
k3s_release_version: "v1.29.0+k3s1"
k3s_install_hard_links: true
k3s_become: true
k3s_etcd_datastore: true
k3s_registration_address: 10.5.0.2
# /var/lib/rancher/k3s/server/manifests
k3s_server_manifests_urls:
  # Essential Prometheus Operator CRDs (the rest are installed with the kube-prometheus-stack helm release)
  - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
    filename: custom-prometheus-podmonitors.yaml
  - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
    filename: custom-prometheus-prometheusrules.yaml
  - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
    filename: custom-prometheus-scrapeconfigs.yaml
  - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
    filename: custom-prometheus-servicemonitors.yaml
# /var/lib/rancher/k3s/server/manifests
k3s_server_manifests_templates:
  - custom-coredns-helmchart.yaml.j2
  - custom-cilium-helmchart.yaml.j2
# k3s_registries:
#   mirrors:
#     docker.io:
#       endpoint: ["http://harbor.hsn.dev/v2/docker.io"]
#     ghcr.io:
#       endpoint: ["http://harbor.hsn.dev/v2/ghcr.io"]

View file

@@ -1,3 +0,0 @@
---
github_username: jahanson
timezone: America/Chicago

View file

@@ -1,25 +0,0 @@
---
k3s_control_node: true
k3s_server:
  cluster-cidr: 10.32.0.0/16
  disable: ["coredns", "flannel", "local-storage", "metrics-server", "servicelb", "traefik"]
  disable-cloud-controller: true
  disable-helm-controller: false
  disable-kube-proxy: true
  disable-network-policy: true
  docker: false
  etcd-disable-snapshots: true
  etcd-expose-metrics: true
  flannel-backend: "none" # quote
  https-listen-port: 6443
  # kube-apiserver-arg: ["anonymous-auth=true"]
  # kubelet-arg: ["feature-gates=ImageMaximumGCAge=true","imageMaximumGCAge=30m"]
  kubelet-arg: ["image-gc-high-threshold=85","image-gc-low-threshold=80"]
  kube-controller-manager-arg: ["bind-address=0.0.0.0"]
  kube-scheduler-arg: ["bind-address=0.0.0.0"]
  node-ip: "{{ ansible_host }}"
  pause-image: registry.k8s.io/pause:3.9
  secrets-encryption: true
  service-cidr: 10.33.0.0/16
  tls-san: ["{{ k3s_registration_address }}"]
  write-kubeconfig-mode: "0644"

View file

@@ -1,5 +0,0 @@
---
k3s_control_node: false
k3s_agent:
  node-ip: "{{ ansible_host }}"
  pause-image: registry.k8s.io/pause:3.9

View file

@@ -1,32 +0,0 @@
---
kubernetes:
  vars:
    ansible_user: jahanson
    ansible_ssh_port: 22
  children:
    master:
      hosts:
        galadriel:
          ansible_host: 10.1.1.61
        thrain:
          ansible_host: 10.1.1.62
        cirdan:
          ansible_host: 10.1.1.63
    workers:
      hosts:
        nenya:
          ansible_host: 10.1.1.41
          ceph_drives:
            - /dev/disk/by-id/nvme-PC300_NVMe_SK_hynix_256GB_EJ75N587410705M4U
        vilya:
          ansible_host: 10.1.1.42
          ceph_drives:
            - /dev/disk/by-id/nvme-PC300_NVMe_SK_hynix_256GB_EJ75N587411205N58
        elrond:
          ansible_host: 10.1.1.43
          ceph_drives:
            - /dev/xvdb
        narya:
          ansible_host: 10.1.1.44
          ceph_drives:
            - /dev/disk/by-id/nvme-Samsung_SSD_960_EVO_250GB_S3ESNX0K308438J

View file

@@ -1,44 +0,0 @@
---
- name: Add user 'jahanson' and add to sudo group
  hosts: all
  become: true
  tasks:
    - name: Create user 'jahanson'
      ansible.builtin.user:
        name: jahanson
        state: present
    - name: Add user 'jahanson' to sudo group
      when: ansible_user == 'root'
      ansible.builtin.user:
        name: jahanson
        groups: sudo
        append: true
    - name: User Configuration | SSH keys
      ansible.posix.authorized_key:
        user: "jahanson"
        key: "https://github.com/jahanson.keys"
    - name: User Configuration | Silence login
      ansible.builtin.file:
        dest: "{{ '/home/' + ansible_user if ansible_user != 'root' else '/root' }}/.hushlogin"
        state: touch
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        mode: "0644"
        modification_time: preserve
        access_time: preserve
    - name: Copy .vimrc file
      ansible.builtin.copy:
        src: "files/.vimrc"
        dest: "/home/jahanson/.vimrc"
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        mode: "0644"
    - name: User Configuration | Add user to sudoers
      ansible.builtin.copy:
        content: "jahanson ALL=(ALL:ALL) NOPASSWD:ALL"
        dest: "/etc/sudoers.d/jahanson"
        owner: root
        group: root
        mode: "0440"

View file

@@ -1,40 +0,0 @@
---
- name: Reset Ceph Drives
  hosts: kubernetes
  become: true
  gather_facts: true
  any_errors_fatal: true
  pre_tasks:
    - name: Pausing for 2 seconds...
      ansible.builtin.pause:
        seconds: 2
  tasks:
    - name: Reset Ceph Drives # noqa: ignore-errors
      ignore_errors: true
      when: ceph_drives | default([]) | length > 0
      block:
        - name: Delete (/var/lib/rook)
          ansible.builtin.file:
            state: absent
            path: /var/lib/rook
        - name: Delete (/dev/mapper/ceph-*) # noqa: no-changed-when
          ansible.builtin.shell: |
            set -o pipefail
            ls /dev/mapper/ceph-* | xargs -I% -- dmsetup remove_all --force % || true
        - name: Delete (/dev/ceph-*) # noqa: no-changed-when
          ansible.builtin.command: rm -rf /dev/ceph-*
        - name: Delete (/dev/mapper/ceph--*) # noqa: no-changed-when
          ansible.builtin.command: rm -rf /dev/mapper/ceph--*
        - name: Wipe (sgdisk) # noqa: no-changed-when
          ansible.builtin.command: "sgdisk --zap-all {{ item }}"
          loop: "{{ ceph_drives }}"
        - name: Wipe (dd) # noqa: no-changed-when
          ansible.builtin.command: "dd if=/dev/zero of={{ item }} bs=1M count=100 oflag=direct,dsync"
          loop: "{{ ceph_drives }}"
        - name: Wipe (blkdiscard) # noqa: no-changed-when
          ansible.builtin.command: "blkdiscard {{ item }}"
          loop: "{{ ceph_drives }}"
          when: "'nvme' in item"
        - name: Wipe (partprobe) # noqa: no-changed-when
          ansible.builtin.command: "partprobe {{ item }}"
          loop: "{{ ceph_drives }}"

View file

@@ -1,107 +0,0 @@
---
- name: Cluster Installation
  hosts: kubernetes
  become: true
  gather_facts: true
  any_errors_fatal: true
  pre_tasks:
    - name: Pausing for 2 seconds...
      ansible.builtin.pause:
        seconds: 2
  tasks:
    - name: Check if cluster is installed
      check_mode: false
      ansible.builtin.stat:
        path: /etc/rancher/k3s/config.yaml
      register: k3s_installed
    - name: Ignore manifests templates and urls if the cluster is already installed
      when: k3s_installed.stat.exists
      ansible.builtin.set_fact:
        k3s_server_manifests_templates: []
        k3s_server_manifests_urls: []
    - name: Install Kubernetes
      ansible.builtin.include_role:
        name: xanmanning.k3s
        public: true
      vars:
        k3s_state: installed
    - name: Kubeconfig
      ansible.builtin.include_tasks: tasks/kubeconfig.yaml
      vars:
        repository_base: "{{ lookup('ansible.builtin.pipe', 'git rev-parse --show-toplevel') }}"
    - name: Wait for custom manifests to rollout
      when:
        - k3s_primary_control_node
        - (k3s_server_manifests_templates | length > 0
              or k3s_server_manifests_urls | length > 0)
      kubernetes.core.k8s_info:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        kind: "{{ item.kind }}"
        name: "{{ item.name }}"
        namespace: "{{ item.namespace | default('') }}"
        wait: true
        wait_sleep: 10
        wait_timeout: 360
      loop:
        - { name: cilium, kind: HelmChart, namespace: kube-system }
        - { name: coredns, kind: HelmChart, namespace: kube-system }
        - { name: policy, kind: CiliumL2AnnouncementPolicy }
        - { name: pool, kind: CiliumLoadBalancerIPPool }
        - { name: podmonitors.monitoring.coreos.com, kind: CustomResourceDefinition }
        - { name: prometheusrules.monitoring.coreos.com, kind: CustomResourceDefinition }
        - { name: scrapeconfigs.monitoring.coreos.com, kind: CustomResourceDefinition }
        - { name: servicemonitors.monitoring.coreos.com, kind: CustomResourceDefinition }
    - name: Coredns
      when: k3s_primary_control_node
      ansible.builtin.include_tasks: tasks/coredns.yaml
    - name: Cilium
      when: k3s_primary_control_node
      ansible.builtin.include_tasks: tasks/cilium.yaml
    - name: Cruft
      when: k3s_primary_control_node
      ansible.builtin.include_tasks: tasks/cruft.yaml
    - name: Stale Containers
      ansible.builtin.include_tasks: tasks/stale_containers.yaml
      vars:
        stale_containers_state: disabled
    # - name: Helm controller
    #   notify: Restart Kubernetes
    #   when: k3s_control_node
    #   ansible.builtin.include_tasks: tasks/helm_controller.yaml
    # TODO: Replace this with embedded spegel in the future
    - name: Copy custom containerd configuration
      when: inventory_hostname != 'temp'
      notify: Restart Kubernetes
      ansible.builtin.copy:
        src: files/config.toml.tmpl
        dest: /var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl
        owner: root
        group: root
        mode: "0644"
    - name: Copy custom containerd configuration
      when: inventory_hostname == 'temp'
      notify: Restart Kubernetes
      ansible.builtin.copy:
        src: files/config.nvidia.toml.tmpl
        dest: /var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl
        owner: root
        group: root
        mode: "0644"
  handlers:
    - name: Restart Kubernetes
      ansible.builtin.systemd:
        name: k3s
        state: restarted

View file

@@ -1,61 +0,0 @@
---
- name: Cluster Nuke
  hosts: kubernetes
  become: true
  gather_facts: true
  any_errors_fatal: true
  pre_tasks:
    - name: Pausing for 2 seconds...
      ansible.builtin.pause:
        seconds: 2
  tasks:
    - name: Stop Kubernetes # noqa: ignore-errors
      ignore_errors: true
      block:
        - name: Stop Kubernetes
          ansible.builtin.include_role:
            name: xanmanning.k3s
            public: true
          vars:
            k3s_state: stopped
    # https://github.com/k3s-io/docs/blob/main/docs/installation/network-options.md
    - name: Networking
      block:
        - name: Networking | Delete Cilium links
          ansible.builtin.command:
            cmd: "ip link delete {{ item }}"
            removes: "/sys/class/net/{{ item }}"
          loop: ["cilium_host", "cilium_net", "cilium_vxlan"]
        - name: Networking | Flush iptables
          ansible.builtin.iptables:
            table: "{{ item }}"
            flush: true
          loop: ["filter", "nat", "mangle", "raw"]
        - name: Networking | Flush ip6tables
          ansible.builtin.iptables:
            table: "{{ item }}"
            flush: true
            ip_version: ipv6
          loop: ["filter", "nat", "mangle", "raw"]
        - name: Networking | Delete CNI directory
          ansible.builtin.file:
            path: /etc/cni/net.d
            state: absent
    - name: Uninstall Kubernetes
      ansible.builtin.include_role:
        name: xanmanning.k3s
        public: true
      vars:
        k3s_state: uninstalled
    - name: Stale Containers
      ansible.builtin.include_tasks: tasks/stale_containers.yaml
      vars:
        stale_containers_state: disabled
    - name: Reboot
      ansible.builtin.reboot:
        msg: Rebooting nodes
        reboot_timeout: 3600

View file

@@ -1,130 +0,0 @@
---
- name: Prepare System
  hosts: kubernetes
  become: true
  gather_facts: true
  any_errors_fatal: true
  pre_tasks:
    - name: Pausing for 2 seconds...
      ansible.builtin.pause:
        seconds: 2
  tasks:
    - name: Locale
      block:
        - name: Locale | Set timezone
          community.general.timezone:
            name: "{{ timezone | default('Etc/UTC') }}"
    - name: Packages
      block:
        - name: Packages | Add non-free repository
          ansible.builtin.apt_repository:
            repo: deb http://deb.debian.org/debian/ stable main contrib non-free
            filename: non-free
            update_cache: true
        - name: Packages | Install Intel common packages
          when: inventory_hostname != 'elrond'
          ansible.builtin.apt:
            name: vim,i965-va-driver-shaders,apt-transport-https,ca-certificates,conntrack,curl,dirmngr,gdisk,
              gnupg,hdparm,htop,btop,intel-gpu-tools,intel-media-va-driver-non-free,iperf3,iptables,iputils-ping,ipvsadm,
              libseccomp2,lm-sensors,neofetch,net-tools,nfs-common,nvme-cli,open-iscsi,parted,psmisc,python3,
              python3-apt,python3-openshift,python3-kubernetes,python3-yaml,smartmontools,socat,software-properties-common,
              unzip,util-linux
            install_recommends: false
        - name: Packages | Install AMD common packages
          when: inventory_hostname == 'elrond'
          ansible.builtin.apt:
            name: vim,apt-transport-https,ca-certificates,conntrack,curl,dirmngr,gdisk,
              gnupg,hdparm,htop,btop,iperf3,iptables,iputils-ping,ipvsadm,
              libseccomp2,lm-sensors,neofetch,net-tools,nfs-common,nvme-cli,open-iscsi,parted,psmisc,python3,
              python3-apt,python3-openshift,python3-kubernetes,python3-yaml,smartmontools,socat,software-properties-common,
              unzip,util-linux
            install_recommends: false
    - name: Fish
      block:
        - name: Fish | Add fish apt key
          ansible.builtin.get_url:
            url: https://download.opensuse.org/repositories/shells:fish:release:3/Debian_12/Release.key
            dest: /etc/apt/trusted.gpg.d/fish.asc
            owner: root
            group: root
            mode: "0644"
        - name: Fish | Add fish repository
          ansible.builtin.apt_repository:
            repo: deb [signed-by=/etc/apt/trusted.gpg.d/fish.asc] http://download.opensuse.org/repositories/shells:/fish:/release:/3/Debian_12/ /
            filename: fish
            update_cache: true
        - name: Fish | Install fish
          ansible.builtin.apt:
            name: fish
            install_recommends: false
        - name: Fish | Set as default shell
          ansible.builtin.user:
            name: "{{ ansible_user }}"
            shell: /usr/bin/fish
        - name: Fish | Create configuration directory
          ansible.builtin.file:
            path: "{{ '/home/' + ansible_user if ansible_user != 'root' else '/root' }}/.config/fish/functions"
            state: directory
            owner: "{{ ansible_user }}"
            group: "{{ ansible_user }}"
            recurse: true
        - name: Fish | Create neofetch greeting
          ansible.builtin.copy:
            dest: "{{ '/home/' + ansible_user if ansible_user != 'root' else '/root' }}/.config/fish/functions/fish_greeting.fish"
            owner: "{{ ansible_user }}"
            group: "{{ ansible_user }}"
            mode: "0755"
            content: neofetch --config none
        - name: Fish | Create kubectl shorthand
          ansible.builtin.copy:
            dest: "{{ '/home/' + ansible_user if ansible_user != 'root' else '/root' }}/.config/fish/functions/k.fish"
            owner: "{{ ansible_user }}"
            group: "{{ ansible_user }}"
            mode: "0755"
            content: |
              function k --wraps=kubectl --description 'kubectl shorthand'
                  kubectl $argv
              end
    - name: System Configuration
      notify: Reboot
      block:
        - name: System Configuration | Disable swap
          ansible.posix.mount:
            name: "{{ item }}"
            fstype: swap
            state: absent
          loop: ["none", "swap"]
        - name: System Configuration | Create Kernel modules
          ansible.builtin.copy:
            dest: "/etc/modules-load.d/{{ item }}.conf"
            mode: "0644"
            content: "{{ item }}"
          loop: ["br_netfilter", "ceph", "ip_vs", "ip_vs_rr", "overlay", "rbd", "tcp_bbr"]
          register: modules_status
        - name: System Configuration | Reload Kernel modules # noqa: no-changed-when no-handler
          when: modules_status.changed
          ansible.builtin.systemd:
            name: systemd-modules-load
            state: restarted
        - name: System Configuration | Sysctl
          ansible.posix.sysctl:
            name: "{{ item.key }}"
            value: "{{ item.value }}"
            sysctl_file: /etc/sysctl.d/99-kubernetes.conf
            reload: true
          with_dict: "{{ sysctl_config }}"
          vars:
            sysctl_config:
              fs.inotify.max_queued_events: 65536
              fs.inotify.max_user_watches: 524288
              fs.inotify.max_user_instances: 8192
  handlers:
    - name: Reboot
      ansible.builtin.reboot:
        msg: Rebooting nodes
        reboot_timeout: 3600

View file

@@ -1,71 +0,0 @@
---
# https://github.com/kevincoakley/ansible-role-k8s-rolling-update
- name: Cluster update rollout
  hosts: kubernetes
  become: true
  gather_facts: true
  any_errors_fatal: true
  serial: 1
  pre_tasks:
    - name: Pausing for 2 seconds...
      ansible.builtin.pause:
        seconds: 2
  tasks:
    - name: Details
      ansible.builtin.command: "kubectl get node {{ inventory_hostname }} -o json"
      register: kubectl_get_node
      delegate_to: "{{ groups['master'][0] }}"
      failed_when: false
      changed_when: false
    - name: Update
      when:
        # When status.conditions[x].type == Ready then check status.conditions[x].status for True|False
        - kubectl_get_node['stdout'] | from_json | json_query("status.conditions[?type == 'Ready'].status")
        # If spec.unschedulable is defined then the node is cordoned
        - not (kubectl_get_node['stdout'] | from_json).spec.unschedulable is defined
      block:
        - name: Cordon
          kubernetes.core.k8s_drain:
            name: "{{ inventory_hostname }}"
            kubeconfig: /etc/rancher/k3s/k3s.yaml
            state: cordon
          delegate_to: "{{ groups['master'][0] }}"
        - name: Drain
          kubernetes.core.k8s_drain:
            name: "{{ inventory_hostname }}"
            kubeconfig: /etc/rancher/k3s/k3s.yaml
            state: drain
            delete_options:
              delete_emptydir_data: true
              ignore_daemonsets: true
              terminate_grace_period: 600
              wait_timeout: 900
            pod_selectors:
              - app!=rook-ceph-osd
          delegate_to: "{{ groups['master'][0] }}"
        - name: Update
          ansible.builtin.apt:
            upgrade: dist
            update_cache: true
        - name: Check if reboot is required
          ansible.builtin.stat:
            path: /var/run/reboot-required
          register: reboot_required
        - name: Reboot
          when: reboot_required.stat.exists
          ansible.builtin.reboot:
            msg: Rebooting node
            post_reboot_delay: 120
            reboot_timeout: 3600
        - name: Uncordon
          kubernetes.core.k8s_drain:
            name: "{{ inventory_hostname }}"
            kubeconfig: /etc/rancher/k3s/k3s.yaml
            state: uncordon
          delegate_to: "{{ groups['master'][0] }}"

View file

@@ -1,2 +0,0 @@
source $VIMRUNTIME/defaults.vim
set mouse-=a

View file

@@ -1,35 +0,0 @@
version = 2
[plugins."io.containerd.internal.v1.opt"]
  path = "/var/lib/rancher/k3s/agent/containerd"
[plugins."io.containerd.grpc.v1.cri"]
  stream_server_address = "127.0.0.1"
  stream_server_port = "10010"
  enable_selinux = false
  enable_unprivileged_ports = true
  enable_unprivileged_icmp = true
  sandbox_image = "registry.k8s.io/pause:3.9"
[plugins."io.containerd.grpc.v1.cri".containerd]
  snapshotter = "overlayfs"
  disable_snapshot_annotations = true
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.nvidia]
  privileged_without_host_devices = false
  runtime_engine = ""
  runtime_root = ""
  runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.nvidia.options]
  BinaryName = "/usr/bin/nvidia-container-runtime"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
  runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  SystemdCgroup = true
[plugins."io.containerd.grpc.v1.cri".registry]
  config_path = "/var/lib/rancher/k3s/agent/etc/containerd/certs.d"

View file

@@ -1,25 +0,0 @@
version = 2
[plugins."io.containerd.internal.v1.opt"]
  path = "/var/lib/rancher/k3s/agent/containerd"
[plugins."io.containerd.grpc.v1.cri"]
  stream_server_address = "127.0.0.1"
  stream_server_port = "10010"
  enable_selinux = false
  enable_unprivileged_ports = true
  enable_unprivileged_icmp = true
  sandbox_image = "registry.k8s.io/pause:3.9"
[plugins."io.containerd.grpc.v1.cri".containerd]
  snapshotter = "overlayfs"
  disable_snapshot_annotations = true
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
  runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  SystemdCgroup = true
[plugins."io.containerd.grpc.v1.cri".registry]
  config_path = "/var/lib/rancher/k3s/agent/etc/containerd/certs.d"

View file

@@ -1,6 +0,0 @@
[Unit]
Description=Stale containers
[Service]
Type=oneshot
ExecStart=/usr/local/bin/k3s crictl rmi --prune

View file

@@ -1,11 +0,0 @@
[Unit]
Description=Stale containers
[Timer]
OnCalendar=weekly
AccuracySec=1h
Persistent=true
RandomizedDelaySec=6000
[Install]
WantedBy=timers.target

View file

@@ -1,56 +0,0 @@
---
- name: Cilium
  block:
    - name: Cilium | Check if Cilium HelmChart exists
      kubernetes.core.k8s_info:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: cilium
        kind: HelmChart
        namespace: kube-system
      register: cilium_helmchart
    - name: Cilium | Wait for Cilium to rollout
      when: cilium_helmchart.resources | count > 0
      kubernetes.core.k8s_info:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: helm-install-cilium
        kind: Job
        namespace: kube-system
        wait: true
        wait_condition:
          type: Complete
          status: true
        wait_timeout: 360
    - name: Cilium | Patch the Cilium HelmChart to unmanage it
      when: cilium_helmchart.resources | count > 0
      kubernetes.core.k8s_json_patch:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: cilium
        kind: HelmChart
        namespace: kube-system
        patch:
          - op: add
            path: /metadata/annotations/helmcharts.helm.cattle.io~1unmanaged
            value: "true"
    - name: Cilium | Delete the Cilium HelmChart CR
      when: cilium_helmchart.resources | count > 0
      kubernetes.core.k8s:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: cilium
        kind: HelmChart
        namespace: kube-system
        state: absent
    - name: Cilium | Force delete the Cilium HelmChart
      when: cilium_helmchart.resources | count > 0
      kubernetes.core.k8s:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: cilium
        kind: HelmChart
        namespace: kube-system
        state: patched
        definition:
          metadata:
            finalizers: []

View file

@@ -1,56 +0,0 @@
---
- name: Coredns
  block:
    - name: Coredns | Check if Coredns HelmChart exists
      kubernetes.core.k8s_info:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: coredns
        kind: HelmChart
        namespace: kube-system
      register: coredns_helmchart
    - name: Coredns | Wait for Coredns to rollout
      when: coredns_helmchart.resources | count > 0
      kubernetes.core.k8s_info:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: helm-install-coredns
        kind: Job
        namespace: kube-system
        wait: true
        wait_condition:
          type: Complete
          status: true
        wait_timeout: 360
    - name: Coredns | Patch the Coredns HelmChart to unmanage it
      when: coredns_helmchart.resources | count > 0
      kubernetes.core.k8s_json_patch:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: coredns
        kind: HelmChart
        namespace: kube-system
        patch:
          - op: add
            path: /metadata/annotations/helmcharts.helm.cattle.io~1unmanaged
            value: "true"
    - name: Coredns | Delete the Coredns HelmChart CR
      when: coredns_helmchart.resources | count > 0
      kubernetes.core.k8s:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: coredns
        kind: HelmChart
        namespace: kube-system
        state: absent
    - name: Coredns | Force delete the Coredns HelmChart
      when: coredns_helmchart.resources | count > 0
      kubernetes.core.k8s:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: coredns
        kind: HelmChart
        namespace: kube-system
        state: patched
        definition:
          metadata:
            finalizers: []

View file

@@ -1,32 +0,0 @@
---
# https://github.com/k3s-io/k3s/issues/1971
- name: Cruft
  block:
    - name: Cruft | Get list of custom manifests
      ansible.builtin.find:
        paths: "{{ k3s_server_manifests_dir }}"
        file_type: file
        use_regex: true
        patterns: ["^custom-.*"]
      register: custom_manifest
    - name: Cruft | Delete custom manifests
      ansible.builtin.file:
        path: "{{ item.path }}"
        state: absent
      loop: "{{ custom_manifest.files }}"
    - name: Cruft | Get list of custom addons
      kubernetes.core.k8s_info:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        kind: Addon
      register: addons_list
    - name: Cruft | Delete addons
      kubernetes.core.k8s:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: "{{ item.metadata.name }}"
        kind: Addon
        namespace: kube-system
        state: absent
      loop: "{{ addons_list.resources | selectattr('metadata.name', 'match', '^custom-.*') | list }}"

View file

@@ -1,16 +0,0 @@
---
- name: Helm Controller
  block:
    - name: Helm Controller | Disable Helm controller
      ansible.builtin.replace:
        path: /etc/rancher/k3s/config.yaml
        regexp: '^disable-helm-controller: false$'
        replace: 'disable-helm-controller: true'
    - name: Helm Controller | Delete Helm controller CRDs
      kubernetes.core.k8s:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: "{{ item }}"
        kind: CustomResourceDefinition
        state: absent
      loop: ["helmcharts.helm.cattle.io", "helmchartconfigs.helm.cattle.io"]

View file

@@ -1,36 +0,0 @@
---
# https://github.com/k3s-io/k3s/issues/1900
- name: Enabled Stale containers
  when: stale_containers_state == "enabled"
  block:
    - name: Stale containers | Create systemd unit
      ansible.builtin.copy:
        src: files/stale-containers.service
        dest: /etc/systemd/system/stale-containers.service
        owner: root
        group: root
        mode: "0644"
    - name: Stale containers | Create systemd timer
      ansible.builtin.copy:
        src: files/stale-containers.timer
        dest: /etc/systemd/system/stale-containers.timer
        owner: root
        group: root
        mode: "0644"
    - name: Stale containers | Start the systemd timer
      ansible.builtin.systemd:
        name: stale-containers.timer
        enabled: true
        daemon_reload: true
        masked: false
        state: started
- name: Disable Stale containers
  when: stale_containers_state == "disabled"
  block:
    - name: Stale containers | Mask the systemd timer
      ansible.builtin.systemd:
        name: stale-containers.timer
        masked: true

View file

@@ -1,51 +0,0 @@
---
# https://docs.k3s.io/helm
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
  name: cilium
  namespace: kube-system
spec:
  # renovate: datasource=helm
  repo: https://helm.cilium.io/
  chart: cilium
  version: 1.14.5
  targetNamespace: kube-system
  bootstrap: true
  valuesContent: |-
    cluster:
      name: homelab
      id: 1
    containerRuntime:
      integration: containerd
      socketPath: /var/run/k3s/containerd/containerd.sock
    hubble:
      enabled: true
      relay:
        enabled: true
      ui:
        enabled: true
    ipam:
      mode: kubernetes
    ipv4NativeRoutingCIDR: "{{ k3s_server['cluster-cidr'] }}"
    k8sServiceHost: "{{ k3s_registration_address }}"
    k8sServicePort: 6443
    kubeProxyReplacement: true
    localRedirectPolicy: true
    operator:
      rollOutPods: true
    rollOutCiliumPods: true
    securityContext:
      privileged: true
    policyAuditMode: true
    hostFirewall:
      enabled: true
    extraConfig:
      allow-localhost: policy
    bgp:
      enabled: false
      announce:
        loadbalancerIP: true
        podCIDR: false
    bgpControlPlane:
      enabled: true

View file

@@ -1,77 +0,0 @@
---
# https://docs.k3s.io/helm
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
  name: coredns
  namespace: kube-system
spec:
  # renovate: datasource=helm
  repo: https://coredns.github.io/helm
  chart: coredns
  version: 1.29.0
  targetNamespace: kube-system
  bootstrap: true
  valuesContent: |-
    fullnameOverride: coredns
    replicaCount: 2
    k8sAppLabelOverride: kube-dns
    service:
      name: kube-dns
      clusterIP: {{ k3s_server['service-cidr'] | ansible.utils.nthhost(10) }}
    serviceAccount:
      create: true
    deployment:
      annotations:
        reloader.stakater.com/auto: "true"
    servers:
      - zones:
          - zone: .
            scheme: dns://
            use_tcp: true
        port: 53
        plugins:
          - name: log
          - name: errors
          - name: health
            configBlock: |-
              lameduck 5s
          - name: ready
          - name: kubernetes
            parameters: cluster.local in-addr.arpa ip6.arpa
            configBlock: |-
              pods insecure
              fallthrough in-addr.arpa ip6.arpa
              ttl 30
          - name: prometheus
            parameters: 0.0.0.0:9153
          - name: forward
            parameters: . /etc/resolv.conf
          - name: cache
            parameters: 30
          - name: loop
          - name: reload
          - name: loadbalance
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: node-role.kubernetes.io/control-plane
                  operator: Exists
    tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - key: node-role.kubernetes.io/control-plane
        operator: Exists
        effect: NoSchedule
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
    topologySpreadConstraints:
      - maxSkew: 1
        topologyKey: kubernetes.io/hostname
        whenUnsatisfiable: DoNotSchedule
        labelSelector:
          matchLabels:
            app.kubernetes.io/instance: coredns

View file

@@ -1,8 +0,0 @@
ansible==9.3.0
ansible-lint==24.2.0
# https://github.com/pyca/bcrypt/issues/684
bcrypt==4.1.2
jmespath==1.0.1
netaddr==1.2.1
openshift==0.13.2
passlib==1.7.4

View file

@@ -1,18 +0,0 @@
---
collections:
  - name: ansible.posix
    version: 1.5.4
  - name: ansible.utils
    version: 3.1.0
  - name: community.general
    version: 8.4.0
  - name: community.sops
    version: 1.6.7
  - name: kubernetes.core
    version: 3.0.0
  - name: onepassword.connect
    version: 2.2.4
roles:
  - name: xanmanning.k3s
    src: https://github.com/PyratLabs/ansible-role-k3s
    version: v3.4.4