Removing old ansible scripts.
This commit is contained in:
parent 900bfb96b2
commit 43e6b2ff48
22 changed files with 0 additions and 858 deletions
@@ -1,8 +0,0 @@
#shellcheck disable=SC2148,SC2155
export SOPS_AGE_KEY_FILE="$(expand_path ../../age.key)"
export VIRTUAL_ENV="$(expand_path ../../.venv)"
export ANSIBLE_COLLECTIONS_PATH=$(expand_path ../../.venv/galaxy)
export ANSIBLE_ROLES_PATH=$(expand_path ../../.venv/galaxy/ansible_roles)
export ANSIBLE_VARS_ENABLED="host_group_vars,community.sops.sops"
export ANSIBLE_INVENTORY=$(expand_path ./inventory/hosts.yaml)
PATH_add "$(expand_path ../../.venv/bin)"
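Note: with ANSIBLE_VARS_ENABLED including community.sops.sops, any *.sops.yaml vars file is decrypted on the fly with the age key referenced above. A minimal sketch of such a file (hypothetical path and placeholder values; only the shape matters):

# inventory/group_vars/kubernetes/secrets.sops.yaml (hypothetical)
some_secret: ENC[AES256_GCM,data:...,iv:...,tag:...,type:str]
sops:
  age:
    - recipient: age1...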
@@ -1,24 +0,0 @@
---
# renovate: datasource=github-releases depName=k3s-io/k3s
k3s_release_version: "v1.28.2+k3s1"
k3s_install_hard_links: true
k3s_become: true
k3s_etcd_datastore: true
k3s_registration_address: 10.2.0.6
# /var/lib/rancher/k3s/server/manifests
k3s_server_manifests_urls:
  # Essential Prometheus Operator CRDs (the rest are installed with the kube-prometheus-stack helm release)
  - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
    filename: custom-prometheus-podmonitors.yaml
  - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
    filename: custom-prometheus-prometheusrules.yaml
  - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
    filename: custom-prometheus-scrapeconfigs.yaml
  - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.68.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
    filename: custom-prometheus-servicemonitors.yaml
# /var/lib/rancher/k3s/server/manifests
k3s_server_manifests_templates:
  - custom-cilium-helmchart.yaml.j2
  - custom-cilium-l2.yaml.j2
  - custom-coredns-helmchart.yaml.j2
# /var/lib/rancher/k3s/agent/pod-manifests
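Aside: the "# renovate:" comment is what lets Renovate manage the pin. It reads the datasource/depName pair and rewrites the value on the following line when a newer GitHub release exists, e.g. (hypothetical newer release shown):

# renovate: datasource=github-releases depName=k3s-io/k3s
k3s_release_version: "v1.28.3+k3s1"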
@@ -1,3 +0,0 @@
---
github_username: jahanson
timezone: America/Chicago
@@ -1,29 +0,0 @@
---
k3s_control_node: true
k3s_server:
  node-ip: "{{ ansible_host }}"
  tls-san:
    - "{{ k3s_registration_address }}"
  https-listen-port: 6443
  docker: false
  flannel-backend: "none" # quote
  disable:
    - coredns
    - flannel
    - local-storage
    - metrics-server
    - servicelb
    - traefik
  disable-network-policy: true
  disable-cloud-controller: true
  disable-kube-proxy: true
  cluster-cidr: 10.32.0.0/16
  service-cidr: 10.33.0.0/16
  write-kubeconfig-mode: "0644"
  etcd-expose-metrics: true
  kube-controller-manager-arg:
    - bind-address=0.0.0.0
  kube-scheduler-arg:
    - bind-address=0.0.0.0
  kube-apiserver-arg:
    - anonymous-auth=true
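Aside: the xanmanning.k3s role is expected to render this k3s_server mapping into /etc/rancher/k3s/config.yaml on each server node. A sketch of the result for aule, assuming the keys are written through verbatim:

node-ip: 10.2.0.3
tls-san:
  - 10.2.0.6
https-listen-port: 6443
flannel-backend: none
cluster-cidr: 10.32.0.0/16
service-cidr: 10.33.0.0/16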
@@ -1,4 +0,0 @@
---
k3s_control_node: false
k3s_agent:
  node-ip: "{{ ansible_host }}"
@@ -1,20 +0,0 @@
---
kubernetes:
  vars:
    ansible_user: jahanson
    ansible_ssh_port: 22
  children:
    master:
      hosts:
        aule:
          ansible_host: 10.2.0.3
          ceph_drives:
            - /dev/disk/by-id/scsi-0HC_Volume_37231496
        eonwe:
          ansible_host: 10.2.0.4
          ceph_drives:
            - /dev/disk/by-id/scsi-0HC_Volume_37231521
        arlen:
          ansible_host: 10.2.0.5
          ceph_drives:
            - /dev/disk/by-id/scsi-0HC_Volume_37231596
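A quick way to confirm this inventory parses and every node is reachable is a throwaway ping play (a sketch, not part of the repo):

---
- name: Verify connectivity to the kubernetes group
  hosts: kubernetes
  gather_facts: false
  tasks:
    - name: Ping each node over SSH
      ansible.builtin.ping: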
@@ -1,41 +0,0 @@
---
- name: Reset Ceph Drives
  hosts: all
  become: true
  gather_facts: true
  any_errors_fatal: true
  pre_tasks:
    - name: Pausing for 2 seconds...
      ansible.builtin.pause:
        seconds: 2
  tasks:
    - name: Reset Ceph Drives # noqa: ignore-errors
      ignore_errors: true
      when: ceph_drives | default([]) | length > 0
      block:
        - name: Delete (/var/lib/rook)
          ansible.builtin.file:
            state: absent
            path: /var/lib/rook
        - name: Delete (/dev/mapper/ceph-*) # noqa: no-changed-when
          ansible.builtin.shell:
            executable: /bin/bash
            cmd: |
              set -o pipefail
              ls /dev/mapper/ceph-* | xargs -I% -- dmsetup remove_all --force % || true
        - name: Delete (/dev/ceph-*) # noqa: no-changed-when
          ansible.builtin.command: rm -rf /dev/ceph-*
        - name: Delete (/dev/mapper/ceph--*) # noqa: no-changed-when
          ansible.builtin.command: rm -rf /dev/mapper/ceph--*
        - name: Wipe (sgdisk) # noqa: no-changed-when
          ansible.builtin.command: "sgdisk --zap-all {{ item }}"
          loop: "{{ ceph_drives }}"
        - name: Wipe (dd) # noqa: no-changed-when
          ansible.builtin.command: "dd if=/dev/zero of={{ item }} bs=1M count=100 oflag=direct,dsync"
          loop: "{{ ceph_drives }}"
        - name: Wipe (blkdiscard) # noqa: no-changed-when
          ansible.builtin.command: "blkdiscard {{ item }}"
          loop: "{{ ceph_drives }}"
        - name: Wipe (partprobe) # noqa: no-changed-when
          ansible.builtin.command: "partprobe {{ item }}"
          loop: "{{ ceph_drives }}"
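After a wipe like this it is worth confirming that no filesystem or LVM signatures survived before handing the disks back to Rook. A follow-up task sketch (wipefs with no flags only reports, it does not erase):

- name: Report any remaining signatures on the Ceph drives
  ansible.builtin.command: "wipefs {{ item }}"
  loop: "{{ ceph_drives | default([]) }}"
  changed_when: false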
@@ -1,69 +0,0 @@
---
- name: Cluster Installation
  hosts: all
  become: true
  gather_facts: true
  any_errors_fatal: true
  pre_tasks:
    - name: Pausing for 2 seconds...
      ansible.builtin.pause:
        seconds: 2
  tasks:
    - name: Check if cluster is installed
      check_mode: false
      ansible.builtin.stat:
        path: /etc/rancher/k3s/config.yaml
      register: k3s_installed

    - name: Ignore manifest templates and URLs if the cluster is already installed
      when: k3s_installed.stat.exists
      ansible.builtin.set_fact:
        k3s_server_manifests_templates: []
        k3s_server_manifests_urls: []

    - name: Install Kubernetes
      ansible.builtin.include_role:
        name: xanmanning.k3s
        public: true
      vars:
        k3s_state: installed

    - name: Wait for custom manifests to rollout
      when:
        - k3s_primary_control_node
        - (k3s_server_manifests_templates | length > 0
           or k3s_server_manifests_urls | length > 0)
      kubernetes.core.k8s_info:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        kind: "{{ item.kind }}"
        name: "{{ item.name }}"
        namespace: "{{ item.namespace | default('') }}"
        wait: true
        wait_sleep: 10
        wait_timeout: 360
      loop:
        - { name: cilium, kind: HelmChart, namespace: kube-system }
        - { name: coredns, kind: HelmChart, namespace: kube-system }
        - { name: policy, kind: CiliumL2AnnouncementPolicy }
        - { name: pool, kind: CiliumLoadBalancerIPPool }
        - { name: podmonitors.monitoring.coreos.com, kind: CustomResourceDefinition }
        - { name: prometheusrules.monitoring.coreos.com, kind: CustomResourceDefinition }
        - { name: scrapeconfigs.monitoring.coreos.com, kind: CustomResourceDefinition }
        - { name: servicemonitors.monitoring.coreos.com, kind: CustomResourceDefinition }

    - name: Coredns
      when: k3s_primary_control_node
      ansible.builtin.include_tasks: tasks/coredns.yaml

    - name: Cilium
      when: k3s_primary_control_node
      ansible.builtin.include_tasks: tasks/cilium.yaml

    - name: Cruft
      when: k3s_primary_control_node
      ansible.builtin.include_tasks: tasks/cruft.yaml

    - name: Stale Containers
      ansible.builtin.include_tasks: tasks/stale_containers.yaml
      vars:
        stale_containers_state: enabled
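Aside: k3s_primary_control_node appears to be supplied by the xanmanning.k3s role's vars, which is why the include uses public: true; that setting exposes the role's vars and defaults to the tasks that follow it. A one-task sketch to see what each host resolved it to:

- name: Show whether this host is the primary control node
  ansible.builtin.debug:
    var: k3s_primary_control_node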
@@ -1,30 +0,0 @@
---
- name: Cluster Nuke
  hosts:
    - master
    - worker
  become: true
  gather_facts: true
  any_errors_fatal: true
  pre_tasks:
    - name: Pausing for 2 seconds...
      ansible.builtin.pause:
        seconds: 2
  tasks:
    - name: Uninstall k3s
      ansible.builtin.include_role:
        name: xanmanning.k3s
        public: true
      vars:
        k3s_state: uninstalled
    - name: Gather list of CNI files
      ansible.builtin.find:
        paths: /etc/cni/net.d
        patterns: "*"
        hidden: true
      register: directory_contents
    - name: Delete CNI files
      ansible.builtin.file:
        path: "{{ item.path }}"
        state: absent
      loop: "{{ directory_contents.files }}"
@@ -1,184 +0,0 @@
---
- name: Prepare System
  hosts: all
  become: true
  gather_facts: true
  any_errors_fatal: true
  pre_tasks:
    - name: Pausing for 2 seconds...
      ansible.builtin.pause:
        seconds: 2
  tasks:
    - name: Locale
      block:
        - name: Locale | Set timezone
          community.general.timezone:
            name: "{{ timezone | default('Etc/UTC') }}"

    - name: Packages
      block:
        - name: Packages | Add fish key
          ansible.builtin.get_url:
            url: https://download.opensuse.org/repositories/shells:fish:release:3/Debian_12/Release.key
            dest: /etc/apt/trusted.gpg.d/fish.asc
            owner: root
            group: root
            mode: "0644"
        - name: Packages | Add fish repository
          ansible.builtin.apt_repository:
            repo: deb [signed-by=/etc/apt/trusted.gpg.d/fish.asc] http://download.opensuse.org/repositories/shells:/fish:/release:/3/Debian_12/ /
            filename: fish
            update_cache: true
        - name: Packages | Add non-free repository
          ansible.builtin.apt_repository:
            repo: deb http://deb.debian.org/debian/ stable main contrib non-free
            filename: non-free
            update_cache: true
        - name: Packages | Install
          ansible.builtin.apt:
            name: apt-transport-https,ca-certificates,conntrack,curl,dirmngr,fish,gdisk,
              gnupg,hdparm,htop,iperf3,iptables,iputils-ping,ipvsadm,
              libseccomp2,lm-sensors,neofetch,net-tools,nfs-common,nvme-cli,open-iscsi,parted,psmisc,python3,
              python3-apt,python3-kubernetes,python3-yaml,smartmontools,socat,software-properties-common,
              unzip,util-linux
            install_recommends: false

    - name: User Configuration
      block:
        - name: User Configuration | SSH keys
          ansible.posix.authorized_key:
            user: "{{ ansible_user }}"
            key: "https://github.com/{{ github_username }}.keys"
        - name: User Configuration | Silence login
          ansible.builtin.file:
            dest: "{{ '/home/' + ansible_user if ansible_user != 'root' else '/root' }}/.hushlogin"
            state: touch
            owner: "{{ ansible_user }}"
            group: "{{ ansible_user }}"
            mode: "0644"
            modification_time: preserve
            access_time: preserve
        - name: User Configuration | Add user to sudoers
          when: ansible_user != 'root'
          ansible.builtin.copy:
            content: "{{ ansible_user }} ALL=(ALL:ALL) NOPASSWD:ALL"
            dest: "/etc/sudoers.d/{{ ansible_user }}"
            owner: root
            group: root
            mode: "0440"
        - name: User Configuration | Fish shell (1)
          ansible.builtin.user:
            name: "{{ ansible_user }}"
            shell: /usr/bin/fish
        - name: User Configuration | Fish shell (2)
          ansible.builtin.file:
            path: "{{ '/home/' + ansible_user if ansible_user != 'root' else '/root' }}/.config/fish/functions"
            state: directory
            owner: "{{ ansible_user }}"
            group: "{{ ansible_user }}"
            recurse: true
        - name: User Configuration | Fish shell (3)
          ansible.builtin.copy:
            dest: "{{ '/home/' + ansible_user if ansible_user != 'root' else '/root' }}/.config/fish/functions/fish_greeting.fish"
            owner: "{{ ansible_user }}"
            group: "{{ ansible_user }}"
            mode: "0755"
            content: neofetch --config none
        - name: User Configuration | Fish shell (4)
          ansible.builtin.copy:
            dest: "{{ '/home/' + ansible_user if ansible_user != 'root' else '/root' }}/.config/fish/functions/k.fish"
            owner: "{{ ansible_user }}"
            group: "{{ ansible_user }}"
            mode: "0755"
            content: |
              function k --wraps=kubectl --description 'kubectl shorthand'
                  kubectl $argv
              end

    - name: Network Configuration
      notify: Reboot
      block:
        - name: Network Configuration | Set hostname
          ansible.builtin.hostname:
            name: "{{ inventory_hostname }}"
        - name: Network Configuration | Update hosts
          ansible.builtin.copy:
            dest: /etc/hosts
            content: |
              127.0.0.1 localhost
              127.0.1.1 {{ inventory_hostname }}

              # The following lines are desirable for IPv6 capable hosts
              ::1 localhost ip6-localhost ip6-loopback
              ff02::1 ip6-allnodes
              ff02::2 ip6-allrouters
            mode: preserve
        # https://github.com/cilium/cilium/issues/18706
        - name: Network Configuration | Cilium (1)
          ansible.builtin.lineinfile:
            dest: /etc/systemd/networkd.conf
            regexp: ManageForeignRoutingPolicyRules
            line: ManageForeignRoutingPolicyRules=no
        - name: Network Configuration | Cilium (2)
          ansible.builtin.lineinfile:
            dest: /etc/systemd/networkd.conf
            regexp: ManageForeignRoutes
            line: ManageForeignRoutes=no

    - name: System Configuration
      notify: Reboot
      block:
        - name: System Configuration | Neofetch
          ansible.builtin.copy:
            dest: /etc/profile.d/neofetch.sh
            mode: "0755"
            content: neofetch --config none
        - name: System Configuration | Disable swap
          ansible.posix.mount:
            name: "{{ item }}"
            fstype: swap
            state: absent
          loop: ["none", "swap"]
        - name: System Configuration | Kernel modules (1)
          community.general.modprobe:
            name: "{{ item }}"
            state: present
          loop: ["br_netfilter", "ceph", "ip_vs", "ip_vs_rr", "nbd", "overlay", "rbd"]
        - name: System Configuration | Kernel modules (2)
          ansible.builtin.copy:
            dest: "/etc/modules-load.d/{{ item }}.conf"
            mode: "0644"
            content: "{{ item }}"
          loop: ["br_netfilter", "ceph", "ip_vs", "ip_vs_rr", "nbd", "overlay", "rbd"]
        - name: System Configuration | Sysctl
          ansible.posix.sysctl:
            name: "{{ item.key }}"
            value: "{{ item.value }}"
            sysctl_file: /etc/sysctl.d/99-kubernetes.conf
            reload: true
          with_dict: "{{ sysctl_config }}"
          vars:
            sysctl_config:
              fs.inotify.max_queued_events: 65536
              fs.inotify.max_user_watches: 524288
              fs.inotify.max_user_instances: 8192
        - name: System Configuration | Grub (1)
          ansible.builtin.replace:
            path: /etc/default/grub
            regexp: '^(GRUB_CMDLINE_LINUX=(?:(?![" ]{{ item.key | regex_escape }}=).)*)(?:[" ]{{ item.key | regex_escape }}=\S+)?(.*")$'
            replace: '\1 {{ item.key }}={{ item.value }}\2'
          with_dict: "{{ grub_config }}"
          vars:
            grub_config:
              apparmor: "0"
              mitigations: "off"
          register: grub_status
        - name: System Configuration | Grub (2) # noqa: no-changed-when no-handler
          ansible.builtin.command: update-grub
          when: grub_status.changed

  handlers:
    - name: Reboot
      ansible.builtin.reboot:
        msg: Rebooting nodes
        reboot_timeout: 3600
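Note on the Grub tasks: the replace regex appends each key=value to GRUB_CMDLINE_LINUX only when the key is absent and rewrites it in place when present, so GRUB_CMDLINE_LINUX="quiet" ends up as GRUB_CMDLINE_LINUX="quiet apparmor=0 mitigations=off" after both loop items; Grub (2) then runs update-grub only if the file actually changed.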
@@ -1,75 +0,0 @@
---
# https://github.com/kevincoakley/ansible-role-k8s-rolling-update
- name: Cluster update rollout
  hosts: all
  become: true
  gather_facts: true
  any_errors_fatal: true
  serial: 1
  pre_tasks:
    - name: Pausing for 2 seconds...
      ansible.builtin.pause:
        seconds: 2
  tasks:
    - name: Details
      ansible.builtin.command: "kubectl get node {{ inventory_hostname }} -o json"
      register: kubectl_get_node
      delegate_to: "{{ groups['master'][0] }}"
      failed_when: false
      changed_when: false

    - name: Update
      when:
        # When status.conditions[x].type == Ready, check status.conditions[x].status for True|False
        - kubectl_get_node['stdout'] | from_json | json_query("status.conditions[?type == 'Ready'].status")
        # If spec.unschedulable is defined then the node is cordoned
        - not (kubectl_get_node['stdout'] | from_json).spec.unschedulable is defined
      block:
        - name: Cordon
          ansible.builtin.command: "kubectl cordon {{ inventory_hostname }}"
          delegate_to: "{{ groups['master'][0] }}"
          changed_when: false

        - name: Wait to cordon
          ansible.builtin.command: "kubectl get node {{ inventory_hostname }} -o json"
          register: wait_for_cordon
          retries: 10
          delay: 10
          delegate_to: "{{ groups['master'][0] }}"
          changed_when: false
          until: (wait_for_cordon['stdout'] | from_json).spec.unschedulable

        - name: Drain
          ansible.builtin.command: "kubectl drain --ignore-daemonsets --delete-emptydir-data --force {{ inventory_hostname }}"
          delegate_to: "{{ groups['master'][0] }}"
          changed_when: false

        - name: Update
          ansible.builtin.apt:
            upgrade: dist
            update_cache: true

        - name: Check if reboot is required
          ansible.builtin.stat:
            path: /var/run/reboot-required
          register: reboot_required

        - name: Reboot
          when: reboot_required.stat.exists
          ansible.builtin.reboot:
            msg: Rebooting node
            post_reboot_delay: 120
            reboot_timeout: 3600

        - name: Uncordon
          ansible.builtin.command: "kubectl uncordon {{ inventory_hostname }}"
          delegate_to: "{{ groups['master'][0] }}"
          changed_when: false

        - name: Wait to uncordon
          ansible.builtin.command: "kubectl get node {{ inventory_hostname }} -o json"
          register: wait_for_uncordon
          retries: 10
          delay: 10
          delegate_to: "{{ groups['master'][0] }}"
          changed_when: false
          until: not (wait_for_uncordon['stdout'] | from_json).spec.unschedulable is defined
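Note: the Ready check relies on the jmespath library pinned in requirements.txt further down. A self-contained sketch of how that json_query expression evaluates:

- name: Show how the Ready check evaluates (inline sample data)
  ansible.builtin.debug:
    msg: "{{ node | json_query(\"status.conditions[?type == 'Ready'].status\") }}"
  vars:
    node:
      status:
        conditions:
          - type: Ready
            status: "True"
# prints ["True"], which is truthy, so the update block runs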
@@ -1,6 +0,0 @@
[Unit]
Description=Stale containers

[Service]
Type=oneshot
ExecStart=/usr/local/bin/k3s crictl rmi --prune
@@ -1,11 +0,0 @@
[Unit]
Description=Stale containers

[Timer]
OnCalendar=weekly
AccuracySec=1h
Persistent=true
RandomizedDelaySec=6000

[Install]
WantedBy=timers.target
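Note: OnCalendar=weekly fires Mondays at 00:00; RandomizedDelaySec=6000 smears each run by up to 100 minutes so the nodes do not prune in lockstep, and Persistent=true replays a window that was missed while the machine was off.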
@@ -1,56 +0,0 @@
---
- name: Cilium
  block:
    - name: Cilium | Check if Cilium HelmChart exists
      kubernetes.core.k8s_info:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: cilium
        kind: HelmChart
        namespace: kube-system
      register: cilium_helmchart

    - name: Cilium | Wait for Cilium to rollout
      when: cilium_helmchart.resources | count > 0
      kubernetes.core.k8s_info:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: helm-install-cilium
        kind: Job
        namespace: kube-system
        wait: true
        wait_condition:
          type: Complete
          status: true
        wait_timeout: 360

    - name: Cilium | Patch the Cilium HelmChart to unmanage it
      when: cilium_helmchart.resources | count > 0
      kubernetes.core.k8s_json_patch:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: cilium
        kind: HelmChart
        namespace: kube-system
        patch:
          - op: add
            path: /metadata/annotations/helmcharts.helm.cattle.io~1unmanaged
            value: "true"

    - name: Cilium | Delete the Cilium HelmChart CR
      when: cilium_helmchart.resources | count > 0
      kubernetes.core.k8s:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: cilium
        kind: HelmChart
        namespace: kube-system
        state: absent

    - name: Cilium | Force delete the Cilium HelmChart
      when: cilium_helmchart.resources | count > 0
      kubernetes.core.k8s:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: cilium
        kind: HelmChart
        namespace: kube-system
        state: patched
        definition:
          metadata:
            finalizers: []
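Note: the "~1" in the patch path is JSON-pointer escaping (RFC 6901), not part of the annotation name; "/" inside a key is written "~1" (and "~" as "~0"). The patch therefore sets this annotation on the HelmChart:

metadata:
  annotations:
    helmcharts.helm.cattle.io/unmanaged: "true"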
@@ -1,56 +0,0 @@
---
- name: Coredns
  block:
    - name: Coredns | Check if Coredns HelmChart exists
      kubernetes.core.k8s_info:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: coredns
        kind: HelmChart
        namespace: kube-system
      register: coredns_helmchart

    - name: Coredns | Wait for Coredns to rollout
      when: coredns_helmchart.resources | count > 0
      kubernetes.core.k8s_info:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: helm-install-coredns
        kind: Job
        namespace: kube-system
        wait: true
        wait_condition:
          type: Complete
          status: true
        wait_timeout: 360

    - name: Coredns | Patch the Coredns HelmChart to unmanage it
      when: coredns_helmchart.resources | count > 0
      kubernetes.core.k8s_json_patch:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: coredns
        kind: HelmChart
        namespace: kube-system
        patch:
          - op: add
            path: /metadata/annotations/helmcharts.helm.cattle.io~1unmanaged
            value: "true"

    - name: Coredns | Delete the Coredns HelmChart CR
      when: coredns_helmchart.resources | count > 0
      kubernetes.core.k8s:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: coredns
        kind: HelmChart
        namespace: kube-system
        state: absent

    - name: Coredns | Force delete the Coredns HelmChart
      when: coredns_helmchart.resources | count > 0
      kubernetes.core.k8s:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: coredns
        kind: HelmChart
        namespace: kube-system
        state: patched
        definition:
          metadata:
            finalizers: []
@@ -1,32 +0,0 @@
---
# https://github.com/k3s-io/k3s/issues/1971
- name: Cruft
  block:
    - name: Cruft | Get list of custom manifests
      ansible.builtin.find:
        paths: "{{ k3s_server_manifests_dir }}"
        file_type: file
        use_regex: true
        patterns: ["^custom-.*"]
      register: custom_manifest

    - name: Cruft | Delete custom manifests
      ansible.builtin.file:
        path: "{{ item.path }}"
        state: absent
      loop: "{{ custom_manifest.files }}"

    - name: Cruft | Get list of custom addons
      kubernetes.core.k8s_info:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        kind: Addon
      register: addons_list

    - name: Cruft | Delete addons
      kubernetes.core.k8s:
        kubeconfig: /etc/rancher/k3s/k3s.yaml
        name: "{{ item.metadata.name }}"
        kind: Addon
        namespace: kube-system
        state: absent
      loop: "{{ addons_list.resources | selectattr('metadata.name', 'match', '^custom-.*') | list }}"
@@ -1,36 +0,0 @@
---
# https://github.com/k3s-io/k3s/issues/1900
- name: Enable Stale containers
  when: stale_containers_state == "enabled"
  block:
    - name: Stale containers | Create systemd unit
      ansible.builtin.copy:
        src: files/stale-containers.service
        dest: /etc/systemd/system/stale-containers.service
        owner: root
        group: root
        mode: "0644"

    - name: Stale containers | Create systemd timer
      ansible.builtin.copy:
        src: files/stale-containers.timer
        dest: /etc/systemd/system/stale-containers.timer
        owner: root
        group: root
        mode: "0644"

    - name: Stale containers | Start the systemd timer
      ansible.builtin.systemd:
        name: stale-containers.timer
        enabled: true
        daemon_reload: true
        masked: false
        state: started

- name: Disable Stale containers
  when: stale_containers_state == "disabled"
  block:
    - name: Stale containers | Mask the systemd timer
      ansible.builtin.systemd:
        name: stale-containers.timer
        masked: true
@@ -1,52 +0,0 @@
---
# https://docs.k3s.io/helm
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
  name: cilium
  namespace: kube-system
spec:
  # renovate: datasource=helm
  repo: https://helm.cilium.io/
  chart: cilium
  version: 1.14.2
  targetNamespace: kube-system
  bootstrap: true
  valuesContent: |-
    autoDirectNodeRoutes: true
    bpf:
      masquerade: true
    bgp:
      enabled: false
    cluster:
      name: kubernetes
      id: 1
    containerRuntime:
      integration: containerd
      socketPath: /var/run/k3s/containerd/containerd.sock
    endpointRoutes:
      enabled: true
    hubble:
      enabled: false
    ipam:
      mode: kubernetes
    ipv4NativeRoutingCIDR: "{{ k3s_server['cluster-cidr'] }}"
    k8sServiceHost: "{{ k3s_registration_address }}"
    k8sServicePort: 6443
    kubeProxyReplacement: true
    kubeProxyReplacementHealthzBindAddr: 0.0.0.0:10256
    l2announcements:
      enabled: true
      leaseDuration: 120s
      leaseRenewDeadline: 60s
      leaseRetryPeriod: 1s
    loadBalancer:
      algorithm: maglev
      mode: dsr
    localRedirectPolicy: true
    operator:
      rollOutPods: true
    rollOutCiliumPods: true
    securityContext:
      privileged: true
    tunnel: disabled
@@ -1,21 +0,0 @@
---
# https://docs.cilium.io/en/latest/network/l2-announcements
apiVersion: cilium.io/v2alpha1
kind: CiliumL2AnnouncementPolicy
metadata:
  name: policy
spec:
  loadBalancerIPs: true
  interfaces:
    - ^enp.*
  nodeSelector:
    matchLabels:
      kubernetes.io/os: linux
---
apiVersion: cilium.io/v2alpha1
kind: CiliumLoadBalancerIPPool
metadata:
  name: pool
spec:
  cidrs:
    - cidr: "{{ (ansible_default_ipv4.network + '/' + ansible_default_ipv4.netmask) | ansible.utils.ipaddr('network/prefix') }}"
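Note: the cidr expression combines the node's default-interface facts into CIDR notation. A worked sketch with example fact values:

- name: Show the derived announcement pool CIDR
  ansible.builtin.debug:
    msg: "{{ ('10.2.0.0' + '/' + '255.255.255.0') | ansible.utils.ipaddr('network/prefix') }}"
# prints 10.2.0.0/24 for network 10.2.0.0 with netmask 255.255.255.0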
@@ -1,77 +0,0 @@
---
# https://docs.k3s.io/helm
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
  name: coredns
  namespace: kube-system
spec:
  # renovate: datasource=helm
  repo: https://coredns.github.io/helm
  chart: coredns
  version: 1.26.0
  targetNamespace: kube-system
  bootstrap: true
  valuesContent: |-
    fullnameOverride: coredns
    replicaCount: 2
    k8sAppLabelOverride: kube-dns
    service:
      name: kube-dns
      clusterIP: {{ k3s_server['service-cidr'] | ansible.utils.nthhost(10) }}
    serviceAccount:
      create: true
    deployment:
      annotations:
        reloader.stakater.com/auto: "true"
    servers:
      - zones:
          - zone: .
            scheme: dns://
            use_tcp: true
        port: 53
        plugins:
          - name: log
          - name: errors
          - name: health
            configBlock: |-
              lameduck 5s
          - name: ready
          - name: kubernetes
            parameters: cluster.local in-addr.arpa ip6.arpa
            configBlock: |-
              pods insecure
              fallthrough in-addr.arpa ip6.arpa
              ttl 30
          - name: prometheus
            parameters: 0.0.0.0:9153
          - name: forward
            parameters: . /etc/resolv.conf
          - name: cache
            parameters: 30
          - name: loop
          - name: reload
          - name: loadbalance
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: node-role.kubernetes.io/control-plane
                  operator: Exists
    tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - key: node-role.kubernetes.io/control-plane
        operator: Exists
        effect: NoSchedule
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
    topologySpreadConstraints:
      - maxSkew: 1
        topologyKey: kubernetes.io/hostname
        whenUnsatisfiable: DoNotSchedule
        labelSelector:
          matchLabels:
            app.kubernetes.io/instance: coredns
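Note: clusterIP is derived from the service CIDR rather than hard-coded, following the kube-dns convention of the tenth address in the service range. A worked sketch:

- name: Show the CoreDNS clusterIP for this cluster's service-cidr
  ansible.builtin.debug:
    msg: "{{ '10.33.0.0/16' | ansible.utils.nthhost(10) }}"
# prints 10.33.0.10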
@@ -1,7 +0,0 @@
ansible==8.5.0
ansible-lint==6.21.1
bcrypt==4.0.1
jmespath==1.0.1
netaddr==0.9.0
openshift==0.13.2
passlib==1.7.4
@@ -1,17 +0,0 @@
---
collections:
  - name: ansible.posix
    version: 1.5.4
  - name: ansible.utils
    version: 2.11.0
  - name: community.general
    version: 7.4.0
  - name: community.sops
    version: 1.6.6
  - name: kubernetes.core
    version: 2.4.0
  - name: onepassword.connect
    version: 2.2.3
roles:
  - name: xanmanning.k3s
    version: v3.4.2
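Aside: these pins pair with the .envrc above. The Python packages install into the project venv (e.g. pip install -r requirements.txt), and the Galaxy content resolves into .venv/galaxy via ansible-galaxy collection install -r and ansible-galaxy role install -r against this file, which is where ANSIBLE_COLLECTIONS_PATH and ANSIBLE_ROLES_PATH point.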