Compare commits
No commits in common. "lxc-vm-builder" and "main" have entirely different histories.
13 changed files with 30 additions and 238 deletions

.gitignore (vendored): 1 changed line

@@ -1,6 +1,5 @@
 result*
 /secrets
-.secrets
 age.key
 **/*.tmp.sops.yaml
 **/*.sops.tmp.yaml

@@ -1,27 +0,0 @@
-#!/usr/bin/env bash
-
-# Function to check if a command was successful
-check_command() {
-  if [ $? -ne 0 ]; then
-    echo "Error: $1 failed"
-    exit 1
-  fi
-}
-
-# Build the qemu image and get the path
-qemuImageBuildPath=$(nix build .#nixosConfigurations.fj-lxc-vm-x86_64.config.system.build.qemuImage --print-out-paths)
-check_command "Building qemu image"
-
-# Build the metadata and get the path
-metadataBuildPath=$(nix build .#nixosConfigurations.fj-lxc-vm-x86_64.config.system.build.metadata --print-out-paths)
-check_command "Building metadata"
-
-# Set the paths for the metadata and qemu image
-MBP="${metadataBuildPath}/tarball/nixos-system-x86_64-linux.tar.xz"
-QBP="${qemuImageBuildPath}/nixos.qcow2"
-
-# Import the image to Incus and capture the instance name
-incus image import --alias nixos-gen/custom/fj-lxc-vm-x86_64 "$MBP" "$QBP"
-check_command "Importing image to Incus"
-
-echo "Process completed successfully"

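The removed build script above ends by importing the image under the alias nixos-gen/custom/fj-lxc-vm-x86_64. As a minimal sketch (the alias comes from the script; using `incus image info` for the check is an assumption, not part of the change), the import can be confirmed afterwards:

```sh
# Illustrative check: show the imported image by the alias the removed script registered
incus image info nixos-gen/custom/fj-lxc-vm-x86_64
```
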
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-
-INCUS_PROFILE="forgejo-runner"
-
-INSTANCE_NAME=$(incus launch nixos-gen/custom/fj-lxc-vm-x86_64 -p ${INCUS_PROFILE:-'default'} | grep -oP 'Instance name is: \K\S+')
-echo "The captured instance name is: $INSTANCE_NAME"
-
-# Call the push_token.sh script with the new instance name
-./push_token.sh "$INSTANCE_NAME"
-echo "Pushing token to instance"
-
-echo "Process completed successfully"

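This removed wrapper relies on `incus launch` printing "Instance name is: ..." and greps the generated name out of that output. A hedged way to double-check that the launch actually produced a running instance (standard Incus listing commands, not part of the removed script):

```sh
# Illustrative check: the freshly launched VM should appear as RUNNING
incus list
incus info "$INSTANCE_NAME"   # instance name as captured by the wrapper above
```
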
@@ -1,75 +0,0 @@
-#!/usr/bin/env bash
-# Prerequisites:
-# 1password vault created with a single item in it with the property 'runner_token'.
-# Define the vault and item used.
-# Usage: ./push_token.sh <incus vm instance name>
-
-set -euo pipefail
-
-# Check if instance name is provided
-if [ "$#" -ne 1 ]; then
-  echo "Usage: $0 <instance-name>" >&2
-  exit 1
-fi
-
-INCUS_INSTANCE="$1" # Use the provided instance name
-
-# Set variables
-OP_ITEM_NAME="forgejo-runner" # Name of the 1Password item containing the runner token
-OP_VAULT_NAME="forgejo-runner" # Name of the 1Password vault
-TOKEN_FILE="tokenfile" # Name of the temporary file to store the token
-INCUS_PATH="/var/lib/gitea-runner/default/$TOKEN_FILE"
-
-# Check if OP_SESSION environment variable exists, if not, sign in
-if [ -z "${OP_SESSION:-}" ]; then
-  echo "Not logged in to 1Password CLI. Attempting to sign in..."
-  if ! eval $(op signin); then
-    echo "Failed to sign in to 1Password CLI. Please sign in manually using 'op signin'" >&2
-    exit 1
-  fi
-fi
-
-# Retrieve the token from 1Password
-TOKEN=$(op item get "$OP_ITEM_NAME" --vault "$OP_VAULT_NAME" --fields runner_token)
-
-if [ -z "$TOKEN" ]; then
-  echo "Failed to retrieve token from 1Password" >&2
-  exit 1
-fi
-
-# Create the token file
-echo "TOKEN=$TOKEN" > "$TOKEN_FILE"
-
-# Function to push file and check existence with retries
-push_and_check_file() {
-  local retries=5
-  local count=0
-  while [ $count -lt $retries ]; do
-    echo "Attempt $((count+1)) of $retries: Pushing file to Incus instance..."
-    if incus file push "$TOKEN_FILE" "$INCUS_INSTANCE$INCUS_PATH"; then
-      if incus exec "$INCUS_INSTANCE" -- test -f "$INCUS_PATH"; then
-        echo "File successfully verified in Incus instance."
-        return 0
-      fi
-    fi
-    ((count++))
-    echo "File not found or push failed. Retrying in 5 seconds..."
-    sleep 5
-  done
-  echo "Failed to push and verify file after $retries attempts." >&2
-  return 1
-}
-
-# Push the file to Incus and verify its existence
-if push_and_check_file; then
-  echo "Token file successfully pushed and verified in Incus instance $INCUS_INSTANCE"
-else
-  echo "Failed to push or verify token file in Incus instance $INCUS_INSTANCE" >&2
-  rm "$TOKEN_FILE"
-  exit 1
-fi
-
-# Clean up the local token file
-rm "$TOKEN_FILE"
-
-echo "Operation completed successfully"

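The deleted push_token.sh already verifies the upload with `incus exec ... test -f` inside push_and_check_file. If a manual spot check is ever wanted, the same pieces can be reused directly (the variable and path are the script's own; this is a sketch, not part of the change):

```sh
# Illustrative check: confirm the pushed token file landed where the runner expects it
incus exec "$INCUS_INSTANCE" -- ls -l /var/lib/gitea-runner/default/tokenfile
```
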
README.md: 35 changed lines

@@ -1,35 +0,0 @@
-# Incus VM Build and Deploy
-
-## Build
-```sh
-nix build .#nixosConfigurations.fj-lxc-vm-x86_64.config.system.build.qemuImage --print-out-paths
-nix build .#nixosConfigurations.fj-lxc-vm-x86_64.config.system.build.metadata --print-out-paths
-```
-
-## Deploy
-```sh
-incus image import --alias nixos-gen/custom/fj-lxc-vm-x86_64 ${metadatapath}/tarball/nixos-system-x86_64-linux.tar.xz ${qemuimageoutputpath}/nixos.qcow2
-incus file push "$TOKEN_FILE" "$INCUS_INSTANCE/var/lib/forgejo/$TOKEN_FILE" --mode 400
-```
-
-## Runner machine types
-
-Notice: The runners only run on VMs. No baremetal runners are available.
-
-Hetzner/x86
-Hetzner/aarch64
-lxc-vm/x86
-
-## Tags used
-
-### Runner Tags
-
-| tag | description |
-| --------------------------------------- | ---------------------------------------------------------- |
-| docker | docker nodes |
-| docker-x86_64:docker://node:20-bullseye | specifically the debian bullseye with node 20 docker image |
-| x86_64 | x86 builders only |
-| aarch64 | ARM builders only |
-| linux | Specify if linux |
-| remote | only use offsite runners |
-| native-aarch64:host | run on runner host -- not docker |

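The removed README covers importing the image and pushing the token. The step in between, taken from the deleted launch wrapper earlier in this diff, is starting an instance from the imported alias (the profile name is the one that wrapper used, with 'default' as its fallback; shown here only as a sketch):

```sh
# Illustrative: launch a VM from the imported alias with the forgejo-runner profile
incus launch nixos-gen/custom/fj-lxc-vm-x86_64 -p forgejo-runner
```
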
@@ -1,9 +1,8 @@
-{ pkgs, lib, ... }:
+{ pkgs, config, ... }:
 {
   imports = [
     ../cachix.nix
   ];
-
   environment.systemPackages = with pkgs; [
     # vim -- added by srvos.nixosModules.server
     # git -- srvos.nixosModules.server

@@ -15,6 +14,13 @@
     openssl
   ];
 
+  sops.secrets."forgejo-runner-token" = {
+    # configure secret for the gitea/forgejo runner.
+    sopsFile = ./secrets.sops.yaml;
+    mode = "0444";
+    restartUnits = [ "gitea-runner-default.service" ];
+  };
+
   # Required for the gitea-runner to be able to pull images.
   nix.settings.trusted-users = [ "gitea-runner" ];
 

@@ -23,22 +29,22 @@
 
   users = {
     gitea-runner = {
-      isSystemUser = true;
+      isNormalUser = true;
       extraGroups = [ "docker" ];
       group = "gitea-runner";
     };
 
+    jahanson = {
+      isNormalUser = true;
+      extraGroups = [ "wheel" "docker" ];
+    };
     };
   };
 
   virtualisation.docker.enable = true;
 
-  networking = {
-    # Runner communication port for cache restores.
-    firewall.allowedTCPPorts = [ 45315 ];
-    networkmanager.enable = true;
-    useDHCP = lib.mkDefault true;
-  };
+  # Runner communication port for cache restores.
+  networking.firewall.allowedTCPPorts = [ 45315 ];
 
   system.stateVersion = "24.05";
 }

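The added sops.secrets."forgejo-runner-token" block sets restartUnits = [ "gitea-runner-default.service" ], so the runner is restarted whenever the secret changes. A hedged sanity check on the host (plain systemd commands; only the unit name comes from the diff):

```sh
# Illustrative: confirm the runner unit is up and see its recent restarts
systemctl status gitea-runner-default.service
journalctl -u gitea-runner-default.service --since "10 minutes ago"
```
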
@@ -22,13 +22,6 @@
     };
   };
 
-  sops.secrets."forgejo-runner-token" = {
-    # configure secret for the gitea/forgejo runner.
-    sopsFile = ./secrets.sops.yaml;
-    mode = "0444";
-    restartUnits = [ "gitea-runner-default.service" ];
-  };
-
   sops.secrets."cachix/agent_auth_tokens/fj-hetzner-aarch64" = {
     # configure secret for cachix deploy agent.
     sopsFile = ./secrets.sops.yaml;

@@ -4,7 +4,6 @@
     ./common.nix
   ];
 
-  # Generic x86 VM
   services.gitea-actions-runner = {
     package = pkgs.forgejo-actions-runner;
     instances.default = {

@@ -14,22 +13,15 @@
       # Obtaining the path to the runner token file may differ
       tokenFile = config.sops.secrets.forgejo-runner-token.path;
       labels = [
-        "docker" # this is essentially the same as the below tag
-        "docker-x86_64:docker://node:20-bullseye"
         "x86_64"
         "linux"
-        "remote"
+        "pc"
+        "docker-x86_64:docker://node:20-bullseye"
+        "native-x86_64:host"
       ];
     };
   };
 
-  sops.secrets."forgejo-runner-token" = {
-    # configure secret for the gitea/forgejo runner.
-    sopsFile = ./secrets.sops.yaml;
-    mode = "0444";
-    restartUnits = [ "gitea-runner-default.service" ];
-  };
-
   sops.secrets."cachix/agent_auth_tokens/fj-x86_64" = {
     # configure secret for cachix deploy agent.
     sopsFile = ./secrets.sops.yaml;

flake.nix: 35 changed lines

@@ -49,7 +49,6 @@
       };
       cachix-deploy-lib = cachix-deploy-flake.lib pkgs;
     };
-
     aarch64-linux-modules = [
       sops-nix.nixosModules.sops
       srvos.nixosModules.hardware-hetzner-cloud

@@ -57,51 +56,37 @@
       srvos.nixosModules.mixins-systemd-boot
       disko.nixosModules.disko
       lix-module.nixosModules.default
-      ./profiles/role-fj-hetzner.nix
+      ./agents/fj-hetzner-aarch64.nix
       (import ./disko-hetzner-cloud.nix { disks = [ "/dev/sda" ]; })
       {
         boot.loader.efi.canTouchEfiVariables = true;
         networking.hostName = "fj-hetzner-aarch64-01";
         users.users.root.openssh.authorizedKeys.keys =
           [
+            "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBsUe5YF5z8vGcEYtQX7AAiw2rJygGf2l7xxr8nZZa7w jahanson@legiondary"
+            "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJyA/yMPPo+scxBaDFUk7WeEyMAMhXUro5vi4feOKsJT jahanson@durincore"
             "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILcLI5qN69BuoLp8p7nTYKoLdsBNmZB31OerZ63Car1g jahanson@telchar"
           ];
         services.openssh.enable = true;
         services.openssh.settings.PermitRootLogin = "without-password";
       }
     ];
-
     x86_64-linux-modules = [
       sops-nix.nixosModules.sops
-      ./profiles/hw-shadowfax.nix
+      ./hardware/shadowfax.nix
       srvos.nixosModules.server
       srvos.nixosModules.mixins-systemd-boot
       disko.nixosModules.disko
       lix-module.nixosModules.default
-      ./profiles/fj-shadowfax-x86_64.nix
+      ./agents/fj-shadowfax-x86_64.nix
       (import ./disko-shadowfax.nix { disks = [ "/dev/sda" ]; })
       {
         boot.loader.efi.canTouchEfiVariables = true;
         networking.hostName = "fj-shadowfax-01";
         users.users.root.openssh.authorizedKeys.keys =
           [
-            "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILcLI5qN69BuoLp8p7nTYKoLdsBNmZB31OerZ63Car1g jahanson@telchar"
-          ];
-        services.openssh.enable = true;
-        services.openssh.settings.PermitRootLogin = "without-password";
-      }
-    ];
-
-    x86_64-linux-modules-lxc-vm = [
-      "${inputs.nixpkgs}/nixos/modules/virtualisation/lxd-virtual-machine.nix"
-      sops-nix.nixosModules.sops
-      srvos.nixosModules.server
-      lix-module.nixosModules.default
-      ./profiles/role-lxc-vm.nix
-      {
-        # networking.hostName = "fj-x86_64-vm-01";
-        users.users.root.openssh.authorizedKeys.keys =
-          [
+            "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBsUe5YF5z8vGcEYtQX7AAiw2rJygGf2l7xxr8nZZa7w jahanson@legiondary"
+            "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJyA/yMPPo+scxBaDFUk7WeEyMAMhXUro5vi4feOKsJT jahanson@durincore"
             "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILcLI5qN69BuoLp8p7nTYKoLdsBNmZB31OerZ63Car1g jahanson@telchar"
           ];
         services.openssh.enable = true;

@@ -124,12 +109,6 @@
         specialArgs = { inherit inputs; };
         modules = x86_64-linux-modules;
       };
-
-      "fj-lxc-vm-x86_64" = lib.nixosSystem {
-        system = "x86_64-linux";
-        specialArgs = { inherit inputs; };
-        modules = x86_64-linux-modules-lxc-vm;
-      };
     };
 
     # Cachix deploy for automated deployments

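With the fj-lxc-vm-x86_64 system and its module list dropped from flake.nix, a routine evaluation check (not specific to this change, shown only as a sketch) confirms the remaining outputs still evaluate:

```sh
# Illustrative: list flake outputs; fj-lxc-vm-x86_64 should no longer appear under nixosConfigurations
nix flake show
```
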
@@ -9,5 +9,8 @@
     boot.loader.grub.device = "/dev/sda";
 
     fileSystems."/" = lib.mkDefault { device = "/dev/disk/by-partlabel/disk-main-root"; fsType = "ext4"; };
+
+    networking.useNetworkd = true;
+    networking.useDHCP = true;
   };
 }

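This hunk moves the host onto systemd-networkd with DHCP. If link state needs to be checked after the switch, the usual networkd tooling applies (generic systemd commands, unrelated to this repo's scripts):

```sh
# Illustrative: inspect interfaces and the networkd unit after enabling useNetworkd
networkctl status
systemctl status systemd-networkd.service
```
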
@@ -1,7 +1,7 @@
 { ... }:
 {
   imports = [
-    ./host-qemu.nix
+    ./.
   ];
 
   config = {

@@ -1,31 +0,0 @@
-{ pkgs, config, ... }:
-{
-  imports = [
-    ./common.nix
-  ];
-
-  # Ensure the /var/lib/gitea-runner/default directory is created
-  # and has the correct permissions.
-  systemd.tmpfiles.rules = [
-    "d /var/lib/gitea-runner/default 0750 gitea-runner gitea-runner -"
-  ];
-
-  services.gitea-actions-runner = {
-    package = pkgs.forgejo-actions-runner;
-    instances.default = {
-      enable = true;
-      name = config.networking.hostName;
-      url = "https://git.hsn.dev";
-      # The gitea-runner token file is pushed on vm creation with this command:
-      # `incus file push "$TOKEN_FILE" "$INCUS_INSTANCE/var/lib/forgejo/$TOKEN_FILE" --mode 400`
-      tokenFile = "/var/lib/gitea-runner/default/tokenfile";
-      labels = [
-        "x86_64"
-        "linux"
-        "pc"
-        "docker-x86_64:docker://node:20-bullseye"
-        "native-x86_64:host"
-      ];
-    };
-  };
-}

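The deleted module pre-created /var/lib/gitea-runner/default via a tmpfiles rule so the pushed token file had somewhere to land with gitea-runner ownership. On an instance that still exists, that precondition could be checked like this (path and ownership from the removed file; `$INCUS_INSTANCE` is reused from push_token.sh as an illustrative placeholder):

```sh
# Illustrative: verify the runner state directory exists with the expected owner and mode
incus exec "$INCUS_INSTANCE" -- ls -ld /var/lib/gitea-runner/default
```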