restic backup overhaul

parent 3beae7844f
commit d3613a4ec4

22 changed files with 885 additions and 458 deletions

.gitignore (vendored) | 1 +

@@ -3,6 +3,7 @@
 **/*sync-conflict*
 age.key
 result*
+.decrypted~*
 .direnv
 .kube
 .github

flake.lock | 21 -

@@ -695,26 +695,6 @@
       "type": "github"
     }
   },
-  "nix-index-database": {
-    "inputs": {
-      "nixpkgs": [
-        "nixpkgs"
-      ]
-    },
-    "locked": {
-      "lastModified": 1734234111,
-      "narHash": "sha256-icEMqBt4HtGH52PU5FHidgBrNJvOfXH6VQKNtnD1aw8=",
-      "owner": "nix-community",
-      "repo": "nix-index-database",
-      "rev": "311d6cf3ad3f56cb051ffab1f480b2909b3f754d",
-      "type": "github"
-    },
-    "original": {
-      "owner": "nix-community",
-      "repo": "nix-index-database",
-      "type": "github"
-    }
-  },
   "nix-inspect": {
     "inputs": {
       "nci": "nci",
@@ -1078,7 +1058,6 @@
         "hyprland-plugins": "hyprland-plugins",
         "krewfile": "krewfile",
         "lix-module": "lix-module",
-        "nix-index-database": "nix-index-database",
         "nix-inspect": "nix-inspect",
         "nix-minecraft": "nix-minecraft",
         "nix-vscode-extensions": "nix-vscode-extensions",

@@ -47,13 +47,6 @@
     inputs.nixpkgs.follows = "nixpkgs";
   };

-  # nix-index database
-  # https://github.com/nix-community/nix-index-database
-  nix-index-database = {
-    url = "github:nix-community/nix-index-database";
-    inputs.nixpkgs.follows = "nixpkgs";
-  };
-
   # nix-inspect - inspect nix derivations usingn a TUI interface
   # https://github.com/bluskript/nix-inspect
   nix-inspect = {

@@ -1,5 +1,11 @@
-{ config, pkgs, lib, ... }:
-with lib; let
+{
+  config,
+  pkgs,
+  lib,
+  ...
+}:
+with lib;
+let
   inherit (config.myHome) username homeDirectory;
   cfg = config.myHome.shell.fish;
 in

@@ -30,14 +36,22 @@ in
       nrs = "sudo nixos-rebuild switch --flake .";
       nvdiff = "nvd diff /run/current-system result";
       # rook & ceph versions.
-      rcv =
-        ''
-          kubectl \
-            -n rook-ceph \
-            get deployments \
-            -l rook_cluster=rook-ceph \
-            -o jsonpath='{range .items[*]}{.metadata.name}{" \treq/upd/avl: "}{.spec.replicas}{"/"}{.status.updatedReplicas}{"/"}{.status.readyReplicas}{" \trook-version="}{.metadata.labels.rook-version}{" \tceph-version="}{.metadata.labels.ceph-version}{"\n"}{end}'
-        '';
+      rcv = ''
+        kubectl \
+          -n rook-ceph \
+          get deployments \
+          -l rook_cluster=rook-ceph \
+          -o jsonpath='{range .items[*]}{.metadata.name}{" \treq/upd/avl: "}{.spec.replicas}{"/"}{.status.updatedReplicas}{"/"}{.status.readyReplicas}{" \trook-version="}{.metadata.labels.rook-version}{" \tceph-version="}{.metadata.labels.ceph-version}{"\n"}{end}'
+      '';
     };
+
+    functions = {
+      nix-which = {
+        body = ''
+          set -l cmd $argv[1]
+          nix-locate --whole-name --type x --type s "$cmd"
+        '';
+      };
+    };

     interactiveShellInit = ''
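
The new nix-which function is a thin wrapper over nix-locate, so once an index exists it can be called directly from a fish shell:

    nix-which kubectl    # runs: nix-locate --whole-name --type x --type s "kubectl"

Note that with nix-index-database dropped from the flake inputs in this commit, the database now has to be built locally (see the nix-index-daily service later in this diff).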

@@ -153,13 +153,6 @@
     # zfs.mountPoolsAtBoot = [ "eru" ];
     # NFS
     nfs.enable = true;
-
-    # Restic
-    resticBackup = {
-      local.enable = false;
-      remote.enable = false;
-      local.noWarning = true;
-      remote.noWarning = true;
-    };
   };

   services = {
     libvirt-qemu.enable = true;

nixos/hosts/shadowfax/config/backups.nix (new file) | 28 +

@@ -0,0 +1,28 @@
+{ ... }:
+{
+  localbackup = {
+    exclude = [
+      "/home/*/.cache"
+    ];
+    initialize = true;
+    passwordFile = "/etc/nixos/secrets/restic-password";
+    paths = [
+      "/home"
+    ];
+    repository = "/mnt/backup-hdd";
+  };
+  remotebackup = {
+    extraOptions = [
+      "sftp.command='ssh backup@host -i /etc/nixos/secrets/backup-private-key -s sftp'"
+    ];
+    passwordFile = "/etc/nixos/secrets/restic-password";
+    paths = [
+      "/home"
+    ];
+    repository = "sftp:backup@host:/backups/home";
+    timerConfig = {
+      OnCalendar = "00:05";
+      RandomizedDelaySec = "5h";
+    };
+  };
+}
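
This new file matches the localbackup/remotebackup example from the services.restic.backups module documentation. The import site is not part of this diff; presumably it is wired into the host configuration along the lines of:

    services.restic.backups = import ./config/backups.nix { };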

@@ -1,6 +1,3 @@
-# Do not modify this file!  It was generated by ‘nixos-generate-config’
-# and may be overwritten by future invocations. Please make changes
-# to /etc/nixos/configuration.nix instead.
 {
   config,
   lib,

@@ -104,16 +101,6 @@ in
   };

   services = {
-    xserver.videoDrivers = [ "nvidia" ];
-
-    # Prometheus exporters
-    prometheus.exporters = {
-      # Node Exporter - port 9100
-      node.enable = true;
-      # ZFS Exporter - port 9134
-      zfs.enable = true;
-    };
-
     # Minio
     minio = {
       enable = true;

@@ -126,6 +113,14 @@ in
       enable = true;
     };

+    # Prometheus exporters
+    prometheus.exporters = {
+      # Node Exporter - port 9100
+      node.enable = true;
+      # ZFS Exporter - port 9134
+      zfs.enable = true;
+    };
+
     # Smart daemon for monitoring disk health.
     smartd = {
       devices = smartdDevices;

@@ -141,6 +136,8 @@ in

     # VSCode Compatibility Settings
     vscode-server.enable = true;
+
+    xserver.videoDrivers = [ "nvidia" ];
   };

   # sops

@@ -164,24 +161,10 @@ in
       mode = "400";
       restartUnits = [ "syncthing.service" ];
     };
-    "restic/plex/resticPassword" = {
-      sopsFile = ./secrets.sops.yaml;
-      owner = "jahanson";
-      mode = "400";
-      # restartUnits = [ "restic-plex.service" ];
-    };
-    "restic/plex/resticUri" = {
-      sopsFile = ./secrets.sops.yaml;
-      owner = "jahanson";
-      mode = "400";
-      # restartUnits = [ "restic-backup.service" ];
-    };
   };

   # System settings and services.
   mySystem = {
-    purpose = "Production";
-
     # Containers
     containers = {
       jellyfin.enable = true;

@@ -189,48 +172,17 @@ in
       plex.enable = true;
       scrypted.enable = true;
     };
+    purpose = "Production";

-    # System
-    system = {
-      motd.networkInterfaces = [ "enp36s0f0" ];
-      # Incus
-      incus = {
-        enable = true;
-        preseed = import ./config/incus-preseed.nix { };
-      };
-
-      # ZFS
-      zfs.enable = true;
-      zfs.mountPoolsAtBoot = [
-        "nahar"
-        "moria"
-        "eru"
-      ];
-
-      # NFS
-      nfs.enable = true;
-
-      resticBackup = {
-        local.enable = false;
-        remote.enable = false;
-        local.noWarning = true;
-        remote.noWarning = true;
-      };
-    };
-
     # Services
     services = {
-      podman.enable = true;
+      # Misc
       libvirt-qemu.enable = true;
+      podman.enable = true;

-      # Syncthing
-      syncthing = {
+      # Sanoid
+      sanoid = {
         enable = true;
-        user = "jahanson";
-        publicCertPath = config.sops.secrets."syncthing/publicCert".path;
-        privateKeyPath = config.sops.secrets."syncthing/privateKey".path;
+        inherit (sanoidConfig.outputs) templates datasets;
       };

       # Scrutiny
       scrutiny = {
         enable = true;

@@ -239,12 +191,36 @@ in
         containerVolumeLocation = "/nahar/containers/volumes/scrutiny";
         port = 8585;
       };

-      # Sanoid
-      sanoid = {
-        enable = true;
-        inherit (sanoidConfig.outputs) templates datasets;
+      # Syncthing
+      syncthing = {
+        enable = false;
+        user = "jahanson";
+        publicCertPath = config.sops.secrets."syncthing/publicCert".path;
+        privateKeyPath = config.sops.secrets."syncthing/privateKey".path;
+      };
+
+      # ZFS nightly snapshot of container volumes
+      zfs-nightly-snap = {
+        enable = true;
+        mountPath = "/mnt/restic_nightly_backup";
+        zfsDataset = "nahar/containers/volumes";
+        snapshotName = "restic_nightly_snap";
+        startAt = "*-*-* 02:00:00 America/Chicago";
       };
     };
+
+    # System
+    system = {
+      incus = {
+        enable = true;
+        preseed = import ./config/incus-preseed.nix { };
+      };
+      motd.networkInterfaces = [ "enp36s0f0" ];
+      nfs.enable = true;
+      zfs.enable = true;
+      zfs.mountPoolsAtBoot = [
+        "eru"
+        "moria"
+        "nahar"
+      ];
+    };
   };
 }
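
The startAt value is a systemd calendar expression with an explicit timezone suffix. It can be sanity-checked with standard systemd tooling (not part of this commit):

    systemd-analyze calendar "*-*-* 02:00:00 America/Chicago"

systemd-analyze prints the normalized expression and the next elapse time, which makes it easy to confirm that the 02:00 Chicago snapshot lands before the 02:05 restic timers defined by mkRestic later in this commit.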

@@ -70,12 +70,6 @@
   purpose = "Production";
   system = {
     motd.networkInterfaces = [ "enp2s0" "wlp3s0" ];
-    resticBackup = {
-      local.enable = false;
-      remote.enable = false;
-      local.noWarning = true;
-      remote.noWarning = true;
-    };
   };

   services = {

@@ -7,19 +7,18 @@
 with lib;
 let
   app = "jellyfin";
+  cfg = config.mySystem.containers.${app};
+  group = "kah";
+  image = "ghcr.io/jellyfin/jellyfin:${version}";
+  user = "kah";
   # renovate: depName=ghcr.io/jellyfin/jellyfin datasource=docker
   version = "10.10.3";
-  image = "ghcr.io/jellyfin/jellyfin:${version}";
-  cfg = config.mySystem.containers.${app};
+  volumeLocation = "/nahar/containers/volumes/jellyfin";
 in
 {
   # Options
   options.mySystem.containers.${app} = {
     enable = mkEnableOption "${app}";
-    # TODO add to homepage
-    # addToHomepage = mkEnableOption "Add ${app} to homepage" // {
-    #   default = true;
-    # };
     openFirewall = mkEnableOption "Open firewall for ${app}" // {
       default = true;
     };

@@ -46,13 +45,13 @@ in
           ${pkgs.podman}/bin/podman run \
             --rm \
             --name=${app} \
-            --user=568:568 \
+            --user="${toString config.users.users."${user}".uid}:${toString config.users.groups."${group}".gid}" \
             --device='nvidia.com/gpu=all' \
             --log-driver=journald \
             --cidfile=/run/${app}.ctr-id \
             --cgroups=no-conmon \
             --sdnotify=conmon \
-            --volume="/nahar/containers/volumes/jellyfin:/config:rw" \
+            --volume="${volumeLocation}:/config:rw" \
             --volume="/moria/media:/media:rw" \
             --volume="tmpfs:/cache:rw" \
             --volume="tmpfs:/transcode:rw" \
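
The --user flag now derives the uid:gid pair from the NixOS user database instead of hard-coding it. Given the literal it replaces, the kah user and group presumably resolve to 568:568, so the interpolation reduces to the old value (sketch):

    --user="${toString config.users.users."kah".uid}:${toString config.users.groups."kah".gid}"   # -> --user="568:568"

One caveat: toString null is "" in Nix, so if users.users.kah.uid is not set explicitly the flag silently degenerates rather than failing at evaluation time.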

@@ -78,15 +77,46 @@ in
     # Firewall
     networking.firewall = mkIf cfg.openFirewall {
       allowedTCPPorts = [
         8096 # HTTP web interface
         8920 # HTTPS web interface
       ];
       allowedUDPPorts = [
         1900 # DLNA discovery
         7359 # Jellyfin auto-discovery
       ];
     };

+    sops.secrets = {
+      "restic/jellyfin/env" = {
+        sopsFile = ./secrets.sops.yaml;
+        owner = user;
+        group = group;
+        mode = "0400";
+      };
+      "restic/jellyfin/password" = {
+        sopsFile = ./secrets.sops.yaml;
+        owner = user;
+        group = group;
+        mode = "0400";
+      };
+      "restic/jellyfin/template" = {
+        sopsFile = ./secrets.sops.yaml;
+        owner = user;
+        group = group;
+        mode = "0400";
+      };
+    };
+
+    # Restic backups for `jellyfin-local` and `jellyfin-remote`
+    services.restic.backups = config.lib.mySystem.mkRestic {
+      inherit app user;
+      environmentFile = config.sops.secrets."restic/jellyfin/env".path;
+      excludePaths = [ ];
+      localResticTemplate = "/eru/restic/jellyfin";
+      passwordFile = config.sops.secrets."restic/jellyfin/password".path;
+      paths = [ volumeLocation ];
+      remoteResticTemplateFile = config.sops.secrets."restic/jellyfin/template".path;
+    };
     # TODO add nginx proxy
     # services.nginx.virtualHosts."${app}.${config.networking.domain}" = {
     #   useACMEHost = config.networking.domain;

@@ -131,14 +161,5 @@ in
     #     ];
     #   }
     # ];
-
-    # TODO add restic backup
-    # services.restic.backups = config.lib.mySystem.mkRestic {
-    #   inherit app user;
-    #   excludePaths = [ "Backups" ];
-    #   paths = [ appFolder ];
-    #   inherit appFolder;
-    # };
-
   };
 }
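
The NixOS restic module names its units restic-backups-<name>, so the attrset returned by mkRestic gives this host restic-backups-jellyfin-local.service and restic-backups-jellyfin-remote.service plus matching timers. A manual test run:

    sudo systemctl start restic-backups-jellyfin-local.service
    journalctl -u restic-backups-jellyfin-local.service -f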

nixos/modules/nixos/containers/jellyfin/secrets.sops.yaml (new file) | 88 +

@@ -0,0 +1,88 @@
+restic:
+    jellyfin:
+        env: ENC[AES256_GCM,data:SlhZjwhe1xYlks1TCvM=,iv:RCGjs9JYKSOlK9J+4m20FS/nca+v9+a87aolAOMEpOI=,tag:4TbyZ7q4x/zw+MGwd8Y2oQ==,type:str]
+        password: ENC[AES256_GCM,data:kacukf7Js/9RLFGegR8wTm11md0nVpNErrfFLXRVLGI2HA==,iv:7IPiDAlc8c3uJImn/+l4nqnD24Nij44nhFkbaqO7SHQ=,tag:1dIYZaiYX9Y5RmuvAypy3w==,type:str]
+        template: ENC[AES256_GCM,data:rqxj00XCHNDoTb6rbfz+dyT4YwpsL9I2mAxZ8Mn3ngadeIHLOtLVzBvtoCMBli5Bhq3ZYYzjxRqtLQSRUz3AyeWr,iv:zKoSXoi2gkTFfAHZfhVf7uN5QTDaxHlEW3gSRbeBsQ8=,tag:eyNQBoxtOp39nQrH6dFstA==,type:str]
+sops:
+    kms: []
+    gcp_kms: []
+    azure_kv: []
+    hc_vault: []
+    age:
+        - recipient: age1d9p83j52m2xg0vh9k7q0uwlxwhs3y6tlv68yg9s2h9mdw2fmmsqshddz5m
+          enc: |
+            -----BEGIN AGE ENCRYPTED FILE-----
+            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAxaDJpYXRySUgzbGgrRVJ2
+            bEtQS2N4VGNVTEFXYTNqWSt5ME1mYUM0Y1MwCnljajF5d0swS2w1ZjVXMVVKKzNJ
+            UEhhbzlMVzlRbzVQMGhQKzJuZ0lqUEUKLS0tIFFMcUtTT2hEQ1lwUXNyaUF4MEVS
+            TG8vV05iYWxkNmNFZVhDaTh3TmZRRzQKS6Pwx8S211SzAGYoandbGG9qrf8sFBaR
+            JO/5KwkD5y5ZqFGdPTpJsZfYeIXkKLbrvgnbPUr92H9qANgPntVE7w==
+            -----END AGE ENCRYPTED FILE-----
+        - recipient: age1m83ups8xn2jy4ayr8gw0pyn34smr0huqc5v76e4887az4vsl4yzsj0dlhd
+          enc: |
+            -----BEGIN AGE ENCRYPTED FILE-----
+            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBXTHB3OUpBdmwvenc2Sjgw
+            eUFwNVVQT0J5bUVVYnRPYS9qRlRjdjUwZ253CkJDd2RDeHBydGIyUndxNVdFUTdP
+            K29seHQ5OGJOWDlVSnkzQTZvTXcwNHMKLS0tIGF2UG1XQUovc3Q2QlJqWHJ0MWdM
+            cXlEVi8yZWxuQVlHUm1TUWFVL0NGVEEK3fqKRT/ZHfhlZxQ6QJP1zf1KogFo3krz
+            xRXr2WfUPZ2E+R0VoatHVdmlnDPscqL2Okjd4q06Ow8qivDhXj8wWQ==
+            -----END AGE ENCRYPTED FILE-----
+        - recipient: age18kj3xhlvgjeg2awwku3r8d95w360uysu0w5ejghnp4kh8qmtge5qwa2vjp
+          enc: |
+            -----BEGIN AGE ENCRYPTED FILE-----
+            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAxVlh3UkR3dzRXdWJEckY5
+            UmdjRUtyQ3ZNbzBYeEwxZ3BGenczMVhyQVZzCmw5QTVwenRTN2xVcU5Sb2w4Y3V2
+            d2FyTXpUZW5wSE9GMjlIOUV1eEQrY3cKLS0tIGdHaXBzd3M3TFFsUU8yTGVWaWdZ
+            Mmt2bUxpb3RCb0o5cEU2WUt2OURkc3cKmb95HbzdeaFXaoga/xGpmcvant2xMIuu
+            oCahUeaavMcpJ2/xujw89kNkKoszBAin72Y6pSWaHFiWPVVuwtRp5g==
+            -----END AGE ENCRYPTED FILE-----
+        - recipient: age1lp6rrlvmytp9ka6q89m0e0am26222kwrn7aqd45hu07s3a6jv3gqty86eu
+          enc: |
+            -----BEGIN AGE ENCRYPTED FILE-----
+            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA4QnF1K1dsdWpGODBtejZy
+            enQ5TElWRzJZa3pURXVEZ2VzaVJ2RWtnNXg4CkRyU1g4L01IeU91cXRQY0hoOFJh
+            NTdZL3dVeUg1aml1ZzFRSUtHVFlmZ0UKLS0tIHhSZGV3akdxOE5IcThwM0tXOVlQ
+            SUtaZHhTcE05dWxjQTVBRFBTdTNwelkKSKEfNR1PE/qvHPdEyCBp0bl2EUJDGdlk
+            0t9AoUMBI3W4WrGQjlPz3H1JkwmniEvB6hsC4KA2l6Kg0lLGY2OBWA==
+            -----END AGE ENCRYPTED FILE-----
+        - recipient: age1e4sd6jjd4uxxsh9xmhdsnu6mqd5h8c4zz4gwme7lkw9ee949fc9q4px9df
+          enc: |
+            -----BEGIN AGE ENCRYPTED FILE-----
+            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSByL2lZa09oUXlrK3JqWVMr
+            WWhCbGpQNVdrUDBjNFVxTktrQjFFMSs1c0ZVCk1pT3Z3MXFMVTdCNzdZQWU3UEg4
+            YWlsYkpZRnhYYVdUSzhjYjBnb0pzM00KLS0tIDdwRUV4S2IrdUJvZ2VqWnlkZmlN
+            RTU3RTRXSGZWQzJLZ1B2R1BvdEczeUUK1YqO0cOA9S9F69s8civG7B5fBBa0mIHt
+            W8jNV2d2ivDMNZKZztZ4CdfuvTybHdPIyGOQQ3KFi1RD2hp7OXow4g==
+            -----END AGE ENCRYPTED FILE-----
+        - recipient: age19jm7uuam7gkacm3kh2v7uqgkvmmx0slmm9zwdjhd2ln9r60xzd7qh78c5a
+          enc: |
+            -----BEGIN AGE ENCRYPTED FILE-----
+            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBzSUFTUStJazNsK015cTZT
+            NjgxRU0rNzBzK3NKNU5MaldQanpTSUlKbVVNCm1IcHZ6TEI0aEZXSW1hVC9MVUJs
+            b1RIRkpBZkMydGt1dDlzOGpwTytybGMKLS0tIGxRSld3ZjdtRTFadEFRNzVTQ0Zt
+            VndNOFJMUTdQbEtrSHJENkQ3RmxOQ0EKL19WyFWH+1jqdchRoZHVsn6OCI0v8KxS
+            lLNO2iFEISmzv18w9awANroZNmqod+YQgWB/DC/lob8HSUIG4uUatw==
+            -----END AGE ENCRYPTED FILE-----
+        - recipient: age1nwnqxjuaxlt5g7fe8rnspvn2c36uuef4hzwuwa6cfjfalz2lrd4q4n5fpl
+          enc: |
+            -----BEGIN AGE ENCRYPTED FILE-----
+            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBxRnlTVWVCNTB6ZmJ4Q1hN
+            Um4xb3ZxaU13TkhDbUF6SzVlUW45WWNBZFdRCnV1K1pGWm90OGs5ckdDTzBHTm83
+            WmplOGwxNFhwRkN6MVNTQTVTWnAyVGcKLS0tIHlwcll6cGZhbGxXM0hONk8wZ1lE
+            aXdzYWtTMEJJU056RDJYbGF3Y05WTG8KBXCmARd1lq1/vwHciwUVlhyeoDmjeDl7
+            Met1WpME/9R+P39I4fWNY9z6F60xeQsIVUPuQyr/K7T9xwayYwtZcQ==
+            -----END AGE ENCRYPTED FILE-----
+        - recipient: age1a8z3p24v32l9yxm5z2l8h7rpc3nhacyfv4jvetk2lenrvsdstd3sdu2kaf
+          enc: |
+            -----BEGIN AGE ENCRYPTED FILE-----
+            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBqbDAzNTExZUsyMUtCMDhy
+            OU1rUWpGejNvVEJLalRTN284c041aThhYXdvCmFaODU3WHcvQkdEanI3OU9EQzJG
+            Nk1JNjhoSFVqKysybVltbGRuazArcmsKLS0tIDd4RThtU1M5b3AzNXY2RjYxTjkz
+            Q1p4dk96SXpIbnR3d25jNnk4YW5qRmsKUjMeb/+q4blAtiT58AHletkNt8xrvH7M
+            FGAuRRuwIxBKrbCl4fAzM/CuEslvyr/Jrf4ulazI6l+hSwJNwKdimA==
+            -----END AGE ENCRYPTED FILE-----
+    lastmodified: "2024-12-27T04:08:26Z"
+    mac: ENC[AES256_GCM,data:sJ0EB7SGKAJLa4/H7yRt4L4jOpZKz0uKDJqnLnjTel+DL5m9lnn55vQILEV2ynOBf+xqYE60ZBxSe3MlPjYBXig/Zr6oJV7g1pwpvpS4rmyGUzHr9xGuVXOWuWzHIvNePkWozPATWvW/hBkSMlBVp54lTjY/KI/UnOuvwCs/uIk=,iv:CkHPpk4FiLfYu6PdVBoYfn8IG2tvRYnB2Noq18JEfl8=,tag:ctKf2pmp+JaZpahehG1JqA==,type:str]
+    pgp: []
+    unencrypted_suffix: _unencrypted
+    version: 3.9.2

@@ -7,19 +7,18 @@
 with lib;
 let
   app = "plex";
+  cfg = config.mySystem.containers.${app};
+  group = "kah";
+  image = "ghcr.io/onedr0p/plex:${version}";
+  user = "kah";
   # renovate: depName=ghcr.io/onedr0p/plex datasource=docker versioning=loose
   version = "1.41.3.9314-a0bfb8370";
-  image = "ghcr.io/onedr0p/plex:${version}";
-  cfg = config.mySystem.containers.${app};
+  volumeLocation = "/nahar/containers/volumes/plex";
 in
 {
   # Options
   options.mySystem.containers.${app} = {
     enable = mkEnableOption "${app}";
-    # TODO add to homepage
-    # addToHomepage = mkEnableOption "Add ${app} to homepage" // {
-    #   default = true;
-    # };
     openFirewall = mkEnableOption "Open firewall for ${app}" // {
       default = true;
     };

@@ -34,7 +33,7 @@ in
       after = [ "network.target" ];

       serviceConfig = {
-        ExecStartPre = "${pkgs.writeShellScript "scrypted-start-pre" ''
+        ExecStartPre = "${pkgs.writeShellScript "plex-start-pre" ''
           set -o errexit
           set -o nounset
           set -o pipefail

@@ -42,6 +41,7 @@ in
           ${pkgs.podman}/bin/podman rm -f ${app} || true
           rm -f /run/${app}.ctr-id
         ''}";
+        # TODO: mount /config instead of /config/Library/Application Support/Plex Media Server
         ExecStart = ''
           ${pkgs.podman}/bin/podman run \
             --rm \

@@ -51,8 +51,8 @@ in
             --cidfile=/run/${app}.ctr-id \
             --cgroups=no-conmon \
             --sdnotify=conmon \
-            --user=568:568 \
-            --volume="/nahar/containers/volumes/plex:/config/Library/Application Support/Plex Media Server:rw" \
+            --user="${toString config.users.users."${user}".uid}:${toString config.users.groups."${group}".gid}" \
+            --volume="${volumeLocation}:/config:rw" \
             --volume="/moria/media:/media:rw" \
             --volume="tmpfs:/config/Library/Application Support/Plex Media Server/Logs:rw" \
             --volume="tmpfs:/tmp:rw" \

@@ -78,6 +78,38 @@ in
       ];
     };

+    sops.secrets = {
+      "restic/plex/env" = {
+        sopsFile = ./secrets.sops.yaml;
+        owner = user;
+        group = group;
+        mode = "0400";
+      };
+      "restic/plex/password" = {
+        sopsFile = ./secrets.sops.yaml;
+        owner = user;
+        group = group;
+        mode = "0400";
+      };
+      "restic/plex/template" = {
+        sopsFile = ./secrets.sops.yaml;
+        owner = user;
+        group = group;
+        mode = "0400";
+      };
+    };
+
+    # Restic backups for `plex-local` and `plex-remote`
+    services.restic.backups = config.lib.mySystem.mkRestic {
+      inherit app user;
+      environmentFile = config.sops.secrets."restic/plex/env".path;
+      excludePaths = [ "${volumeLocation}/Library/Application Support/Plex Media Server/Cache" ];
+      localResticTemplate = "/eru/restic/plex";
+      passwordFile = config.sops.secrets."restic/plex/password".path;
+      paths = [ "${volumeLocation}/Library" ];
+      remoteResticTemplateFile = config.sops.secrets."restic/plex/template".path;
+    };
+
     # TODO add nginx proxy
     # services.nginx.virtualHosts."${app}.${config.networking.domain}" = {
     #   useACMEHost = config.networking.domain;

@@ -123,13 +155,6 @@ in
     #   }
     # ];
-
-    # TODO add restic backup
-    # services.restic.backups = config.lib.mySystem.mkRestic {
-    #   inherit app user;
-    #   excludePaths = [ "Backups" ];
-    #   paths = [ appFolder ];
-    #   inherit appFolder;
-    # };
-
   };
 }
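
Because plex now backs up only ${volumeLocation}/Library and excludes the Plex cache directory, it is worth verifying the repository contents after the first run. With the local repository path from localResticTemplate above (restic prompts for the password, or accepts --password-file):

    restic -r /eru/restic/plex snapshots
    restic -r /eru/restic/plex ls latest | head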

nixos/modules/nixos/containers/plex/secrets.sops.yaml (new file) | 88 +

@@ -0,0 +1,88 @@
+restic:
+    plex:
+        env: ENC[AES256_GCM,data:Kuo21H4HZ4YVAsmj/Lw=,iv:9QE3ghWliEFahgTXKxPE38lA/UW4XL/0QAVHxB/VYJM=,tag:TmLgSwzxI2PfdijRpqskvA==,type:str]
+        password: ENC[AES256_GCM,data:aEBi5RRFR+PDXFseVHCcDbjRkXkMQLHTa7fygi6e971UNA==,iv:Iwg9IXp0NHqJP7BAPFqG0bLWWKdwC2wwiOJP7cz4E/M=,tag:KpKa8z/FlOgfygEue2xAtQ==,type:str]
+        template: ENC[AES256_GCM,data:usADYx+OKwW+RMwZkyaDq8OpWR5pyYH9bOaIGaN06/aqZlPA5EwoW5C6BTTj0z+s1y8Xz/WhTHK/sljyV1ZLtC9D,iv:tdSrw/wnM9gC8Q5zo5PsGeWrTVtuyKQRU3Dyd2XWYpQ=,tag:mds0QglR2B+RXC/tpkTO4Q==,type:str]
+sops:
+    kms: []
+    gcp_kms: []
+    azure_kv: []
+    hc_vault: []
+    age:
+        - recipient: age1d9p83j52m2xg0vh9k7q0uwlxwhs3y6tlv68yg9s2h9mdw2fmmsqshddz5m
+          enc: |
+            -----BEGIN AGE ENCRYPTED FILE-----
+            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBDS0F4REt5RklvVE5mU0Ra
+            RkxvWHdYUHdMN3JxdjVBRTZkOWtCSTN5dHlnCmp5V2djZXEvbFlnZEhNaU5qcUtR
+            U2RyQW5nSmNVT3FyNjhyU3E3MjRHcncKLS0tIHBZMmNlZ0llN0t6N09TeUY1ZWZm
+            dzJ4STBQcVZyMFEyYUY0clBIQ0tCcWsKNZ2c9tVKrEbcaVn1Tk/7Fkc3ZMWDST5q
+            LRiO+63fLkXj6LBOH8WOKDMKnYym+Ii1PiCV6QNizXE5H534xHpDQQ==
+            -----END AGE ENCRYPTED FILE-----
+        - recipient: age1m83ups8xn2jy4ayr8gw0pyn34smr0huqc5v76e4887az4vsl4yzsj0dlhd
+          enc: |
+            -----BEGIN AGE ENCRYPTED FILE-----
+            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBHV3pRM0RES201cUVKcCtK
+            eEFBaUhsWlJnTTFvRFdxMFpFeUp6WWh6OFNBCmlNR3EyWXZxY09IenVVL3ViNmIx
+            RC8wWjNlN2JlVTBDaE85aEw2ME1MaWcKLS0tIFlaNDV0bFBXNERhdmN4a1hLem1s
+            UVNVTDhSellJTkRPZE9hN0wvanF3N2sKJ4Qp30gHicpGhowLCP+T4EdVy+wV1Pxk
+            MF/yh9Og7mW1FvydCRz5GZZPWMGmEeC5c/SiqUD9xorXzzthYkUtTA==
+            -----END AGE ENCRYPTED FILE-----
+        - recipient: age18kj3xhlvgjeg2awwku3r8d95w360uysu0w5ejghnp4kh8qmtge5qwa2vjp
+          enc: |
+            -----BEGIN AGE ENCRYPTED FILE-----
+            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAwdTV0enozam9NK1FpZUNT
+            aUkzUnhHM0lqN1Q0Ymp4VzhOQjdXQmdLSkg4CkpvTmhmUlRMSmxpN0ZRWUFrQng1
+            eXVrMzFjRUNiN2VDcTVBRW9nTVBlQzQKLS0tIEJtNk91SnBkcGNBbXRLSjhjMEx4
+            NUZMYTd2MFdYT2YxYTdBMXpRM3ZFUUEKn2rRJePBAZeslthoSZ9+suhqcKZIHR0T
+            z4PTQG6ZY1OVotIbK52JF34yi1FVH020UhFJolD3HZ7W+z1D88Mtqg==
+            -----END AGE ENCRYPTED FILE-----
+        - recipient: age1lp6rrlvmytp9ka6q89m0e0am26222kwrn7aqd45hu07s3a6jv3gqty86eu
+          enc: |
+            -----BEGIN AGE ENCRYPTED FILE-----
+            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBGY3crMXhpdFdZZjZLT1Fh
+            QTV0RDVVQzJIbUtwUGwzWHo5QVYwK2JXMUM4ClZlMHJtSkR5dnZoanVxUW4yNlgy
+            ZGRpbUs1TnZQb1NnS2VpaDgyL0NOZW8KLS0tIGFqeGo5dHYyaEl1ZkZxYWpKZGlY
+            TmhxT1F4enhFelJ0bndrT2dqbG95VWsKzyQCNjbGXO98fjmmtrYhv7An4s+BLKpq
+            TtiRp1PFtjb1EF6QwBhNWFuYE9QK08T3m0Dkr32A6PvuiAIrDfaCsQ==
+            -----END AGE ENCRYPTED FILE-----
+        - recipient: age1e4sd6jjd4uxxsh9xmhdsnu6mqd5h8c4zz4gwme7lkw9ee949fc9q4px9df
+          enc: |
+            -----BEGIN AGE ENCRYPTED FILE-----
+            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBMNmpEbEVHTllRT0tEM2xo
+            Ulc4bDFVVGNNTTdwTUhvOXk4aGcvcjZpLzJNCmphZFl5NkdxZVIwWnFHWDdSa241
+            dHAvYlVFb0lVNDFQSEdYWStWRjQvNHcKLS0tIFlYVHlWaGszL1pxYStmTWtFcXRW
+            aGErRDZFeDNpMXFneU1jYTVLS2lsWVkK4Zggc2aIxTX+PnfDyPLXsifxnsRwrZ84
+            v9G4lY/ReLaO6xHG/824W2ZIuDW5zsBtdbui8UFSwVsf1XxkOUzSDw==
+            -----END AGE ENCRYPTED FILE-----
+        - recipient: age19jm7uuam7gkacm3kh2v7uqgkvmmx0slmm9zwdjhd2ln9r60xzd7qh78c5a
+          enc: |
+            -----BEGIN AGE ENCRYPTED FILE-----
+            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBXQTNCT1dDbTNMM0Jla1Fv
+            WHZ1RStHQTJ2aWd3U21BWml0VWFnT2lVOEhVClFuZDJrUUgyU05Pb1UrV3pXOVhw
+            eUZCTGdvbUNZdGFoVW84VVpMSGNrSGsKLS0tIDgwOTlmMGxPN0xHb215dDRXOW4x
+            S2dmd29FaU5PaHVzb1cvMDlQeTJCY1UK5iLYEfOJM8m6Tml8XhTz6O0NMb1a4y3Z
+            auQnSKjRskPfSRbBf0stv1oAuTENHEbqJiQCOk2aWG+gO5ih36/Ffw==
+            -----END AGE ENCRYPTED FILE-----
+        - recipient: age1nwnqxjuaxlt5g7fe8rnspvn2c36uuef4hzwuwa6cfjfalz2lrd4q4n5fpl
+          enc: |
+            -----BEGIN AGE ENCRYPTED FILE-----
+            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAvZERzWVNJLzFvaHRxL1hN
+            Y2JKcmQzbWYveDlQd3VtL3U4ZE8vclZoeXk4ClNaMERjcnAyREZSNHg5YkgxL3lm
+            MHlwR3JWVkEwZ1NlbzFtbDI4Wmh4cmsKLS0tIDlzUG8yd3daS0FpRm5KcWI5NEFq
+            cjh6bWloSktCTEc4bStQb09UM0FvWncKa/mAWmXffFBGIfQtmQxAtZE/dwzPDdpN
+            a1/eE3nx/9r4M2NhI7mAJbN2e1V7YgjW0xL0kKSSMLutYt6vN4bnEQ==
+            -----END AGE ENCRYPTED FILE-----
+        - recipient: age1a8z3p24v32l9yxm5z2l8h7rpc3nhacyfv4jvetk2lenrvsdstd3sdu2kaf
+          enc: |
+            -----BEGIN AGE ENCRYPTED FILE-----
+            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAyQnQram51YWJMb1VFcGNo
+            L3JJdkRyRzF3U3R6N2FWTlFJdThNb3JBVEZnCmRHZ2o3NFBhc0xOK1VYN2FaTHJV
+            bWNVc0MvQlFTenk3MEJyYzJQM2JCeWMKLS0tIG40K2NUeHhENlJMRmFTSmN0amlW
+            STBERjI3ZFNydXA1Vzcvb1BRZnlRZDQK6p/uU7z9Q0zr3uZLHEgNcK2IR144MFu3
+            BurEmLIWzSfhLJTGkop6ODDKpKORapldMJTigGt2+QZ3jwQwBai9TQ==
+            -----END AGE ENCRYPTED FILE-----
+    lastmodified: "2024-12-27T04:10:37Z"
+    mac: ENC[AES256_GCM,data:xbJ6G5LQApSWWQfWdcN5DNsaaNdniprNgODtfXHoAGvmOf9r4tYXtI3LPwxXSpyvLIRDv1orasYOxH3m0h5+PIkqegasOuRHcXtRll8e05qD2p/RNPPSAiAl08EvRoJAuu0wYP/GR90/UfYMk6UeRKNFt5YFNdn5CZuyAV/drkc=,iv:zijqky8rBhmGwOJ3gkJDx8UVIFxEtxCPQaq4+2lgwZs=,tag:d1abcbyMZkSV4uolMS7eaA==,type:str]
+    pgp: []
+    unencrypted_suffix: _unencrypted
+    version: 3.9.2

@@ -1,49 +1,112 @@
-{ lib, config, pkgs, ... }:
+{
+  lib,
+  config,
+  pkgs,
+  ...
+}:
 {
+
   # container builder
-  lib.mySystem.mkContainer = options: (
-    let
-      containerExtraOptions = lib.optionals (lib.attrsets.attrByPath [ "caps" "privileged" ] false options) [ "--privileged" ]
-        ++ lib.optionals (lib.attrsets.attrByPath [ "caps" "readOnly" ] false options) [ "--read-only" ]
-        ++ lib.optionals (lib.attrsets.attrByPath [ "caps" "tmpfs" ] false options) (map (folders: "--tmpfs=${folders}") options.caps.tmpfsFolders)
-        ++ lib.optionals (lib.attrsets.attrByPath [ "caps" "noNewPrivileges" ] false options) [ "--security-opt=no-new-privileges" ]
-        ++ lib.optionals (lib.attrsets.attrByPath [ "caps" "dropAll" ] false options) [ "--cap-drop=ALL" ];
-    in
-    {
-      ${options.app} = {
-        image = "${options.image}";
-        user = "${options.user}:${options.group}";
-        environment = {
-          TZ = config.time.timeZone;
-        } // lib.attrsets.attrByPath [ "env" ] { } options;
-        dependsOn = lib.attrsets.attrByPath [ "dependsOn" ] [ ] options;
-        entrypoint = lib.attrsets.attrByPath [ "entrypoint" ] null options;
-        cmd = lib.attrsets.attrByPath [ "cmd" ] [ ] options;
-        environmentFiles = lib.attrsets.attrByPath [ "envFiles" ] [ ] options;
-        volumes = [ "/etc/localtime:/etc/localtime:ro" ]
-          ++ lib.attrsets.attrByPath [ "volumes" ] [ ] options;
-        ports = lib.attrsets.attrByPath [ "ports" ] [ ] options;
-        extraOptions = containerExtraOptions;
-      };
-    }
-  );
+  lib.mySystem.mkContainer =
+    options:
+    (
+      let
+        containerExtraOptions =
+          lib.optionals (lib.attrsets.attrByPath [ "caps" "privileged" ] false options) [ "--privileged" ]
+          ++ lib.optionals (lib.attrsets.attrByPath [ "caps" "readOnly" ] false options) [ "--read-only" ]
+          ++ lib.optionals (lib.attrsets.attrByPath [ "caps" "tmpfs" ] false options) (
+            map (folders: "--tmpfs=${folders}") options.caps.tmpfsFolders
+          )
+          ++ lib.optionals (lib.attrsets.attrByPath [ "caps" "noNewPrivileges" ] false options) [
+            "--security-opt=no-new-privileges"
+          ]
+          ++ lib.optionals (lib.attrsets.attrByPath [ "caps" "dropAll" ] false options) [ "--cap-drop=ALL" ];
+      in
+      {
+        ${options.app} = {
+          image = "${options.image}";
+          user = "${options.user}:${options.group}";
+          environment = {
+            TZ = config.time.timeZone;
+          } // lib.attrsets.attrByPath [ "env" ] { } options;
+          dependsOn = lib.attrsets.attrByPath [ "dependsOn" ] [ ] options;
+          entrypoint = lib.attrsets.attrByPath [ "entrypoint" ] null options;
+          cmd = lib.attrsets.attrByPath [ "cmd" ] [ ] options;
+          environmentFiles = lib.attrsets.attrByPath [ "envFiles" ] [ ] options;
+          volumes = [
+            "/etc/localtime:/etc/localtime:ro"
+          ] ++ lib.attrsets.attrByPath [ "volumes" ] [ ] options;
+          ports = lib.attrsets.attrByPath [ "ports" ] [ ] options;
+          extraOptions = containerExtraOptions;
+        };
+      }
+    );

-  # build a restic restore set for both local and remote
-  lib.mySystem.mkRestic = options: (
+  ## Creates a standardized restic backup configuration for both local and remote backups per app.
+  # One S3 bucket per server. Each app has its own repository in the bucket.
+  # Or back up each app to its own remote repository.
+  # Takes an attribute set with:
+  # - app: name of the application (used for backup naming)
+  # - user: user to run the backup as
+  # - localResticTemplate: template for local restic backup
+  # - passwordFile: path to the password file
+  # - paths: list of paths to backup
+  # - remoteResticTemplate: template for remote restic backup
+  # - environmentFile (optional): path to the env file
+  # - excludePaths (optional): list of paths to exclude from backup
+  # Configures:
+  # - Daily backups at 02:05 with 3h random delay
+  # - Retention: 7 daily, 5 weekly, 12 monthly backups
+  # - Automatic stale lock removal
+  # - Uses system-configured backup paths and credentials
+  #
+  # Example usage:
+  # services.restic.backups = config.lib.mySystem.mkRestic {
+  #   app = "nextcloud";
+  #   paths = [ "/nahar/containers/volumes/nextcloud" ];
+  #   excludePaths = [ "/nahar/containers/volumes/nextcloud/data/cache" ];
+  #   user = "kah";
+  #   localResticTemplate = "/eru/restic/nextcloud";
+  #   remoteResticTemplate = "rest:https://user:password@x.repo.borgbase.com";
+  #   remoteResticTemplate = "s3:https://x.r2.cloudflarestorage.com/resticRepos";
+  #   remoteResticTemplateFile = "/run/secrets/restic/nextcloud/template";
+  #   passwordFile = "/run/secrets/restic/nextcloud/password";
+  #   environmentFile = "/run/secrets/restic/nextcloud/env";
+  # };
+  # This creates two backup jobs:
+  # - nextcloud-local: backs up to local storage
+  # - nextcloud-remote: backs up to remote storage (e.g. S3)
+  lib.mySystem.mkRestic =
+    options:
     let
+      # excludePaths is optional
       excludePaths = if builtins.hasAttr "excludePaths" options then options.excludePaths else [ ];
+      # Decide which mutually exclusive options to use
+      remoteResticTemplateFile =
+        if builtins.hasAttr "remoteResticTemplateFile" options then
+          options.remoteResticTemplateFile
+        else
+          null;
+      remoteResticTemplate =
+        if builtins.hasAttr "remoteResticTemplate" options then
+          options.remoteResticTemplate
+        else
+          null;
+      # 2:05 daily backup with 3h random delay
       timerConfig = {
         OnCalendar = "02:05";
         Persistent = true;
         RandomizedDelaySec = "3h";
       };
+      # 7 daily, 5 weekly, 12 monthly backups
       pruneOpts = [
         "--keep-daily 7"
         "--keep-weekly 5"
         "--keep-monthly 12"
       ];
+      # Initialize the repository if it doesn't exist
       initialize = true;
+      # Only one backup is ever running at a time, so it's safe to remove stale locks
       backupPrepareCommand = ''
         # remove stale locks - this avoids some occasional annoyance
         #

@@ -53,28 +116,33 @@
     {
       # local backup
       "${options.app}-local" = {
-        inherit pruneOpts timerConfig initialize backupPrepareCommand;
+        inherit
+          pruneOpts
+          timerConfig
+          initialize
+          backupPrepareCommand
+          ;
+        inherit (options) user passwordFile environmentFile;
         # Move the path to the zfs snapshot path
-        paths = map (x: "${config.mySystem.system.resticBackup.mountPath}/${x}") options.paths;
-        passwordFile = config.sops.secrets."services/restic/password".path;
-        exclude = excludePaths;
-        repository = "${config.mySystem.system.resticBackup.local.location}/${options.appFolder}";
-        # inherit (options) user;
+        paths = map (x: "${config.mySystem.services.zfs-nightly-snap.mountPath}/${x}") options.paths;
+        exclude = map (x: "${config.mySystem.services.zfs-nightly-snap.mountPath}/${x}") options.excludePaths;
+        repository = "${options.localResticTemplate}";
       };

       # remote backup
       "${options.app}-remote" = {
-        inherit pruneOpts timerConfig initialize backupPrepareCommand;
+        inherit
+          pruneOpts
+          timerConfig
+          initialize
+          backupPrepareCommand
+          ;
+        inherit (options) user passwordFile environmentFile;
         # Move the path to the zfs snapshot path
-        paths = map (x: "${config.mySystem.system.resticBackup.mountPath}/${x}") options.paths;
-        environmentFile = config.sops.secrets."services/restic/env".path;
-        passwordFile = config.sops.secrets."services/restic/password".path;
-        repository = "${config.mySystem.system.resticBackup.remote.location}/${options.appFolder}";
-        exclude = excludePaths;
-        # inherit (options) user;
+        paths = map (x: "${config.mySystem.services.zfs-nightly-snap.mountPath}/${x}") options.paths;
+        repository = remoteResticTemplate;
+        repositoryFile = remoteResticTemplateFile;
+        exclude = map (x: "${config.mySystem.services.zfs-nightly-snap.mountPath}/${x}") options.excludePaths;
       };
-    }
-  );
-
+    };
 }
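
Putting the helper together with the jellyfin call earlier in this commit, the generated attrset is roughly the following (hand-expanded sketch, not generated output; sops-nix materializes secrets under /run/secrets by default):

    {
      jellyfin-local = {
        user = "kah";
        repository = "/eru/restic/jellyfin";
        # paths are rewritten onto the nightly snapshot mount:
        paths = [ "/mnt/restic_nightly_backup//nahar/containers/volumes/jellyfin" ];
        exclude = [ ];
        initialize = true;
        pruneOpts = [ "--keep-daily 7" "--keep-weekly 5" "--keep-monthly 12" ];
        timerConfig = { OnCalendar = "02:05"; Persistent = true; RandomizedDelaySec = "3h"; };
        passwordFile = "/run/secrets/restic/jellyfin/password";
        environmentFile = "/run/secrets/restic/jellyfin/env";
      };
      jellyfin-remote = {
        # same, plus the repository read from the encrypted remote template:
        repositoryFile = "/run/secrets/restic/jellyfin/template";
      };
    }

The doubled slash in paths comes from prefixing an already-absolute path with mountPath; path resolution collapses it, though stripping the leading slash before joining would be tidier.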

@@ -7,11 +7,12 @@
     ./libvirt-qemu
     ./matchbox
     ./nginx
+    ./nix-index-daily
     ./onepassword-connect
     ./podman
     ./reboot-required-check.nix
-    ./restic
     ./sanoid
     ./syncthing
+    ./zfs-nightly-snap
   ];
 }

@@ -5,55 +5,45 @@
   ...
 }:
 let
-  cfg = config.services.nix-index-daily;
+  cfg = config.mySystem.services.nix-index-daily;
 in
 {
-  options.services.nix-index-daily = {
+  options.mySystem.services.nix-index-daily = {
     enable = lib.mkEnableOption "Automatic daily nix-index database updates";

     user = lib.mkOption {
       type = lib.types.str;
       description = "User account under which to run nix-index";
-      example = "alice";
+      example = "jahanson";
     };

     startTime = lib.mkOption {
       type = lib.types.str;
       default = "daily";
       description = "When to start the service. See systemd.time(7)";
-      example = "03:00";
-    };
-
-    randomizedDelaySec = lib.mkOption {
-      type = lib.types.int;
-      default = 3600;
-      description = "Random delay in seconds after startTime";
-      example = 1800;
+      example = "05:00";
     };
   };

   config = lib.mkIf cfg.enable {
-    users.users.${cfg.user}.packages = [ pkgs.nix-index ];
-
-    systemd.user.services.nix-index-update = {
-      description = "Update nix-index database";
-      script = "${pkgs.nix-index}/bin/nix-index";
-      serviceConfig = {
-        Type = "oneshot";
-      };
-    };
-
-    systemd.user.timers.nix-index-update = {
-      wantedBy = [ "timers.target" ];
-      timerConfig = {
-        OnCalendar = cfg.startTime;
-        Persistent = true;
-        RandomizedDelaySec = cfg.randomizedDelaySec;
-      };
-    };
-
-    # Ensure the services are enabled
-    systemd.user.services.nix-index-update.enable = true;
-    systemd.user.timers.nix-index-update.enable = true;
+    systemd.user = {
+      # Timer for nix-index update
+      timers.nix-index-update = {
+        wantedBy = [ "timers.target" ];
+        partOf = [ "nix-index-update.service" ];
+        timerConfig = {
+          OnCalendar = cfg.startTime;
+          Persistent = true;
+        };
+      };
+      # Service for nix-index update
+      services.nix-index-update = {
+        description = "Update nix-index database";
+        script = "${pkgs.nix-index}/bin/nix-index";
+        serviceConfig = {
+          Type = "oneshot";
+        };
+      };
+    };
   };
 }
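
With the module moved under the mySystem namespace, enabling it from a host config looks like this (a sketch based on the options declared above):

    mySystem.services.nix-index-daily = {
      enable = true;
      user = "jahanson";
      startTime = "05:00"; # optional, defaults to "daily"
    };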

@@ -1,108 +0,0 @@
-{ lib, config, pkgs, ... }:
-with lib;
-let
-  cfg = config.mySystem.system.resticBackup;
-in
-{
-  options.mySystem.system.resticBackup = {
-    local = {
-      enable = mkEnableOption "Local backups" // { default = true; };
-      noWarning = mkOption
-        {
-          type = types.bool;
-          description = "Disable warning for local backups";
-          default = false;
-        };
-      location = mkOption
-        {
-          type = types.str;
-          description = "Location for local backups";
-          default = "";
-        };
-    };
-    remote = {
-      enable = mkEnableOption "Remote backups" // { default = true; };
-      noWarning = mkOption
-        {
-          type = types.bool;
-          description = "Disable warning for remote backups";
-          default = false;
-        };
-      location = mkOption
-        {
-          type = types.str;
-          description = "Location for remote backups";
-          default = "";
-        };
-    };
-    mountPath = mkOption
-      {
-        type = types.str;
-        description = "Location for snapshot mount";
-        default = "/mnt/nightly_backup";
-      };
-  };
-  config = {
-
-    # Warn if backups are disable and machine isnt a dev box
-    warnings = [
-      (mkIf (!cfg.local.noWarning && !cfg.local.enable && config.mySystem.purpose != "Development") "WARNING: Local backups are disabled for ${config.system.name}!")
-      (mkIf (!cfg.remote.noWarning && !cfg.remote.enable && config.mySystem.purpose != "Development") "WARNING: Remote backups are disabled for ${config.system.name}!")
-    ];
-
-    sops.secrets = mkIf (cfg.local.enable || cfg.remote.enable) {
-      "services/restic/password" = {
-        sopsFile = ./secrets.sops.yaml;
-        owner = "kah";
-        group = "kah";
-      };
-
-      "services/restic/env" = {
-        sopsFile = ./secrets.sops.yaml;
-        owner = "kah";
-        group = "kah";
-      };
-    };
-
-    # useful commands:
-    # view snapshots - zfs list -t snapshot
-
-    # below takes a snapshot of the zfs persist volume
-    # ready for restic syncs
-    # essentially its a nightly rotation of atomic state at 2am.
-
-    # this is the safest option, as if you run restic
-    # on live services/databases/etc, you will have
-    # a bad day when you try and restore
-    # (backing up a in-use file can and will cause corruption)
-
-    # ref: https://cyounkins.medium.com/correct-backups-require-filesystem-snapshots-23062e2e7a15
-    systemd = mkIf (cfg.local.enable || cfg.remote.enable) {
-
-      timers.restic_nightly_snapshot = {
-        description = "Nightly ZFS snapshot timer";
-        wantedBy = [ "timers.target" ];
-        partOf = [ "restic_nightly_snapshot.service" ];
-        timerConfig.OnCalendar = "2:00";
-        timerConfig.Persistent = "true";
-      };
-
-      # recreate snapshot and mount, ready for backup
-      # I used mkdir -p over a nix tmpfile, as mkdir -p exits cleanly
-      # if the folder already exists, and tmpfiles complain
-      # if the folder exists and is already mounted.
-      services.restic_nightly_snapshot = {
-        description = "Nightly ZFS snapshot for Restic";
-        path = with pkgs; [ zfs busybox ];
-        serviceConfig.Type = "simple";
-        script = ''
-          mkdir -p /mnt/nightly_backup/ && \
-          umount ${cfg.mountPath} || true && \
-          zfs destroy rpool/safe/persist@restic_nightly_snap || true && \
-          zfs snapshot rpool/safe/persist@restic_nightly_snap && \
-          mount -t zfs rpool/safe/persist@restic_nightly_snap ${cfg.mountPath}
-        '';
-      };
-    };
-  };
-}

@@ -1,88 +0,0 @@
-services:
-    restic:
-        password: ENC[AES256_GCM,data:QPU=,iv:6FYmdgpKLplg1uIkXNvyA+DW493xdMLsBLnbenabz+M=,tag:SVY2mEhoPP/exDOENzVRGg==,type:str]
-        repository: ENC[AES256_GCM,data:VGtSJA==,iv:K4FnYzTrfVhjMWf4R7qgPUCdgWFlQAG8JJccfRYlEWM=,tag:43onghqVr44slin0rlIUgQ==,type:str]
-        env: ENC[AES256_GCM,data:TWUJ/GE84CTiLo1Gud+XsA==,iv:gKC1VcWnGqEwn5+e5jIqsIfipi3X2oHGvrG0rgqQl9E=,tag:QIBfXblvSDxAVYbZGAN3Mg==,type:str]
-sops:
-    kms: []
-    gcp_kms: []
-    azure_kv: []
-    hc_vault: []
-    age:
-        - recipient: age1d9p83j52m2xg0vh9k7q0uwlxwhs3y6tlv68yg9s2h9mdw2fmmsqshddz5m
-          enc: |
-            -----BEGIN AGE ENCRYPTED FILE-----
-            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAvRUJEU25EaUhacWFBOVg5
-            TWI3NmtkWFpONHRVZ1BVSVRsQzMraVdmblFBCmd2NzcwMGRTMTR6ck9lcGZSQmVi
-            dHlFeS9RNENKcDEvS2FiRTVrYjVlUGcKLS0tIG1VSW9sejVWZmJHQXlIOVpLMjds
-            SHV6U2ZhUnVpQVNROGNjNEtZZXI1bEUKXjSwBNA8ylfo4CWlefFfajm2JdYtjUVK
-            bqXlIH/nG+nQ+I4Rj1XHo7hAuxCatuN0bGVBkSlzqIZk58/JladwFg==
-            -----END AGE ENCRYPTED FILE-----
-        - recipient: age1m83ups8xn2jy4ayr8gw0pyn34smr0huqc5v76e4887az4vsl4yzsj0dlhd
-          enc: |
-            -----BEGIN AGE ENCRYPTED FILE-----
-            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBMWis3TWZ0djY4YnJNek9N
-            T2VXK0IzaStkMisyaUs5MTVHeXY4bytoUWdnCmlmTmRXRlRwOUZVQm5aWkxSKzFB
-            UzhtbWd2Q09sbTJPeDRWeTFESkcwWUUKLS0tIDVaN0d4UGlTZUhIaXVKaXJRNThS
-            algwTTZsVzNTQngzVUwyU2lpNll0bU0Kjz+34mvPPAfGUQKMH6LXawGou9HjBTjJ
-            p9vxncB+7ykvT4e4Z0PpPE/Zo5yvi9rt1T8bZ6dG7GA5vuE/4BarCA==
-            -----END AGE ENCRYPTED FILE-----
-        - recipient: age18kj3xhlvgjeg2awwku3r8d95w360uysu0w5ejghnp4kh8qmtge5qwa2vjp
-          enc: |
-            -----BEGIN AGE ENCRYPTED FILE-----
-            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSByK2FNS0tJaTdRQzA0VVky
-            aERMTVdqRzBwWFV1WFJJcVRKSTFIUlh3U0E0CmFKZm9jUHBpRjJCZk9PVkNWVEFU
-            RURReEhGNTRmWWpLa1ZNdVFHK3FQQWMKLS0tIHcrMTBiMGhlcFc3RzlmVEp2OEpX
-            ZHZLdXV4a05NaGRmR2Z1SkZCV25kNUEKHU1v1OK0d2ud7QL+gEoA8R4Z5YgVSP42
-            IvnEQxjjXZjC4p+OjFErKcWrVb+3DGzqF1vngJVrXmIgOx/SZKTa/Q==
-            -----END AGE ENCRYPTED FILE-----
-        - recipient: age1lp6rrlvmytp9ka6q89m0e0am26222kwrn7aqd45hu07s3a6jv3gqty86eu
-          enc: |
-            -----BEGIN AGE ENCRYPTED FILE-----
-            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA3MytrUFpsMUVpT3pTNWlq
-            NjMrRjI5a3NqNzlNV2JlczJRNXNicVZaWVdNCjNnRHM2RGV1SEh6M0U3T0NvdlNQ
-            a1JIZFp5bHJwMXlNd29DQ2MwckRrczAKLS0tIHdmd2lFZ1FWTFFMUExPeWRXd2U3
-            RU9UYXJESnAyYXFITTN0cm5QelR2T1UK3XUlIGQED91sUPc1ITq1rXLj/xhkGM9s
-            R4bsTK5RqpXE+RmGfxeAMP7Om424vjM76l6DU2JkoZietDwR35UA8w==
-            -----END AGE ENCRYPTED FILE-----
-        - recipient: age1e4sd6jjd4uxxsh9xmhdsnu6mqd5h8c4zz4gwme7lkw9ee949fc9q4px9df
-          enc: |
-            -----BEGIN AGE ENCRYPTED FILE-----
-            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBjc0haNU95V3JRUlpuUjha
-            SHpOWThJWVMwbElRaFcrL21jYXA2SFBHeFR3CnV1MkRxbG9QV1dWdjJxWENtQk5L
-            M1g0cDJXRjN0VFhiRXZKbG1yS3hXaG8KLS0tIEtScWorRENpbFZWMjVXNnIxTTdi
-            djdBdThNMzFZdlI4TVBJSjdxeXg0VE0Kcwsa/et9gMSlm46rt0vZ/dFy3ZCZQ5Oi
-            WLJ492+srIeE47Gpye2jN2XAmM4exCijYkZeQvPpLIFvBFmQCK30hQ==
-            -----END AGE ENCRYPTED FILE-----
-        - recipient: age19jm7uuam7gkacm3kh2v7uqgkvmmx0slmm9zwdjhd2ln9r60xzd7qh78c5a
-          enc: |
-            -----BEGIN AGE ENCRYPTED FILE-----
-            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBMTDI0QXZaMlZLUW9ST0lW
-            Q1M1ZmlpTHpvM0NHejFSNEx0UUFnTVJIN0U4CllRcnVpUjFqOUZRRk5CWXZqT0V0
-            YWwweld0TE9zZGFmUTVDVVl6eDNETzAKLS0tIGtEanVWTHgxSk9Ld3NRYndOL3dZ
-            WXJrUWtncDZjVE50dmw2MHRCelpzZ2cKfLIQbrTsVGXY+UZCC5p/7+bXKHhv8nxt
-            dvvr+VGnH57jmELqSUoWOgefJ6GFNcCoGSYHZ9cn0UgvhZgx1Wpoow==
-            -----END AGE ENCRYPTED FILE-----
-        - recipient: age1nwnqxjuaxlt5g7fe8rnspvn2c36uuef4hzwuwa6cfjfalz2lrd4q4n5fpl
-          enc: |
-            -----BEGIN AGE ENCRYPTED FILE-----
-            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBRN2M0VmVCQ0JaNVhnRzBj
-            Z2Vqbk9GZUtaZlExYTRPQ3ZJWHIvU283cFRBCjExQnJvZy9SMndJd0VqdUpCSDFJ
-            ZmJpVFJ1em9iNnNOcnFTQUExeGZESm8KLS0tIGdnWXNtNEg2SHpjRW1mR28vVDRv
-            VFVRcDh0TlVXR3pYRk1Ybkx3MjhOaVEKsViUc14dePdnukQa3ud/EesnvZL7OCM1
-            HWJYP81C9O4mU1kwRYtC0lGxMQX6aWiFZ5e2ImSi3w+mBP+KihfmBw==
-            -----END AGE ENCRYPTED FILE-----
-        - recipient: age1a8z3p24v32l9yxm5z2l8h7rpc3nhacyfv4jvetk2lenrvsdstd3sdu2kaf
-          enc: |
-            -----BEGIN AGE ENCRYPTED FILE-----
-            YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBCUlZ1TER2anNCRHBKQm1v
-            QjhybHFCc1dod1djeWxkRmhBSC9YTW5IV0NJCkM5c3hkYWtLZnJHNVpPYUh4TzBR
-            U3ZaMEdSTVNsenV0RVorTTZMUXdYT3MKLS0tIDV1dWxjbXNtekZaUk9xaVdOYU93
-            UUpVako2MGVobTcvNWRsTWMwZm5ZSVEK1uI5dVSI4vY5hw0oxj21mJYoZB2Jq52z
-            e+RDvcyBFRsS+238UCVi5qDdA8DcnQ2uRiBxKDGC2P3RoVU5TeCfTQ==
-            -----END AGE ENCRYPTED FILE-----
-    lastmodified: "2024-09-18T23:57:27Z"
-    mac: ENC[AES256_GCM,data:88ZnGTkV1xxZO7UuVm5clZrHUMeiqAG++4X4DbCJGwqL+VDagYVhsui1+PzN62h6TgXtARecHON8TXd8z/NF4ekiY+LAcMC3m9x5AzmGYa7Qd5FKht1O6RfRORBDrojj251cqCifDxeGPq3C/X4Zi8Jg4KTSk1lAJoXMsqJQ3+c=,iv:8NnKOlzXD1jRVQ/tgoChEb0YY18Y7VpEiq85YhupTws=,tag:eUbLR66sNqQ2VIQW0/CBwA==,type:str]
-    pgp: []
-    unencrypted_suffix: _unencrypted
-    version: 3.8.1
229
nixos/modules/nixos/services/zfs-nightly-snap/default.nix
Normal file
229
nixos/modules/nixos/services/zfs-nightly-snap/default.nix
Normal file
|
@ -0,0 +1,229 @@
|
||||||
|
{
  config,
  lib,
  pkgs,
  ...
}:

let
  cfg = config.mySystem.services.zfs-nightly-snap;

  # Replaces/creates and mounts a ZFS snapshot
  resticSnapAndMount = pkgs.writeShellApplication {
    name = "zfs-nightly-snap";

    runtimeInputs = with pkgs; [
      busybox # for id, mount, umount, mkdir, grep, echo
      zfs # for zfs
    ];

    text = ''
      # Check if running as root
      if [ "$(id -u)" -ne 0 ]; then
        echo "Error: This script must be run as root."
        exit 1
      fi

      BACKUP_DIRECTORY="${cfg.mountPath}"
      ZFS_DATASET="${cfg.zfsDataset}"
      SNAPSHOT_NAME="${cfg.snapshotName}"

      # functions sourced from: https://github.com/Jip-Hop/zfs-backup-snapshots
      # with some enhancements over the original code to adhere to best practices

      # mounts all zfs filesystems under $ZFS_DATASET
      function mount_dataset() {
        # ensure BACKUP_DIRECTORY exists
        mkdir -p "$BACKUP_DIRECTORY"
        # list all zfs filesystems under $ZFS_DATASET,
        # excluding those whose mountpoint is "legacy" or "none",
        # ordered by shallowest mountpoint first (determined by number of slashes)
        mapfile -t filesystems < <(zfs list "$ZFS_DATASET" -r -H -o name,mountpoint | grep -E "(legacy)$|(none)$" -v | awk '{print gsub("/","/", $2), $1}' | sort -n | cut -d' ' -f2-)

        for fs in "''${filesystems[@]}"; do
          mount_latest_snap "''${fs}" "$BACKUP_DIRECTORY"
        done
        return 0
      }

      # umounts and cleans up the backup directory
      # usage: zfs_backup_cleanup BACKUP_DIRECTORY
      function zfs_backup_cleanup() {
        # get all filesystems mounted within the backup directory
        mapfile -t fs < <(tac /etc/mtab | cut -d " " -f 2 | grep "''${1}")

        # umount said filesystems
        for i in "''${fs[@]}"; do
          echo "Unmounting $i"
          umount "$i"
        done

        # delete empty directories from within the backup directory
        find "''${1}" -type d -empty -delete
      }

      # gets the name of the newest snapshot given a zfs filesystem
      # usage: zfs_latest_snap filesystem
      function zfs_latest_snap() {
        snapshot=$(zfs list -H -t snapshot -o name -S creation -d1 "''${1}" | head -1 | cut -d '@' -f 2)
        if [[ -z $snapshot ]]; then
          # if there's no snapshot then let's ignore it
          # (message goes to stderr so it isn't captured by callers)
          echo "No snapshot exists for ''${1}, it will not be backed up." >&2
          return 1
        fi
        echo "$snapshot"
      }

      # gets the path of a snapshot given a zfs filesystem and a snapshot name
      # usage: zfs_snapshot_mountpoint filesystem snapshot
      function zfs_snapshot_mountpoint() {
        # get mountpoint for filesystem; bail out if the filesystem doesn't exist
        # (checked directly on the assignment, since "$?" checks never run under errexit)
        if ! mountpoint=$(zfs list -H -o mountpoint "''${1}"); then
          return 1
        fi

        # build out path
        path="''${mountpoint}/.zfs/snapshot/''${2}"

        # check to make sure path exists
        if stat "''${path}" &> /dev/null; then
          echo "''${path}"
          return 0
        else
          return 1
        fi
      }

      # mounts the latest snapshot in the backup directory
      # usage: mount_latest_snap filesystem BACKUP_DIRECTORY
      function mount_latest_snap() {
        local mount_point="''${2}"
        local filesystem="''${1}"

        # get name of latest snapshot; if there's no snapshot then let's ignore it
        if ! snapshot=$(zfs_latest_snap "''${filesystem}"); then
          echo "No snapshot exists for ''${filesystem}, it will not be backed up."
          return 1
        fi

        # if the filesystem is not mounted/path doesn't exist then let's ignore it as well
        if ! sourcepath=$(zfs_snapshot_mountpoint "''${filesystem}" "''${snapshot}"); then
          echo "Cannot find snapshot ''${snapshot} for ''${filesystem}, perhaps it's not mounted? It will not be backed up."
          return 1
        fi

        # mountpath may be inside a previously mounted snapshot
        mountpath="$mount_point/''${filesystem}"

        # mount to backup directory using a bind filesystem
        mkdir -p "''${mountpath}"
        echo "mount ''${sourcepath} => ''${mountpath}"
        mount --bind --read-only "''${sourcepath}" "''${mountpath}"
        return 0
      }

      # Unmount and clean up if necessary
      zfs_backup_cleanup "$BACKUP_DIRECTORY"

      # Check if snapshot exists
      echo "Previous snapshot:"
      zfs list -t snapshot | grep "$ZFS_DATASET@$SNAPSHOT_NAME" || true

      # Attempt to destroy existing snapshot
      echo "Attempting to destroy existing snapshot..."
      if zfs list -t snapshot | grep -q "$ZFS_DATASET@$SNAPSHOT_NAME"; then
        if zfs destroy -r "$ZFS_DATASET@$SNAPSHOT_NAME"; then
          echo "Successfully destroyed old snapshot"
        else
          echo "Failed to destroy existing snapshot"
          exit 1
        fi
      else
        echo "No existing snapshot found"
      fi

      # Create new snapshot
      if ! zfs snapshot -r "$ZFS_DATASET@$SNAPSHOT_NAME"; then
        echo "Failed to create snapshot"
        exit 1
      fi

      echo "New snapshot created:"
      zfs list -t snapshot | grep "$ZFS_DATASET@$SNAPSHOT_NAME"

      # Mount the snapshot
      if ! mount_dataset; then
        echo "Failed to mount snapshot"
        exit 1
      fi

      echo "Successfully created and mounted snapshot at $BACKUP_DIRECTORY"
      mount | grep "$BACKUP_DIRECTORY"
    '';
  };
in
{
  options.mySystem.services.zfs-nightly-snap = {
    enable = lib.mkEnableOption "ZFS nightly snapshot service";

    mountPath = lib.mkOption {
      type = lib.types.str;
      description = "Location for the nightly snapshot mount";
      default = "/mnt/nightly_backup";
    };
    zfsDataset = lib.mkOption {
      type = lib.types.str;
      description = "Dataset to snapshot";
      default = "nahar/containers/volumes";
    };
    snapshotName = lib.mkOption {
      type = lib.types.str;
      description = "Name of the nightly snapshot";
      default = "restic_nightly_snap";
    };
    startAt = lib.mkOption {
      type = lib.types.str;
      default = "*-*-* 02:00:00 America/Chicago"; # every day at 2 AM
      description = "When to create and mount the ZFS snapshot. Defaults to 2 AM America/Chicago.";
    };
  };

  config = lib.mkMerge [
    {
      # Warn if backups are disabled and the machine isn't a dev box.
      # Kept outside the mkIf block so it can actually fire when the service is
      # disabled; lib.optional is used because mkIf is not resolved inside list literals.
      warnings = lib.optional (
        !cfg.enable && config.mySystem.purpose != "Development"
      ) "WARNING: ZFS nightly snapshot is disabled for ${config.system.name}!";
    }

    (lib.mkIf cfg.enable {
      # Add the script to the system packages
      environment.systemPackages = [ resticSnapAndMount ];

      systemd = {
        # Timer for the nightly snapshot
        timers.zfs-nightly-snap = {
          wantedBy = [ "timers.target" ];
          timerConfig = {
            OnCalendar = cfg.startAt;
            Persistent = true; # run immediately if we missed the last trigger time
          };
        };
        # Service for the nightly snapshot
        services.zfs-nightly-snap = {
          description = "Create and mount nightly ZFS snapshot";
          serviceConfig = {
            Type = "oneshot";
            ExecStart = lib.getExe resticSnapAndMount;
          };
          requires = [ "zfs.target" ];
          after = [ "zfs.target" ];
        };
      };
    })
  ];
}
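For reference, a minimal sketch of enabling this module from a host configuration; the host itself is hypothetical, but the option names and values are exactly those declared above:

    mySystem.services.zfs-nightly-snap = {
      enable = true;
      zfsDataset = "nahar/containers/volumes"; # the module default
      mountPath = "/mnt/nightly_backup";
      startAt = "*-*-* 02:00:00 America/Chicago";
    };

The startAt value is a systemd OnCalendar expression and can be sanity-checked with systemd-analyze calendar '*-*-* 02:00:00 America/Chicago'.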
153
nixos/modules/nixos/services/zfs-nightly-snap/snap-and-mount.sh
Executable file
@ -0,0 +1,153 @@
#!/usr/bin/env nix-shell
#!nix-shell -I nixpkgs=/etc/nix/inputs/nixpkgs -i bash -p busybox zfs
# shellcheck disable=SC1008

set -e # Exit on error

BACKUP_DIRECTORY="/mnt/restic_nightly_backup"
ZFS_DATASET="nahar/containers/volumes"
SNAPSHOT_NAME="restic_nightly_snap"

# Check if running as root
if [ "$(id -u)" -ne 0 ]; then
  echo "Error: This script must be run as root."
  exit 1
fi

# functions sourced from: https://github.com/Jip-Hop/zfs-backup-snapshots
# with some enhancements over the original code to adhere to best practices

# mounts all zfs filesystems under $ZFS_DATASET
function mount_dataset() {
  # ensure BACKUP_DIRECTORY exists
  mkdir -p "$BACKUP_DIRECTORY"
  # list all zfs filesystems under $ZFS_DATASET,
  # excluding those whose mountpoint is "legacy" or "none",
  # ordered by shallowest mountpoint first (determined by number of slashes)
  mapfile -t filesystems < <(zfs list "$ZFS_DATASET" -r -H -o name,mountpoint | grep -E "(legacy)$|(none)$" -v | awk '{print gsub("/","/", $2), $1}' | sort -n | cut -d' ' -f2-)
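  # For illustration (hypothetical datasets): if nahar/containers/volumes is
  # mounted at /mnt/volumes and nahar/containers/volumes/app at /mnt/volumes/app,
  # the sort places /mnt/volumes first (fewer slashes), so parent filesystems
  # are always bind-mounted before their children.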

  for fs in "${filesystems[@]}"; do
    mount_latest_snap "${fs}" "${BACKUP_DIRECTORY}"
  done
  return 0
}

# umounts and cleans up the backup directory
# usage: zfs_backup_cleanup BACKUP_DIRECTORY
function zfs_backup_cleanup() {
  # get all filesystems mounted within the backup directory
  mapfile -t fs < <(tac /etc/mtab | cut -d " " -f 2 | grep "${1}")

  # umount said filesystems
  for i in "${fs[@]}"; do
    echo "Unmounting $i"
    umount "$i"
  done

  # delete empty directories from within the backup directory
  find "${1}" -type d -empty -delete
}

# gets the name of the newest snapshot given a zfs filesystem
# usage: zfs_latest_snap filesystem
function zfs_latest_snap() {
  snapshot=$(zfs list -H -t snapshot -o name -S creation -d1 "${1}" | head -1 | cut -d '@' -f 2)
  if [[ -z $snapshot ]]; then
    # if there's no snapshot then let's ignore it
    # (message goes to stderr so it isn't captured by callers)
    echo "No snapshot exists for ${1}, it will not be backed up." >&2
    return 1
  fi
  echo "$snapshot"
}

# gets the path of a snapshot given a zfs filesystem and a snapshot name
# usage: zfs_snapshot_mountpoint filesystem snapshot
function zfs_snapshot_mountpoint() {
  # get mountpoint for filesystem; bail out if the filesystem doesn't exist
  # (checked directly on the assignment, since "$?" checks never run under set -e)
  if ! mountpoint=$(zfs list -H -o mountpoint "${1}"); then
    return 1
  fi

  # build out path
  path="${mountpoint}/.zfs/snapshot/${2}"

  # check to make sure path exists
  if stat "${path}" &> /dev/null; then
    echo "${path}"
    return 0
  else
    return 1
  fi
}

# mounts the latest snapshot in the backup directory
# usage: mount_latest_snap filesystem BACKUP_DIRECTORY
function mount_latest_snap() {
  local mount_point="${2}"
  local filesystem="${1}"

  # get name of latest snapshot; if there's no snapshot then let's ignore it
  if ! snapshot=$(zfs_latest_snap "${filesystem}"); then
    echo "No snapshot exists for ${filesystem}, it will not be backed up."
    return 1
  fi

  # if the filesystem is not mounted/path doesn't exist then let's ignore it as well
  if ! sourcepath=$(zfs_snapshot_mountpoint "${filesystem}" "${snapshot}"); then
    echo "Cannot find snapshot ${snapshot} for ${filesystem}, perhaps it's not mounted? It will not be backed up."
    return 1
  fi

  # mountpath may be inside a previously mounted snapshot
  mountpath="${mount_point}/${filesystem}"

  # mount to backup directory using a bind filesystem
  mkdir -p "${mountpath}"
  echo "mount ${sourcepath} => ${mountpath}"
  mount --bind --read-only "${sourcepath}" "${mountpath}"
  return 0
}

# Unmount and clean up if necessary
zfs_backup_cleanup "$BACKUP_DIRECTORY"

# Check if snapshot exists
echo "Previous snapshot:"
zfs list -t snapshot | grep "$ZFS_DATASET@$SNAPSHOT_NAME" || true

# Attempt to destroy existing snapshot
echo "Attempting to destroy existing snapshot..."
if zfs list -t snapshot | grep -q "$ZFS_DATASET@$SNAPSHOT_NAME"; then
  if zfs destroy -r "$ZFS_DATASET@$SNAPSHOT_NAME"; then
    echo "Successfully destroyed old snapshot"
  else
    echo "Failed to destroy existing snapshot"
    exit 1
  fi
else
  echo "No existing snapshot found"
fi

# Create new snapshot
if ! zfs snapshot -r "$ZFS_DATASET@$SNAPSHOT_NAME"; then
  echo "Failed to create snapshot"
  exit 1
fi

echo "New snapshot created:"
zfs list -t snapshot | grep "$ZFS_DATASET@$SNAPSHOT_NAME"

# Mount the snapshot
if ! mount_dataset; then
  echo "Failed to mount snapshot"
  exit 1
fi

echo "Successfully created and mounted snapshot at $BACKUP_DIRECTORY"
mount | grep "$BACKUP_DIRECTORY"
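For an ad-hoc run outside the systemd unit (root required; the paths are the ones defined at the top of the script):

    sudo ./snap-and-mount.sh
    mount | grep /mnt/restic_nightly_backup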
@ -1,4 +1,10 @@
-{ config, lib, pkgs, modulesPath, ... }:
+{
+  config,
+  lib,
+  pkgs,
+  modulesPath,
+  ...
+}:
 
 with lib;
 {
@ -9,11 +15,9 @@ with lib;
   # Not sure at this point a good way to manage globals in one place
   # without mono-repo config.
-  imports =
-    [
-      (modulesPath + "/installer/scan/not-detected.nix") # Generated by nixos-config-generate
-      ./global
-    ];
+  imports = [
+    ./global
+  ];
 
   config = {
     boot.tmp.cleanOnBoot = true;
     mySystem = {
@ -24,10 +28,6 @@ with lib;
       system.packages = [ pkgs.bat ];
       domain = "hsn.dev";
       shell.fish.enable = true;
-
-      # But wont enable plugins globally, leave them for workstations
-      # TODO: Make per device option
-      system.resticBackup.remote.location = "s3:https://x.r2.cloudflarestorage.com/nixos-restic";
     };
   };
   environment.systemPackages = with pkgs; [
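The removed line above is the old per-host restic remote, a Cloudflare R2 bucket addressed through restic's standard S3 backend. Purely for illustration (this command is not part of the commit, and assumes credentials in AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and RESTIC_PASSWORD), such a repository would be listed with:

    restic -r s3:https://x.r2.cloudflarestorage.com/nixos-restic snapshots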
@ -13,15 +13,6 @@
   mySystem = {
     services.openssh.enable = true;
     security.wheelNeedsSudoPassword = false;
-
-    # Restic backups disabled.
-    # TODO: configure storagebox for hetzner backups
-    system.resticBackup = {
-      local.enable = false;
-      local.noWarning = true;
-      remote.enable = false;
-      remote.noWarning = true;
-    };
   };
 
   networking.useDHCP = lib.mkDefault true;
@ -13,15 +13,6 @@
   mySystem = {
     services.openssh.enable = true;
     security.wheelNeedsSudoPassword = false;
-
-    # Restic backups disabled.
-    # TODO: configure storagebox for hetzner backups
-    system.resticBackup = {
-      local.enable = false;
-      local.noWarning = true;
-      remote.enable = false;
-      remote.noWarning = true;
-    };
   };
 
   networking.useDHCP = lib.mkDefault true;