This commit is contained in:
Joseph Hanson 2024-05-10 10:06:16 -05:00
parent 8d2a3484b3
commit 2f184ff05c
Signed by: jahanson
SSH key fingerprint: SHA256:vy6dKBECV522aPAwklFM3ReKAVB086rT3oWwiuiFG7o
85 changed files with 259 additions and 2827 deletions

View file

@ -1,27 +0,0 @@
---
ignore: |
  .direnv/
  .private/
  .vscode/
  **/*.sops.yaml
extends: default
rules:
  truthy:
    allowed-values: ["true", "false", "on"]
  comments:
    min-spaces-from-content: 1
  line-length: disable
  braces:
    min-spaces-inside: 0
    max-spaces-inside: 1
  brackets:
    min-spaces-inside: 0
    max-spaces-inside: 0
  indentation: enable

View file

@ -1,32 +0,0 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "extends": [
    "github>truxnell/renovate-config",
    "github>truxnell/renovate-config:automerge-github-actions",
    "github>truxnell/nix-config//.github/renovate/autoMerge.json5",
  ],
  "gitAuthor": "Trux-Bot <19149206+trux-bot[bot]@users.noreply.github.com>",
  "ignoreTests": "false",
  // TODO remove once out of beta?
  // https://docs.renovatebot.com/modules/manager/nix/
  "nix": {
    "enabled": "true",
  },
  "lockFileMaintenance": {
    "enabled": "true",
    "automerge": "true",
    "schedule": [ "before 4am on Sunday" ],
  },
  "regexManagers": [
    {
      fileMatch: ["^.*\\.nix$", "^.*\\.toml$"],
      matchStrings: [
        'image *= *"(?<depName>.*?):(?<currentValue>.*?)(@(?<currentDigest>sha256:[a-f0-9]+))?"',
      ],
      datasourceTemplate: "docker",
    }
  ],
}

View file

@ -1,44 +0,0 @@
{
  "packageRules": [
    {
      // automerge minor, patch, digest
      "matchDatasources": ['docker'],
      "automerge": "true",
      "automergeType": "branch",
      "schedule": [ "before 4am on Sunday" ],
      "matchUpdateTypes": [ 'minor', 'patch', 'digest'],
      "matchPackageNames": [
        // 'ghcr.io/onedr0p/sonarr',
        // 'ghcr.io/onedr0p/readarr-nightly',
        // 'ghcr.io/onedr0p/radarr',
        // 'ghcr.io/onedr0p/lidarr',
        // 'ghcr.io/onedr0p/prowlarr',
        'ghcr.io/twin/gatus',
        'vaultwarden/server',
      ],
    },
    // automerge patch and digest
    {
      "matchDatasources": ['docker'],
      "automerge": "true",
      "automergeType": "branch",
      "schedule": [ "before 4am on Sunday" ],
      "matchUpdateTypes": [ 'patch', 'digest'],
      "matchPackageNames": [
        "ghcr.io/gethomepage/homepage",
        "garethgeorge/backrest",
      ]
    },
    {
      // automerge all digests
      "matchDatasources": ['docker'],
      "automerge": "true",
      "automergeType": "branch",
      "matchUpdateTypes": [ 'digest'],
    },
  ],
}

.github/settings.yml vendored (76 lines)
View file

@ -1,76 +0,0 @@
---
# These settings are synced to GitHub by https://probot.github.io/apps/settings/
repository:
  # See https://docs.github.com/en/rest/reference/repos#update-a-repository for all available settings.
  # The name of the repository. Changing this will rename the repository
  name: nix-config
  # A short description of the repository that will show up on GitHub
  description: My nix & nixos home setup
  # A URL with more information about the repository
  # homepage: https://example.github.io/
  # A comma-separated list of topics to set on the repository
  topics: nix, nixos
  # Either `true` to make the repository private, or `false` to make it public.
  private: false
  # Either `true` to enable issues for this repository, `false` to disable them.
  has_issues: true
  # Either `true` to enable projects for this repository, or `false` to disable them.
  # If projects are disabled for the organization, passing `true` will cause an API error.
  has_projects: false
  # Either `true` to enable the wiki for this repository, `false` to disable it.
  has_wiki: false
  # Either `true` to enable downloads for this repository, `false` to disable them.
  has_downloads: false
  # Updates the default branch for this repository.
  default_branch: main
  # Either `true` to allow squash-merging pull requests, or `false` to prevent
  # squash-merging.
  allow_squash_merge: true
  # Either `true` to allow merging pull requests with a merge commit, or `false`
  # to prevent merging pull requests with merge commits.
  allow_merge_commit: false
  # Either `true` to allow rebase-merging pull requests, or `false` to prevent
  # rebase-merging.
  allow_rebase_merge: true
  # Either `true` to enable automatic deletion of branches on merge, or `false` to disable
  delete_branch_on_merge: true
  # Either `true` to enable automated security fixes, or `false` to disable
  # automated security fixes.
  enable_automated_security_fixes: false
  # Either `true` to enable vulnerability alerts, or `false` to disable
  # vulnerability alerts.
  enable_vulnerability_alerts: true
# Labels: define labels for Issues and Pull Requests
# labels:
#   - name: bug
#     color: CC0000
#     description: An issue with the system 🐛.
#   - name: feature
#     # If including a `#`, make sure to wrap it with quotes!
#     color: '#336699'
#     description: New functionality.
#   - name: Help Wanted
#     # Provide a new name to rename an existing label
#     new_name: first-timers-only
# TODO branch protection once nailed down.

View file

@ -1,38 +0,0 @@
---
name: build-image
on:
  workflow_dispatch:
    inputs:
      image:
        description: 'Which image to build'
        required: true
        default: 'rpi4'
        options: ['iso', 'rpi4']
jobs:
  build-sd-image:
    name: Build Nixos image
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4.1.1
      - uses: cachix/install-nix-action@v26
        with:
          nix_path: nixpkgs=channel:nixos-23.05
          extra_nix_config: |
            extra-platforms = aarch64-linux
      - name: Check nix.conf
        run: cat /etc/nix/nix.conf
      - name: Register binfmt
        run: |
          docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
      - name: Test binfmt availability
        run: |
          cat /proc/sys/fs/binfmt_misc/qemu-aarch64
        shell: bash
      - name: Build SD Image
        run: |
          nix build .#images.${{ github.event.inputs.image }}
      - uses: actions/upload-artifact@v4
        with:
          name: rpi4.img
          path: ./result/sd-image/*.img*

View file

@ -1,34 +0,0 @@
name: Deploy Vaultwarden to Fly.io
on:
  workflow_dispatch:
  pull_request:
    paths:
      - .github/workflows/deploy-vault.yaml
      - "flyio/vaultwarden/fly.toml"
jobs:
  build-deploy:
    if: ${{ github.ref_name == 'main' }}
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          persist-credentials: false
      - name: setup flyctl
        uses: superfly/flyctl-actions/setup-flyctl@master
      - name: Publish
        run: flyctl deploy --config fly/vaultwarden/fly.toml
        env:
          FLY_ACCESS_TOKEN: ${{ secrets.FLY_ACCESS_TOKEN }}
          FLY_APP: ${{ secrets.FLY_APP_VAULTWARDEN }}
      - name: Push Build Status Notifications
        if: ${{ always() }}
        uses: desiderati/github-action-pushover@v1
        with:
          job-status: ${{ job.status }}
          pushover-api-token: ${{ secrets.PUSHOVER_API_TOKEN }}
          pushover-user-key: ${{ secrets.PUSHOVER_USER_KEY }}

View file

@ -1,126 +0,0 @@
---
name: Pull Request
permissions:
  pull-requests: write
on:
  pull_request:
    paths:
      - .github/workflows/**
      - "**.nix"
      - "flake.lock"
jobs:
  build:
    if: github.event.pull_request.draft == false
    name: "Build ${{ matrix.target }}"
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        include:
          - os: ubuntu-latest
            target: citadel
          - os: ubuntu-latest
            target: rickenbacker
          - os: ubuntu-latest
            target: dns01
          - os: ubuntu-latest
            target: dns02
    steps:
      - name: Create nix mount point
        if: contains(matrix.os, 'ubuntu')
        run: sudo mkdir /nix
      - name: Maximize build space
        uses: easimon/maximize-build-space@v10
        if: contains(matrix.os, 'ubuntu')
        with:
          root-reserve-mb: 512
          swap-size-mb: 1024
          build-mount-path: "/nix"
          remove-dotnet: true
          remove-android: true
          remove-haskell: true
          remove-docker-images: true
          remove-codeql: true
          overprovision-lvm: true
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Install nix
        uses: cachix/install-nix-action@v26
        with:
          extra_nix_config: |
            experimental-features = nix-command flakes
            extra-platforms = aarch64-linux
      - uses: DeterminateSystems/magic-nix-cache-action@main
      - name: Register binfmt
        run: |
          docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
      - name: Garbage collect build dependencies
        run: nix-collect-garbage
      - name: Fetch old system profile
        run: nix build github:truxnell/nix-config#top.${{ matrix.target }} -v --log-format raw --profile ./profile
      - name: Add new system to profile
        run: |
          set -o pipefail
          nix build .#top.${{ matrix.target }} --profile ./profile --show-trace --fallback -v --log-format raw > >(tee stdout.log) 2> >(tee /tmp/nix-build-err.log >&2)
      - name: Output build failure
        if: failure()
        run: |
          drv=$(grep "For full logs, run" /tmp/nix-build-err.log | grep -oE "/nix/store/.*.drv")
          if [ -n $drv ]; then
            nix log $drv
            echo $drv
          fi
          exit 1
      - name: Diff profile
        id: diff
        run: |
          nix profile diff-closures --profile ./profile
          delimiter="$(openssl rand -hex 16)"
          echo "diff<<${delimiter}" >> "${GITHUB_OUTPUT}"
          nix profile diff-closures --profile ./profile | perl -pe 's/\e\[[0-9;]*m(?:\e\[K)?//g' >> "${GITHUB_OUTPUT}"
          echo "${delimiter}" >> "${GITHUB_OUTPUT}"
      - name: Scan for security issues
        id: security
        run: |
          nix run nixpkgs/nixos-unstable#vulnix -- -w https://raw.githubusercontent.com/ckauhaus/nixos-vulnerability-roundup/master/whitelists/nixos-unstable.toml ./profile | tee /tmp/security.txt
          OUTPUT_SECURITY="$(cat /tmp/security.txt)"
          OUTPUT_SECURITY="${OUTPUT_SECURITY//'%'/'%25'}"
          OUTPUT_SECURITY="${OUTPUT_SECURITY//$'\n'/'%0A'}"
          OUTPUT_SECURITY="${OUTPUT_SECURITY//$'\r'/'%0D'}"
          echo "$OUTPUT_SECURITY"
          delimiter="$(openssl rand -hex 16)"
          echo "security<<${delimiter}" >> "${GITHUB_OUTPUT}"
          echo "$OUTPUT_SECURITY" >> "${GITHUB_OUTPUT}"
          echo "${delimiter}" >> "${GITHUB_OUTPUT}"
      - name: Comment report in pr
        uses: marocchino/sticky-pull-request-comment@v2
        if: ${{ !startswith(github.ref, 'dependabot') }}
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          header: ".#top.${{ matrix.target }}"
          message: |
            ### Report for `${{ matrix.target }}`
            <summary> Version changes </summary> <br>
            <pre> ${{ steps.diff.outputs.diff }} </pre>
            <details>
            <summary> Security vulnerability report </summary> <br>
            <pre> ${{ steps.security.outputs.security }} </pre>
            </details>
# Liberated from edeneast's github

View file

@ -1,55 +0,0 @@
---
name: "Docs: Release to GitHub pages"
on:
  workflow_dispatch:
  push:
    branches:
      - main
    paths:
      - ".github/workflows/docs-release.yaml"
      - ".mkdocs.yml"
      - "docs/**"
permissions:
  contents: write
jobs:
  release-docs:
    name: Release documentation
    runs-on: ubuntu-22.04
    concurrency:
      group: ${{ github.workflow }}-${{ github.ref }}
    steps:
      - name: "Generate Short Lived OAuth App Token (ghs_*)"
        uses: actions/create-github-app-token@v1.10.0
        id: app-token
        with:
          app-id: "${{ secrets.TRUXNELL_APP_ID }}"
          private-key: "${{ secrets.TRUXNELL_APP_PRIVATE_KEY }}"
      - name: Checkout main branch
        uses: actions/checkout@v4
        with:
          token: ${{ steps.app-token.outputs.token }}
          fetch-depth: 0
      - uses: actions/setup-python@v5
        with:
          python-version: 3.x
      - name: Install requirements
        run: pip install -r docs/requirements.txt
      - name: Build and publish docs
        run: mkdocs build -f mkdocs.yml
      - name: Deploy
        uses: peaceiris/actions-gh-pages@v4.0.0
        if: ${{ github.ref == 'refs/heads/main' }}
        with:
          github_token: ${{ steps.app-token.outputs.token }}
          publish_dir: ./site
          destination_dir: docs
          user_name: "Trux-Bot[bot]"
          user_email: "Trux-Bot[bot] <19149206+trux-bot[bot]@users.noreply.github.com>"

View file

@ -1,40 +0,0 @@
---
name: Nix Lint
on: [pull_request]
jobs:
  check:
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Install Nix
        uses: cachix/install-nix-action@v26
        with:
          nix_path: nixpkgs=channel:nixos-unstable
      - uses: DeterminateSystems/magic-nix-cache-action@main
      - name: Install Nix Linting and Formatting Tools
        run: nix-env -i statix nixpkgs-fmt -f '<nixpkgs>'
      - name: Run Statix Lint
        run: statix fix
      - name: Run Nix Format
        run: nix fmt
      - name: Nix Flake Checker
        # You may pin to the exact commit or the version.
        # uses: DeterminateSystems/flake-checker-action@4b90f9fc724969ff153fe1803460917c84fe00a3
        uses: DeterminateSystems/flake-checker-action@v5
      - name: Commit
        uses: stefanzweifel/git-auto-commit-action@v5
        with:
          commit_message: Auto lint/format

View file

@ -9,22 +9,15 @@
 # copying one key to each machine
 keys:
-  - &dns01 age1lj5vmr02qkudvv2xedfj5tq8x93gllgpr6tzylwdlt7lud4tfv5qfqsd5u
-  - &dns02 age17edew3aahg3t5nte5g0a505sn96vnj8g8gqse8q06ccrrn2n3uysyshu2c
-  - &citadel age1u4tht685sqg6dkmjyer96r93pl425u6353md6fphpd84jh3jwcusvm7mgk
-  - &rickenbacker age1cp6vegrmqfkuj8nmt2u3z0sur7n0f7e9x9zmdv4zygp8j2pnucpsdkgagc
-  - &shodan age1ekt5xz7u2xgdzgsrffhd9x22n80cn4thxd8zxjy2ey5vq3ca7gnqz25g5r
-  - &daedalus age1jpeh4s553taxkyxhzlshzqjfrtvmmp5lw0hmpgn3mdnmgzku332qe082dl
-  - &durandal age1j2r8mypw44uvqhfs53424h6fu2rkr5m7asl7rl3zn3xzva9m3dcqpa97gw
+  - users:
+      - &jahanson age18kj3xhlvgjeg2awwku3r8d95w360uysu0w5ejghnp4kh8qmtge5qwa2vjp
+  - hosts:
+      - &telperion age1z3vjvkead2h934n3w4m5m7tg4tj5qlzagsq6ly84h3tcu7x4ldsqd3s5fg
 creation_rules:
   - path_regex: .*\.sops\.yaml$
     key_groups:
       - age:
-          - *dns01
-          - *dns02
-          - *citadel
-          - *rickenbacker
-          - *shodan
-          - *daedalus
-          - *durandal
+          - *jahanson
+          - *telperion

View file

@ -1,59 +1,35 @@
-# Truxnell's homelab
+# jahanson's homelab
+[![NixOS](https://img.shields.io/badge/NIXOS-5277C3.svg?style=for-the-badge&logo=NixOS&logoColor=white)](https://nixos.org)
-[![NixOS](https://img.shields.io/badge/NixOS-23.11-blue?style=for-the-badge&logo=nixos&logoColor=white)](https://nixos.org)
-[![MIT License](https://img.shields.io/github/license/truxnell/nix-config?style=for-the-badge)](https://github.com/truxnell/nix-config/blob/ci/LICENSE)
-[![renovate](https://img.shields.io/badge/renovate-enabled-%231A1F6C?logo=renovatebot)](https://developer.mend.io/github/truxnell/nix-config)
-[![built with garnix](https://img.shields.io/endpoint.svg?url=https%3A%2F%2Fgarnix.io%2Fapi%2Fbadges%2Ftruxnell%2Fnix-config%3Fbranch%3Dmain)](https://garnix.io)
-![Code Comprehension](https://img.shields.io/badge/Code%20comprehension-26%25-red)
-Leveraging nix, nix-os and other funny magic man words to apply machine and home configurations
 [Repository Documentation](https://truxnell.github.io/nix-config/)
-## Background
-Having used a variety of infracture as code solutions - and having found them lacking in some areas, it is time to give nix a go.
-Frustrations with other methods tend to be bitrot and config drift - very annoying to want to do a quick disaster recovery and find your have different versions of modules/utilities, breaking changes in code you didnt catch, etc.
 ## Getting started
 To Install
-```
+```sh
-# nixos-rebuild switch --flake github:truxnell/nix-config#HOST
+nixos-rebuild switch --flake github:jahanson/nix-config-tn#HOST
 ```
 ## Goals
 - [ ] Learn nix
-- [ ] Mostly reproduce features from my existing homelab
+- [ ] Services I want to separate from my kubernetes cluster I will use Nix.
-- [ ] Replace existing ubuntu-based 'NAS'
+- [ ] Approval-based update automation for flakes.
 - [ ] Expand usage to other shell environments such as WSL, etc
-- [ ] handle secrets - decide between sweet and simple SOPS or re-use my doppler setup.
 - [ ] keep it simple, use trusted boring tools
 ## TODO
-- [ ] Github Actions update fly.io instances (Bitwarden)
+- [ ] Forgejo Actions
 - [ ] Bring over hosts
-- [x] DNS01 Raspi4
+- [ ] git.hsn.dev
-- [x] DNS02 Raspi4
+- [ ] Telperion (network services)
-- [x] NAS
+- [ ] Gandalf (NixNAS)
-- [x] Latop
+- [ ] Thinkpad T470
-- [x] Gaming desktop
-- [ ] WSL
-- [ ] JJY emulator Raspi4
-- [x] Documentation!
-- [x] ssh_config build from computers?
-- [x] Modularise host to allow vm builds and hw builds
-- [x] Add license
-- [x] Add taskfiles
 ## Checklist
-### Adding new node
+### Adding a new node
 - Ensure secrets are grabbed from note and all sops re-encrypte with task sops:re-encrypt
 - Add to relevant github action workflows
@ -86,7 +62,7 @@ nix eval .#nixosConfigurations.rickenbacker.config.mySystem.security.wheelNeedsP
 And browsing whats at a certain level in options - or just use [nix-inspect](https://github.com/bluskript/nix-inspect) TUI
 ```bash
-nix eval .#nixosConfigurations.rickenbacker.config.home-manager.users.truxnell --apply builtins.attrNames --json
+nix eval .#nixosConfigurations.rickenbacker.config.home-manager.users.jahanson --apply builtins.attrNames --json
 ```
 Quickly run a flake to see what the next error message is as you hack.

View file

@ -1,8 +0,0 @@
*[CI]: Continuous Integration
*[PR]: Pull Request
*[HASS]: Home-assistant
*[k8s]: Kubernetes
*[YAML]: Yet Another Markup Language
*[JSON]: JavaScript Object Notation
*[ZFS]: Originally 'Zettabyte File System', a COW filesystem.
*[COW]: Copy on Write

9 binary image files (documentation screenshots/assets, 6.1 KiB to 427 KiB) removed; binary files not shown.

View file

@ -1,19 +0,0 @@
👋 Welcome to my NixOS home and homelab configuration. This monorepo is my personal :simple-nixos: nix/nixos setup for all my devices, specifically my homelab.
This is the end result of a recovering :simple-kubernetes: k8s addict who no longer enjoyed the time and effort I **personally** found it took to run k8s at home.
## Why?
Having needed a break from hobbies for some health-related reasons, I found coming back to an unpatched, unattended cluster a chore. Then a cheap SSD in my custom VyOS router blew, leading me to put my Unifi Dream Machine router back in, which broke the custom DNS I was running for my cluster, which in turn caused it issues.
While fixing the DNS issue, a basic software upgrade of the custom OS I was running k8s on broke my cluster for the 6th time running, coupled with an older version of the script tool I used to manage its machine config yaml, which ended up leading to my 6th k8s disaster recovery :octicons-info-16:{ title="No I don't want to talk about it" }.
Looking at my boring :simple-ubuntu: Ubuntu ZFS NAS which just ran and ran and ran without needing TLC, and remembering the old days of Ubuntu + Docker Compose being hands-off :octicons-info-16:{ title="Too much hands off really as I auto-updated everything, but I digress" }, I dove into nix, with the idea of getting back to basics of boring, proven tools, with the power of nix's declarative system.
## Goals
One of my goals is to bring what I learnt running k8s at home with some of the best homelabbers into the nix world, and see just how many of those practices I can apply to a nix setup, while focusing on having a solid, reliable setup that I can leave largely unattended for months without issues cropping up.
The goal of this doc is for me to slow down a bit and jot down how and why I am doing what I'm doing in a module, and cover how I have approached the facets of homelabbing, so **YOU** can understand, steal with pride from my code, and hopefully(?) learn a thing or two.
To _teach me_ a thing or two, contact me or raise an Issue. PRs may or may not be taken as a personal attack - this is my home setup after all.

View file

@ -1,182 +0,0 @@
## Getting ISO/Image
Getting nix onto a system can be done in different ways, depending on how much manual work you plan to do when bootstrapping. Below are a few ways I've used - obviously there are multiple methods and this isn't a comprehensive list.
### Default ISO (2-step)
You can download the **minimal ISO** from [NixOS.org](https://nixos.org/download/). This gets you a basic installer running on tmpfs, which you can then open up for SSH, partition drives, `nixos-install`, and then reboot into a basic system, ready for a second install of whatever config/flake you are using.
### Custom ISO (1-step)
An alternative is to build a _custom ISO_ or image with a number of extra tools, an ssh key pre-installed, etc. If you image a drive with this you get a bootable nixos install immediately, which I find useful for Raspberry Pis.
I have started down this path with the below code:
!!! note
The below nix for images is Work In Progress!
**[ISO Image (x86_64)](https://github.com/truxnell/nix-config/blob/main/nixos/hosts/images/cd-dvd/default.nix)**
**[SD Image (aarch64)](https://github.com/truxnell/nix-config/blob/main/nixos/hosts/images/sd-image/default.nix)**
From here, you can build them with `nix build`. I have mine in my flake so I can run `nix build .#images.rpi4`. Even better, you can also set up a GitHub Action to build these images for you and put them in a release for you to download.
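A rough sketch of how such a flake output can be wired up - the channel pin, module paths and the `images.rpi4` attribute below are illustrative assumptions, not necessarily how this repo lays it out:
```nix
{
  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.11";

  outputs = { self, nixpkgs, ... }: {
    # A NixOS system built on top of the upstream sd-image module.
    nixosConfigurations.rpi4-image = nixpkgs.lib.nixosSystem {
      system = "aarch64-linux";
      modules = [
        "${nixpkgs}/nixos/modules/installer/sd-card/sd-image-aarch64.nix"
        ./nixos/hosts/images/sd-image
      ];
    };

    # `nix build .#images.rpi4` then resolves to the sd-image derivation.
    images.rpi4 = self.nixosConfigurations.rpi4-image.config.system.build.sdImage;
  };
}
```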
### Graphical install
A graphical install is available, but this isn't my cup of tea for a system so closely linked to declarative setup from configuration files. There are plenty of guides walking through the graphical installer.
### Alternate tools (Automated)
You can look at tools like [disko](https://github.com/nix-community/disko) for declarative disk formatting and [nixos-anywhere](https://github.com/nix-community/nixos-anywhere) for installing NixOS, well, anywhere, using features like `kexec` to bootstrap nix on any accessible linux system.
This closes the loop on the manual steps you need to set up a NixOS system - I've chosen to avoid these as I don't want additional complexity in my installs and I'm OK with a few manual steps for the rare occasions I set up new devices.
## Booting into raw system
### Linux x86_64 systems
I'm a fan of [Ventoy.net](https://ventoy.net/en/index.html) for getting ISOs onto systems, as it allows you to load ISO files onto a USB, and from any system you can boot it up into an ISO selector - this way you can have one USB with many ISOs, including rescue-style ISOs, ready to go.
### Virtual Machines
I follow the same approach as x86_64 above for testing Nix in VMs.
General settings for a virtual machine are below - I stick with larger drives to ensure the nix store & compiling don't bite me. 16GB is probably OK too.
**VM Settings:**
**ISO:** nixos-minimal
**Hard Drive:** 32GB
**RAM:** 2GB
**EFI:** Enable
!!! warning
Ensure you have EFI enabled or ensure you align your configuration.nix to your VM's BIOS setup, else you will have issues installing & booting.
For VMs, I then expose port 22 to allow SSH from the local machine into the VM - mapping host port 3022 to guest port 22.
### aarch64 (RasPi)
I can't comment on Macs as I'm not an Apple guy, so this section is for my Raspberry Pis. I build my custom image and flash it to an SD card, then boot it - this way I already have ssh open and I can just ssh into it headless. If you use a minimal ISO you will need a monitor/keyboard attached, as SSH is not enabled in the default ISO until a root password is set.
### Other methods
You could also look at PXE/iPXE/network boot across your entire network so devices with no OS 'drop' into a NixOS live installer.
## Initial install from live image
### Opening SSH
If your live image isn't customised with a root password or ssh key, SSH will be closed until you set one.
```sh
sudo su
passwd
```
`sshd` is now running, so you can ssh into the VM remotely for the rest of the setup if you prefer.
### Partitioning drives
### Standard
Next step is to partition drives. This will vary per host, so `lsblk`, viewing disks in `/dev/` and `/dev/disk/by-id`, `parted`, and at times your eyeballs will be useful to identify which drive to partition.
Below is a fairly standard setup with a 512MB boot partition, 8GB swap and the rest ext4. If you have the RAM, a swap partition is unlikely to be needed.
```sh
# Partitioning
parted /dev/sda -- mklabel gpt
parted /dev/sda -- mkpart root ext4 512MB -8GB
parted /dev/sda -- mkpart swap linux-swap -8GB 100%
parted /dev/sda -- mkpart ESP fat32 1MB 512MB
parted /dev/sda -- set 3 esp on
```
And then a little light formatting.
```sh
# Formatting
mkfs.ext4 -L nixos /dev/sda1
mkswap -L swap /dev/sda2 # if swap setup
mkfs.fat -F 32 -n boot /dev/sda3
```
We will want to mount these ready for the config generation (which will look at the current mount setup and output config for it)
```sh
# Mounting disks for installation
mount /dev/disk/by-label/nixos /mnt
mkdir -p /mnt/boot
mount /dev/disk/by-label/boot /mnt/boot
swapon /dev/sda2 # if swap setup
```
### Impermanence (ZFS)
TBC
### Generating initial nixos config
If this is a fresh machine you've never worked with & **don't have a config ready to push to it**, you'll need to generate a config to get started.
The below will generate a config based on the current setup of your machine, and output it to the /mnt folder.
```
# Generating default configuration
nixos-generate-config --root /mnt
```
This will output `configuration.nix` and `hardware-configuration.nix`. `configuration.nix` contains a boilerplate with some basics to create a bootable system, mainly importing the hardware config.
`hardware-configuration.nix` contains the nix code to set up the kernel on boot with an expected list of modules based on your system's capabilities, as well as the nix to mount the drives as currently set up.
As I gitops my files, I then copy the hardware config to my machine, and copy across a bootstrap configuration file with some basics added for install.
```sh
scp -P 3022 nixos/hosts/bootstrap/configuration.nix root@127.0.0.1:/mnt/etc/nixos/configuration.nix
scp -P 3022 root@127.0.0.1:/mnt/etc/nixos/hardware-configuration.nix nixos/hosts/nixosvm/hardware-configuration.nix
```
### Installing nix
```sh
nixos-install
reboot
# after machine has rebooted
nixos-rebuild switch
```
Set the password for the user that was created.
Might need to use su?
```sh
passwd truxnell
```
Also grab the ssh keys and re-encrypt sops
```sh
cat /etc/ssh/ssh_host_ed25519_key.pub | ssh-to-age
```
then run task
Log in as user and copy the nix git repo, OR for remote machines/servers just `nixos-install --impure --flake github:truxnell/nix-config#<MACHINE_ID>`
```sh
mkdir .local
cd .local
git clone https://github.com/truxnell/nix-config.git
cd nix-config
```
Apply config to bootstrapped device
First time around, you MUST apply with `<machinename>` set to the name of a host in ./hosts/
This is because `.. --flake .` looks for a `nixosConfigurations` key matching the machine's hostname.
The bootstrap machine will be called 'nixos-bootstrap', so the flake by default would resolve `nixosConfigurations.nixos-bootstrap`.
Subsequent rebuilds can be called with the default command, as after the first build the machine's hostname will have been changed to the desired machine.
```sh
nixos-rebuild switch --flake .#<machinename>
```
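For context, this is roughly the flake shape that `--flake .#<machinename>` resolves against - the host names and module paths below are placeholders, not necessarily this repo's layout:
```nix
# flake.nix (sketch): `nixos-rebuild switch --flake .#daedalus` resolves to
# outputs.nixosConfigurations.daedalus; a bare `--flake .` uses the running
# machine's hostname as the attribute name, hence the note above.
{
  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.11";

  outputs = { nixpkgs, ... }: {
    nixosConfigurations = {
      nixos-bootstrap = nixpkgs.lib.nixosSystem {
        system = "x86_64-linux";
        modules = [ ./nixos/hosts/bootstrap/configuration.nix ];
      };
      daedalus = nixpkgs.lib.nixosSystem {
        system = "x86_64-linux";
        modules = [ ./nixos/hosts/daedalus/configuration.nix ];
      };
    };
  };
}
```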

View file

@ -1,136 +0,0 @@
# Backups
Nightly backups are facilitated by NixOS's [restic](https://search.nixos.org/options?channel=23.11&from=0&size=50&sort=relevance&type=packages&query=services.restic.) module and a helper module I've written.
This does a nightly ZFS snapshot, from which apps and other mutable data are backed up with restic to both a local folder on my NAS and to Cloudflare R2 :octicons-info-16:{ title="R2 mainly due to the cheap cost and low egress fees" }. Backing up from a ZFS snapshot ensures that the restic backup is consistent, as backing up files in use (especially a sqlite database) will cause corruption. Here, all restic jobs back up from the same 02:00 snapshot, regardless of when they run that night.
Another benefit of this approach is that it is service agnostic - containers, nixos services, qemu, whatever - they all have files in the same place on the filesystem (in the persistent folder), so they can all be backed up in the same fashion.
The alternative is to shut down services during backup (which could be facilitated with the restic backup pre/post scripts), but ZFS snapshots are a godsend in this area, and I'm already running them for impermanence.
!!! info "Backing up without snapshots/shutdowns?"
This is a pattern I see a bit too often - if you are backing up files raw without stopping your service beforehand, you might want to check to ensure your backups aren't corrupted.
The timeline then is:
| time | activity |
| ------------- | -------------------------------------------------------------------------------------------------------------------------------- |
| 02.00 | ZFS deletes prior snapshot and creates new one, to `rpool/safe/persist@restic_nightly_snap` |
| 02.05 - 04.05 | Restic backs up from new snapshot's hidden read-only mount `.zfs` with random delays per-service - to local and remote locations |
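A minimal sketch of how the nightly snapshot unit in the timeline can be declared - the dataset and unit names follow the docs above, but the actual module in this repo may differ:
```nix
{ pkgs, ... }:
{
  # Recreate rpool/safe/persist@restic_nightly_snap at 02:00 each night,
  # so every restic job later reads from the same frozen view of the data.
  systemd.services.restic_nightly_snapshot = {
    description = "Nightly ZFS snapshot for Restic";
    serviceConfig.Type = "oneshot";
    script = ''
      ${pkgs.zfs}/bin/zfs destroy rpool/safe/persist@restic_nightly_snap || true
      ${pkgs.zfs}/bin/zfs snapshot rpool/safe/persist@restic_nightly_snap
    '';
  };
  systemd.timers.restic_nightly_snapshot = {
    wantedBy = [ "timers.target" ];
    timerConfig.OnCalendar = "02:00";
  };
}
```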
## Automatic Backups
I have added a sops secret for both my local and remote servers in my restic module :simple-github: [/nixos/modules/nixos/services/restic/](https://github.com/truxnell/nix-config/blob/main/nixos/modules/nixos/services/restic/default.nix). This provides the restic password and 'AWS' credentials for the S3-compatible R2 bucket.
Backups are created per-service in each service's module. This is largely done with a `lib` helper I've written, which creates both the relevant local and remote restic backup entries in my nixosConfiguration.
:simple-github: [nixos/modules/nixos/lib.nix](https://github.com/truxnell/nix-config/blob/main/nixos/modules/nixos/lib.nix)
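Roughly, the helper expands to per-service `services.restic.backups` entries along these lines - the repository paths, secret names and timer values here are illustrative assumptions, not copied from the module:
```nix
{ config, ... }:
{
  services.restic.backups = {
    # Local copy on the NAS.
    prowlarr-local = {
      paths = [ "/persist/.zfs/snapshot/restic_nightly_snap/containers/prowlarr" ];
      repository = "/nas/backups/restic/prowlarr";
      passwordFile = config.sops.secrets."services/restic/password".path;
      timerConfig = {
        OnCalendar = "02:05";
        RandomizedDelaySec = "2h"; # spread jobs out after the 02:00 snapshot
      };
    };
    # Remote copy to an S3-compatible bucket (Cloudflare R2).
    prowlarr-remote = {
      paths = [ "/persist/.zfs/snapshot/restic_nightly_snap/containers/prowlarr" ];
      repository = "s3:https://<account-id>.r2.cloudflarestorage.com/restic/prowlarr";
      passwordFile = config.sops.secrets."services/restic/password".path;
      environmentFile = config.sops.secrets."services/restic/env".path;
      timerConfig = {
        OnCalendar = "02:05";
        RandomizedDelaySec = "2h";
      };
    };
  };
}
```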
!!! question "Why not backup the entire persist in one hit?"
Possibly a holdover from my k8s days, but it's incredibly useful to be able to restore per-service, especially if you just want to move an app around or restore one app. You can always restore multiple repos with a script/taskfile.
NixOS will create a service + timer for each job - below shows the output for a prowlarr local/remote backup.
```bash
# Confirming snapshot taken overnight - we can see 2AM
truxnell@daedalus ~> systemctl status restic_nightly_snapshot.service
○ restic_nightly_snapshot.service - Nightly ZFS snapshot for Restic
Loaded: loaded (/etc/systemd/system/restic_nightly_snapshot.service; linked; preset: enabled)
Active: inactive (dead) since Wed 2024-04-17 02:00:02 AEST; 5h 34min ago
Duration: 61ms
TriggeredBy: ● restic_nightly_snapshot.timer
Process: 606080 ExecStart=/nix/store/vd0pr3la91pi0qhmcn7c80rwrn7jkpx9-unit-script-restic_nightly_snapshot-start/bin/restic_nightly_snapshot-start (code=exited, status=0/SUCCESS)
Main PID: 606080 (code=exited, status=0/SUCCESS)
IP: 0B in, 0B out
CPU: 21ms
# confirming local snapshot occurred - we can see 05:05AM
truxnell@daedalus ~ [1]> sudo restic-prowlarr-local snapshots
repository 9d9bf357 opened (version 2, compression level auto)
ID Time Host Tags Paths
---------------------------------------------------------------------------------------------------------------------
293dad23 2024-04-15 19:24:37 daedalus /persist/.zfs/snapshot/restic_nightly_snap/containers/prowlarr
24938fe8 2024-04-16 12:42:50 daedalus /persist/.zfs/snapshot/restic_nightly_snap/containers/prowlarr
442d4de3 2024-04-17 05:05:04 daedalus /persist/.zfs/snapshot/restic_nightly_snap/containers/prowlarr
---------------------------------------------------------------------------------------------------------------------
3 snapshots
# confirming remote snapshot occurred - we can see 4:52AM
truxnell@daedalus ~> sudo restic-prowlarr-remote snapshots
repository 30b7eef0 opened (version 2, compression level auto)
ID Time Host Tags Paths
---------------------------------------------------------------------------------------------------------------------
e7d933c4 2024-04-15 22:07:09 daedalus /persist/.zfs/snapshot/restic_nightly_snap/containers/prowlarr
aa605c6b 2024-04-16 02:39:47 daedalus /persist/.zfs/snapshot/restic_nightly_snap/containers/prowlarr
68f91a20 2024-04-17 04:52:59 daedalus /persist/.zfs/snapshot/restic_nightly_snap/containers/prowlarr
---------------------------------------------------------------------------------------------------------------------
3 snapshots
```
NixOS (as of 23.05, IIRC) now provides shims for easy access to the restic commands, with the same env vars mounted as the service uses.
```bash
truxnell@daedalus ~ [1]> sudo restic-prowlarr-local snapshots
repository 9d9bf357 opened (version 2, compression level auto)
ID Time Host Tags Paths
---------------------------------------------------------------------------------------------------------------------
293dad23 2024-04-15 19:24:37 daedalus /persist/.zfs/snapshot/restic_nightly_snap/containers/prowlarr
24938fe8 2024-04-16 12:42:50 daedalus /persist/.zfs/snapshot/restic_nightly_snap/containers/prowlarr
---------------------------------------------------------------------------------------------------------------------
2 snapshots
```
## Manually backing up
They are a systemd timer/service pair, so you can query or trigger a manual run with `systemctl start restic-backups-<service>-<destination>`. Local and remote work and function exactly the same; querying remote is just a fraction slower to return information.
```bash
truxnell@daedalus ~ > sudo systemctl start restic-backups-prowlarr-local.service
< no output >
truxnell@daedalus ~ [1]> sudo restic-prowlarr-local snapshots
repository 9d9bf357 opened (version 2, compression level auto)
ID Time Host Tags Paths
---------------------------------------------------------------------------------------------------------------------
293dad23 2024-04-15 19:24:37 daedalus /persist/.zfs/snapshot/restic_nightly_snap/containers/prowlarr
24938fe8 2024-04-16 12:42:50 daedalus /persist/.zfs/snapshot/restic_nightly_snap/containers/prowlarr
---------------------------------------------------------------------------------------------------------------------
2 snapshots
truxnell@daedalus ~> date
Tue Apr 16 12:43:20 AEST 2024
truxnell@daedalus ~>
```
## Restoring a backup
Testing a restore (you would use `--target /` for a real restore).
You would just have to pause the service, run the restore, then restart the service.
```bash
truxnell@daedalus ~ [1]> sudo restic-lidarr-local restore --target /tmp/lidarr/ latest
repository a2847581 opened (version 2, compression level auto)
[0:00] 100.00% 2 / 2 index files loaded
restoring <Snapshot b96f4b94 of [/persist/nixos/lidarr] at 2024-04-14 04:19:41.533770692 +1000 AEST by root@daedalus> to /tmp/lidarr/
Summary: Restored 52581 files/dirs (11.025 GiB) in 1:37
```
## Failed backup notifications
Failed backup notifications are baked in, thanks to the global Pushover notification on SystemD unit failure. No config necessary.
Here I tested it by giving the systemd unit file an incorrect path.
<figure markdown="span">
![Screenshot of a pushover notification of a failed backup](../includes/assets/pushover-failed-backup.png)
<figcaption>A deliberately failed backup to test notifications, hopefully I don't see a real one.</figcaption>
</figure>
## Disabled backup warnings
Using [module warnings](https://nlewo.github.io/nixos-manual-sphinx/development/assertions.xml.html), I have also put warnings into my NixOS modules for when I have disabled backups on a host _that isn't_ a development machine, just in case I do this or mix up flags on hosts. Roll your eyes, I will probably do it.
This will pop up when I do a dry run/deployment - but not abort the build.
<figure markdown="span">
![Screenshoft of nixos warning of disabled backups](../includes/assets/no-backup-warning.png)
<figcaption>It is eye catching thankfully</figcaption>
</figure>

View file

@ -1,122 +0,0 @@
# Software updates
It's crucial to update software regularly - but a homelab isn't a Google Play store you forget about and let do its thing. How do you update your software stack regularly without breaking things?
## Continuous integration
Continuous integration (CI) is running using :simple-githubactions: [Github Actions](https://github.com/features/actions) and [Garnix](https://Garnix.io). I have enabled branch protection rules to ensure all my devices successfully build before a PR is allowed to be pushed to main. This ensures I have a level of testing/confidence that an update of a device from the main branch will not break anything.
<figure markdown="span">
![Screenshot of passed CI checks on GitHub Repository](../includes/assets/ci-checks.png)
<figcaption>Lovely sea of green passed checks</figcaption>
</figure>
## Binary Caching
Binary caching is done for me by [Garnix](https://Garnix.io), which is an amazing tool. I can then add it as a [substituter](https://wiki.nixos.org/wiki/Binary_Cache#Binary_cache_hint_in_Flakes). These builds run on each push to _any_ branch and the results are cached for me. Even better, I can hook into them as above for CI purposes.
No code to show here, you add it as an app to your github repo and it 'Just Works :tm:'
```nix
# Substitutions
substituters = [ "https://cache.garnix.io" ];
trusted-public-keys = [
"nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs="
];
```
<figure markdown="span">
![Screenshot of Garnix Cache build tests passing](../includes/assets/ci-checks-garnix.png)
<figcaption>Lovely sea of green passed checks</figcaption>
</figure>
## Flake updates
Github repo updates are provided by :simple-renovatebot: [Renovate](https://www.mend.io/renovate/) by [Mend](https://mend.io). These are auto-merged on a weekly schedule after passing CI. The settings can be found at :simple-github: [/main/.github/renovate.json5](https://github.com/truxnell/nix-config/blob/main/.github/renovate.json5)
The primary CI is a Garnix build, which is already building and caching all my systems. Knowing all of the systems have built and cached goes a huge way toward ensuring main is a stable branch.
## Docker container updates
Container updates are provided by :simple-renovatebot: [Renovate](https://www.mend.io/renovate/) by [Mend](https://mend.io). These will either be manually merged after I have checked the upstream project's notes for breaking changes _or_ auto-merged based on settings I have in :simple-github: [/.github/renovate/autoMerge.json5](https://github.com/truxnell/nix-config/blob/dev2/.github/renovate/autoMerge.json5).
!!! info "Semantic Versioning summary"
Semantic Versioning is a format of MAJOR.MINOR.PATCH:<br>
MAJOR version when you make incompatible API changes (e.g. 1.7.8 -> 2.0.0)<br>
MINOR version when you add functionality in a backward compatible manner (e.g. 1.7.8 -> 1.8.0)<br>
PATCH version when you make backward compatible bug fixes (e.g. 1.7.8 -> 1.7.9)<br>
The auto-merge file allows me to define a pattern of which packages I want to auto-merge based on the upgrade type Renovate is suggesting. As many packages adhere to [Semantic Versioning](https://semver.org/ "A standard for version numbers to indicate type of upgrade"), I can determine how I 'feel' about the project and decide to auto-merge specific tags. So for example, Sonarr has been reliable for me, so I am OK merging all digest, patch and minor updates. I will always review a major update, as it is likely to contain a breaking change.
!!! warning "Respect pre-1.0.0 software!"
Semantic Versioning also specifies that all software before 1.0.0 may have a breaking change **AT ANY TIME**. Auto update pre 1.0 software at your own risk!
The rationale here is twofold. One is obvious - the entire point of doing Nix is reproducibility - what is the point of having flakes and SHA tags to pin exact versions if everything then updates itself blindly?
Also, I don't want a trillion PRs sitting in my github repo waiting, but I also will not blindly update everything. There is **a balance** between updating for security/patching purposes and avoiding breaking changes. I know it's popular to use the `:latest` tag and an auto-update service like [watchtower](https://github.com/containrrr/watchtower) - trust me, this is a bad idea.
<figure markdown="span">
![Alt text](../includes/assets/home-cluster-pr.png)
<figcaption>I only glanced away from my old homelab for a few months...</figcaption>
</figure>
!!! info "Automatically updating **all versions** of containers will break things eventually!"
This is simply because projects from time to time will release breaking changes - totally different database schemas, overhaul config, replace entire parts of their software stack etc. If you let your service update totally automatically without checking for these you will wake up to a completely broken service like I did many, many years ago when Seafile did a major upgrade.
Container updates are provided by a custom regex that matches my format for defining images in my nix modules.
```yaml
"regexManagers": [
{
fileMatch: ["^.*\\.nix$"],
matchStrings: [
'image *= *"(?<depName>.*?):(?<currentValue>.*?)(@(?<currentDigest>sha256:[a-f0-9]+))?";',
],
datasourceTemplate: "docker",
}
],
```
And then I can pick and choose what level (if any) I want for container software. The below gives me brackets I can put containers in to enable auto-merge, depending on how much I trust the container maintainer.
```yaml
"packageRules": [
{
// auto update up to major
"matchDatasources": ['docker'],
"automerge": "true",
"automergeType": "branch",
"matchUpdateTypes": [ 'minor', 'patch', 'digest'],
"matchPackageNames": [
'ghcr.io/onedr0p/sonarr',
'ghcr.io/onedr0p/readarr',
'ghcr.io/onedr0p/radarr',
'ghcr.io/onedr0p/lidarr',
'ghcr.io/onedr0p/prowlarr'
'ghcr.io/twin/gatus',
]
},
// auto update up to minor
{
"matchDatasources": ['docker'],
"automerge": "true",
"automergeType": "branch",
"matchUpdateTypes": [ 'patch', 'digest'],
"matchPackageNames": [
"ghcr.io/gethomepage/homepage",
]
}
]
```
Which results in automated PR's being raised - and **possibly** automatically merged into main if CI passes.
<figure markdown="span">
![Alt text](../includes/assets/renovate-pr.png)
<figcaption>Thank you RenovateBot!</figcaption>
</figure>

View file

@ -1,89 +0,0 @@
# SystemD pushover notifications
Keeping with the goal of simple, I put together a `curl` script that can send me a pushover alert. I originally tied this to individual backups, until I realised how powerful it would be to just have it tied to every SystemD service globally.
This way, I never need to worry about or consider _what_ services are being created/destroyed, or repeat myself _ad nauseam_.
!!! question "Why not Prometheus?"
I ran Prometheus/AlertManager for many years and, well, it can be easy to get TOO many notifications depending on your alerts, or to have issues with the big complex beast itself, or to have alerts that trigger/reset/trigger (i.e. HDD temps).
This gives me native, simple notifications I can rely on using basic tools - one of my design principles.
Immediately I picked up with little effort:
- Pod ~~crashloop~~ failed after too many quick restarts
- Native service failure
- Backup failures
- AutoUpdate failure
- etc
<figure markdown="span">
![Screenshot of Cockpit web ui showing various pushover notification units](../includes/assets/cockpit-systemd-notifications.png)
<figcaption>NixOS SystemD built-in notifications for all occasions</figcaption>
</figure>
## Adding to all services
This is accomplished in :simple-github:[/nixos/modules/nixos/system/pushover](https://github.com/truxnell/nix-config/blob/main/nixos/modules/nixos/system/pushover/default.nix), with a systemd service `notify-pushover@`.
This can then be called by other services, which I set up by adding into my options:
```nix
options.systemd.services = mkOption {
  type = with types; attrsOf (
    submodule {
      config.onFailure = [ "notify-pushover@%n.service" ];
    }
  );
};
```
This adds "notify-pushover@%n.service" to every systemd unit NixOS generates, where the [systemd specifiers](https://www.freedesktop.org/software/systemd/man/latest/systemd.unit.html#Specifiers) are injected with `scriptArgs`, and the simple bash script can refer to them as `$1` etc.
```nix
systemd.services."notify-pushover@" = {
enable = true;
onFailure = lib.mkForce [ ]; # cant refer to itself on failure (1)
description = "Notify on failed unit %i";
serviceConfig = {
Type = "oneshot";
# User = config.users.users.truxnell.name;
EnvironmentFile = config.sops.secrets."services/pushover/env".path; # (2)
};
# Script calls pushover with some deets.
# Here im using the systemd specifier %i passed into the script,
# which I can reference with bash $1.
scriptArgs = "%i %H"; # (3)
# (4)
script = ''
${pkgs.curl}/bin/curl --fail -s -o /dev/null \
--form-string "token=$PUSHOVER_API_KEY" \
--form-string "user=$PUSHOVER_USER_KEY" \
--form-string "priority=1" \
--form-string "html=1" \
--form-string "timestamp=$(date +%s)" \
--form-string "url=https://$2:9090/system/services#/$1" \
--form-string "url_title=View in Cockpit" \
--form-string "title=Unit failure: '$1' on $2" \
--form-string "message=<b>$1</b> has failed on <b>$2</b><br><u>Journal tail:</u><br><br><i>$(journalctl -u $1 -n 10 -o cat)</i>" \
https://api.pushover.net/1/messages.json 2&>1
'';
```
1. Force exclude this service from having the default 'onFailure' added
2. Bring in pushover API/User ENV vars for script
3. Pass SystemD specifiers into script
4. Er.. script. Nix pops it into a shell script and refers to it in the unit.
!!! bug
I put in a nice link direct to Cockpit for the specific machine/service in question that doesn't _quite_ work yet... (:octicons-issue-opened-16: [#96](https://github.com/truxnell/nix-config/issues/96))
## Excluding a service
Now, we may not want this on ALL services - especially the pushover-notify service itself. We can exclude a service using `nixpkgs.lib.mkForce`:
```nix
# Over-write the default pushover
systemd.services."service".onFailure = lib.mkForce [ ] option.
```

View file

@ -1,33 +0,0 @@
I've added warnings and assertions to code using nix to help me avoid misconfigurations. For example, if a module needs a database enabled, it can abort a deployment if it is not enabled. Similarly, I have added warnings for when I have disabled backups on production machines.
!!! question "But why, when its not being shared with others?"
Because I guarantee I'll somehow stuff it up down the track and accidentally disable things I didn't mean to. Roll your eyes, I'll thank myself later.
> Learnt from: [Nix Manual](https://nlewo.github.io/nixos-manual-sphinx/development/assertions.xml.html)
## Warnings
Warnings will print a warning message during a nix build or deployment, but will **NOT** stop the action. Great for things like reminders on disabled features.
To add a warning inside a module:
```nix
# Warn if backups are disabled and machine isn't a dev box
config.warnings = [
  (mkIf (!cfg.local.enable && config.mySystem.purpose != "Development")
    "WARNING: Local backups are disabled!")
  (mkIf (!cfg.remote.enable && config.mySystem.purpose != "Development")
    "WARNING: Remote backups are disabled!")
];
```
<figure markdown="span">
![Alt text](../includes/assets/no-backup-warning.png)
<figcaption>Oh THATS what I forgot to re-enable...</figcaption>
</figure>
## Abort/assert
Warnings' bigger, meaner brother. Stops a nix build/deploy dead in its tracks. Only useful for when a deployment is incompatible with running - i.e. a dependency not met in options.
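A sketch of what such an assertion can look like inside a module - the `mySystem.services.someApp` option is illustrative, not one of this repo's options:
```nix
{ config, lib, ... }:
{
  # Abort the build if the app is enabled without the database it needs.
  config.assertions = [
    {
      assertion = config.mySystem.services.someApp.enable
        -> config.services.postgresql.enable;
      message = "someApp requires services.postgresql.enable = true;";
    }
  ];
}
```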

View file

@ -1,6 +0,0 @@
Zed monitoring can also send to pushover!
<figure markdown="span">
![Alt text](../includes/assets/zed_alert.png)
<figcaption>Come on, these drives are hardly 12 months old</figcaption>
</figure>

View file

@ -1,48 +0,0 @@
# Message of the day
Why not include a nice message of the day for each server I log into?
The below gives some insight into what the server is running, the status of zpools, usage, etc.
While not shown below - thankfully - if a zpool error is found, the status gives a full `zpool status -x` debrief, which is particularly eye-catching upon login.
I've also squeezed in a 'reboot required' flag for when the server has detected its running kernel/init/systemd is a different version to what it booted with - useful to know when long-running servers require a reboot to pick up new kernel/etc versions.
<figure markdown="span">
![Screenshot of message of the day prompt on login to server](../includes/assets/motd.png)
<figcaption>Message of the day</figcaption>
</figure>
Code TLDR
:simple-github:[/nixos/modules/nixos/system/motd](https://github.com/truxnell/nix-config/blob/462144babe7e7b2a49a985afe87c4b2f1fa8c3f9/nixos/modules/nixos/system/motd/default.nix)
Write a shell script using nix with a bash motd of your choosing.
```nix
let
  motd = pkgs.writeShellScriptBin "motd"
    ''
      #! /usr/bin/env bash
      source /etc/os-release
      service_status=$(systemctl list-units | grep podman-)
      <- SNIP ->
      printf "$BOLDService status$ENDCOLOR\n"
    '';
in
```
This gets us a shell script we can then call directly from systemPackages - and after that it's just a short hop to make this part of the shell init.
!!! note
Replace with your preferred shell!
```nix
environment.systemPackages = [
  motd
];
programs.fish.interactiveShellInit = ''
  motd
'';
```
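The 'reboot required' flag mentioned earlier boils down to comparing the booted system's kernel/initrd with the freshly activated generation; a minimal sketch of that check as its own script (the `reboot-check` helper name is made up):
```nix
{ pkgs, ... }:
let
  # Compare what the machine booted with against the current generation;
  # if they differ, a reboot is needed to pick up the new kernel/initrd.
  rebootCheck = pkgs.writeShellScriptBin "reboot-check" ''
    booted="$(readlink /run/booted-system/{initrd,kernel,kernel-modules})"
    current="$(readlink /run/current-system/{initrd,kernel,kernel-modules})"
    if [ "$booted" != "$current" ]; then
      printf "Reboot required: the booted kernel/initrd differs from the current generation\n"
    fi
  '';
in
{
  environment.systemPackages = [ rebootCheck ];
}
```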

View file

@ -1,3 +0,0 @@
2 x adguard -> powerdns (authoritative) -> (quad9 || mullvad)
note reverse dns (in-addr.arpa) and split-brain setup.
dnssec

View file

@ -1,15 +0,0 @@
# DNS & DHCP
!!! info "TLDR"
External DNS: Client -> Adguard Home (r->
My DNS has evolved and changed over time, especially with a personal desire to keep my entire internet backbone boring and standard, off a trusted vendor. 'Why can't I connect to my Minecraft server' and 'Are you playing with the internet again' are questions I don't want to have to answer in this house.
Sadly, while I do love my Unifi Dream Machine Pro, its DNS offering is lackluster and I really prefer split DNS so I don't have to access everything with ip:port.
# General
<DNS IMAGE>
My devices all use the Unifi DHCP server to get addresses, which I much prefer so I maintain all my clients in the single-pane-of-glass the UDMP provides. In the DHCP options, I add the

View file

@ -1,12 +0,0 @@
# Design principles
Taking some lead from the [Zen of Python](https://peps.python.org/pep-0020/):
- Minimise dependencies, where required, explicitly define dependencies
- Use plain Nix & bash to solve problems over additional tooling
- Stable channel for stable machines. Unstable only where features are important.
- Modules for a specific service - Profiles for broad configuration of state.
- Write readable code - descriptive variable names and modules
- Keep functions/dependencies within the relevant module where possible
- Errors should never pass silently - use assert etc for misconfigurations
- Flat is better than nested - use built-in functions like map, filter, and fold to operate on lists or sets
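A tiny illustration of the last point - operating on flat lists with built-ins rather than hand-rolled recursion (purely an example, not code from this repo):
```nix
# Pick the enabled services out of a flat list and grab their names.
let
  services = [
    { name = "sonarr"; enable = true; }
    { name = "radarr"; enable = false; }
    { name = "gatus"; enable = true; }
  ];
in
builtins.map (s: s.name) (builtins.filter (s: s.enable) services)
# => [ "sonarr" "gatus" ]
```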

View file

@ -1,10 +0,0 @@
# Features
Some things I'm proud of. Or just happy they exist so I can forget about something until I need to worry.
<div class="grid cards" markdown>
- :octicons-copy-16: [__Nightly Backups__](/maintenance/backups/)<br>A ZFS snapshot is taken at night, with restic then backing up both locally and to the cloud. NixOS wrappers make restoring a single command line entry.<br><br>Taking a ZFS snapshot before backup is important to ensure restic isn't backing up files that are in use, which would cause corruption.
- :material-update: [__Software Updates__](/maintenance/software_updates/)<br>Renovate Bot regularly runs on this Github repo, updating the flake lockfile, containers and other dependencies automatically.<br><br> Automerge is enabled for updates I expect will be routine, but manual PR approval is required for updates I suspect may need a read of the changelog for breaking changes.
- :ghost: __Impermanence__:<br>Inspired by the [Erase your Darlings](https://grahamc.com/blog/erase-your-darlings/) post, servers run ZFS and roll back to a blank snapshot at night. This ensures repeatable NixOS deployments and no cruft, and also hardens servers a little (see the sketch after this list).
- :material-alarm-light: __SystemD Notifications__:<br>Systemd hook that adds a pushover notification to __any__ systemd unit failure for any unit NixOS is aware of. No worrying about forgetting to add a notification to every new service or worrying about missing one.
</div>
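For the impermanence card above, the core of the pattern from the linked post is a rollback to the blank snapshot very early in boot; a minimal sketch (this repo's nightly variant may differ in when it triggers):
```nix
{ lib, ... }:
{
  # Roll the root dataset back to the empty @blank snapshot before it is
  # mounted, so only /nix and explicitly persisted paths survive.
  boot.initrd.postDeviceCommands = lib.mkAfter ''
    zfs rollback -r rpool/local/root@blank
  '';
}
```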

View file

@ -1,19 +0,0 @@
# Goals
When I set about making this lab I had a number of goals - I wonder how well I will do :thinking:?
A master list of ideas/goals/etc can be found at :octicons-issue-tracks-16: [Issue #1](https://github.com/truxnell/nix-config/issues/1)
<div class="grid cards" markdown>
- __:material-sword: Stability__ <br>NixOS stable channel for core services, unstable for desktop apps/non-mission-critical where desired. Containers with SHA256 pinning for server apps
- __:kiss: KISS__<br>Keep it Simple, use boring, reliable, trusted tools - not todays flashy new software repo
- __:zzz: Easy Updates__<br>Weekly update schedule, utilizing Renovate for updating the lockfile and container images. Auto-updates enabled off the main branch for mission critical machines. Aim for 'magic rollback' on upgrade failure
- __:material-cloud-upload: Backups__<br>Nightly restic backups to both cloud and NAS. All databases to have nightly backups. _Test backups regularly_
- __:repeat: Reproducibility__<br>Flakes & Git for version pinning, SHA256 tags for containers.
- __:alarm_clock: Monitoring__<br>Automated monitoring on failure & critical summaries, using basic tools. Use Gatus for both internal and external monitoring
- __:clipboard: Continuous Integration__<br>CI against main branch to ensure all code compiles OK. Use PR's to add to main and dont skip CI due to impatience
- __:material-security: Security__<br>Don't use containers with S6 overlay/root (i.e. LSIO :grey_question:{ title="LSIO trades security for convenience with their container configuration" }). Expose minimal ports at the router, reduce attack surface by keeping it simple, review hardening of containers/podman/NixOS
- __:fontawesome-solid-martini-glass-citrus: Ease of administration__<br>Lean into the devil that is SystemD - and have one standard interface to see logs, manipulate services, etc. Run containers as podman services, and web UIs for watching/debugging
- __:simple-letsencrypt: Secrets__ _~ssshh~.._<br>[Sops-nix](https://github.com/Mic92/sops-nix) for secrets, living in my git repo. Avoid cloud services like I used in k8s (i.e. [Doppler.io](https://doppler.io))
</div>

View file

@ -1,9 +0,0 @@
Removed complexity:
- external secrets -> bog standard sops
- HA file storage -> standard file system
- HA database cluster -> nixos standard cluster
- Database user operator -> nixos standard ensure_users (see the sketch below)
- Database permissions operator -> why even??
- secrets reloader -> sops restart_unit
- easier management - all services run through systemd for consistency, and cockpit makes viewing logs/pod consoles etc. easy.
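For the 'ensure_users' line above, the NixOS side is essentially the postgresql module's ensure options; a minimal sketch with placeholder database/user names:
```nix
{
  services.postgresql = {
    enable = true;
    ensureDatabases = [ "someapp" ];
    ensureUsers = [
      {
        name = "someapp";
        # On newer nixpkgs; older releases used the ensurePermissions option.
        ensureDBOwnership = true;
      }
    ];
  };
}
```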

View file

@ -1 +0,0 @@
Explain mySystem and myHome

View file

@ -1,34 +0,0 @@
# Repository Structure
!!! note inline end
Oh god writing this now is a horrid idea, I always refactor like 50 times...
Here is a bit of a walkthrough of the repository structure so ~~you~~ I can have a vague idea on what is going on. Organizing a monorepo is hard at the best of times.
<br><br><br>
```
├── .github
│   ├── renovate          Renovate modules
│   ├── workflows         Github Action workflows (i.e. CI/Site building)
│   └── renovate.json5    Renovate core settings
├── .taskfiles            go-task file modules
├── docs                  This mkdocs-material site
├── nixos                 Nixos Modules
│   ├── home              home-manager nix files
│   │   ├── modules       home-manager modules
│   │   └── truxnell      home-manager user
│   ├── hosts             hosts for nix - starting point of configs.
│   ├── modules           nix modules
│   ├── overlays          nixpkgs overlays
│   ├── pkgs              custom nix packages
│   └── profiles          host profiles
├── README.md             Github Repo landing page
├── flake.nix             Core flake
├── flake.lock            Lockfile
├── LICENSE               Project License
├── mkdocs.yml            mkdocs settings
└── Taskfile.yaml         go-task core file
```
Whew, that wasn't so hard right... right?

View file

@ -1,6 +0,0 @@
mkdocs~=1.5,>=1.5.3
mkdocs-material~=9.4
mkdocs-material-extensions~=1.2
pygments~=2.16
pymdown-extensions~=10.2
mkdocs-minify-plugin~=0.7

View file

@ -1,3 +0,0 @@
## Container images
Don't use LSIO!

View file

@ -1,8 +0,0 @@
* Don't make conditional imports (nix needs to resolve imports upfront)
* you can pass values between nixos and home-manager with config.home-manager.users.<user>.<option> and osConfig.<option>
* when adding home-manager to an existing setup, the home-manager service may fail due to trying to over-write existing files in `~`. Deleting these should allow the service to start
* yaml = json, so I use nix + builtins.toJSON a lot (and the repl + vscode for testing) - see the example below
checking values:
# https://github.com/NixOS/nixpkgs/blob/90055d5e616bd943795d38808c94dbf0dd35abe8/nixos/modules/config/users-groups.nix#L116
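An example of the 'yaml = json' point above - rendering a YAML config file for a service straight from Nix (the file name and settings here are made up):
```nix
{ ... }:
let
  # YAML is a superset of JSON, so a JSON document is already valid YAML.
  gatusSettings = {
    web.port = 8080;
    endpoints = [
      { name = "homepage"; url = "https://example.org"; interval = "5m"; }
    ];
  };
in
{
  environment.etc."gatus/config.yaml".text = builtins.toJSON gatusSettings;
}
```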

View file

@ -1,3 +0,0 @@
## Why not recurse the module folder
Imports are special in Nix and it's important that they are statically defined up front - if you do optional/computed imports, not everything is available when the module system evaluates.
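In practice that means listing every module explicitly rather than computing the list from `config`; a small sketch of the pattern (module paths are placeholders):
```nix
# default.nix of a module folder: a plain, static import list that Nix can
# resolve before any option evaluation happens.
{
  imports = [
    ./sonarr
    ./radarr
    ./gatus
  ];
  # NOT: imports = lib.optional config.mySystem.media.enable ./sonarr;
  # `config` isn't available while imports are still being resolved.
}
```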

View file

@ -1,4 +0,0 @@
- need to persist SSH host keys across reboots (see the sketch below)
- else you end up with sops issues & the host key (and thus known_hosts entries) changing every reboot
- need to sort out passwords
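A minimal sketch of one way to keep the host keys with the impermanence module (assumes `environment.persistence` is available and `/persist` is the persistent dataset, as on the ZFS hosts; the exact file list is an assumption):
```nix
{
  # Persist SSH host keys (and the age/sops identity derived from them) across reboots
  environment.persistence."/persist" = {
    files = [
      "/etc/ssh/ssh_host_ed25519_key"
      "/etc/ssh/ssh_host_ed25519_key.pub"
    ];
  };
}
```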

View file

@ -1,92 +0,0 @@
## Installing a playground VM
I've used gnome-boxes on my current Fedora laptop for running playground VMs.
Settings:

- ISO: nixos-minimal
- Hard drive: 32GB
- RAM: 2GB
- EFI: Enabled
- Expose port 22 to allow SSH into the VM (host port 3022, guest 22)
```sh
# set temp root passwd
sudo su
passwd
```
`sshd` is already running, so you can now ssh into the vm remotely for the rest of the setup.
`ssh root@127.0.0.1 -p 3022`
```sh
# Partitioning
parted /dev/sda -- mklabel gpt
parted /dev/sda -- mkpart root ext4 512MB -8GB
parted /dev/sda -- mkpart swap linux-swap -8GB 100%
parted /dev/sda -- mkpart ESP fat32 1MB 512MB
parted /dev/sda -- set 3 esp on
# Formatting
mkfs.ext4 -L nixos /dev/sda1
mkswap -L swap /dev/sda2
mkfs.fat -F 32 -n boot /dev/sda3
# Mounting disks for installation
mount /dev/disk/by-label/nixos /mnt
mkdir -p /mnt/boot
mount /dev/disk/by-label/boot /mnt/boot
swapon /dev/sda2
# Generating default configuration
nixos-generate-config --root /mnt
```
From the repo on the host, copy the bootstrap configuration into the VM and fetch the generated hardware configuration back:
```sh
scp -P 3022 nixos/hosts/bootstrap/configuration.nix root@127.0.0.1:/mnt/etc/nixos/configuration.nix
scp -P 3022 root@127.0.0.1:/mnt/etc/nixos/hardware-configuration.nix nixos/hosts/nixosvm/hardware-configuration.nix
```
Then back to the VM
```sh
nixos-install
reboot
nixos-rebuild switch
```
Set the password for the user that was created.
Might need to use su?
```sh
passwd truxnell
```
Also grab the SSH host key so it can be converted to an age key and sops re-encrypted:
```sh
cat /etc/ssh/ssh_host_ed25519_key.pub | ssh-to-age
```
then run `task sops:re-encrypt`.
Log in as the user and clone the nix-config repo, OR for remote machines/servers just run `nixos-install --impure --flake github:truxnell/nix-config#<MACHINE_ID>`
```sh
mkdir .local
cd .local
git clone https://github.com/truxnell/nix-config.git
cd nix-config
```
Apply the config to the bootstrapped device.
First time around, you MUST apply with `<machinename>` set to the name of a host in ./hosts/.
This is because `... --flake .` looks for a `nixosConfigurations` key matching the machine's hostname.
The bootstrap machine will be called 'nixos-bootstrap', so the flake by default would resolve `nixosConfigurations.nixos-bootstrap`.
Subsequent rebuilds can use the default command, as after the first build the machine's hostname will have been changed to the desired machine. A sketch of the flake output shape is below the command.
```sh
nixos-rebuild switch --flake .#<machinename>
```
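For reference, a rough sketch of the flake output shape that `--flake .#<machinename>` resolves against - the host module path is illustrative and the repo's `mkNixosConfig` wrapper is elided:
```nix
{
  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";

  outputs = { self, nixpkgs, ... }: {
    nixosConfigurations = {
      # `nixos-rebuild switch --flake .#durincore` looks this attribute up by name
      "durincore" = nixpkgs.lib.nixosSystem {
        system = "x86_64-linux";
        modules = [ ./nixos/hosts/durincore ];
      };
    };
  };
}
```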

View file

@ -1,56 +0,0 @@
> https://grahamc.com/blog/erase-your-darlings/
# Get hostid
run `head -c 8 /etc/machine-id`
and copy the value into `networking.hostId` to ensure ZFS doesn't get borked on reboot (see the sketch below)
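A minimal sketch - the value below is only an example of what that command prints:
```nix
{
  # ZFS uses this to tell whether the pool was last imported by this machine
  networking.hostId = "8425e349"; # example output of `head -c 8 /etc/machine-id`
}
```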
# Partitioning
parted /dev/sda -- mklabel gpt
parted /dev/sda -- mkpart root ext4 512MB -8GB
parted /dev/sda -- mkpart swap linux-swap -8GB 100%
parted /dev/sda -- mkpart ESP fat32 1MB 512MB
parted /dev/sda -- set 3 esp on
# Formatting
mkswap -L swap /dev/sda2
swapon /dev/sda2
mkfs.fat -F 32 -n boot /dev/sda3
# ZFS on root partition
zpool create -O mountpoint=none rpool /dev/sda1
zfs create -p -o mountpoint=legacy rpool/local/root
## immediate blank snapshot
zfs snapshot rpool/local/root@blank
mount -t zfs rpool/local/root /mnt
# Boot partition
mkdir /mnt/boot
mount /dev/sda3 /mnt/boot
# Nix dataset
zfs create -p -o mountpoint=legacy rpool/local/nix
mkdir /mnt/nix
mount -t zfs rpool/local/nix /mnt/nix
# And a dataset for /home: if needed
zfs create -p -o mountpoint=legacy rpool/safe/home
mkdir /mnt/home
mount -t zfs rpool/safe/home /mnt/home
zfs create -p -o mountpoint=legacy rpool/safe/persist
mkdir /mnt/persist
mount -t zfs rpool/safe/persist /mnt/persist
Set `networking.hostId` in the nixos config to the output of `head -c 8 /etc/machine-id`.
nixos-install --impure --flake github:truxnell/nix-config#<MACHINE_ID>
Consider using `nixos-enter` to import a zpool if required (for the NAS) instead of rebooting post-install.

View file

@ -1,9 +0,0 @@
# Generate age key per machine
On a new machine, run the below to convert its shiny new ed25519 host key to an age key:
```sh
nix-shell -p ssh-to-age --run 'cat /etc/ssh/ssh_host_ed25519_key.pub | ssh-to-age'
```
Copy this into `./.sops.yaml` in the base repo, then re-run the taskfile target `task sops:re-encrypt` to loop through all sops files, decrypting and re-encrypting each.

141
flake.nix
View file

@ -44,6 +44,8 @@
url = "github:nix-community/nix-index-database"; url = "github:nix-community/nix-index-database";
inputs.nixpkgs.follows = "nixpkgs"; inputs.nixpkgs.follows = "nixpkgs";
}; };
# nix-inspect - inspect nix derivations usingn a TUI interface
# https://github.com/bluskript/nix-inspect
nix-inspect = { nix-inspect = {
url = "github:bluskript/nix-inspect"; url = "github:bluskript/nix-inspect";
}; };
@ -137,149 +139,22 @@
   };
 in
 rec {
-  "rickenbacker" = mkNixosConfig {
-    # NixOS laptop (dualboot windows, dunno why i kept it)
-    hostname = "rickenbacker";
+  "durincore" = mkNixosConfig {
+    # NixOS laptop - T470 Thinkpad
+    hostname = "durincore";
     system = "x86_64-linux";
     hardwareModules = [
-      ./nixos/profiles/hw-thinkpad-e14-amd.nix
-      inputs.nixos-hardware.nixosModules.lenovo-thinkpad-e14-amd
+      ./nixos/profiles/hw-lenovo-thinkpad-t470.nix
+      inputs.nixos-hardware.nixosModules.lenovo-thinkpad-t470s
     ];
     profileModules = [
       ./nixos/profiles/role-worstation.nix
       ./nixos/profiles/role-dev.nix
-      { home-manager.users.truxnell = ./nixos/home/truxnell/workstation.nix; }
+      { home-manager.users.jahanson = ./nixos/home/jahanson/workstation.nix; }
     ];
   };
"citadel" = mkNixosConfig {
# Gaming PC (dualboot windows)
hostname = "citadel";
system = "x86_64-linux";
hardwareModules = [
./nixos/profiles/hw-gaming-desktop.nix
];
profileModules = [
./nixos/profiles/role-worstation.nix
./nixos/profiles/role-dev.nix
{ home-manager.users.truxnell = ./nixos/home/truxnell/workstation.nix; }
];
  };
"dns01" = mkNixosConfig {
# Rpi for DNS and misc services
hostname = "dns01";
system = "aarch64-linux";
hardwareModules = [
./nixos/profiles/hw-rpi4.nix
inputs.nixos-hardware.nixosModules.raspberry-pi-4
];
profileModules = [
./nixos/profiles/role-server.nix
{ home-manager.users.truxnell = ./nixos/home/truxnell/server.nix; }
];
};
"dns02" = mkNixosConfig {
# Rpi for DNS and misc services
hostname = "dns02";
system = "aarch64-linux";
hardwareModules = [
./nixos/profiles/hw-rpi4.nix
inputs.nixos-hardware.nixosModules.raspberry-pi-4
];
profileModules = [
./nixos/profiles/role-server.nix
{ home-manager.users.truxnell = ./nixos/home/truxnell/server.nix; }
];
};
"durandal" = mkNixosConfig {
# test lenovo tiny
hostname = "durandal";
system = "x86_64-linux";
hardwareModules = [
./nixos/profiles/hw-generic-x86.nix
];
profileModules = [
./nixos/profiles/role-server.nix
./nixos/profiles/role-dev.nix
{ home-manager.users.truxnell = ./nixos/home/truxnell/server.nix; }
];
};
"daedalus" = mkNixosConfig {
# lenovo tiny 720q NAS
hostname = "daedalus";
system = "x86_64-linux";
hardwareModules = [
./nixos/profiles/hw-generic-x86.nix
];
profileModules = [
./nixos/profiles/role-server.nix
{ home-manager.users.truxnell = ./nixos/home/truxnell/server.nix; }
];
};
"shodan" = mkNixosConfig {
# nuc11i7beh (?) homelab
hostname = "shodan";
system = "x86_64-linux";
hardwareModules = [
./nixos/profiles/hw-generic-x86.nix
];
profileModules = [
./nixos/profiles/role-server.nix
{ home-manager.users.truxnell = ./nixos/home/truxnell/server.nix; }
];
};
};
# # nix build .#images.rpi4
# rpi4 = nixpkgs.lib.nixosSystem {
# inherit specialArgs;
# modules = defaultModules ++ [
# "${nixpkgs}/nixos/modules/installer/sd-card/sd-image-aarch64.nix"
# ./nixos/hosts/images/sd-image
# ];
# };
# # nix build .#images.iso
# iso = nixpkgs.lib.nixosSystem {
# inherit specialArgs;
# modules = defaultModules ++ [
# "${nixpkgs}/nixos/modules/installer/cd-dvd/channel.nix"
# "${nixpkgs}/nixos/modules/installer/cd-dvd/iso-image.nix"
# ./nixos/hosts/images/cd-dvd
# ];
# };
# simple shortcut to allow for easier referencing of correct
# key for building images
# > nix build .#images.rpi4
# images.rpi4 = nixosConfigurations.rpi4.config.system.build.sdImage;
# images.iso = nixosConfigurations.iso.config.system.build.isoImage;
   # Convenience output that aggregates the outputs for home, nixos.
   # Also used in ci to build targets generally.
   top =

View file

@ -1,13 +0,0 @@
use_sops() {
local path=${1}
eval "$(sops -d --output-type dotenv "$path" | direnv dotenv bash /dev/stdin)"
watch_file "$path"
}
if has nix; then
use flake
fi
if has sops; then
use sops ./flyctl-secret.sops.yaml
fi

View file

@ -1,14 +0,0 @@
---
# go-task runner file
version: "3"
tasks:
default:
silent: true
cmds:
- task -l
deploy:
desc: Deploy app
cmds:
- bash -c "doppler run -p github -c prd_vaultwarden -- fly deploy"

View file

@ -1,41 +0,0 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1714750952,
"narHash": "sha256-oOUdvPrO8CbupgDSaPou+Jv6GL+uQA2QlE33D7OLzkM=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "5fd8536a9a5932d4ae8de52b7dc08d92041237fc",
"type": "github"
},
"original": {
"id": "nixpkgs",
"type": "indirect"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs",
"systems": "systems"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

View file

@ -1,30 +0,0 @@
{
inputs = {
# nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
systems.url = "github:nix-systems/default";
};
outputs =
{ systems
, nixpkgs
, ...
} @ inputs:
let
eachSystem = f:
nixpkgs.lib.genAttrs (import systems) (
system:
f nixpkgs.legacyPackages.${system}
);
in
{
devShells = eachSystem (pkgs: {
default = pkgs.mkShell {
packages = with pkgs; [
flyctl
doppler
go-task
];
};
});
};
}

View file

@ -1,52 +0,0 @@
primary_region = "syd"
kill_signal = "SIGINT"
kill_timeout = "5s"
[experimental]
auto_rollback = true
[build]
image = "ghcr.io/dani-garcia/vaultwarden:1.30.5@sha256:3d9d937b29281dc251ecae06e627b03d34a2c85d2ed876bf4d1ffe695836f864"
[env]
DATABASE_URL = "data/db.sqlite3"
PASSWORD_ITERATIONS = "2000000"
PRIMARY_REGION = "syd"
SIGNUPS_ALLOWED = "false"
INVITATIONS_ALLOWED = "true"
SMTP_FROM_NAME = "Vault"
SMTP_SECURITY = "off"
SMTP_SSL = "true"
TZ = "Australia/Melbourne"
WEB_VAULT_ENABLED = "true"
WEB_VAULT_FOLDER = "web-vault"
DATA_FOLDER = "data"
[[mounts]]
source = "vw_data_machines"
destination = "/data"
processes = ["app"]
[[services]]
protocol = "tcp"
internal_port = 80
processes = ["app"]
[[services.ports]]
port = 80
handlers = ["http"]
force_https = true
[[services.ports]]
port = 443
handlers = ["tls", "http"]
[services.concurrency]
type = "connections"
hard_limit = 25
soft_limit = 20
[[services.tcp_checks]]
interval = "15s"
timeout = "2s"
grace_period = "1s"
restart_limit = 0

View file

@ -0,0 +1,86 @@
{ lib, pkgs, self, config, ... }:
with config;
{
imports = [
../modules
];
config = {
myHome.username = "jahanson";
myHome.homeDirectory = "/home/jahanson/";
systemd.user.sessionVariables = {
EDITOR = "vim";
};
home = {
# Install these packages for my user
packages = with pkgs; [
# misc
file
which
tree
gnused
gnutar
gawk
zstd
gnupg
fastfetch
go-task
# archives
zip
xz
unzip
p7zip
# terminal file managers
nnn
ranger
yazi
# networking tools
iperf3
dnsutils # `dig` + `nslookup`
ldns # replacement of `dig`, it provide the command `drill`
aria2 # A lightweight multi-protocol & multi-source command-line download utility
socat # replacement of openbsd-netcat
nmap # A utility for network discovery and security auditing
ipcalc # it is a calculator for the IPv4/v6 addresses
# system tools
sysstat
lm_sensors # for `sensors` command
ethtool
pciutils # lspci
usbutils # lsusb
# system call monitoring
strace # system call monitoring
ltrace # library call monitoring
lsof # list open files
btop # replacement of htop/nmon
iotop # io monitoring
iftop # network monitoring
# utils
direnv # shell environment management
pre-commit # Pre-commit tasks for git
minio-client # S3 management
shellcheck
envsubst
# nix tools
nvd
];
sessionVariables = {
EDITOR = "vim";
};
};
};
}

View file

@ -0,0 +1,47 @@
{ lib, pkgs, self, config, inputs, ... }:
with config;
{
imports = [
./global.nix
];
myHome.programs.firefox.enable = true;
myHome.shell = {
starship.enable = true;
fish.enable = true;
git = {
enable = true;
username = "jahanson";
email = "joe@veri.dev";
# signingKey = ""; # TODO setup signing keys n shit
};
};
home = {
# Install these packages for my user
packages = with pkgs;
[
#apps
discord
yubioath-flutter
yubikey-manager-qt
flameshot
vlc
# cli
bat
dbus
direnv
git
nix-index
python3
fzf
ripgrep
brightnessctl
];
};
}

View file

@ -12,12 +12,12 @@
   options.myHome.username = lib.mkOption {
     type = lib.types.str;
     description = "users username";
-    default = "truxnell";
+    default = "jahanson";
   };
   options.myHome.homeDirectory = lib.mkOption {
     type = lib.types.str;
     description = "users homedir";
-    default = "truxnell";
+    default = "jahanson";
   };
   # Home-manager defaults

View file

@ -54,12 +54,6 @@
definedAliases = [ "@gs" ]; definedAliases = [ "@gs" ];
}; };
# "Searx" = {
# urls = [{ template = "https://searx.trux.dev/?q={searchTerms}"; }];
# iconUpdateURL = "https://nixos.wiki/favicon.png";
# updateInterval = 24 * 60 * 60 * 1000; # every day
# definedAliases = [ "@searx" ];
# };
"Bing".metaData.hidden = true; "Bing".metaData.hidden = true;
"Google".metaData.alias = "@g"; # builtin engines only support specifying one additional alias "Google".metaData.alias = "@g"; # builtin engines only support specifying one additional alias
}; };

View file

@ -1,40 +0,0 @@
{ lib, pkgs, self, config, ... }:
with config;
{
imports = [
../modules
];
config = {
myHome.username = "truxnell";
myHome.homeDirectory = "/home/truxnell/";
# services.gpg-agent.pinentryPackage = pkgs.pinentry-qt;
systemd.user.sessionVariables = {
EDITOR = "nvim";
VISUAL = "nvim";
ZDOTDIR = "/home/pinpox/.config/zsh";
};
home = {
# Install these packages for my user
packages = with pkgs; [
eza
htop
unzip
];
sessionVariables = {
# Workaround for alacritty (breaks wezterm and other apps!)
# LIBGL_ALWAYS_SOFTWARE = "1";
EDITOR = "nvim";
VISUAL = "nvim";
ZDOTDIR = "/home/pinpox/.config/zsh";
};
};
};
}

View file

@ -1,113 +0,0 @@
{ lib, pkgs, self, config, inputs, ... }:
with config;
{
imports = [
./global.nix
];
myHome.programs.firefox.enable = true;
myHome.security = {
ssh = {
#TODO make this dynamic
enable = true;
matchBlocks = {
citadel = {
hostname = "citadel";
port = 22;
identityFile = "~/.ssh/id_ed25519";
};
rickenbacker = {
hostname = "rickenbacker";
port = 22;
identityFile = "~/.ssh/id_ed25519";
};
dns01 = {
hostname = "dns01";
port = 22;
identityFile = "~/.ssh/id_ed25519";
};
dns02 = {
hostname = "dns02";
port = 22;
identityFile = "~/.ssh/id_ed25519";
};
pikvm = {
hostname = "pikvm";
port = 22;
user = "root";
identityFile = "~/.ssh/id_ed25519";
};
durandal = {
hostname = "durandal";
port = 22;
identityFile = "~/.ssh/id_ed25519";
};
daedalus = {
hostname = "daedalus";
port = 22;
identityFile = "~/.ssh/id_ed25519";
};
shodan = {
hostname = "shodan";
port = 22;
identityFile = "~/.ssh/id_ed25519";
};
};
};
};
myHome.shell = {
starship.enable = true;
fish.enable = true;
wezterm.enable = true;
git = {
enable = true;
username = "truxnell";
email = "19149206+truxnell@users.noreply.github.com";
# signingKey = ""; # TODO setup signing keys n shit
};
};
home = {
# Install these packages for my user
packages = with pkgs;
[
#apps
discord
steam
spotify
prusa-slicer
bitwarden
yubioath-flutter
yubikey-manager-qt
flameshot
vlc
# cli
bat
dbus
direnv
git
nix-index
python3
fzf
ripgrep
brightnessctl
];
};
}

View file

@ -1,27 +0,0 @@
{ config, lib, pkgs, ... }:
{
imports =
[
# Include the results of the hardware scan.
./hardware-configuration.nix
];
# Use the systemd-boot EFI boot loader.
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
services.openssh.enable = true;
users.users.truxnell = {
isNormalUser = true;
extraGroups = [ "wheel" ]; # Enable sudo for the user.
packages = with pkgs; [
];
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMZS9J1ydflZ4iJdJgO8+vnN8nNSlEwyn9tbWU9OcysW truxnell@home"
];
};
networking.hostId = "0a90730f";
system.stateVersion = "23.11";
}

View file

@ -1,43 +0,0 @@
# Edit this configuration file to define what should be installed on
# your system. Help is available in the configuration.nix(5) man page, on
# https://search.nixos.org/options and in the NixOS manual (`nixos-help`).
{ config
, lib
, pkgs
, ...
}: {
config = {
mySystem = {
services.openssh.enable = true;
security.wheelNeedsSudoPassword = false;
time.hwClockLocalTime = true; # due to windows dualboot
};
boot.initrd.availableKernelModules = [ "nvme" "xhci_pci" "ahci" "usbhid" "usb_storage" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-amd" ];
boot.extraModulePackages = [ ];
networking.hostName = "citadel"; # Define your hostname.
fileSystems."/" =
{
device = "/dev/disk/by-uuid/701fc943-ede7-41ed-8a53-3cc38fc68fe5";
fsType = "ext4";
};
fileSystems."/boot" =
{
device = "/dev/disk/by-uuid/1D5B-36D3";
fsType = "vfat";
};
swapDevices = [ ];
};
}

View file

@ -1,137 +0,0 @@
# Edit this configuration file to define what should be installed on
# your system. Help is available in the configuration.nix(5) man page, on
# https://search.nixos.org/options and in the NixOS manual (`nixos-help`).
{ config
, lib
, pkgs
, ...
}: {
imports = [
];
config = {
mySystem.purpose = "Network Attached Storage";
mySystem.system.impermanence.enable = true;
mySystem.services = {
openssh.enable = true;
#containers
podman.enable = true;
nginx.enable = true;
sonarr.enable = true;
radarr.enable = true;
lidarr.enable = true;
readarr.enable = true;
sabnzbd.enable = true;
qbittorrent.enable = true;
prowlarr.enable = true;
};
mySystem.security.acme.enable = true;
mySystem.nasFolder = "/tank";
mySystem.system.resticBackup.local.location = "/tank/backup/nixos/nixos";
mySystem.system = {
zfs.enable = true;
zfs.mountPoolsAtBoot = [ "tank" ];
};
mySystem.services.nfs.enable = true;
mySystem.system.motd.networkInterfaces = [ "eno1" ];
boot = {
initrd.availableKernelModules = [ "xhci_pci" "ahci" "mpt3sas" "nvme" "usbhid" "usb_storage" "sd_mod" ];
initrd.kernelModules = [ ];
kernelModules = [ "kvm-intel" ];
extraModulePackages = [ ];
# for managing/mounting ntfs
supportedFilesystems = [ "ntfs" ];
loader = {
systemd-boot.enable = true;
efi.canTouchEfiVariables = true;
# why not ensure we can memtest workstatons easily?
grub.memtest86.enable = true;
};
};
networking.hostName = "daedalus"; # Define your hostname.
networking.hostId = "ed3980cb"; # for zfs, helps stop importing to wrong machine
networking.useDHCP = lib.mkDefault true;
fileSystems."/" =
{
device = "rpool/local/root";
fsType = "zfs";
};
fileSystems."/boot" =
{
device = "/dev/disk/by-uuid/F42E-1E48";
fsType = "vfat";
};
fileSystems."/nix" =
{
device = "rpool/local/nix";
fsType = "zfs";
};
fileSystems."/persist" =
{
device = "rpool/safe/persist";
fsType = "zfs";
neededForBoot = true; # for impermanence
};
swapDevices =
[{ device = "/dev/disk/by-uuid/c2f716ef-9e8c-466b-bcb0-699397cb2dc0"; }];
# TODO does this live somewhere else?
# it is very machine-specific...
# add user with `sudo smbpasswd -a my_user`
services.samba = {
enable = true;
openFirewall = true;
extraConfig = ''
workgroup = WORKGROUP
server string = daedalus
netbios name = daedalus
security = user
#use sendfile = yes
#max protocol = smb2
# note: localhost is the ipv6 localhost ::1
hosts allow = 10.8.10. 127.0.0.1 localhost
hosts deny = 0.0.0.0/0
guest account = nobody
map to guest = bad user
'';
shares = {
backup = {
path = "/tank/backup";
"read only" = "no";
};
documents = {
path = "/tank/documents";
"read only" = "no";
};
natflix = {
path = "/tank/natflix";
"read only" = "no";
};
# paperless = {
# path = "/tank/Apps/paperless/incoming";
# "read only" = "no";
# };
};
};
};
}

View file

@ -1,46 +0,0 @@
# Edit this configuration file to define what should be installed on
# your system. Help is available in the configuration.nix(5) man page, on
# https://search.nixos.org/options and in the NixOS manual (`nixos-help`).
{ config
, lib
, pkgs
, ...
}: {
imports = [
];
mySystem.services = {
openssh.enable = true;
cfDdns.enable = true;
powerdns = {
enable = true;
admin-ui = false;
};
adguardhome.enable = true;
};
# no mutable state I care about
mySystem.system.resticBackup =
{
local.enable = false;
remote.enable = false;
};
networking.hostName = "dns01"; # Define your hostname.
networking.useDHCP = lib.mkDefault true;
fileSystems."/" =
{
device = "/dev/disk/by-label/NIXOS_SD";
fsType = "ext4";
};
swapDevices = [ ];
}

View file

@ -1,46 +0,0 @@
# Edit this configuration file to define what should be installed on
# your system. Help is available in the configuration.nix(5) man page, on
# https://search.nixos.org/options and in the NixOS manual (`nixos-help`).
{ config
, lib
, pkgs
, ...
}: {
imports = [
];
mySystem.services = {
openssh.enable = true;
cfDdns.enable = true;
powerdns = {
enable = true;
admin-ui = false;
};
adguardhome.enable = true;
};
# no mutable state I care about
mySystem.system.resticBackup =
{
local.enable = false;
remote.enable = false;
};
networking.hostName = "dns02"; # Define your hostname.
networking.useDHCP = lib.mkDefault true;
fileSystems."/" =
{
device = "/dev/disk/by-label/NIXOS_SD";
fsType = "ext4";
};
swapDevices = [ ];
}

View file

@ -1,73 +0,0 @@
# Edit this configuration file to define what should be installed on
# your system. Help is available in the configuration.nix(5) man page, on
# https://search.nixos.org/options and in the NixOS manual (`nixos-help`).
{ config
, lib
, pkgs
, ...
}: {
mySystem.purpose = "Development";
mySystem.services = {
openssh.enable = true;
podman.enable = true;
nginx.enable = true;
openvscode-server.enable = true;
postgresql =
{ enable = true; backup = false; };
calibre-web = { enable = true; backup = false; dev = true; };
};
# mySystem.containers.calibre = { enable = true; backup = false; dev = true; };
mySystem.system.systemd.pushover-alerts.enable = false;
mySystem.nfs.nas.enable = true;
mySystem.persistentFolder = "/persistent";
mySystem.system.motd.networkInterfaces = [ "eno1" ];
mySystem.security.acme.enable = true;
# Dev machine
mySystem.system.resticBackup =
{
local.enable = false;
remote.enable = false;
};
boot = {
initrd.availableKernelModules = [ "xhci_pci" "ahci" "usbhid" "usb_storage" "sd_mod" ];
initrd.kernelModules = [ ];
kernelModules = [ ];
extraModulePackages = [ ];
# for managing/mounting ntfs
supportedFilesystems = [ "ntfs" ];
loader = {
systemd-boot.enable = true;
efi.canTouchEfiVariables = true;
# why not ensure we can memtest workstatons easily?
# TODO check whether this is actually working, cant see it in grub?
grub.memtest86.enable = true;
};
};
networking.hostName = "durandal"; # Define your hostname.
networking.useDHCP = lib.mkDefault true;
fileSystems."/" =
{
device = "/dev/disk/by-uuid/2e843998-f409-4ccc-bc7c-07099ee0e936";
fsType = "ext4";
};
fileSystems."/boot" =
{
device = "/dev/disk/by-uuid/12CE-A600";
fsType = "vfat";
};
swapDevices =
[{ device = "/dev/disk/by-uuid/0ae2765b-f3f4-4b1a-8ea6-599f37504d70"; }];
}

View file

@ -0,0 +1,40 @@
{ config
, lib
, pkgs
, ...
}: {
config = {
# hardware-configuration.nix - half of the hardware-configuration.nix file
mySystem = {
services.openssh.enable = true;
security.wheelNeedsSudoPassword = false;
};
# TODO build this in from flake host names
networking.hostName = "durincore";
fileSystems."/" =
{ device = "rpool/root";
fsType = "zfs";
};
fileSystems."/home" =
{ device = "rpool/home";
fsType = "zfs";
};
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/F1B9-CA7C";
fsType = "vfat";
options = [ "fmask=0077" "dmask=0077" ];
};
swapDevices =
[ { device = "/dev/disk/by-uuid/e11fc7e0-7762-455f-93a2-ceb026f42cb7"; }
];
};
}

View file

@ -1,89 +0,0 @@
{ config, pkgs, lib, nixos-hardware, ... }:
{
imports = [
# nixos-hardware.nixosModules.raspberry-pi-4
../../common/nixos/openssh.nix
];
nix = {
settings = {
experimental-features = [ "nix-command" "flakes" ];
trusted-users = [ "root" "@wheel" ];
};
};
nixpkgs = {
# Configure your nixpkgs instance
config = {
# Disable if you don't want unfree packages
allowUnfree = true;
};
};
boot = {
initrd.availableKernelModules = [ "usbhid" "usb_storage" ];
# ttyAMA0 is the serial console broken out to the GPIO
kernelParams = [
"8250.nr_uarts=1"
"console=ttyAMA0,115200"
"console=tty1"
];
loader = {
grub.enable = false;
raspberryPi = {
version = 4;
};
};
};
# # https://nixos.wiki/wiki/NixOS_on_ARM/Raspberry_Pi_4
# hardware = {
# raspberry-pi."4".apply-overlays-dtmerge.enable = true;
# deviceTree = {
# enable = true;
# filter = "*rpi-4-*.dtb";
# };
# };
console.enable = false;
environment.systemPackages = with pkgs; [
libraspberrypi
raspberrypi-eeprom
ssh-to-age
vim
git
curl
wget
dnsutils
];
networking = {
hostName = "nixos";
wireless.enable = false;
networkmanager.enable = false;
};
# Define a user account. Don't forget to set a password with passwd.
users.users.truxnell = {
isNormalUser = true;
extraGroups = [ "wheel" ]; # Enable sudo for the user.
packages = with pkgs; [
];
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMZS9J1ydflZ4iJdJgO8+vnN8nNSlEwyn9tbWU9OcysW truxnell@home"
];
};
# Free up to 1GiB whenever there is less than 100MiB left.
nix.extraOptions = ''
min-free = ${toString (100 * 1024 * 1024)}
max-free = ${toString (1024 * 1024 * 1024)}
'';
nixpkgs.hostPlatform = "aarch64-linux";
system.stateVersion = "23.11";
}

View file

@ -1,89 +0,0 @@
{ config, pkgs, lib, nixos-hardware, ... }:
{
imports = [
# nixos-hardware.nixosModules.raspberry-pi-4
../../common/nixos/openssh.nix
];
nix = {
settings = {
experimental-features = [ "nix-command" "flakes" ];
trusted-users = [ "root" "@wheel" ];
};
};
nixpkgs = {
# Configure your nixpkgs instance
config = {
# Disable if you don't want unfree packages
allowUnfree = true;
};
};
boot = {
initrd.availableKernelModules = [ "usbhid" "usb_storage" ];
# ttyAMA0 is the serial console broken out to the GPIO
kernelParams = [
"8250.nr_uarts=1"
"console=ttyAMA0,115200"
"console=tty1"
];
loader = {
grub.enable = false;
raspberryPi = {
version = 4;
};
};
};
# # https://nixos.wiki/wiki/NixOS_on_ARM/Raspberry_Pi_4
# hardware = {
# raspberry-pi."4".apply-overlays-dtmerge.enable = true;
# deviceTree = {
# enable = true;
# filter = "*rpi-4-*.dtb";
# };
# };
console.enable = false;
environment.systemPackages = with pkgs; [
libraspberrypi
raspberrypi-eeprom
ssh-to-age
vim
git
curl
wget
dnsutils
];
networking = {
hostName = "nixos";
wireless.enable = false;
networkmanager.enable = false;
};
# Define a user account. Don't forget to set a password with passwd.
users.users.truxnell = {
isNormalUser = true;
extraGroups = [ "wheel" ]; # Enable sudo for the user.
packages = with pkgs; [
];
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMZS9J1ydflZ4iJdJgO8+vnN8nNSlEwyn9tbWU9OcysW truxnell@home"
];
};
# Free up to 1GiB whenever there is less than 100MiB left.
nix.extraOptions = ''
min-free = ${toString (100 * 1024 * 1024)}
max-free = ${toString (1024 * 1024 * 1024)}
'';
nixpkgs.hostPlatform = "aarch64-linux";
system.stateVersion = "23.11";
}

View file

@ -1,26 +0,0 @@
# Playground NixOS container
Spin up a TTY only container using systemd nspawn to experiment with configuration.
```bash
# Create container from configuration flake
sudo nixos-container create playground \
--local-address 10.235.1.2 \
--host-address 10.235.1.1 \
--flake .#playground
# Update container
sudo nixos-container update playground --flake .#playground
# Start container
sudo nixos-container start playground
# Attach to container TTY
sudo nixos-container login playground
# Stop when done testing
sudo nixos-container stop playground
# Destroy container
sudo nixos-container destroy playground
```

View file

@ -1,30 +0,0 @@
{ config
, lib
, ...
}: {
imports = [ ./playground.nix ];
boot.isContainer = true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
networking = {
hostName = "playground";
useDHCP = false;
useHostResolvConf = false;
resolvconf.enable = true;
resolvconf.extraConfig = ''
resolv_conf_local_only=NO
name_server_blacklist=127.0.0.1
name_servers=1.1.1.1
'';
};
security.sudo.wheelNeedsPassword = false;
system.stateVersion = "22.05";
nix.gc.automatic = false;
mySystem.home-manager.enable = false;
# Workaround for broken home-manager
systemd.tmpfiles.rules = [
"d /nix/var/nix/gcroots/per-user/${config.mySystem.user} - ${config.mySystem.user} - - -"
"d /nix/var/nix/profiles/per-user/${config.mySystem.user} - ${config.mySystem.user} - - -"
];
}

View file

@ -1,7 +0,0 @@
{ config
, pkgs
, lib
, inputs
, outputs
, ...
}: { }

View file

@ -1,34 +0,0 @@
{ config
, lib
, pkgs
, ...
}: {
config = {
# hardware-configuration.nix is missing as I've abstracted out the parts
mySystem = {
services.openssh.enable = true;
security.wheelNeedsSudoPassword = false;
};
# TODO build this in from flake host names
networking.hostName = "rickenbacker";
fileSystems."/" =
{
device = "/dev/disk/by-label/nixos";
fsType = "ext4";
};
fileSystems."/boot" =
{
device = "/dev/disk/by-uuid/44D0-91EC";
fsType = "vfat";
};
swapDevices = [ ];
};
}

View file

@ -1,109 +0,0 @@
# Edit this configuration file to define what should be installed on
# your system. Help is available in the configuration.nix(5) man page, on
# https://search.nixos.org/options and in the NixOS manual (`nixos-help`).
{ config
, lib
, pkgs
, ...
}: {
mySystem.purpose = "Homelab";
mySystem.system.impermanence.enable = true;
mySystem.services = {
openssh.enable = true;
podman.enable = true;
postgresql.enable = true;
nginx.enable = true;
gatus.enable = true;
homepage.enable = true;
# backrest.enable = true;
plex.enable = true;
tautulli.enable = true;
factorio.freight-forwarding.enable = true; # the factory must grow
searxng.enable = true;
whoogle.enable = true;
redlib.enable = true;
mosquitto.enable = true;
zigbee2mqtt.enable = true;
node-red.enable = true;
home-assistant.enable = true;
openvscode-server.enable = true;
radicale.enable = true;
miniflux.enable = true;
calibre-web.enable = true;
rss-bridge.enable = true;
};
mySystem.containers = {
calibre.enable = true;
ecowitt2mqtt.enable = true;
};
mySystem.security.acme.enable = true;
mySystem.nfs.nas.enable = true;
mySystem.persistentFolder = "/persist";
mySystem.system.motd.networkInterfaces = [ "enp1s0" ];
mySystem.nasFolder = "/mnt/nas";
mySystem.system.resticBackup.local.location = "/mnt/nas/backup/nixos/nixos";
boot = {
initrd.availableKernelModules = [ "xhci_pci" "ahci" "usbhid" "usb_storage" "sd_mod" ];
initrd.kernelModules = [ ];
kernelModules = [ "kvm-intel" ];
extraModulePackages = [ ];
# for managing/mounting ntfs
supportedFilesystems = [ "ntfs" ];
loader = {
systemd-boot.enable = true;
efi.canTouchEfiVariables = true;
# why not ensure we can memtest workstatons easily?
# TODO check whether this is actually working, cant see it in grub?
grub.memtest86.enable = true;
};
};
networking.hostName = "shodan"; # Define your hostname.
networking.hostId = "0a90730f";
networking.useDHCP = lib.mkDefault true;
fileSystems."/" =
{
device = "rpool/local/root";
fsType = "zfs";
};
fileSystems."/nix" =
{
device = "rpool/local/nix";
fsType = "zfs";
};
fileSystems."/persist" =
{
device = "rpool/safe/persist";
fsType = "zfs";
neededForBoot = true; # for impermanence
};
fileSystems."/boot" =
{
device = "/dev/disk/by-uuid/76FA-78DF";
fsType = "vfat";
options = [ "fmask=0022" "dmask=0022" ];
};
}

View file

@ -22,15 +22,15 @@ with lib;
   mySystem = {
     # basics for all devices
-    time.timeZone = "Australia/Melbourne";
+    time.timeZone = "America/Chicago";
     security.increaseWheelLoginLimits = true;
     system.packages = [ pkgs.bat ];
-    domain = "trux.dev";
+    domain = "hsn.dev";
-    internalDomain = "l.voltaicforge.com";
+    internalDomain = "home.lan";
     shell.fish.enable = true;
     # But wont enable plugins globally, leave them for workstations
-    system.resticBackup.remote.location = "s3:https://f3b4625a2d02b0e6d1dec5a44f427191.r2.cloudflarestorage.com/nixos-restic";
+    system.resticBackup.remote.location = "s3:https://x.r2.cloudflarestorage.com/nixos-restic";
   };
   environment.systemPackages = with pkgs; [

View file

@ -1,80 +1,35 @@
services: services:
pushover: pushover:
env: ENC[AES256_GCM,data:JFaA8AGPO1tFgv+ApFSMCGxQPq4B9kMf07J0PzW96nFSJ2CIDv7Z4Eac49FkSdtf2XwbMkR2pitFKZOhavHJUfii7x0aafct7X7EWYK7VGk/Et4TGHszJN+r0X28Wtsw,iv:gwmTKE2/4hpkjWAODBleY1gz/yIy20cN/KuQM+p0V6A=,tag:bNro4dNks1+BaVjLgyqD3Q==,type:str] env: ENC[AES256_GCM,data:ebcDcOo7YPIJe//ewFmw0htN6KtmcToAaMngOXq6S2Ei2u4DeWsof4hsVUoBEwfkAB3CwdnUS4mOc6xZvanwKIyiwAAaz6tVQmqULFPs++SUk4R+ZhEUgoLovvtqGqxA,iv:FHcUugx5HyjBjxUg4ifyPuCQnWLm18caggLTntBwPIk=,tag:kkAcDjG9SJcTM5suvPvDCQ==,type:str]
pushover-user-key: ENC[AES256_GCM,data:VzuVqeSrFvz6rzArcrxPMSo5R71yhzZYMMROnPpA,iv:6ECnvOpm3/A8DHEw4lpNe+fX1VGD537pKToAIswdIWg=,tag:VfGx7Paeq+xJikSstiHS0g==,type:str] pushover-user-key: ENC[AES256_GCM,data:ESLCJvfLRZ/k6FoVC984+Zm6VyNjyod+2GFgIlRJ,iv:aBuLQJ6ysbflHetcjTyBuQ/nRrerEXrVXsDUmoseOH0=,tag:DeDO9gcSu8fIoCYiGg3ljw==,type:str]
pushover-api-key: ENC[AES256_GCM,data:3if3FcWV0Se01LqXDshIov3/2xrCgKwS2UXdR8q4,iv:Yj3r33IvUCUbn6IPKwd9xxBf0pkgcEiXnIx8tYRF6ok=,tag:NckP1sf+XOyc1shH1V2sxQ==,type:str] pushover-api-key: ENC[AES256_GCM,data:jHv+DTwMdIncAY2ZGfpCwdcRJGFdhT64wFTvShMQ,iv:keoAE8ETUw6U8g8vRQvT3ttBnm/yv+8mWdEpKlrAiko=,tag:udwZHEx4Xiy5WwIwrHA+EQ==,type:str]
truxnell-password: ENC[AES256_GCM,data:C6xsOc81W9hHtaUO/U5Wcm1LNTNKECVAX0XEqweDZDTozbXtOedeQ8zXAV9i1NkL08DeaDbI8L32m8YKTI/uHlKA+yjiQyqn9xskUVcoxGV9TxivzHuyCqJ1RM5+Bx3CDjWfuCEZIN+K1g==,iv:5bp6uSrZyVLY2fZiWRt0tdN1KGNVRDTHpeFWJIISfqs=,tag:Zwrefec8O8ZPr1lclrc5Vg==,type:str] jahanson-password: ENC[AES256_GCM,data:P7e7lvyrgDVAwYbxyDY=,iv:XWvaJOtL6nxlrU9JsA8HFCUK1N80lgAmgdc2ImNkaMM=,tag:RBEr3vwuwGoFhVvpgqyUcg==,type:str]
sops: sops:
kms: [] kms: []
gcp_kms: [] gcp_kms: []
azure_kv: [] azure_kv: []
hc_vault: [] hc_vault: []
age: age:
- recipient: age1lj5vmr02qkudvv2xedfj5tq8x93gllgpr6tzylwdlt7lud4tfv5qfqsd5u - recipient: age18kj3xhlvgjeg2awwku3r8d95w360uysu0w5ejghnp4kh8qmtge5qwa2vjp
enc: | enc: |
-----BEGIN AGE ENCRYPTED FILE----- -----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBFNUFkdHZ2S3V0WUtWa3d5 YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA3MnEzVHZKbU5Ra2ZVRFZt
QzFERCtyRzZ6SDh1N1RHMXNJSkRpZnVnOEhVClpVZ3hMdThqZTZyQlllSUlMVXRK K3NOalNEeldGWVh5azNqbHRHbElEZzVqbWhvCm8vVFEwNUpzSHJoNzVQM0FURUZO
cjQwNjZ4Qy9NRE80WTh2QUI5K2pSUncKLS0tIEdQenp2OWZVQTBQd2tMRldZQVVH MGo5S3V3dDZJTGI2Yi9xeEJLSmhaNDQKLS0tIEpDbjRQMTZ4ZUVOY21HbFdSY28x
UjNxWU9ONTYya05jTVBPbUptNUNISmcKl4AO2oxYMcvkkYwMoKiFTb3t49nQ3GgS V0VaYngwbHdLMHNtdVpLZWs4NGwxbDgKeM1Y58TV+Xx6wmavCy8EsTlIbP93FA+k
d9YZm8frhqqMWL3wdsJSKGtlO8nsSOllbj8D9VSx9b9ywtPd+irhTA== +6OXTFD/9LTEA30Uqr8akcBIo0sxO0wzT/MxpfvDlnnqnTwpyEnx/Q==
-----END AGE ENCRYPTED FILE----- -----END AGE ENCRYPTED FILE-----
- recipient: age17edew3aahg3t5nte5g0a505sn96vnj8g8gqse8q06ccrrn2n3uysyshu2c - recipient: age1z3vjvkead2h934n3w4m5m7tg4tj5qlzagsq6ly84h3tcu7x4ldsqd3s5fg
enc: | enc: |
-----BEGIN AGE ENCRYPTED FILE----- -----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBsbkZ1eHE2SzdIOEQxVVpY YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB4cG9obGRTa3RITTh4cFBZ
SjNQSEJwM0pnK1lVNFI4dmxrZkowbjkwR1JRCjk1Sm1VMVcrVGZJbGIrRmVJMHN2 N1JCSVp4SUJMRUtubmluRm43eWtic29VZXdBCjVjbWxGWWppeDVtd0lYSU5hR3hj
YjFqNlo4ci9lRi9OV3hVRFg0Wk1QUGcKLS0tIEJhbnNQYmlQVjdURTdOdmEwQXpz TnVkcEhLUldCeUhIenlqRU9CWkxoaTQKLS0tIHR4OEl6ZnByYUxsR1FIN3hrU1NB
ZlFaYy8zdXVwM01yVHMvZUtxNVpnNjQKemUlnQ9kWJ4lvmxHRLj9uFZWRk4DUBZj VGxDOVlTdjF0TXQxRlJKM2J5eGpKeHcKaOiijTJeONCRLRQx1ZY3YpLUeg3lEAWs
E1U/BH1ag8oFk7aMOvqYZmCH7niHzAYCdwjUQBxVEpu+TnqMu3z6ZA== OtELOH9rR9r1qonCZHvVGqCl6W+sE/F7ccg3RcIBrZhIqnOT+mwg3g==
-----END AGE ENCRYPTED FILE----- -----END AGE ENCRYPTED FILE-----
- recipient: age1u4tht685sqg6dkmjyer96r93pl425u6353md6fphpd84jh3jwcusvm7mgk lastmodified: "2024-05-10T14:50:15Z"
enc: | mac: ENC[AES256_GCM,data:qNmshF/s+GEOPFQpXELszXiI0dnKpwKBpltzsDNITm54D9YzHnv0iaNUbPB8NY+T1J/h7ibLdxLe0Cpo6aHiyaVmV7jrRT9FytA5B7nrDwp1JnfRLKQFHgRji/ARxIYYof7IOPyQjTq9RQsVKIky6Djtjebpe9N8s7hEm+K8dxc=,iv:nIxBxEW9WQgxCPgaKFlvES2tRPPwmq2Si7Mirz4Bzbc=,tag:FcA0zDhpTcRa3rhxJcIqDQ==,type:str]
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAvaWRlSmY0OUNFbEE2S3Rw
cVpqZXZ4cmpwVDFoU0JIL2xkL3h0UGJuK3dFCkRPaEtJVzNMZVFaVzROUHJUdVRL
L0ROdnZMMmRwd0J0MzhRcDRMbnhjMGcKLS0tIHZWcXdEWVJWL3ZyL3A4UDMyVnpG
M3M1dTBVY3BkaExNbkRGdzMzU0VrUWsKM+0FCj/eqhFOWGG8IzqyNseUjGDN68eY
6dV5FamWMBxRXiBDgGVe48PDxb7K7qubCzcF19WbL6R/8nUpDjHqNQ==
-----END AGE ENCRYPTED FILE-----
- recipient: age1cp6vegrmqfkuj8nmt2u3z0sur7n0f7e9x9zmdv4zygp8j2pnucpsdkgagc
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBqZHRvRmN0aCtLbGNRMnlW
ZDFzb2h3K2twdzFPd2UrU2ZKaDJzTVFtNHhvCjVoUW15QmNqeE5ETXFzSnhDS3hX
TVRvTXQyZkRSTDBHWGRxN3NjZ05VdHcKLS0tIEpOd2RhWkNPY3JEUXR1aWFMcUNp
VEdrQ0hFd0preVZMaHhtNTZrWHFIREUK0eRfXHKnyDXG8+SpPkNImNknNcXEe0wN
OPuBwjBAwXVa7KKiUOtMWkXkq/Pu5ePXZdj7mLHYtZWMqGV17Y34xw==
-----END AGE ENCRYPTED FILE-----
- recipient: age1ekt5xz7u2xgdzgsrffhd9x22n80cn4thxd8zxjy2ey5vq3ca7gnqz25g5r
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB2OVQwK3B3ZjRaOVpnbEZO
RXNSM1lEL0xpTGExVy9GVEorUDBmVmtpeWdzCjNodU1mZUYyUStHWnhld09BbHNK
V1RJNHgvUEtVS1hEeUpzSmJSZjNiejAKLS0tIGNPQ3dsd0xQdWp1T0QvVFA1YmVE
c1g2SmNlWGZXaFNXY0NWdDRkMFN0UXcK79ldF6D4sUJpkvDyUzHA/A6SW7+w6XBy
O0RKGRXwlO12QX4fjIDRIjc1TqRy1KsU7YbK0WrpP8ar/tMd3Nu6mg==
-----END AGE ENCRYPTED FILE-----
- recipient: age1jpeh4s553taxkyxhzlshzqjfrtvmmp5lw0hmpgn3mdnmgzku332qe082dl
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBlbTUyUFV2dTFXWWF4Z0RT
ZE9nSHJIR3hycDR6YVU4c0RuM1FjaCt4L2tRCmRtS2VLZkRHNFY1b0FVeVk1WlE2
NEl2SVd6UUVCQVhBc2VzRCtZNWt4QTQKLS0tIHJyeFlwWHdHajBEODBTMWo0ai9n
MlBLVDN6V1JTOC9PblZvQ2FPSzZNalUKD19tba0s+/IZTGpdZQn80hUrTI2M9ih8
CFioBGEx4LLEhp0eF1YRtruXLD84UYvixQwQVVb9YJTBv9eCOlT6Kg==
-----END AGE ENCRYPTED FILE-----
- recipient: age1j2r8mypw44uvqhfs53424h6fu2rkr5m7asl7rl3zn3xzva9m3dcqpa97gw
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBrZWN0Kzh4UW10dTZDenhs
dnNaYU5IZFArdTQwdEtVM3lvK3Z6NkVWeXlVCktyQ3JHSTBEaFFRdjN1WlRNaEdX
U3N6VFV1cEtvc0RxQWx1eTRuZlJBcEEKLS0tIG1USHpPNkJmTEQvY2NCTkJIZkNh
aTVZSTh3VkV5N0l1c1h0V1prSDgvNVkKYcJz8MP5QVi2Edajv+f8arkh+df+b+AJ
RxG7irfHZdJQ0waLV/DzCk+narH49IyWvtbN7QoXqAL01OZF4ujDKg==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2024-04-25T02:28:14Z"
mac: ENC[AES256_GCM,data:a3iSR/pqaHbTybjkr5Y7wZdvCAmL5/QirZe8IYg2yx5mcncXtKbCyR/eRs8L2w3SS9sPFTMG02y2+rz6aZV16PPmJlthiqGBpLPu8+GLWKFin0DBiRVYCuQUlaZK8zsHvV0M+xUbNL5vhRRgy/UHROOuwvRI/Mjma9tN9lwANG8=,iv:efmLgPZLafYhwJtmbkIZ6R/a4rFGEOyduY8mhgYMTTc=,tag:FehirgjHZYwB9FjERHGobg==,type:str]
pgp: [] pgp: []
unencrypted_suffix: _unencrypted unencrypted_suffix: _unencrypted
version: 3.8.1 version: 3.8.1

View file

@ -8,16 +8,16 @@ in
 {
   sops.secrets = {
-    truxnell-password = {
+    jahanson-password = {
       sopsFile = ./secrets.sops.yaml;
       neededForUsers = true;
     };
   };
-  users.users.truxnell = {
+  users.users.jahanson = {
     isNormalUser = true;
     shell = pkgs.fish;
-    hashedPasswordFile = config.sops.secrets.truxnell-password.path;
+    hashedPasswordFile = config.sops.secrets.jahanson-password.path;
     extraGroups =
       [
         "wheel"
@ -32,7 +32,11 @@ in
       ];
     openssh.authorizedKeys.keys = [
-      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMZS9J1ydflZ4iJdJgO8+vnN8nNSlEwyn9tbWU9OcysW truxnell@home"
+      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBsUe5YF5z8vGcEYtQX7AAiw2rJygGf2l7xxr8nZZa7w"
+      "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBH3VVFenoJfnu+IFUlD79uxl7L8SFoRup33J2HGny4WEdRgGR41s0MpFKDBmxXZHy4O9Nh8NMMnpy5VhUefnIKI="
+      "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPHFQ3hDjjrKsecn3jmSWYlRXy4IJCrepgU1HaIV5VcmB3mUFmIZ/pCZnPmIG/Gbuqf1PP2FQDmHMX5t0hTYG9A="
+      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIETR70eQJiXaJuB+qpI1z+jFOPbEZoQNRcq4VXkojWfU"
+      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIATyScd8ZRhV7uZmrQNSAbRTs9N/Dbx+Y8tGEDny30sA"
     ]; # TODO do i move to ingest github creds?
     # packages = [ pkgs.home-manager ];

View file

@ -1,36 +0,0 @@
{ config, lib, pkgs, imports, boot, ... }:
with lib;
{
# Enable module for NVIDIA graphics
mySystem.hardware.nvidia.enable = true;
mySystem.system.packages = with pkgs; [
ntfs3g
];
boot = {
initrd.availableKernelModules = [ "nvme" "xhci_pci" "ahci" "usbhid" "usb_storage" "sd_mod" ];
kernelModules = [ "kvm-amd" ];
extraModulePackages = [ ];
# for managing/mounting ntfs
supportedFilesystems = [ "ntfs" ];
loader = {
systemd-boot.enable = true;
efi.canTouchEfiVariables = true;
# why not ensure we can memtest workstatons easily?
grub.memtest86.enable = true;
};
};
# set xserver videodrivers for NVIDIA 4080 gpu
services.xserver.videoDrivers = [ "nvidia" ];
}

View file

@ -1,31 +0,0 @@
{ config, lib, pkgs, imports, boot, ... }:
with lib;
{
boot = {
initrd.availableKernelModules = [ "xhci_pci" "usb_storage" ];
initrd.kernelModules = [ ];
kernelModules = [ ];
extraModulePackages = [ ];
loader = {
# Use the extlinux boot loader. (NixOS wants to enable GRUB by default)
grub.enable = false;
# Enables the generation of /boot/extlinux/extlinux.conf
generic-extlinux-compatible.enable = true;
timeout = 2;
};
};
nixpkgs.hostPlatform.system = "aarch64-linux";
console.enable = false;
mySystem.system.packages = with pkgs; [
libraspberrypi
raspberrypi-eeprom
];
}

View file

@ -1,32 +0,0 @@
{ config, lib, pkgs, imports, boot, ... }:
with lib;
{
boot = {
initrd.availableKernelModules = [ "nvme" "xhci_pci" "usbhid" "usb_storage" "sd_mod" ];
initrd.kernelModules = [ "amdgpu" ];
kernelModules = [ "kvm-amd" ];
extraModulePackages = [ ];
# for managing/mounting ntfs
supportedFilesystems = [ "ntfs" ];
loader = {
systemd-boot.enable = true;
efi.canTouchEfiVariables = true;
# why not ensure we can memtest workstatons easily?
grub.memtest86.enable = true;
};
};
# set xserver videodrivers for amp gpu
services.xserver.videoDrivers = [ "amdgpu" ];
# As this is a laptop explicitly enable nmcli (likely enabled by GUI anyway)
networking.networkmanager.enable = true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
}

View file

@ -0,0 +1,23 @@
{ config, lib, pkgs, imports, boot, ... }:
with lib;
{
boot = {
initrd = {
availableKernelModules = [ "xhci_pci" "nvme" "usb_storage" "sd_mod" ];
kernelModules = [ ];
};
kernelModules = [ "kvm-intel" ];
extraModulePackages = [ ];
};
networking = {
useDHCP = lib.mkDefault true;
hostId = "ad4380db";
};
# networking.interfaces.enp0s31f6.useDHCP = lib.mkDefault true;
# networking.interfaces.wlp4s0.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
}

View file

@ -21,10 +21,10 @@ with config;
     nil
     nixpkgs-fmt
     statix
-    nvd
+    # nvd
     gh
-    bind # for dns utils like named-checkconf
+    # bind # for dns utils like named-checkconf
     inputs.nix-inspect.packages.${pkgs.system}.default
   ];

View file

@ -1,6 +1,6 @@
 { config, lib, pkgs, imports, boot, self, inputs, ... }:
 # Role for workstations
-# Covers desktops/laptops, expected to have a GUI and do worloads
+# Covers desktops/laptops, expected to have a GUI and do workloads
 # Will have home-manager installs
 with config;
@ -61,17 +61,14 @@ with config;
   };
   environment.systemPackages = with pkgs; [
     # Sensors etc
     lm_sensors
     cpufrequtils
     cpupower-gui
   ];
   i18n = {
-    defaultLocale = lib.mkDefault "en_AU.UTF-8";
+    defaultLocale = lib.mkDefault "en_US.UTF-8";
   };
   programs.mtr.enable = true;

View file

@ -47,6 +47,5 @@ pkgs.mkShell {
     gitleaks
     mkdocs
     mqttui
   ];
 }