Compare commits

...

114 Commits

Author SHA1 Message Date
230d830612 enable nginx dev 2025-04-05 02:38:17 -04:00
f843c7eaa3 certbot for dev 2025-04-05 02:12:24 -04:00
076757e1f8 troubleshoot build 2025-04-05 01:08:32 -04:00
0b1c18a3a0 troubleshoot build 2025-04-05 00:15:34 -04:00
f47ad625da troubleshoot build 2025-04-05 00:13:16 -04:00
2b56d30666 aur builder become changes 2025-04-05 00:07:14 -04:00
c12dfc18ce no become for delegated task 2025-04-04 23:43:36 -04:00
814a642cc0 delegate secret lookup 2025-04-04 23:42:51 -04:00
8cad395e34 aur repo host secret lookup 2025-04-04 23:41:35 -04:00
eb360951a1 aur repo host vars 2025-04-04 23:24:58 -04:00
9601aa4937 hashi vault lookups 2025-04-04 23:08:01 -04:00
81319370b1 wip 2025-03-13 08:59:00 -04:00
76f6f78112 add backup, readarr 2025-01-20 16:31:17 -05:00
9b0edab903 add jellyfin to media compose 2025-01-14 14:18:24 -05:00
c377f1a7d1 add prowlarr and sonarr 2025-01-12 14:18:23 -05:00
7f5a35d936 add radarr service 2025-01-12 00:03:26 -05:00
7b9f0e0ca5 add gallery cname 2025-01-04 01:20:36 -05:00
a490e4ad92 unifi nearly done 2024-12-30 22:35:25 -05:00
6734d78bef adjust nginx cert 2024-12-30 20:09:09 -05:00
6722ab4138 Syncthing working on truenas 2024-12-30 20:08:30 -05:00
e76d1a1f88 more apps 2024-12-30 01:18:40 -05:00
c090cc9cbe add minio to truenas 2024-12-25 01:04:30 -05:00
8ab3783a2b Add sops config 2024-12-23 23:52:05 -05:00
cdf20ba9ef Add sops kms keys 2024-12-23 18:35:04 -05:00
f0b3388e8d aws kms 2024-12-21 01:26:55 -05:00
27e2fc6058 truenas 2024-12-21 00:17:04 -05:00
b622bb29df matrix dns 2024-12-20 00:58:24 -05:00
bde6a5f208 dns and talos 2024-12-18 15:42:45 -05:00
85d6fe5056 Merge remote-tracking branch 'refs/remotes/origin/main' 2024-12-09 23:36:16 -05:00
098f63fa5b talos 2024-12-09 23:34:13 -05:00
43fc89a966 mayastor 2024-12-09 02:08:04 -05:00
7aa2992228 talos wip 2024-12-08 01:36:48 -05:00
1775e24a45 add more dns records 2024-11-29 01:35:04 -05:00
d6983b4744 add syanpse mgmt playbooks 2024-10-30 01:52:00 -04:00
29cb12a2d1 tf dns wip 2024-10-26 16:48:20 +13:00
9464737fe9 cred test 2024-10-24 18:43:21 +13:00
14fc10a10a Cloudflare DNS via TF 2024-10-24 18:39:30 +13:00
fe38bebbd5 cloudflare dns 2024-10-23 00:22:32 -04:00
bad78681c6 Merge remote-tracking branch 'refs/remotes/origin/main' 2024-05-16 21:52:18 +12:00
c8ab4633ca change qbittorrent data dir 2024-05-16 21:50:48 +12:00
627343b50f add nuc playbook 2024-05-16 21:35:45 +12:00
2d31a5524f resolve merge 2024-05-12 21:58:05 +12:00
2981bdb22f Merge remote-tracking branch 'refs/remotes/origin/main' 2024-05-12 21:57:52 +12:00
84930795b6 temp comment out roles 2024-05-12 21:55:37 +12:00
f068c9710b torrent working 2024-04-24 21:40:00 +12:00
afc0b57cfb add arr services
add music and subs nginx proxies
2024-04-23 18:03:25 +12:00
7df41b5c8d aur repo host complete 2024-04-23 15:47:14 +12:00
2cc78654fe custom remote repo working 2024-04-23 02:14:39 +12:00
a6eb508cf0 aur repo wip 2024-04-23 00:49:49 +12:00
85330c8645 aurutils install working 2024-04-22 21:46:14 +12:00
c05f3a845b certbot and nginx working 2024-04-22 01:37:46 +12:00
3d9241b475 kodi media services basic setup 2024-04-21 01:04:17 +12:00
cb4abe5722 Merge remote-tracking branch 'refs/remotes/origin/main' 2024-04-20 22:24:47 +12:00
d0c1bb8717 start to add kodi ansible 2024-04-20 22:24:20 +12:00
5b83607fe0 attempted k8s resources as tf files, not worth the trouble 2024-04-20 02:10:01 +12:00
43dbb951fe add vultr block storage mount 2024-04-19 17:31:44 +12:00
f68c6b227a add vultr k8s 2024-04-18 13:28:39 +12:00
8d049f3056 opnsense b created 2024-04-17 04:06:14 +12:00
a0997ee8ec add floating ip assignments 2024-04-17 03:36:58 +12:00
b8c2dae1fa split resources into multiple tf files 2024-04-17 02:56:53 +12:00
c2f7590b44 Add hetzner terraform project 2024-04-17 02:37:58 +12:00
d8db6ba755 refine remote hwdb file 2024-04-13 18:21:03 +12:00
d6882bd306 update hwdb readme 2024-04-13 17:25:42 +12:00
f78dd67cd5 add conf dir and hwdb file 2024-04-13 16:49:48 +12:00
2aa50b4015 add sftp creds template 2024-04-05 15:09:50 +13:00
4dfe68a54b add mount-sftp script 2024-04-05 15:07:25 +13:00
bdf04302aa backup scripts 2024-02-10 23:12:35 +13:00
39cb2b0007 begin to add node backup 2024-02-03 01:24:47 +13:00
f10ce63569 adjust ups mon alerts 2024-01-12 12:54:24 +13:00
52c455d490 merged arch-install contents 2024-01-12 00:14:09 +13:00
c6755e8d97 nut and acme working 2024-01-11 18:15:16 +13:00
ba7cda511e organize playbooks into subdirs 2024-01-11 13:03:08 +13:00
7eddbba696 Add k8s shutdown/openup scripts
Add nut ansible roles
Add acme certificate ansible role
2024-01-11 01:11:16 +13:00
92df824252 nut wip 2024-01-10 02:05:03 +13:00
9e07845208 unifi working with db init 2024-01-05 02:41:50 +13:00
8d71ff222a add matrix reservation 2023-12-25 01:55:30 +13:00
117b36842c add transmission static ip
add internal v4 ingress service
2023-12-17 19:12:27 +13:00
2b2486f2fb add mail reservations 2023-12-08 15:29:55 +13:00
2e38d3d07f reservations, ingress service 2023-12-07 02:58:14 +13:00
af13cfbb41 new cluster wip 2023-11-29 02:02:52 +13:00
cd19a7687c house cluster wip 2023-11-27 03:31:14 +13:00
0923148d8e add packages 2023-11-22 00:10:42 +13:00
dda7bc7a10 add additional sc definition 2023-10-03 23:04:20 +13:00
9c477f2094 archinstall 2023-09-26 02:52:28 +13:00
e1349b2b90 add packages 2023-08-15 03:20:21 +10:00
cffbcaea8c sshd setup 2023-08-14 22:27:29 +10:00
e1fb6b94ee fix systemd templates 2023-08-13 14:03:03 +10:00
a2ec933cf8 refine systemd_networkd 2023-08-12 20:19:07 +10:00
24f3a7c485 install notes 2023-08-12 01:37:30 +10:00
f00093ef8e set mac address values 2023-08-11 14:52:41 +10:00
32ba17ea33 start building pacstrap list 2023-08-11 11:49:33 +10:00
8f22f5429a hypervisor refinement 2023-08-11 10:52:27 +10:00
2769a3430b hypervisor wip 2023-08-11 01:11:37 +10:00
84a20416e3 network and serial complete 2023-08-10 23:32:17 +10:00
621d9595f8 systemd-networkd templates 2023-08-10 21:29:39 +10:00
5f1e304301 jinja whitespace 2023-08-09 22:55:51 +10:00
df3587032d new hypervisor wip 2023-08-09 22:37:28 +10:00
5007f0b68e new hypervisor wip 2023-08-09 18:38:34 +10:00
590a50cd1a new hypervisor role 2023-08-09 15:49:23 +10:00
1df2adffdb fix user and from in notes 2023-04-18 14:13:00 +02:00
ddfccdfe96 add zfs event daemon notes 2023-04-17 22:06:00 +02:00
0c091aba7e add snapshot class 2023-03-23 23:28:02 +01:00
ef418f2839 k8s intel gpu files 2023-03-22 01:32:01 +01:00
ff0d769aa5 added calico ip reservations 2023-01-14 17:51:22 +10:00
e4c5846353 add hdd-zfs sc 2023-01-13 02:11:03 +10:00
afedcf16d5 merge fix 2023-01-12 15:55:53 +10:00
ff8e0581ec notes 2023-01-12 15:53:36 +10:00
ce3af85e73 changed service order 2023-01-12 13:06:45 +10:00
3fa49df87f added todo notes, enabled zfs-import.target 2023-01-12 12:49:42 +10:00
11115d515e move zfs_pools var 2023-01-11 15:19:15 +10:00
657ae3fa91 open ebs zfs 2023-01-11 10:59:26 +10:00
14a126afa0 zfs node work in prep for open-ebs zfs 2023-01-10 02:19:24 +10:00
5eb52e7adb resolve merge conflict 2023-01-05 17:46:06 +10:00
a86cb26010 cert issuer and ingress controller 2023-01-05 17:37:36 +10:00
292 changed files with 48056 additions and 286 deletions

49
.gitignore vendored
View File

@ -1,8 +1,41 @@
ansible/vault_password
ansible/inventory/host_vars/*/vault.yaml
ansible/roles/k8s_network/files/calico
ansible/roles/k8s_storage_rook/files/rook
ansible/roles/k8s_control/files/core-dns
ansible/roles/k8s_storage_ebs_manifests/files/ebs
.vscode
*/vault.yaml
# Local .terraform directories
**/.terraform/*
**/.terraform
.ansible/
.vscode/
ansible/collections/**
# registry password file
distribution/htpasswd
# .tfstate files
*.tfstate
*.tfstate.*
# Terraform lock file
**/.terraform.lock.hcl
# Terraform secrets file
**/secrets.auto.tfvars
# Crash log files
crash.log
crash.*.log
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
**/vault_password
**/vault.yaml
**/*secrets.yaml
**/*secret.yaml
.vscode/*

10
.sops.yaml Normal file
View File

@ -0,0 +1,10 @@
creation_rules:
- path_regex: (secret|secrets)\.(yml|yaml)$
unencrypted_regex: ^(apiVersion|kind|name|namespace|type)$
kms: 'arn:aws:kms:us-east-1:140023401248:key/c51c2cc5-4e8e-484d-b2f0-4d4ec2039938'
# kms:
# - arn: 'arn:aws:kms:us-east-1:140023401248:key/c51c2cc5-4e8e-484d-b2f0-4d4ec2039938'
# aws_profile: home
age: 'age1k5y5gj5fzpwtjgzqd4n93h4h9ek9jz8898rva5zsgj7zjet97ytq4dtzjs'
hc_vault_transit_uri: 'https://vault.balsillie.net:443/v1/sops/keys/krds'
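As an illustration of the creation rule above — the file name and values here are hypothetical, not from this repo — a manifest matching path_regex would keep only the keys matched by unencrypted_regex readable after encryption:
# Hypothetical postgres-secret.yaml before running `sops --encrypt --in-place`.
# After encryption, apiVersion/kind/name/namespace/type stay plain text;
# every other value is replaced with ENC[AES256_GCM,...] ciphertext.
apiVersion: v1
kind: Secret
metadata:
  name: postgres-credentials
  namespace: databases
type: Opaque
stringData:
  password: example-password  # becomes ENC[...] after encryption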

27
.vscode/settings.json vendored Normal file
View File

@ -0,0 +1,27 @@
{
"yaml.schemas": {
"https://raw.githubusercontent.com/ansible/schemas/main/f/ansible.json": "file:///home/michael/Code/home/IaC/ansible/roles/vm_deploy/tasks/deploy.yaml",
"kubernetes://schema/storage.k8s.io/v1@storageclass": "file:///home/michael/Code/home/IaC/ansible/roles/k8s_storage_deploy/files/config/blockpool_ssd_replica.yaml"
},
"vs-kubernetes": {
"vs-kubernetes.namespace": "",
"disable-linters": ["resource-limits"],
"vs-kubernetes.kubectl-path": "",
"vs-kubernetes.helm-path": "",
"vs-kubernetes.minikube-path": "",
"vs-kubernetes.kubectlVersioning": "user-provided",
"vs-kubernetes.outputFormat": "yaml",
"vs-kubernetes.kubeconfig": "",
"vs-kubernetes.knownKubeconfigs": [],
"vs-kubernetes.autoCleanupOnDebugTerminate": false,
"vs-kubernetes.nodejs-autodetect-remote-root": true,
"vs-kubernetes.nodejs-remote-root": "",
"vs-kubernetes.nodejs-debug-port": 9229,
"vs-kubernetes.dotnet-vsdbg-path": "~/vsdbg/vsdbg",
"vs-kubernetes.local-tunnel-debug-provider": "",
"checkForMinikubeUpgrade": true,
"imageBuildTool": "Docker"
},
"ansible.python.interpreterPath": "/usr/bin/python"
}

View File

@ -1,32 +0,0 @@
The general idea is to bootstrap a bare metal host into a functioning kubernetes cluster.
These playbooks/roles in their current state will create all kubernetes nodes on a single host. This is for lab/testing/learning type scenarios.
With some adjustments, though, this could be used to provision multiple hypervisors, ideally with each running 2 VMs: a control-plane node and a worker node. If you've got the hardware or the cloud budget for that, then lucky you! :smile:
An outline of the steps, which are roughly broken up by playbook:
- [ ] Install Arch linux on the bare metal
- [x] Configure the bare metal Arch host as a hypervisor (qemu/kvm) - [Link](https://code.balsillie.net/michael/IaC/src/branch/master/ansible/playbooks/02_hypervisor.yaml)
- [ ] Install Arch linux into a VM on the hypervisor then convert it to a template.
- [x] Deploy 3 (or more) VMs from the template (uses backing store qcow images) - [Link](https://code.balsillie.net/michael/IaC/src/branch/master/ansible/playbooks/04_vm_deploy.yaml)
- [x] Create a kubernetes cluster from those 3 VMs - [Link](https://code.balsillie.net/michael/IaC/src/branch/master/ansible/playbooks/05_k8s_deploy.yaml)
- [x] Install calico networking into the cluster.
- [x] Remove the taint from control plane nodes. <-- Optional
- [x] Configure cluster storage using rook. <-- This didn't work due to hardware limitations (3 x VHDs on a single spinning HDD)
- [ ] Possible storage setup using [openEBS](https://openebs.io/docs/#quickstart-guides) zfs or device local PV
- [ ] Example PVC backups using one of [stash](https://stash.run/)/[velero](https://velero.io/)/[gemini](https://github.com/FairwindsOps/gemini) or other
- [ ] Deploy workloads into the cluster
What you don't see here is the setup/configuration of an Opnsense VM to act as a firewall; that is too far from being practical to automate.
Opnsense provides firewall, routing (including BGP peering to calico nodes), DNS, and acts as an HA proxy load balancer to the kubernetes nodes. I'll add [notes](https://code.balsillie.net/michael/IaC/src/branch/master/notes/opnsense.md) at some point on how to configure opnsense, but it's not something that can be done sensibly with ansible.
What you'll also need:
- Clone the git repo.
- Create a vault_password file (chmod 600) under the ansible directory.
Ensure .gitignore is correctly set up so that vault_password doesn't get committed to source control.
- Create an ansible vault in your inventory directory tree to hold sensitive variables such as 'ansible_become_pass'. Again, .gitignore should ensure this vault file remains only on your workstation.
Check the defaults files for roles carefully. Variables are a scattered mess right now and need to be properly amalgamated.
Ansible roles were written to work on an Arch linux workstation, some tasks are intended to install packages to localhost (such as kubectl) and use pacman modules to do so. If you encounter problems with these steps, change those tasks to use your relevant package manager module, eg apt or yum.
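To make those last steps concrete — paths and values below are hypothetical, not from this repo — the workstation-only secrets could look like:
# Plain passphrase file, chmod 600, matched by .gitignore:
#   ansible/vault_password
# Encrypted per-host vault created with:
#   ansible-vault create ansible/inventory/host_vars/example-host/vault.yaml
# whose decrypted contents hold the sensitive variables, e.g.:
---
ansible_become_pass: "example-sudo-password"  # hypothetical value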

View File

@ -1,11 +1,10 @@
[defaults]
inventory = ./inventory/inventory.yaml
inventory = ./inventory/
jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n
library = modules
module_utils = module_utils
display_skipped_hosts = false
interpreter_python = auto_silent
collections_paths = ./collections
collections_path = ./collections
roles_path = ./roles
vault_password_file = ./vault_password

View File

@ -1,8 +0,0 @@
---
# connection
ansible_connection: ssh
ansible_become_method: sudo
ansible_become_user: root
ansible_port: 22

View File

@ -0,0 +1,6 @@
acme_certificate_csr_organization: Balsillie Family
acme_certificate_csr_locality: Queenstown
acme_certificate_csr_state: Otago
acme_certificate_csr_country: NZ
acme_certificate_csr_email: admin@balsillie.net
acme_certificate_directory: https://acme-v02.api.letsencrypt.org/directory

View File

@ -0,0 +1,3 @@
nut_client_admin_username: nut-admin
nut_client_primary_username: nut-primary
nut_client_secondary_username: nut-secondary

View File

@ -0,0 +1,3 @@
rfc2136_key_algorithm: hmac-sha256
rfc2136_key_name: rndc-house
rfc2136_server_address: 10.208.240.1

View File

@ -0,0 +1,23 @@
# code: language=ansible
aur_repo_packager_name: "Balsillie Family"
aur_repo_packager_email: "admin@balsillie.net"
aur_repo_dir: "/aur"
aur_repo_build_account: "aur-builder"
aur_repo_host_packages:
- pikaur
- jellyfin-media-player # If you get errors relating to icu, check 'icu' package version and perform a system update
- git-credential-keepassxc
- docker-credential-secretservice-bin
- ventoy-bin
- debtap
- aurutils
- ipmiview
- powershell-bin
- visual-studio-code-bin
- ttf-ms-fonts
- brave-bin
- teamviewer
- vmware-horizon-client

View File

@ -0,0 +1,25 @@
---
zfs_pools:
- name: ssd
ashift: 16
recordsize: 64k
type: ""
disks: /dev/vde
compression: "off"
datasets:
- name: ssd/data
encrypt: false
- name: ssd/data/open-ebs
encrypt: false
- name: hdd
ashift: 12
recordsize: 64k
type: mirror
disks: /dev/sda /dev/sdb
compression: "off"
datasets:
- name: hdd/data
encrypt: true
- name: hdd/data/open-ebs
encrypt: false
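The role that consumes zfs_pools is not part of this hunk; a minimal sketch of applying the dataset list with community.general.zfs (the task name and loop are assumptions, and pool creation plus encryption handling are omitted) could be:
# Sketch only: assumes the pools themselves already exist.
- name: Create ZFS datasets
  community.general.zfs:
    name: "{{ item.1.name }}"
    state: present
  loop: "{{ zfs_pools | subelements('datasets') }}"
  loop_control:
    label: "{{ item.1.name }}"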

View File

@ -0,0 +1,17 @@
# code: language=ansible
# Connection (SSH)
ansible_connection: ansible.builtin.ssh
ansible_ssh_host: dev.balsillie.house
ansible_ssh_port: 22
ansible_ssh_host_key_checking: false
ansible_ssh_pipelining: false
ansible_ssh_user: ladmin
ansible_ssh_private_key_file: ~/.ssh/conf.d/home/dev.balsillie.house.key
# Become (sudo)
ansible_become_method: ansible.builtin.sudo
ansible_become_user: root
ansible_become_password: "{{ lookup('community.hashi_vault.vault_kv1_get', 'ansible/host_vars/dev.balsillie.house/ansible_connection').secret.ansible_become_password }}" # noqa yaml[line-length]
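The lookup above returns a dict under `secret`; the same KV v1 read can also be done as a module call, which makes that structure easy to inspect. A sketch, assuming VAULT_ADDR/VAULT_TOKEN are set in the environment:
- name: Read the connection secret from Vault (sketch)
  community.hashi_vault.vault_kv1_get:
    path: ansible/host_vars/dev.balsillie.house/ansible_connection
  delegate_to: localhost
  register: conn_secret

- name: List the fields the secret exposes
  ansible.builtin.debug:
    msg: "{{ conn_secret.secret.keys() | list }}"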

View File

@ -0,0 +1,17 @@
# code: language=ansible
certbot_rfc2136_server: '10.208.240.1'
certbot_rfc2136_key_name: 'rndc-house'
certbot_rfc2136_key_algorithm: 'hmac-sha256'
certbot_cloudflare_api_token: "{{ lookup('community.hashi_vault.vault_kv1_get', 'cloudflare/balsillie.house/dns').secret.api_token }}" # noqa yaml[line-length]
certbot_dns_propagation_seconds: 15
certbot_webserver_type: 'nginx' # 'nginx' or 'apache'
certbot_dns_plugin: 'cloudflare'
certbot_email: "certbot.dev@balsillie.email"
certbot_acme_server: "acme-v02.api.letsencrypt.org"
certbot_domains:
- repo.balsillie.house

View File

@ -0,0 +1,9 @@
# code: language=ansible
nginx_sites:
- name: repo.balsillie.house
type: site
autoindex: 'on'
root: /var/www/aur
nginx_user: "http"

View File

@ -0,0 +1 @@
acme_certificate_account_email: acme.hv00@balsillie.email

View File

@ -0,0 +1,9 @@
ansible_connection: ssh
ansible_host: hv00.balsillie.house
ansible_fqdn: hv00.balsillie.house
ansible_remote_addr: 10.192.110.100
ansible_port: 22
ansible_user: ladmin
# ansible_become_user: root
ansible_become_method: ansible.builtin.sudo
static_fqdn: hv00.balsillie.house

View File

@ -0,0 +1,8 @@
certbot_rfc2136_server: '10.208.240.1'
certbot_rfc2136_key_name: 'rndc-house'
certbot_rfc2136_key_algorithm: 'hmac-sha256'
certbot_webserver_type: 'nginx' # 'nginx' or 'apache'
certbot_dns_plugin: 'rfc2136'
certbot_email: "certbot.hv00@balsillie.email"
certbot_acme_server: "acme-v02.api.letsencrypt.org"

View File

@ -0,0 +1,5 @@
hypervisor:
storage: dir
qemu_bridges:
- br0

View File

@ -0,0 +1,17 @@
nginx_sites:
- name: repo.balsillie.house
type: site
autoindex: 'on'
root: /var/www/aur
- name: unifi.balsillie.house
type: proxy
upstream:
host: 127.0.0.1
port: 8989
- name: hv00.balsillie.house
type: proxy
upstream:
host: 127.0.0.1
port: 9443
nginx_user: "http"

View File

@ -0,0 +1,38 @@
nut_client_local_server: true
nut_client_shutdown_cmd: /usr/bin/poweroff
nut_client_shutdown_exit: "true"
nut_client_hostsync: 240
nut_client_notify_cmd: /scripts/notify.sh
nut_client_min_supplies: 1
nut_client_ups_devices:
- name: ups0
host: hv00.balsillie.house
type: primary
port: 3493
powervalue: 1
nut_client_notify_messages:
- name: SHUTDOWN
message: "UPSMON shutdown triggered for HV00."
- name: LOWBATT
message: "UPS has reached low battery condition."
nut_client_notify_flags:
- name: LOWBATT
flags: SYSLOG+WALL+EXEC
- name: FSD
flags: SYSLOG+WALL+EXEC
- name: COMMOK
flags: SYSLOG+WALL+EXEC
- name: COMMBAD
flags: SYSLOG+WALL+EXEC
- name: SHUTDOWN
flags: SYSLOG+WALL+EXEC
- name: REPLBATT
flags: SYSLOG+WALL+EXEC
- name: NOCOMM
flags: SYSLOG+WALL+EXEC
- name: NOPARENT
flags: SYSLOG+WALL+EXEC
- name: BYPASS
flags: SYSLOG+WALL+EXEC
- name: NOTBYPASS
flags: SYSLOG+WALL+EXEC

View File

@ -0,0 +1,7 @@
nut_server_listen_address: 10.192.110.100
nut_server_listen_port: 3493
nut_server_certificate_file: /etc/ssl/private/hv00.balsillie.house.plain.combined.pem
nut_server_ups_devices:
- name: ups0
driver: usbhid-ups
port: auto

View File

@ -0,0 +1 @@
console_device: ttyS0

View File

@ -0,0 +1,16 @@
sshd:
config_path: home
auth:
pubkey: 'yes'
password: 'no'
empty: 'no'
listen:
port: '22'
family: inet
ipv4:
- '192.168.1.250'
- '10.192.110.100'
forwarding:
agent: 'no'
x11: 'no'
nickname: vault

View File

@ -0,0 +1,82 @@
systemd_networkd_configs:
- name: 00-eth0.link
src: ethernet.link.j2
mac_address: 64-62-66-21-e9-c3
- name: 00-eth1.link
src: ethernet.link.j2
mac_address: 64-62-66-21-e9-c4
- name: 00-eth2.link
src: ethernet.link.j2
mac_address: 64-62-66-21-e9-c5
- name: 00-wan.link
src: ethernet.link.j2
mac_address: 64-62-66-21-e9-c6
- name: 01-eth0.network
src: ethernet.network.j2
mac_address: 64-62-66-21-e9-c3
arp: false
lldp: true
dhcp: false
bridge:
name: br0
vlans:
- 110
- 210
pvid: 210
- name: 01-eth1.network
src: ethernet.network.j2
mac_address: 64-62-66-21-e9-c4
arp: false
lldp: true
dhcp: false
bridge:
name: br0
vlans:
- 210
pvid: 210
- name: 01-eth2.network
src: ethernet.network.j2
mac_address: 64-62-66-21-e9-c5
arp: false
lldp: true
dhcp: false
bridge:
name: br0
vlans:
- 30
- 210
- 220
- 230
- name: 01-wan.network
src: ethernet.network.j2
mac_address: 64-62-66-21-e9-c6
arp: true
lldp: false
dhcp: true
- name: 10-br0.netdev
src: bridge.netdev.j2
vlan_filtering: true
stp: true
- name: 11-br0.network
src: bridge.network.j2
arp: false
dhcp: false
lldp: true
vlans:
- 110
- name: 20-vlan110.netdev
src: vlan.netdev.j2
vlan_id: 110
- name: 21-vlan110.network
src: vlan.network.j2
arp: true
lldp: true
dhcp: false
address:
ipv4:
- 10.192.110.100/24
gateway:
ipv4: 10.192.110.254
nameserver:
ipv4:
- 10.192.110.254
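The template loop that renders this list sits outside this diff; a plausible sketch (the destination directory and handler name are assumptions) is a single loop keyed on the `name` and `src` fields above:
- name: Render systemd-networkd config files (sketch)
  ansible.builtin.template:
    src: "{{ item.src }}"
    dest: "/etc/systemd/network/{{ item.name }}"
    owner: root
    group: root
    mode: "0644"
  loop: "{{ systemd_networkd_configs }}"
  loop_control:
    label: "{{ item.name }}"
  notify: Restart systemd-networkd  # assumed handler name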

View File

@ -1,2 +0,0 @@
---
ansible_host: hv00.balsillie.net

View File

@ -0,0 +1,9 @@
ansible_connection: ssh
ansible_host: kodi00.balsillie.house
ansible_fqdn: kodi00.balsillie.house
ansible_remote_addr: 10.192.210.169
ansible_port: 22
ansible_user: ladmin
ansible_become_user: root
ansible_become_method: sudo
static_fqdn: kodi00.balsillie.house

View File

@ -0,0 +1,8 @@
certbot_rfc2136_server: '10.208.240.1'
certbot_rfc2136_key_name: 'rndc-house'
certbot_rfc2136_key_algorithm: 'hmac-sha256'
certbot_webserver_type: 'nginx' # 'nginx' or 'apache'
certbot_dns_plugin: 'rfc2136'
certbot_email: "certbot.kodi00@balsillie.email"
certbot_acme_server: "acme-v02.api.letsencrypt.org"

View File

@ -0,0 +1,81 @@
---
docker_users:
- ladmin
docker_networks:
- name: torrent
driver: bridge
driver_options:
# com.docker.network.bridge.name: docker-torrent
com.docker.network.bridge.enable_ip_masquerade: true
com.docker.network.bridge.enable_icc: true
# com.docker.network.container_iface_prefix: container-torrent
attachable: true
enable_ipv6: false
internal: false
ipam:
- subnet: 192.168.99.0/24
gateway: 192.168.99.254
docker_volumes:
- name: torrent-data
driver: local
driver_options:
type: none
device: /downloads
o: bind
- name: torrent-config
driver: local
driver_options:
type: none
device: /etc/qbittorrent
o: bind
docker_images:
- name: hotio/qbittorrent
tag: release
docker_containers:
- name: qbittorrent
image: hotio/qbittorrent:release
auto_remove: false
capabilities:
- NET_ADMIN
domainname: balsillie.house
env:
PUID: '968'
PGID: '968'
UMASK: '002'
TZ: Pacific/Auckland
WEBUI_PORTS: 8080/tcp
VPN_ENABLED: 'true'
VPN_CONF: 'wg0'
VPN_PROVIDER: 'proton'
VPN_LAN_NETWORK: ''
VPN_LAN_LEAK_ENABLED: 'false'
VPN_EXPOSE_PORTS_ON_LAN: ''
VPN_AUTO_PORT_FORWARD: 'true'
VPN_AUTO_PORT_FORWARD_TO_PORTS: ''
VPN_KEEP_LOCAL_DNS: 'false'
VPN_FIREWALL_TYPE: 'nftables'
VPN_HEALTHCHECK_ENABLED: 'true'
PRIVOXY_ENABLED: 'false'
UNBOUND_ENABLED: 'false'
etc_hosts:
tv.balsillie.house: 192.168.99.254
movies.balsillie.house: 192.168.99.254
hostname: torrent
networks:
- name: torrent
aliases:
- torrent
- qbittorrent
ipv4_address: 192.168.99.1
restart_policy: 'unless-stopped'
sysctls:
net.ipv4.conf.all.src_valid_mark: 1
net.ipv6.conf.all.disable_ipv6: 1
volumes:
- torrent-config:/config:rw
- torrent-data:/downloads:rw

View File

@ -0,0 +1,43 @@
nginx_sites:
- name: tv.balsillie.house
type: proxy
upstream:
host: 127.0.0.1
port: 8989
- name: movies.balsillie.house
type: proxy
upstream:
host: 127.0.0.1
port: 7878
- name: music.balsillie.house
type: proxy
upstream:
host: 127.0.0.1
port: 8686
- name: subs.balsillie.house
type: proxy
upstream:
host: 127.0.0.1
port: 6767
- name: index.balsillie.house
type: proxy
upstream:
host: 127.0.0.1
port: 9696
- name: torrent.balsillie.house
type: proxy
upstream:
host: 192.168.99.1
port: 8080
- name: jellyfin.balsillie.house
type: proxy
upstream:
host: 127.0.0.1
port: 8096
- name: kodi.balsillie.house
type: proxy
upstream:
host: 127.0.0.1
port: 8082
nginx_user: "http"

View File

@ -0,0 +1,3 @@
---
sonarr_var: "sonarr_value"

View File

@ -0,0 +1,4 @@
sshd:
auth:
password: 'no'
pubkey: 'yes'

View File

@ -0,0 +1,7 @@
torrent_user: kodi
torrent_downloads_dir: /downloads
torrent_wireguard_address: 10.2.0.2
torrent_wireguard_dns: 10.2.0.1
torrent_wireguard_peer_endpoint: 103.75.11.18
torrent_wireguard_peer_public_key: 8Rm0uoG0H9BcSuA67/5gBv8tJgFZXNLm4sqEtkB9Nmw=

View File

@ -0,0 +1,21 @@
ufw_enabled: true
ufw_rules:
- name: "SSH from Local Subnet"
port: "22"
protocol: "tcp"
action: "allow"
source: "10.192.210.0/24"
destination: "10.192.210.169"
- name: "HTTP from Local Subnet"
port: "80"
protocol: "tcp"
action: "allow"
source: "10.192.210.0/24"
destination: "10.192.210.169"
- name: "HTTPS from Local Subnet"
port: "443"
protocol: "tcp"
action: "allow"
source: "10.192.210.0/24"
destination: "10.192.210.169"

View File

@ -0,0 +1 @@
acme_certificate_account_email: acme.kube00@balsillie.email

View File

@ -0,0 +1,9 @@
ansible_connection: ssh
ansible_host: kube00.balsillie.house
ansible_fqdn: kube00.balsillie.house
ansible_remote_addr: 10.192.110.110
ansible_port: 22
ansible_user: ladmin
ansible_become_user: root
ansible_become_method: sudo
static_fqdn: hv00.balsillie.house

View File

@ -0,0 +1,18 @@
nut_client_local_server: false
nut_client_shutdown_cmd: /scripts/shutdown.sh
nut_client_shutdown_exit: "false"
nut_client_hostsync: 15
nut_client_notify_cmd: /scripts/notify.sh
nut_client_min_supplies: 1
nut_client_ups_devices:
- name: ups0
host: hv00.balsillie.house
type: secondary
port: 3493
powervalue: 1
nut_client_notify_messages:
- name: SHUTDOWN
message: "UPSMON shutdown triggered for KUBE00."
nut_client_notify_flags:
- name: SHUTDOWN
flags: SYSLOG+WALL+EXEC

View File

@ -1,4 +0,0 @@
---
ansible_host: kube01.balsillie.net
ssh_public_key_string: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGtk+mk1+J3sZ3CA/yS7XV2wH913IdJj0gznmb/nI2nV ladmin@kube01.balsillie.net
k8s_remove_control_plane_taint: true

View File

@ -1,4 +0,0 @@
---
ansible_host: kube02.balsillie.net
ssh_public_key_string: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGOfsOJJJ34VT9mHv9moHQAQNTAok8sOr49rVTkIfDn9 ladmin@kube02.balsillie.net
k8s_remove_control_plane_taint: true

View File

@ -1,4 +0,0 @@
---
ansible_host: kube03.balsillie.net
ssh_public_key_string: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINFqYq3CcxziLbWfp/0VpO5uD/HyjiKTXD8t/dAS01Oq ladmin@kube00.balsillie.net
k8s_remove_control_plane_taint: true

View File

@ -0,0 +1 @@
synapse_host_address: matrix.balsillie.net

View File

@ -0,0 +1,4 @@
ansible_connection: local
ansible_user: ladmin
ansible_become_user: root
ansible_become_method: sudo

View File

@ -0,0 +1,11 @@
certbot_rfc2136_server: '10.208.240.1'
certbot_rfc2136_key_name: 'rndc-house'
certbot_rfc2136_key_algorithm: 'hmac-sha256'
certbot_webserver_type: 'nginx' # 'nginx' or 'apache'
certbot_dns_plugin: 'rfc2136'
certbot_email: "certbot.kodi00@balsillie.email"
certbot_acme_server: "acme-v02.api.letsencrypt.org"
certbot_domains:
- xmr.balsillie.house

View File

@ -1,64 +1,80 @@
all:
children:
aur_repo_hosts:
hosts:
dev.balsillie.house:
firewalls:
children:
opnsense:
hosts:
router.balsillie.house:
switches:
hosts:
sw00.balsillie.house:
waps:
hosts:
wap00.balsillie.house:
virtual_machines:
hosts:
fw00.balsillie.house:
win11.balsillie.house:
bare_metal:
hosts:
sw00.balsillie.house:
wap00.balsillie.house:
hv00.balsillie.house:
kube00.balsillie.house:
lat5420.balsillie.house:
lat7490.balsillie.house:
nuc.balsillie.house:
servers:
children:
hypervisors:
hosts:
server:
lab:
hv00:
vms:
hv00.balsillie.house: # vp2420
k8s:
children:
nodes:
k8s_control:
hosts:
node1:
node2:
node3:
kube00.balsillie.house:
k8s_worker:
hosts:
kube00.balsillie.net:
k8s_storage:
hosts:
kube00.balsillie.net:
nut_servers:
hosts:
router:
hetzner:
hosts:
hv00:
fw00:
k8s:
children:
k8s_control:
hv00.balsillie.house:
nut_clients:
hosts:
kube01:
# kube02:
# kube03:
k8s_taint:
hv00.balsillie.house:
kube00.balsillie.house:
nas:
hosts:
kube01:
# kube02:
# kube03:
k8s_worker:
hosts:
kube01:
# kube02:
# kube03:
k8s_storage:
hosts:
kube01:
# kube02:
# kube03:
firewalls:
children:
fortigate:
hosts:
fortigate00:
opnsense:
hosts:
fw00:
switches:
hosts:
sw00:
nas.balsillie.house:
workstations:
children:
arch:
hosts:
lat5420:
sff:
lat5420.balsillie.house:
sff.balsillie.house:
kodi00.balsillie.house:
nuc.balsillie.house:
windows:
hosts:
bridie:
lat7490.balsillie.house:
win11.balsillie.house:
laptops:
hosts:
lat5420.balsillie.house:
lat7490.balsillie.house:
desktops:
hosts:
sff.balsillie.house:
mp00.balsillie.house:
kodi00.balsillie.house:
nuc.balsillie.house:
kodi:
hosts:
kodi00.balsillie.house:

View File

@ -0,0 +1,10 @@
# code: language=ansible
- name: AUR Repo
hosts: aur_repo_hosts
become: true
gather_facts: true
roles:
# - certbot
- nginx
# - aur_repo_host

View File

@ -0,0 +1,10 @@
---
- name: Setup core home router
hosts:
- hv00.balsillie.house
gather_facts: true
become: true
roles:
# - role: aur_repo_host
- role: nginx

View File

@ -0,0 +1,15 @@
---
- name: Setup Kodi boxes
hosts:
- kodi00.balsillie.house
gather_facts: true
become: true
roles:
# - role: sshd
# - role: ufw
# - role: nginx
# - role: aur_repo_client
# - role: arr
- role: torrent
# - role: sonarr

View File

@ -0,0 +1,9 @@
---
- name: Setup NUC
hosts:
- nuc.balsillie.house
gather_facts: true
become: true
roles:
- role: certbot

View File

@ -0,0 +1,32 @@
- name: Install NUT
hosts:
- nut_servers
- nut_clients
become: true
gather_facts: true
tasks:
- name: Install NUT package on Archlinux
when: ansible_facts['os_family'] == "Archlinux"
community.general.pacman:
name: nut
state: latest
update_cache: true
- name: Setup NUT servers
gather_facts: false
hosts: nut_servers
become: true
roles:
- role: acme_certificate
acme_certificate_subject: "{{ ansible_host }}"
acme_certificate_zone: balsillie.house
acme_certificate_restart_services: ['nut-server.service']
- role: nut_server
- name: Setup NUT clients
gather_facts: false
hosts: nut_clients
become: true
roles:
- nut_client

View File

@ -0,0 +1 @@
../../roles/

View File

@ -20,19 +20,41 @@
# roles:
# - k8s_taint
# - name: configure storage operator
# - name: configure zfs storage on nodes
# hosts: k8s_storage
# gather_facts: true
# become: true
# roles:
# - zfs_repo_install
# - name: configure open-ebs storage operator
# hosts: localhost
# gather_facts: false
# become: false
# roles:
# - k8s_storage_ebs_deploy
# - k8s_storage_ebs_local_deploy
- name: configure ingress controller
- name: configure open-ebs zfs driver
hosts: localhost
gather_facts: false
become: false
roles:
- k8s_ingress_controller
- k8s_storage_ebs_zfs_deploy
# - name: configure smb storage provider
# hosts: localhost
# gather_facts: false
# become: false
# roles:
# - k8s_storage_smb_deploy
# - name: configure ingress controller
# hosts: localhost
# gather_facts: false
# become: false
# roles:
# - k8s_ingress_controller
# - name: configure cert manager
# hosts: localhost
@ -40,3 +62,5 @@
# become: false
# roles:
# - k8s_cert_manager

1
ansible/playbooks/k8s/roles Symbolic link
View File

@ -0,0 +1 @@
../../roles/

1
ansible/playbooks/roles Symbolic link
View File

@ -0,0 +1 @@
../roles

View File

@ -0,0 +1,44 @@
# code: language=ansible
- name: Clean Synapse
hosts: localhost
connection: local
become: false
gather_facts: false
tasks:
- name: Get room list
ansible.builtin.uri:
url: "https://{{ synapse_host_address }}/_synapse/admin/v1/rooms?limit=1000"
headers:
Authorization: "Bearer {{ synapse_admin_token }}"
register: room_list
- name: Set empty_rooms fact
ansible.builtin.set_fact:
empty_rooms: "{{ room_list.json.rooms | selectattr('joined_local_members', '==', 0) | list }}"
- name: Debug empty room count
ansible.builtin.debug:
msg: "Total empty rooms to delete: {{ empty_rooms | length }}"
- name: Delete empty rooms
when: empty_rooms | length > 0
ansible.builtin.uri:
url: "https://{{ synapse_host_address }}/_synapse/admin/v2/rooms/{{ room.room_id }}"
method: DELETE
headers:
Authorization: "Bearer {{ synapse_admin_token }}"
body_format: json
body: {}
loop: "{{ empty_rooms }}"
loop_control:
loop_var: room
label: "{{ room.room_id }}"
register: purge_ids
- name: Write purge_ids to file
ansible.builtin.copy:
dest: "{{ playbook_dir }}/purge_ids_{{ now(utc=false, fmt='%Y-%m-%d_%H-%M-%S') }}.json"
content: "{{ purge_ids.results | map(attribute='json.delete_id') | list | to_nice_json }}"
mode: "0664"

View File

@ -0,0 +1,28 @@
# code: language=ansible
- name: Clean Synapse
hosts: localhost
connection: local
become: false
gather_facts: false
vars_prompt:
- name: room_id
prompt: "Enter the room ID to delete"
private: false
tasks:
- name: Delete room
ansible.builtin.uri:
url: "https://{{ synapse_host_address }}/_synapse/admin/v2/rooms/{{ room_id }}"
method: DELETE
headers:
Authorization: "Bearer {{ synapse_admin_token }}"
body_format: json
body: {}
register: purge_id
- name: Wait for purge to complete
ansible.builtin.uri:
url: "https://{{ synapse_host_address }}/_synapse/admin/v2/rooms/delete_status/{{ purge_id.json.delete_id }}"
headers:
Authorization: "Bearer {{ synapse_admin_token }}"
register: purge_status
until: purge_status.json.status in ['complete', 'failed']
retries: 30
delay: 10

View File

@ -0,0 +1,19 @@
# code: language=ansible
- name: Clean Synapse
hosts: localhost
connection: local
become: false
gather_facts: false
tasks:
- name: Get room details
ansible.builtin.uri:
url: "https://{{ synapse_host_address }}/_synapse/admin/v1/rooms?limit=1000"
headers:
Authorization: "Bearer {{ synapse_admin_token }}"
register: result
- name: Print result
ansible.builtin.debug:
var: result.json.rooms | map(attribute='room_id') | list

View File

@ -0,0 +1,19 @@
# code: language=ansible
- name: Clean Synapse
hosts: localhost
connection: local
become: false
gather_facts: false
tasks:
- name: Get large rooms
ansible.builtin.uri:
url: "https://{{ synapse_host_address }}/_synapse/admin/v1/statistics/database/rooms"
headers:
Authorization: "Bearer {{ synapse_admin_token }}"
register: result
- name: Print result
ansible.builtin.debug:
var: result.json

View File

@ -0,0 +1,44 @@
# code: language=ansible
- name: Clean Synapse
hosts: localhost
connection: local
become: false
gather_facts: false
vars_prompt:
- name: "purge_ids_file"
prompt: "Enter the file name containing the purge ids"
private: false
tasks:
- name: Load purge ids
ansible.builtin.slurp:
src: "{{ playbook_dir }}/{{ purge_ids_file }}"
register: purge_ids
- name: Set purge_ids_list fact
ansible.builtin.set_fact:
purge_ids_list: "{{ purge_ids.content | b64decode | from_json }}"
- name: Get purge status
ansible.builtin.uri:
url: "https://{{ synapse_host_address }}/_synapse/admin/v2/rooms/delete_status/{{ item }}"
headers:
Authorization: "Bearer {{ synapse_admin_token }}"
loop: "{{ purge_ids_list }}"
register: purge_status
- name: Set purge_status_totals
ansible.builtin.set_fact:
purge_status_shutting_down: "{{ purge_status.results | selectattr('json.status', '==', 'shutting_down') | list | length }}"
purge_status_purging: "{{ purge_status.results | selectattr('json.status', '==', 'purging') | list | length }}"
purge_status_complete: "{{ purge_status.results | selectattr('json.status', '==', 'complete') | list | length }}"
purge_status_failed: "{{ purge_status.results | selectattr('json.status', '==', 'failed') | list | length }}"
- name: Print status
ansible.builtin.debug:
msg: |
Shutting down: {{ purge_status_shutting_down }}
Purging: {{ purge_status_purging }}
Complete: {{ purge_status_complete }}
Failed: {{ purge_status_failed }}

View File

@ -0,0 +1,23 @@
# code: language=ansible
- name: Clean Synapse
hosts: localhost
connection: local
become: false
gather_facts: false
vars_prompt:
- name: room_id
prompt: "Enter the room ID to fetch"
private: false
tasks:
- name: Get room details
ansible.builtin.uri:
url: "https://{{ synapse_host_address }}/_synapse/admin/v1/rooms/{{ room_id }}"
headers:
Authorization: "Bearer {{ synapse_admin_token }}"
register: result
- name: Print result
ansible.builtin.debug:
var: result.json

View File

@ -0,0 +1,23 @@
# code: language=ansible
- name: Room members
hosts: localhost
connection: local
become: false
gather_facts: false
vars_prompt:
- name: room_id
prompt: "Enter the room ID to fetch"
private: false
tasks:
- name: Get room details
ansible.builtin.uri:
url: "https://{{ synapse_host_address }}/_synapse/admin/v1/rooms/{{ room_id }}/members"
headers:
Authorization: "Bearer {{ synapse_admin_token }}"
register: result
- name: Print result
ansible.builtin.debug:
var: result.json

View File

@ -0,0 +1,17 @@
---
- name: Configure Truenas
hosts: truenas
become: false
tasks:
- name: Install required packages
package:
name: "{{ item }}"
state: present
with_items:
- py37-ansible
- py37-pip
- py37-netifaces
- py37-netaddr
- py37-requests
- py37-yaml

View File

@ -0,0 +1,218 @@
---
- name: Install required python libraries system wide
when: ansible_facts['os_family'] == "Archlinux"
community.general.pacman:
name:
- python-cryptography
- python-dnspython
state: latest
update_cache: true
- name: Set certificate path facts
ansible.builtin.set_fact:
acme_certificate_certificate_path: "/etc/ssl/private/{{ acme_certificate_subject }}.pem"
acme_certificate_chain_path: "/etc/ssl/private/{{ acme_certificate_subject }}.chain.pem"
acme_certificate_combined_path: "/etc/ssl/private/{{ acme_certificate_subject }}.combined.pem"
acme_certificate_csr_path: "/etc/ssl/private/{{ acme_certificate_subject }}.csr"
acme_certificate_fullchain_path: "/etc/ssl/private/{{ acme_certificate_subject }}.fullchain.pem"
acme_certificate_key_path: "/etc/ssl/private/{{ acme_certificate_subject }}.key"
acme_certificate_plain_combined_path: "/etc/ssl/private/{{ acme_certificate_subject }}.plain.combined.pem"
acme_certificate_plain_key_path: "/etc/ssl/private/{{ acme_certificate_subject }}.plain.key"
- name: Create ACME account key directory
ansible.builtin.file:
group: root
mode: '0700'
owner: root
path: /etc/ssl/private/ACME
state: directory
- name: Create ACME account key
community.crypto.openssl_privatekey:
cipher: auto
curve: secp384r1
format: auto_ignore
group: root
mode: '0600'
owner: root
passphrase: "{{ acme_certificate_account_key_passphrase }}"
path: /etc/ssl/private/ACME/account.key
size: 4096
state: present
type: RSA
- name: Generate RSA private key
community.crypto.openssl_privatekey:
cipher: auto
curve: secp384r1
format: auto_ignore
group: root
mode: '0600'
owner: root
passphrase: "{{ ssl_passphrase }}"
path: "{{ acme_certificate_key_path }}"
size: 4096
state: present
type: RSA
register: genrsa_private_key
- name: Generate CSR
community.crypto.openssl_csr:
common_name: "{{ acme_certificate_subject }}"
country_name: "{{ acme_certificate_csr_country }}"
digest: sha256
email_address: "{{ acme_certificate_csr_email }}"
group: root
locality_name: "{{ acme_certificate_csr_locality }}"
mode: '0600'
organization_name: "{{ acme_certificate_csr_organization }}"
owner: root
path: "{{ acme_certificate_csr_path }}"
privatekey_passphrase: "{{ ssl_passphrase }}"
privatekey_path: "{{ acme_certificate_key_path }}"
state: present
state_or_province_name: "{{ acme_certificate_csr_state }}"
use_common_name_for_san: true
- name: Submit ACME certificate request
community.crypto.acme_certificate:
account_email: "{{ acme_certificate_account_email }}"
account_key_passphrase: "{{ acme_certificate_account_key_passphrase }}"
account_key_src: /etc/ssl/private/ACME/account.key
acme_directory: "{{ acme_certificate_directory }}"
acme_version: 2
chain_dest: "{{ acme_certificate_chain_path }}"
challenge: dns-01
csr: "{{ acme_certificate_csr_path }}"
dest: "{{ acme_certificate_certificate_path }}"
fullchain_dest: "{{ acme_certificate_fullchain_path }}"
modify_account: true
select_crypto_backend: cryptography
terms_agreed: true
validate_certs: true
register: challenge
- name: Debug ACME certificate challenge
ansible.builtin.debug:
var: challenge
- name: Proceed if challenge is changed
when:
- challenge is changed
- acme_certificate_subject in challenge.challenge_data
block:
- name: Answer ACME certificate challenge
community.general.nsupdate:
key_algorithm: "{{ rfc2136_key_algorithm }}"
key_name: "{{ rfc2136_key_name }}"
key_secret: "{{ rfc2136_key_secret }}"
port: 53
protocol: tcp
record: "{{ challenge.challenge_data[acme_certificate_subject]['dns-01'].record }}."
server: "{{ rfc2136_server_address }}"
state: present
ttl: 3600
type: TXT
value: "{{ challenge.challenge_data[acme_certificate_subject]['dns-01'].resource_value }}"
# zone: "{{ acme_certificate_zone }}"
register: nsupdate_result
- name: Debug nsupdate result
ansible.builtin.debug:
var: nsupdate_result
- name: Retrieve ACME certificate
community.crypto.acme_certificate:
account_email: "{{ acme_certificate_account_email }}"
account_key_passphrase: "{{ acme_certificate_account_key_passphrase }}"
account_key_src: /etc/ssl/private/ACME/account.key
acme_directory: "{{ acme_certificate_directory }}"
acme_version: 2
chain_dest: "{{ acme_certificate_chain_path }}"
challenge: dns-01
csr: "{{ acme_certificate_csr_path }}"
data: "{{ challenge }}"
dest: "{{ acme_certificate_certificate_path }}"
fullchain_dest: "{{ acme_certificate_fullchain_path }}"
select_crypto_backend: cryptography
terms_agreed: true
validate_certs: true
- name: Cleanup ACME challenge
community.general.nsupdate:
key_algorithm: "{{ rfc2136_key_algorithm }}"
key_name: "{{ rfc2136_key_name }}"
key_secret: "{{ rfc2136_key_secret }}"
port: 53
protocol: tcp
record: "{{ challenge.challenge_data[acme_certificate_subject]['dns-01'].record }}."
server: "{{ rfc2136_server_address }}"
state: absent
ttl: 3600
type: TXT
value: "{{ challenge.challenge_data[acme_certificate_subject]['dns-01'].resource_value }}"
zone: "{{ acme_certificate_zone }}"
- name: Slurp fullchain contents
ansible.builtin.slurp:
src: "{{ acme_certificate_fullchain_path }}"
register: acme_certificate_fullchain_content
- name: Slurp private key contents
ansible.builtin.slurp:
src: "{{ acme_certificate_key_path }}"
register: acme_certificate_key_content
- name: Create combined cert file
ansible.builtin.template:
dest: "{{ acme_certificate_combined_path }}"
group: root
mode: '0600'
owner: root
src: combined.pem.j2
- name: Check if plain key file exists
ansible.builtin.stat:
path: "{{ acme_certificate_plain_key_path }}"
register: plain_key_file
- name: Create a plain text copy of the SSL private key # noqa: no-handler
when: |
genrsa_private_key.changed or
not plain_key_file.stat.exists
ansible.builtin.command:
cmd: openssl rsa -in {{ acme_certificate_key_path }} -passin pass:{{ ssl_passphrase }} -out {{ acme_certificate_plain_key_path }}
changed_when: true
- name: Slurp plain text private key contents
ansible.builtin.slurp:
src: "{{ acme_certificate_plain_key_path }}"
register: acme_certificate_key_content
- name: Create plain text combined cert file
ansible.builtin.template:
dest: "{{ acme_certificate_plain_combined_path }}"
group: root
mode: '0600'
owner: root
src: combined.pem.j2
- name: Dependent services block
when:
- (acme_certificate_restart_services | default([]) | length) >= 1
- challenge is changed
block:
- name: Check state of running services
ansible.builtin.service_facts:
- name: Restart dependent services
when:
- ansible_facts.services[item] is defined
- ansible_facts.services[item].state in ['running','failed']
ansible.builtin.service:
name: "{{ item }}"
state: restarted
loop: "{{ acme_certificate_restart_services }}"

View File

@ -0,0 +1,2 @@
{{ acme_certificate_fullchain_content['content'] | b64decode }}
{{ acme_certificate_key_content['content'] | b64decode }}

View File

@ -0,0 +1,49 @@
---
iso_source:
ntp_servers:
- time.example.com
pacstrap:
server: # Select from https://geo.mirror.pkgbuild.com/iso/latest/arch/pkglist.x86_64.txt
base
linux-lts
linux-firmware
intel-ucode
e2fsprogs
dosfstools
exfatprogs
nftables
openssh
ufw
nano
man-db
man-pages
texinfo
curl
which
usbutils
tzdata
tpm2-tss
tar
sudo
smartmontools
shadow
sed
screen
reflector
pv
pinentry
pciutils
parted
openssl
nbd
kmod
bash
bind
ca-certificates
ca-certificates-mozilla
ca-certificates-utils
efibootmgr
grep
mdadm
lvm2

View File

@ -0,0 +1,136 @@
---
- name: attach installation iso as virtual media
- name: boot from installation iso
- name: detect booted ip address
- name: configure disks
# Specify root disk and part, set to type 23 (linux root x86-64), label root
# Specify efi disk and part, set to type 1 (efi system), label efi
# format efi partition
# mkfs.fat -F32 /dev/mmcblk0p1
# Ecrypt root partition
# cryptsetup -y -v luksFormat /dev/sda1 # TODO add keyfile/password automatically
# cryptsetup open /dev/sda1 root
# mkfs.ext4 /dev/mapper/root
# mkdir /mnt/root
# mount /dev/mapper/root /mnt/root
# mkdir /mnt/root/efi
# mount /dev/mmcblk0p1 /mnt/root/efi
# Add cryptsetup params to kernel cmdline
# cryptdevice=UUID=device-UUID:root root=/dev/mapper/root rw
# add efi to /etc/fstab
# mkdir /mnt/mountpoint/etc
# sudo genfstab -L /mnt/mountpoint >> /mnt/mountpoint/etc/fstab
- name: sync ntp
# timedatectl set-timezone Australia/Brisbane
# timedatectl set-ntp true
# run reflector to get a list of mirrors
# reflector -c AU --save /etc/pacman.d/mirrorlist
# update dbs
# pacman -Sy
# pacstrap
# pacstrap -K /mnt/root base linux-lts linux-firmware nano openssh bind bash efibootmgr reflector screen pv pinentry sudo man-db man-pages texinfo ufw nftables intel-ucode e2fsprogs dosfstools curl cryptsetup sbctl sbsigntools fwupd fwupd-efi dmidecode udisks2 usbutils inetutils ethtool qemu-guest-agent arch-install-scripts lsof
# desktop
# pacstrap -K /mnt base linux linux-firmware nano openssh bind bash efibootmgr reflector screen pv pinentry sudo man-db man-pages texinfo ufw nftables intel-ucode e2fsprogs dosfstools curl cryptsetup sbctl sbsigntools fwupd fwupd-efi dmidecode udisks2 usbutils inetutils ethtool arch-install-scripts lsof btrfs-progs plasma-meta plasma-wayland-session kde-system dolphin-plugins
# gen fstab
# genfstab -L /mnt/root >> /mnt/root/etc/fstab
#
# chroot from here
#
# set hostname
# echo hv00 > /etc/hostname
# TODO add entries to /etc/hosts
# 127.0.0.1 localhost
# ::1 localhost
# 127.0.1.1 static_fqdn
# link timezone
# ln -sf /usr/share/zoneinfo/Australia/Brisbane /etc/localtime
# enable ntp again
# timedatectl set-ntp true # TODO move this post reboot
# sync hardware clock
# hwclock --systohc
# set locale
# sed -i 's/#en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/g' /etc/locale.gen
# locale-gen
# echo LANG=en_US.UTF-8 > /etc/locale.conf
# uncomment wheel group in /etc/sudoers
# sed -i 's/# %wheel ALL=(ALL:ALL) ALL/%wheel ALL=(ALL:ALL) ALL/g' /etc/sudoers
# add user
# useradd -u 1000 -U -m -b /home/ -G wheel -s /bin/bash ladmin
# set new user password
# disable root user
# passwd -l root
# usermod -s /sbin/nologin root
# create /etc/kernel/cmdline file
# the uuids are the DISK uuids from /dev/disk/by-uuid, NOT the partuuids
# echo 'cryptdevice=dbbb9fb2-5509-4701-a2bb-5660934a5378:root root=/dev/mapper/root rw' > /etc/kernel/cmdline
# for sd-encrypt hook
# echo 'rd.luks.name=dbbb9fb2-5509-4701-a2bb-5660934a5378=root root=/dev/mapper/root rw' > /etc/kernel/cmdline
# create a default systemd-networkd config
# enable systemd-networkd
# enable sshd
# enable ufw service
# enable ufw firewall
# create ufw config to allow ssh port 22
# modify mkinitcpio presets
# template file?
# output to default efi path ESP/efi/boot/bootx64.efi
# modify mkinitcpio.conf for encryption
# old HOOKS=(base udev autodetect modconf kms keyboard keymap consolefont block filesystems fsck)
# new HOOKS=(base systemd keyboard autodetect modconf kms block sd-encrypt filesystems fsck)
# sed -i 's/^HOOKS=(base udev autodetect modconf block filesystems keyboard fsck)/HOOKS=(base udev autodetect modconf block encrypt filesystems keyboard fsck)/g' /etc/mkinitcpio.conf
# generate Secure Boot keys with sbctl
# keys go to /usr/share/secureboot/keys/db/db.pem
# enroll sbctl keys
# add console= option to cmdline file
# create initcpio post hook /etc/initcpio/post/uki-sbsign
# make /etc/initcpio/post/uki-sbsign executable
# chmod +x /etc/initcpio/post/uki-sbsign
# make initcpio
# mkinitcpio -p linux-lts
# vfio and iommu
# add 'intel_iommu=on iommu=pt' to kernel cmdline
# add vfio binding
# vp2420 iGPU = 8086:4555
# add vfio-pci ids to /etc/kernel/cmdline
# vfio-pci.ids=8086:4555
# add vfio modules to mkinitcpio.conf
# MODULES=(vfio_pci vfio vfio_iommu_type1)
# ensure modconf hook is in mkinitcpio.conf
# HOOKS=(base systemd keyboard autodetect modconf kms block sd-encrypt filesystems fsck)
# efibootmgr NO BACKSLASH ON A ROOT FILE
# efibootmgr -c -d /dev/nvme0n1 -p 1 -L "Arch Linux" -l "archlinux.efi"
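As one hedged example of turning the notes above into tasks, the cryptsetup/mkfs steps map roughly onto community.crypto.luks_device and community.general.filesystem (the device path and passphrase variable are assumptions, not this repo's code):
# Sketch of the commented cryptsetup steps as tasks (not part of this repo).
- name: LUKS-format and open the root partition (sketch)
  community.crypto.luks_device:
    device: /dev/sda1  # assumed root partition from the notes; adjust per host
    state: opened
    name: root
    passphrase: "{{ luks_passphrase }}"  # hypothetical vaulted variable

- name: Create the root filesystem on the opened mapping
  community.general.filesystem:
    fstype: ext4
    dev: /dev/mapper/root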

View File

@ -0,0 +1,24 @@
---
- name: Install arr packages
when: ansible_facts['os_family'] == "Archlinux"
community.general.pacman:
name: "{{ arr_packages }}"
state: present
update_cache: true
- name: Reload systemd
ansible.builtin.systemd:
daemon_reload: true
- name: Start arr services
ansible.builtin.systemd:
name: "{{ item }}"
state: started
enabled: true
loop:
- sonarr.service
- radarr.service
- lidarr.service
- prowlarr.service
- bazarr.service

View File

@ -0,0 +1,6 @@
arr_packages:
- sonarr
- radarr
- lidarr
- bazarr
- prowlarr

View File

@ -0,0 +1,50 @@
---
- name: Check if repo public key is in pacman keyring
ansible.builtin.command:
argv:
- pacman-key
- --list-keys
- "{{ aur_repo_client_public_key_fingerprint }}"
register: repo_key_check
failed_when: repo_key_check.rc not in [0, 1]
changed_when: false
- name: Add repo public key to pacman keyring
when: repo_key_check.rc == 1
block:
- name: Import the repo public key
ansible.builtin.command:
argv:
- pacman-key
- --recv-keys
- "{{ aur_repo_client_public_key_fingerprint }}"
- --keyserver
- "{{ aur_repo_client_keyserver }}"
changed_when: true
- name: Trust the repo public key
ansible.builtin.command:
argv:
- pacman-key
- --lsign-key
- "{{ aur_repo_client_public_key_fingerprint }}"
changed_when: true
- name: Add home repo block to pacman.conf
ansible.builtin.blockinfile:
path: /etc/pacman.conf
block: |
[{{ aur_repo_client_repo_name }}]
SigLevel = Required TrustedOnly
Server = {{ aur_repo_client_repo_address }}
create: false
state: present
insertafter: EOF
register: add_pacman_repo
- name: Update pacman database # noqa: no-handler
when: add_pacman_repo.changed
community.general.pacman:
update_cache: true

View File

@ -0,0 +1,6 @@
---
aur_repo_client_repo_name: "home"
aur_repo_client_repo_address: "https://repo.balsillie.house"
aur_repo_client_public_key_fingerprint: DB529158B99DD8311D78CA2FBE6003C744F56EE2
aur_repo_client_keyserver: hkps://keyserver.ubuntu.com

View File

@ -0,0 +1,12 @@
[Unit]
Description=Sync AUR packages
Wants=aur-sync.timer
[Service]
Type=oneshot
ExecStart=/usr/bin/aur sync --no-view --upgrades --no-confirm --clean --rm-deps --sign --database home
User=aur-builder
Group=aur-builder
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,12 @@
[Unit]
Description=Timer that runs aur sync service
Requires=aur-sync.service
[Timer]
Unit=aur-sync.service
OnCalendar=*-*-* 16:00:00
RandomizedDelaySec=120
Persistent=true
[Install]
WantedBy=timers.target

View File

@ -0,0 +1,270 @@
---
- name: Lookup aur_repo_host secret
delegate_to: localhost
become: false
community.hashi_vault.vault_kv1_get:
path: ansible/group_vars/aur_repo_hosts
register: aur_repo_host_secret
- name: Set aur_repo facts
ansible.builtin.set_fact:
aur_repo_private_key: "{{ aur_repo_host_secret.secret.aur_repo_private_key }}"
aur_repo_key_thumbprint: "{{ aur_repo_host_secret.secret.aur_repo_key_thumbprint }}"
- name: Create the makepkg drop-in config file
ansible.builtin.template:
dest: /etc/makepkg.conf.d/makepkg.conf
src: makepkg.conf.j2
owner: root
group: root
mode: "0644"
- name: Create the build user group
ansible.builtin.group:
name: "{{ aur_repo_build_account }}"
system: true
state: present
- name: Create the build user
ansible.builtin.user:
name: "{{ aur_repo_build_account }}"
password: '!'
group: "{{ aur_repo_build_account }}"
comment: "AUR Package Builder"
shell: /sbin/nologin
home: "{{ aur_repo_dir }}"
createhome: true
system: true
state: present
- name: Create the build user sudoer file
ansible.builtin.template:
dest: /etc/sudoers.d/{{ aur_repo_build_account }}
src: aur-sudoer.j2
owner: root
group: root
mode: "0640"
- name: Create the build dirs
ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: "{{ aur_repo_build_account }}"
group: "{{ aur_repo_build_account }}"
mode: "0775"
loop:
- "{{ aur_repo_dir }}"
- "{{ aur_repo_dir }}/packages"
- "{{ aur_repo_dir }}/sources"
- "{{ aur_repo_dir }}/srcpackages"
- /var/log/makepkg
- /tmp/build
- name: Check if the signing key is in build user's keyring
ansible.builtin.command:
cmd: gpg2 --list-secret-key --with-colons {{ aur_repo_key_thumbprint }}
failed_when: key_result.rc not in [0, 2]
changed_when: false
register: key_result
vars:
ansible_become_user: "{{ aur_repo_build_account }}"
- name: GPG key import block
when: key_result.rc == 2
block:
- name: Template out the signing private key
ansible.builtin.template:
dest: "/tmp/build/signing_key.asc"
src: signing_key.asc.j2
owner: "{{ aur_repo_build_account }}"
group: "{{ aur_repo_build_account }}"
mode: "0600"
- name: Import the signing key
ansible.builtin.command:
cmd: gpg2 --import /tmp/build/signing_key.asc
changed_when: true
vars:
ansible_become_user: "{{ aur_repo_build_account }}"
- name: Delete the signing key
ansible.builtin.file:
path: "/tmp/build/signing_key.asc"
state: absent
- name: Check if aurutils is already installed
ansible.builtin.stat:
follow: true
path: /usr/bin/aur
register: aurutils_stat
- name: Aurutils install block
when: not aurutils_stat.stat.exists
block:
- name: Install makepkg dependencies
community.general.pacman:
name:
- git
- base-devel
state: present
update_cache: true
- name: Clone aurutils
ansible.builtin.git:
depth: 1
dest: /tmp/aurutils
repo: https://aur.archlinux.org/aurutils.git
single_branch: true
version: master
- name: Slurp PKGBUILD contents
ansible.builtin.slurp:
path: /tmp/aurutils/PKGBUILD
register: aurutils_pkgbuild
- name: Parse PKGBUILD into facts
ansible.builtin.set_fact:
aurutils_dependencies: "{{ aurutils_pkgbuild['content'] | b64decode | regex_search('(?<=^depends=\\().*(?=\\)$)', multiline=True) | replace(\"'\", '') | split(' ') }}" # noqa: yaml[line-length]
aurutils_pkgver: "{{ aurutils_pkgbuild['content'] | b64decode | regex_search('(?<=^pkgver=).*(?=$)', multiline=True) }}"
aurutils_pkgrel: "{{ aurutils_pkgbuild['content'] | b64decode | regex_search('(?<=^pkgrel=).*(?=$)', multiline=True) }}"
aurutils_arch: "{{ aurutils_pkgbuild['content'] | b64decode | regex_search('(?<=^arch=\\().*(?=\\)$)', multiline=True) | replace(\"'\", '') }}"
- name: Install aurutils dependencies
community.general.pacman:
name: "{{ aurutils_dependencies }}"
state: present
reason: dependency
update_cache: false
- name: Build aurutils
ansible.builtin.command:
cmd: makepkg
chdir: /tmp/aurutils
creates: "{{ aur_repo_dir }}/packages/aurutils-{{ aurutils_pkgver }}-{{ aurutils_pkgrel }}-{{ aurutils_arch }}.pkg.tar"
vars:
ansible_become_user: "{{ aur_repo_build_account }}"
- name: Update repo database
ansible.builtin.command:
argv:
- repo-add
- --prevent-downgrade
- --remove
- --sign
- --key
- "{{ aur_repo_key_thumbprint }}"
- home.db.tar
- aurutils-{{ aurutils_pkgver }}-{{ aurutils_pkgrel }}-{{ aurutils_arch }}.pkg.tar
chdir: "{{ aur_repo_dir }}/packages"
changed_when: true
vars:
ansible_become_user: "{{ aur_repo_build_account }}"
- name: Check if the signing key is in pacman keyring
ansible.builtin.command:
argv:
- pacman-key
- -l
- "{{ aur_repo_key_thumbprint }}"
failed_when: pacman_key_result.rc not in [0, 1]
changed_when: false
register: pacman_key_result
- name: Pacman key import block
when: pacman_key_result.rc == 1
block:
- name: Import the signing public key to arch keyring
ansible.builtin.command:
argv:
- pacman-key
- -r
- "{{ aur_repo_key_thumbprint }}"
- --keyserver
- hkps://keyserver.ubuntu.com
changed_when: true
- name: Locally sign the imported pacman key
ansible.builtin.command:
argv:
- pacman-key
- --lsign-key
- "{{ aur_repo_key_thumbprint }}"
changed_when: true
- name: Add custom repo block to pacman.conf
ansible.builtin.blockinfile:
path: /etc/pacman.conf
block: |
[home]
SigLevel = Required TrustedOnly
Server = file://{{ aur_repo_dir }}/packages
create: false
state: present
insertafter: EOF
- name: Install aurutils
community.general.pacman:
name: aurutils
state: present
update_cache: true
# - name: Enable the multilib repository
# ansible.builtin.replace:
# path: /etc/pacman.conf
# backup: true
# regexp: '^[#]?\[multilib\]\n[#]?Include = \/etc\/pacman.d\/mirrorlist$'
# replace: '[multilib]\nInclude = /etc/pacman.d/mirrorlist'
# register: multilib_enable
# - name: Update the package database if multilib was enabled # noqa: no-handler
# when: multilib_enable.changed | default(false)
# community.general.pacman:
# update_cache: true
- name: Sync AUR packages
ansible.builtin.command:
cmd: aur sync --no-view -CnrS {{ item }}
loop: "{{ aur_repo_host_packages }}"
register: aur_sync_result
changed_when: (aur_sync_result.stderr_lines | last | replace(':','')) != "sync there is nothing to do"
failed_when: aur_sync_result.rc != 0
vars:
ansible_become_user: "{{ aur_repo_build_account }}"
- name: Add the root www folder if it doesn't exist
ansible.builtin.file:
path: /var/www
state: directory
owner: http
group: http
mode: "0775"
- name: Link the aur repo to the web root
ansible.builtin.file:
src: "{{ aur_repo_dir }}/packages"
path: /var/www{{ aur_repo_dir }}
state: link
- name: Add the aur-sync systemd unit files
ansible.builtin.copy:
src: "{{ item }}"
dest: /usr/lib/systemd/system/
owner: root
group: root
mode: "0644"
loop:
- aur-sync.service
- aur-sync.timer
register: aur_sync_unit_files
- name: Enable and start the aur-sync systemd timer # noqa: no-handler
when: aur_sync_unit_files.changed
ansible.builtin.systemd:
name: aur-sync.timer
enabled: true
state: started
daemon_reload: true
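The aur-sync.service and aur-sync.timer files themselves are not shown in this diff. A plausible minimal pair, assuming the timer simply re-runs the same `aur sync` flags as the task above under the build account (user name and schedule are illustrative, not from this repo):

# aur-sync.service
[Unit]
Description=Sync AUR packages into the local repository

[Service]
Type=oneshot
User=aurbuilder
ExecStart=/usr/bin/aur sync --no-view -CnrS --upgrades

# aur-sync.timer
[Unit]
Description=Daily AUR repository sync

[Timer]
OnCalendar=daily
Persistent=true

[Install]
WantedBy=timers.target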

@@ -0,0 +1 @@
{{ aur_repo_build_account }} ALL = (root) NOPASSWD: /usr/bin/pacman, /usr/bin/pacsync
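# Limits the build account's passwordless sudo to exactly the two package-management
# binaries the automated sync needs; everything else still prompts for a password.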

@@ -0,0 +1,21 @@
# Global Options
OPTIONS=(strip docs !libtool !staticlibs emptydirs zipman purge debug lto autodeps)
MAKEFLAGS="-j{{ (ansible_processor_nproc - 1) }}"
PACKAGER="{{ aur_repo_packager_name }} <{{ aur_repo_packager_email }}>"
# Build Environment
BUILDDIR=/tmp/build
BUILDENV=(!distcc color !ccache check sign)
GPGKEY={{ aur_repo_key_thumbprint }}
# Outputs
PKGDEST={{ aur_repo_dir }}/packages
SRCDEST={{ aur_repo_dir }}/sources
SRCPKGDEST={{ aur_repo_dir }}/srcpackages
LOGDEST=/var/log/makepkg
PKGEXT=".pkg.tar"
SRCEXT=".src.tar"
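# Uncompressed .pkg.tar/.src.tar archives keep local builds fast, and the extension
# matches the package file names the build and repo-add tasks expect.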

@@ -0,0 +1 @@
{{ aur_repo_private_key }}

@@ -1,2 +0,0 @@
---
iso_source:

@@ -1,11 +0,0 @@
---
- name: attach installation iso as virtual media
- name: boot from installation iso
- name: detect booted ip address
- name: configure disks
- name: sync ntp

@@ -0,0 +1,6 @@
---
- name: Restart nginx
ansible.builtin.service:
name: nginx.service
state: restarted

@@ -0,0 +1,67 @@
- name: Install certbot package (Archlinux)
when: ansible_facts['os_family'] == "Archlinux"
community.general.pacman:
name:
- certbot
- certbot-dns-{{ certbot_dns_plugin }}
state: present
update_cache: true
- name: Install certbot webserver plugin (Archlinux)
when:
- ansible_facts['os_family'] == "Archlinux"
- certbot_webserver_type == 'nginx'
community.general.pacman:
name:
- certbot-nginx
state: present
update_cache: true
- name: Template out the DNS plugin credentials file
when: certbot_dns_plugin in ['rfc2136', 'cloudflare']
ansible.builtin.template:
src: "{{ certbot_dns_plugin }}.conf.j2"
dest: "/etc/letsencrypt/{{ certbot_dns_plugin }}.conf"
owner: root
group: root
mode: '0600'
- name: Template out the certbot default config
ansible.builtin.template:
src: cli.ini.j2
dest: /etc/letsencrypt/cli.ini
owner: root
group: root
mode: '0644'
- name: Request and install certificates
ansible.builtin.command:
argv:
- certbot
- certonly
- -n
- --dns-{{ certbot_dns_plugin }}
- --dns-{{ certbot_dns_plugin }}-credentials
- /etc/letsencrypt/{{ certbot_dns_plugin }}.conf
- --dns-{{ certbot_dns_plugin }}-propagation-seconds
- "{{ certbot_dns_propagation_seconds | default(10) }}"
- -d
- "{{ item }}"
creates: /etc/letsencrypt/live/{{ item }}/fullchain.pem
loop: "{{ certbot_domains }}"
notify: "{{ certbot_notify | default(omit) }}"
- name: Enable certbot renewal
ansible.builtin.service:
name: certbot-renew.timer
state: started
enabled: true
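For reference, a minimal vars sketch for this role; the variable names are the ones the tasks above consume, while every value here is illustrative:

certbot_dns_plugin: cloudflare
certbot_webserver_type: nginx
certbot_email: hostmaster@example.org
certbot_domains:
  - example.org
  - www.example.org
certbot_dns_propagation_seconds: 30
certbot_cloudflare_api_token: "<from your secret store>"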

@@ -0,0 +1,3 @@
rsa-key-size = 4096
email = {{ certbot_email }}
agree-tos = true

@@ -0,0 +1 @@
dns_cloudflare_api_token = {{ certbot_cloudflare_api_token }}

@@ -0,0 +1,6 @@
dns_rfc2136_server = {{ certbot_rfc2136_server }}
dns_rfc2136_port = {{ certbot_rfc2136_port | default(53) }}
dns_rfc2136_name = {{ certbot_rfc2136_key_name }}
dns_rfc2136_secret = {{ certbot_rfc2136_key_secret }}
dns_rfc2136_algorithm = {{ certbot_rfc2136_key_algorithm | upper }}
dns_rfc2136_sign_query = true
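# The name, secret, and algorithm must match the TSIG key statement configured on the
# DNS server (e.g. one generated with tsig-keygen), or dynamic updates will be refused.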

@@ -0,0 +1,82 @@
---
- name: Install Docker on Archlinux
when: ansible_facts['os_family'] == "Archlinux"
community.general.pacman:
name: docker
state: present
update_cache: true
- name: Add users to docker group
ansible.builtin.user:
name: "{{ item }}"
groups: docker
append: true
loop: "{{ docker_users }}"
- name: Start and enable Docker
ansible.builtin.systemd:
name: docker
state: started
enabled: true
- name: Create Docker networks
when:
- docker_networks is defined
- docker_networks | length > 0
community.docker.docker_network:
attachable: "{{ item.attachable | default(true) }}"
driver: "{{ item.driver | default('bridge') }}"
driver_options: "{{ item.driver_options | default(omit) }}"
enable_ipv6: "{{ item.enable_ipv6 | default(false) }}"
internal: "{{ item.internal | default(false) }}"
ipam_config: "{{ item.ipam | default(omit) }}"
name: "{{ item.name }}"
state: "present"
loop: "{{ docker_networks }}"
- name: Create Docker volumes
when:
- docker_volumes is defined
- docker_volumes | length > 0
community.docker.docker_volume:
driver: "{{ item.driver | default('local') }}"
driver_options: "{{ item.driver_options | default({}) }}"
recreate: "never"
state: "present"
volume_name: "{{ item.name }}"
loop: "{{ docker_volumes }}"
- name: Pull Docker images
when:
- docker_images is defined
- docker_images | length > 0
community.docker.docker_image_pull:
name: "{{ item.name }}"
pull: "always"
tag: "{{ item.tag | default('latest') }}"
loop: "{{ docker_images }}"
- name: Create Docker containers
when:
- docker_containers is defined
- docker_containers | length > 0
community.docker.docker_container:
auto_remove: "{{ item.auto_remove | default(false) }}"
capabilities: "{{ item.capabilities | default(omit) }}"
command: "{{ item.command | default(omit) }}"
detach: true
domainname: "{{ item.domainname | default(omit) }}"
entrypoint: "{{ item.entrypoint | default(omit) }}"
env: "{{ item.env | default({}) }}"
etc_hosts: "{{ item.etc_hosts | default({}) }}"
hostname: "{{ item.hostname | default(item.name) }}"
image: "{{ item.image }}"
name: "{{ item.name }}"
networks: "{{ item.networks | default(omit) }}"
published_ports: "{{ item.ports | default([]) }}"
restart_policy: "{{ item.restart_policy | default('unless-stopped') }}"
state: 'started'
sysctls: "{{ item.sysctls | default({}) }}"
volumes: "{{ item.volumes | default([]) }}"
loop: "{{ docker_containers }}"

@@ -0,0 +1,20 @@
libvirt_packages:
Archlinux:
- qemu-base
- openbsd-netcat
- swtpm
- gettext
- libvirt
- libvirt-python
- python-lxml
hypervisor:
storage: dir
device: /dev/sdb
# hypervisor:
# storage: zfs
# datasets:
# - name: tank/vhds
# compression: lz4
# encryption: 'off'

@@ -0,0 +1,35 @@
---
- name: Create the libvirt storage directories
ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: libvirt-qemu
group: libvirt-qemu
mode: '0775'
loop:
- /var/lib/libvirt/vhds/
- name: Define additional libvirt storage pools
community.libvirt.virt_pool:
name: "{{ item.name }}"
command: define
xml: "{{ lookup('template', 'dir_libvirt_pool.xml.j2') }}"
loop:
- name: vhds
path: /var/lib/libvirt/vhds/
- name: Create additional libvirt storage pools
community.libvirt.virt_pool:
name: "{{ item }}"
command: build
loop:
- vhds
- name: Start additional libvirt storage pools
community.libvirt.virt_pool:
name: "{{ item }}"
state: active
autostart: true
loop:
- vhds
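# The define/build/start sequence mirrors virsh: define registers the pool XML,
# build creates the backing directory, and active + autostart bring the pool up
# now and on every boot.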

@@ -0,0 +1,91 @@
---
- name: Configure disk partition
community.general.parted:
align: optimal
device: "{{ hypervisor.device }}"
fs_type: ext4
label: gpt
name: libvirt
number: 1
part_end: 100%
part_start: 0%
state: present
# TODO disk encryption
- name: Format filesystem
community.general.filesystem:
device: "{{ hypervisor.device }}1"
fstype: ext4
resizefs: true
state: present
- name: Get list of services
ansible.builtin.service_facts:
- name: Stop the libvirt services
when: item in ansible_facts.services
ansible.builtin.service:
name: "{{ item }}"
state: stopped
loop:
- libvirtd.service
- name: Check if libvirt storage directory exists
ansible.builtin.stat:
path: /var/lib/libvirt/
register: libvirt_storage
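# Migration dance: with libvirtd stopped, copy the existing /var/lib/libvirt onto
# the new partition via a temporary mount, recreate the directory, then mount the
# partition over it permanently.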
- name: Temp mount and copy block
when: libvirt_storage.stat.exists
block:
- name: Temporarily mount hypervisor storage
ansible.posix.mount:
path: /mnt/libvirt_temp/
src: "{{ hypervisor.device }}1"
fstype: ext4
state: mounted
boot: false
- name: Copy libvirt contents to hypervisor storage
ansible.builtin.copy:
src: /var/lib/libvirt/
dest: /mnt/libvirt_temp/
remote_src: true
mode: preserve
- name: Remove existing libvirt storage
ansible.builtin.file:
path: /var/lib/libvirt/
state: "{{ item }}"
owner: root
group: root
mode: '0775'
loop:
- absent
- directory
always:
- name: Unmount from temporary mount point
ansible.posix.mount:
path: /mnt/libvirt_temp/
state: absent
- name: Mount hypervisor storage
ansible.posix.mount:
path: /var/lib/libvirt/
src: "{{ hypervisor.device }}1"
fstype: ext4
state: mounted
boot: true
- name: Start the libvirt service
when: item in ansible_facts.services
ansible.builtin.service:
name: "{{ item }}"
state: started
loop:
- libvirtd.service

@@ -0,0 +1,40 @@
---
- name: Create libvirt zfs dataset(s)
community.general.zfs:
name: "{{ item.name }}"
state: present
extra_zfs_properties: # on/off values must be quoted strings; bare YAML booleans reach zfs as "True"/"False" and are rejected
canmount: 'off'
mountpoint: none
compression: "{{ item.compression | default('off') }}"
primarycache: metadata
secondarycache: none
reservation: none
refreservation: none
dedup: 'off'
encryption: "{{ item.encryption | default('off') }}"
volmode: dev
devices: 'off'
atime: 'off'
loop: "{{ hypervisor.datasets }}"
- name: Define additional libvirt storage pools
community.libvirt.virt_pool:
name: "{{ item.name | split('/') | last }}"
command: define
xml: "{{ lookup('template', 'zfs_libvirt_pool.xml.j2') }}"
loop: "{{ hypervisor.datasets }}"
- name: Create additional libvirt storage pools
community.libvirt.virt_pool:
name: "{{ item.name | split('/') | last }}"
command: build
loop: "{{ hypervisor.datasets }}"
- name: Start additional libvirt storage pools
community.libvirt.virt_pool:
name: "{{ item.name | split('/') | last }}"
state: active
autostart: true
loop: "{{ hypervisor.datasets }}"

Some files were not shown because too many files have changed in this diff.