Compare commits

...

216 Commits

Author SHA1 Message Date
230d830612 enable nginx dev 2025-04-05 02:38:17 -04:00
f843c7eaa3 certbot for dev 2025-04-05 02:12:24 -04:00
076757e1f8 troubleshoot build 2025-04-05 01:08:32 -04:00
0b1c18a3a0 troubleshoot build 2025-04-05 00:15:34 -04:00
f47ad625da troubleshoot build 2025-04-05 00:13:16 -04:00
2b56d30666 aur builder become changes 2025-04-05 00:07:14 -04:00
c12dfc18ce no become for delegated task 2025-04-04 23:43:36 -04:00
814a642cc0 delegate secret lookup 2025-04-04 23:42:51 -04:00
8cad395e34 aur repo host secret lookup 2025-04-04 23:41:35 -04:00
eb360951a1 aur repo host vars 2025-04-04 23:24:58 -04:00
9601aa4937 hashi vault lookups 2025-04-04 23:08:01 -04:00
81319370b1 wip 2025-03-13 08:59:00 -04:00
76f6f78112 add backup, readarr 2025-01-20 16:31:17 -05:00
9b0edab903 add jellyfin to media compose 2025-01-14 14:18:24 -05:00
c377f1a7d1 add prowlarr and sonarr 2025-01-12 14:18:23 -05:00
7f5a35d936 add radarr service 2025-01-12 00:03:26 -05:00
7b9f0e0ca5 add gallery cname 2025-01-04 01:20:36 -05:00
a490e4ad92 unifi nearly done 2024-12-30 22:35:25 -05:00
6734d78bef adjust nginx cert 2024-12-30 20:09:09 -05:00
6722ab4138 Syncthing working on truenas 2024-12-30 20:08:30 -05:00
e76d1a1f88 more apps 2024-12-30 01:18:40 -05:00
c090cc9cbe add minio to truenas 2024-12-25 01:04:30 -05:00
8ab3783a2b Add sops config 2024-12-23 23:52:05 -05:00
cdf20ba9ef Add sops kms keys 2024-12-23 18:35:04 -05:00
f0b3388e8d aws kms 2024-12-21 01:26:55 -05:00
27e2fc6058 truenas 2024-12-21 00:17:04 -05:00
b622bb29df matrix dns 2024-12-20 00:58:24 -05:00
bde6a5f208 dns and talos 2024-12-18 15:42:45 -05:00
85d6fe5056 Merge remote-tracking branch 'refs/remotes/origin/main' 2024-12-09 23:36:16 -05:00
098f63fa5b talos 2024-12-09 23:34:13 -05:00
43fc89a966 mayastor 2024-12-09 02:08:04 -05:00
7aa2992228 talos wip 2024-12-08 01:36:48 -05:00
1775e24a45 add more dns records 2024-11-29 01:35:04 -05:00
d6983b4744 add syanpse mgmt playbooks 2024-10-30 01:52:00 -04:00
29cb12a2d1 tf dns wip 2024-10-26 16:48:20 +13:00
9464737fe9 cred test 2024-10-24 18:43:21 +13:00
14fc10a10a Cloudflare DNS via TF 2024-10-24 18:39:30 +13:00
fe38bebbd5 cloudflare dns 2024-10-23 00:22:32 -04:00
bad78681c6 Merge remote-tracking branch 'refs/remotes/origin/main' 2024-05-16 21:52:18 +12:00
c8ab4633ca change qbittorrent data dir 2024-05-16 21:50:48 +12:00
627343b50f add nuc playbook 2024-05-16 21:35:45 +12:00
2d31a5524f resolve merge 2024-05-12 21:58:05 +12:00
2981bdb22f Merge remote-tracking branch 'refs/remotes/origin/main' 2024-05-12 21:57:52 +12:00
84930795b6 temp comment out roles 2024-05-12 21:55:37 +12:00
f068c9710b torrent working 2024-04-24 21:40:00 +12:00
afc0b57cfb add arr services
add music and subs nginx proxies
2024-04-23 18:03:25 +12:00
7df41b5c8d aur repo host complete 2024-04-23 15:47:14 +12:00
2cc78654fe custom remote repo working 2024-04-23 02:14:39 +12:00
a6eb508cf0 aur repo wip 2024-04-23 00:49:49 +12:00
85330c8645 aurutils install working 2024-04-22 21:46:14 +12:00
c05f3a845b certbot and nginx working 2024-04-22 01:37:46 +12:00
3d9241b475 kodi media services basic setup 2024-04-21 01:04:17 +12:00
cb4abe5722 Merge remote-tracking branch 'refs/remotes/origin/main' 2024-04-20 22:24:47 +12:00
d0c1bb8717 start to add kodi ansible 2024-04-20 22:24:20 +12:00
5b83607fe0 attempted k8s resources as tf files, not worth the trouble 2024-04-20 02:10:01 +12:00
43dbb951fe add vultr block storage mount 2024-04-19 17:31:44 +12:00
f68c6b227a add vultr k8s 2024-04-18 13:28:39 +12:00
8d049f3056 opnsense b created 2024-04-17 04:06:14 +12:00
a0997ee8ec add floating ip assignments 2024-04-17 03:36:58 +12:00
b8c2dae1fa split resources into multiple tf files 2024-04-17 02:56:53 +12:00
c2f7590b44 Add hetzner terraform project 2024-04-17 02:37:58 +12:00
d8db6ba755 refine remote hwdb file 2024-04-13 18:21:03 +12:00
d6882bd306 update hwdb readme 2024-04-13 17:25:42 +12:00
f78dd67cd5 add conf dir and hwdb file 2024-04-13 16:49:48 +12:00
2aa50b4015 add sftp creds template 2024-04-05 15:09:50 +13:00
4dfe68a54b add mount-sftp script 2024-04-05 15:07:25 +13:00
bdf04302aa backup scripts 2024-02-10 23:12:35 +13:00
39cb2b0007 begin to add node backup 2024-02-03 01:24:47 +13:00
f10ce63569 adjust ups mon alerts 2024-01-12 12:54:24 +13:00
52c455d490 merged arch-install contents 2024-01-12 00:14:09 +13:00
c6755e8d97 nut and acme working 2024-01-11 18:15:16 +13:00
ba7cda511e organize playbooks into subdirs 2024-01-11 13:03:08 +13:00
7eddbba696 Add k8s shutdown/openup scripts
Add nut ansible roles
Add acme certificate ansible role
2024-01-11 01:11:16 +13:00
92df824252 nut wip 2024-01-10 02:05:03 +13:00
9e07845208 unifi working with db init 2024-01-05 02:41:50 +13:00
8d71ff222a add matrix reservation 2023-12-25 01:55:30 +13:00
117b36842c add transmission static ip
add internal v4 ingress service
2023-12-17 19:12:27 +13:00
2b2486f2fb add mail reservations 2023-12-08 15:29:55 +13:00
2e38d3d07f reservations, ingress service 2023-12-07 02:58:14 +13:00
af13cfbb41 new cluster wip 2023-11-29 02:02:52 +13:00
cd19a7687c house cluster wip 2023-11-27 03:31:14 +13:00
0923148d8e add packages 2023-11-22 00:10:42 +13:00
dda7bc7a10 add additional sc definition 2023-10-03 23:04:20 +13:00
9c477f2094 archinstall 2023-09-26 02:52:28 +13:00
e1349b2b90 add packages 2023-08-15 03:20:21 +10:00
cffbcaea8c sshd setup 2023-08-14 22:27:29 +10:00
e1fb6b94ee fix systemd templates 2023-08-13 14:03:03 +10:00
a2ec933cf8 refine systemd_networkd 2023-08-12 20:19:07 +10:00
24f3a7c485 install notes 2023-08-12 01:37:30 +10:00
f00093ef8e set mac address values 2023-08-11 14:52:41 +10:00
32ba17ea33 start building pacstrap list 2023-08-11 11:49:33 +10:00
8f22f5429a hypervisor refinement 2023-08-11 10:52:27 +10:00
2769a3430b hypervisor wip 2023-08-11 01:11:37 +10:00
84a20416e3 network and serial complete 2023-08-10 23:32:17 +10:00
621d9595f8 systemd-networkd templates 2023-08-10 21:29:39 +10:00
5f1e304301 jinja whitespace 2023-08-09 22:55:51 +10:00
df3587032d new hypervisor wip 2023-08-09 22:37:28 +10:00
5007f0b68e new hypervisor wip 2023-08-09 18:38:34 +10:00
590a50cd1a new hypervisor role 2023-08-09 15:49:23 +10:00
1df2adffdb fix user and from in notes 2023-04-18 14:13:00 +02:00
ddfccdfe96 add zfs event daemon notes 2023-04-17 22:06:00 +02:00
0c091aba7e add snapshot class 2023-03-23 23:28:02 +01:00
ef418f2839 k8s intel gpu files 2023-03-22 01:32:01 +01:00
ff0d769aa5 added calico ip reservations 2023-01-14 17:51:22 +10:00
e4c5846353 add hdd-zfs sc 2023-01-13 02:11:03 +10:00
afedcf16d5 merge fix 2023-01-12 15:55:53 +10:00
ff8e0581ec notes 2023-01-12 15:53:36 +10:00
ce3af85e73 changed service order 2023-01-12 13:06:45 +10:00
3fa49df87f added todo notes, enabled zfs-import.target 2023-01-12 12:49:42 +10:00
11115d515e move zfs_pools var 2023-01-11 15:19:15 +10:00
657ae3fa91 open ebs zfs 2023-01-11 10:59:26 +10:00
14a126afa0 zfs node work in prep for open-ebs zfs 2023-01-10 02:19:24 +10:00
5eb52e7adb resolve merge conflict 2023-01-05 17:46:06 +10:00
a86cb26010 cert issuer and ingress controller 2023-01-05 17:37:36 +10:00
a985895225 cert issuer and ingress controller 2022-12-09 21:26:01 +13:00
b352a796e0 folder rename 2022-12-09 02:42:30 +13:00
0a6a3e5371 cert manager 2022-12-09 02:40:44 +13:00
ce9f7891fa add calico bgp configuration 2022-12-07 00:12:15 +13:00
9fd4c6f001 add ipv6 bgp peer config 2022-12-06 23:06:44 +13:00
4ff4a3198e not sure was tired 2022-12-06 12:48:15 +13:00
970576cbb9 fact merging 2022-12-06 02:49:23 +13:00
77cfa06ab1 calico fact setting 2022-12-06 02:31:59 +13:00
74ba8cad51 unix socket notation 2022-12-06 02:19:08 +13:00
5111482194 more subnet problems 2022-12-06 02:14:47 +13:00
1f7bc2cfb6 adjust v6 cidr mask sizes 2022-12-06 02:09:13 +13:00
8d7d005431 typo 2022-12-06 02:01:33 +13:00
678da5e314 configure for ipv6 dual stack 2022-12-06 01:56:55 +13:00
5c72b57d9c comment out containerd restart 2022-12-06 00:22:12 +13:00
6b7e1182b0 b 2022-12-06 00:16:54 +13:00
4a7a11aad7 chain management 2022-12-06 00:09:28 +13:00
bcae7303ab k8s destroy 2022-12-05 23:57:14 +13:00
f230db3739 reoganize storage roles 2022-12-05 23:18:04 +13:00
3e70f70fa3 added ebs hostpath storage 2022-11-10 01:09:46 +13:00
e5fdf35669 add ebs to todo 2022-11-03 16:25:55 +13:00
47266ef6b7 add backup to todo 2022-11-03 15:04:24 +13:00
a62240b02a tuning 2022-11-03 03:59:42 +13:00
5bd82a4d33 ceph seems broken 2022-11-03 01:34:34 +13:00
d9a28d21a8 rook storage working 2022-11-02 22:18:55 +13:00
4d4977b164 early storage operator work 2022-11-02 14:48:24 +13:00
73982fd7b7 k8s taint module 2022-11-02 13:21:31 +13:00
75e78ea7c8 readme 2022-11-02 10:44:56 +13:00
09a2662c50 change emojis 2022-11-02 01:14:44 +13:00
7608d86cde improve archive readme 2022-11-02 01:13:45 +13:00
7710d6b097 improve main readme 2022-11-02 01:09:00 +13:00
a8edbf45d6 doco typos 2022-11-02 01:04:30 +13:00
9fb0635789 doco 2022-11-02 01:03:56 +13:00
cce06a515b re-organize 2022-11-02 00:40:04 +13:00
028fe4b8c4 tidyup 2022-11-02 00:13:58 +13:00
a685f44139 tidyup 2022-11-02 00:13:10 +13:00
d663591e2a tidyup 2022-11-02 00:13:02 +13:00
a046ade37b tidyup 2022-11-02 00:12:19 +13:00
9fb6bcfab8 tidyup 2022-11-02 00:11:47 +13:00
ecdaad56af revert 6e2a91013a
revert tidyup
2022-11-02 00:09:04 +13:00
6e2a91013a tidyup 2022-11-02 00:08:16 +13:00
103caae226 remove unused host_vars 2022-11-02 00:07:36 +13:00
a61705b9a3 revert 92ac1dd6c1
revert Delete 'ansible/inventory/host_vars/kube01/vault.yaml'
2022-11-01 22:53:25 +13:00
92ac1dd6c1 Delete 'ansible/inventory/host_vars/kube01/vault.yaml' 2022-11-01 22:52:33 +13:00
b98e368259 Delete '.vscode/settings.json' 2022-11-01 22:51:22 +13:00
a07565128e tidy up 2022-11-01 22:50:18 +13:00
29e722d1b5 additions to gitignore 2022-11-01 22:27:06 +13:00
6020b9771c cluster networking operational 2022-11-01 22:13:56 +13:00
a60c397d1b control plane stand up working 2022-11-01 02:56:41 +13:00
37da64cacf kubeadm init 2022-10-31 03:01:23 +13:00
7662f4a11f begin k8s config 2022-10-31 02:25:47 +13:00
4abb4a929f uncomment vm deploy block 2022-10-31 01:56:54 +13:00
a7f197eda2 container image disk config 2022-10-31 01:56:21 +13:00
c8abd13f86 begin disk config 2022-10-31 00:56:06 +13:00
263907032b delete fw vars before undefine 2022-10-31 00:02:50 +13:00
c9d7c99f77 descriptions 2022-10-30 23:59:04 +13:00
13148f3e69 libvirt module sytax 2022-10-30 23:58:47 +13:00
2093fca5d2 fixes to vm_destroy 2022-10-30 23:57:26 +13:00
50c451e94d add vm_destroy playbook and role
correct network addressing in template
write hostname to /etc/hostname
2022-10-30 23:48:43 +13:00
73c9106c8f adjust inventory to number from 01 2022-10-30 22:46:04 +13:00
a69ac7f0cd change vm start state from started to running 2022-10-30 22:40:13 +13:00
83346eb842 cut down vm devices 2022-10-30 22:39:02 +13:00
7e3fefd220 dev boot elmnts cannot be used with os boot elmnts 2022-10-30 22:30:42 +13:00
c0553c7d22 libvirt xml syntax 2022-10-30 22:24:26 +13:00
0a969fddb4 correct libvirt xml 2022-10-30 22:18:55 +13:00
07cbae13f7 line join in cmd 2022-10-30 22:13:23 +13:00
0ada17f736 change from copyin to upload in virt-customise cmd 2022-10-30 22:09:51 +13:00
f648ce0448 correct virt customize command
use vars for vhd sizes
2022-10-30 21:51:27 +13:00
595c2f177e reorganize default vars 2022-10-30 21:43:12 +13:00
300ca326ee remove hv00 var, rely on defaults 2022-10-30 21:38:58 +13:00
8cbfb1a2c6 indentation 2022-10-30 21:26:10 +13:00
c6eb329c73 split vm_deploy 2022-10-30 21:22:34 +13:00
7ce5e21906 comment out vm hardening (temp) 2022-10-30 21:12:35 +13:00
239ababf1d finalise vm_deploy role 2022-10-30 21:11:14 +13:00
03700ba0fb refine disks 2022-10-30 02:39:44 +13:00
ba5672b72a add vault creds 2022-10-30 02:32:27 +13:00
d25e57ee9f adjust vars 2022-10-30 00:41:07 +13:00
c84eb30443 sysprep problems 2022-10-25 02:55:23 +13:00
8daf2d1ff8 progress on vm_template 2022-10-24 22:53:50 +13:00
09d6c93224 adding credentials 2022-10-24 21:53:39 +13:00
8aa59da325 arranging vars 2022-10-24 17:32:38 +13:00
686dde0156 ? 2022-10-24 15:56:51 +13:00
d1508efd3f restructure 2022-10-24 15:50:38 +13:00
fb6a488dac vm standup work for k8s 2022-10-24 02:54:34 +13:00
31157afa57 begin k8s setup 2022-10-23 19:58:01 +13:00
020efcc3bb add msmtp/zed/smartd/scrub-timers 2022-10-19 13:09:01 +13:00
ae8633767a . 2022-10-04 02:47:03 +13:00
2cfb3b180f rename vars file 2022-10-04 00:15:06 +13:00
dac4962ace . 2022-10-04 00:11:25 +13:00
92a04af990 add cloud-init templates 2022-10-03 15:30:22 +13:00
08d55c4f22 begin arch packer template 2022-10-03 15:19:24 +13:00
ea117c30f8 zfs conditionals and delegated perms 2022-09-07 01:28:00 +12:00
e7870f6ef6 networking 2022-09-05 22:56:49 +12:00
1166e932c5 libvirt config 2022-09-05 21:27:26 +12:00
140749e34c libvirt and firewall additions 2022-09-05 15:28:28 +12:00
21679647c6 libvirt role 2022-09-05 00:33:46 +12:00
63116af524 notes 2022-09-04 01:47:32 +12:00
450d60cd46 start zfs install 2022-09-04 00:20:27 +12:00
06ea234395 pikaur 2022-09-04 00:14:01 +12:00
b8fb28d748 pikaur 2022-09-04 00:09:20 +12:00
26abd5af81 update inventory 2022-09-03 02:36:26 +12:00
ccad767c43 update inventory 2022-09-03 02:33:23 +12:00
f397225862 reorganize for vault 2022-09-03 02:21:14 +12:00
363 changed files with 74887 additions and 147 deletions

43
.gitignore vendored
View File

@ -1,2 +1,41 @@
-ansible/vault_password
-ansible/inventory/group_vars/vault.yml
+# Local .terraform directories
+**/.terraform/*
+**/.terraform
+.ansible/
+.vscode/
+ansible/collections/**
+# registry password file
+distribution/htpasswd
+# .tfstate files
+*.tfstate
+*.tfstate.*
+# Terraform lock file
+**/.terraform.lock.hcl
+# Terraform secrets file
+**/secrets.auto.tfvars
+# Crash log files
+crash.log
+crash.*.log
+# Include override files you do wish to add to version control using negated pattern
+# !example_override.tf
+# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
+# example: *tfplan*
+# Ignore CLI configuration files
+.terraformrc
+terraform.rc
+**/vault_password
+**/vault.yaml
+**/*secrets.yaml
+**/*secret.yaml
+.vscode/*

10
.sops.yaml Normal file
View File

@ -0,0 +1,10 @@
creation_rules:
  - path_regex: (secret|secrets)\.(yml|yaml)$
    unencrypted_regex: ^(apiVersion|kind|name|namespace|type)$
    kms: 'arn:aws:kms:us-east-1:140023401248:key/c51c2cc5-4e8e-484d-b2f0-4d4ec2039938'
    # kms:
    #   - arn: 'arn:aws:kms:us-east-1:140023401248:key/c51c2cc5-4e8e-484d-b2f0-4d4ec2039938'
    #     aws_profile: home
    age: 'age1k5y5gj5fzpwtjgzqd4n93h4h9ek9jz8898rva5zsgj7zjet97ytq4dtzjs'
    hc_vault_transit_uri: 'https://vault.balsillie.net:443/v1/sops/keys/krds'
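With this file at the repo root, sops picks a creation rule by matching path_regex against the file being encrypted, so any *secret.yaml / *secrets.yaml file is wrapped with the KMS, age and Vault transit keys above while the Kubernetes metadata fields named in unencrypted_regex stay readable. A minimal smoke test, assuming sops is installed and at least one of the listed keys is reachable (the file path here is hypothetical):

sops --encrypt --in-place ansible/inventory/group_vars/example_secrets.yaml   # encrypt under the matching rule
sops --decrypt ansible/inventory/group_vars/example_secrets.yaml              # round-trip check to stdout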

27
.vscode/settings.json vendored Normal file
View File

@ -0,0 +1,27 @@
{
  "yaml.schemas": {
    "https://raw.githubusercontent.com/ansible/schemas/main/f/ansible.json": "file:///home/michael/Code/home/IaC/ansible/roles/vm_deploy/tasks/deploy.yaml",
    "kubernetes://schema/storage.k8s.io/v1@storageclass": "file:///home/michael/Code/home/IaC/ansible/roles/k8s_storage_deploy/files/config/blockpool_ssd_replica.yaml"
  },
  "vs-kubernetes": {
    "vs-kubernetes.namespace": "",
    "disable-linters": ["resource-limits"],
    "vs-kubernetes.kubectl-path": "",
    "vs-kubernetes.helm-path": "",
    "vs-kubernetes.minikube-path": "",
    "vs-kubernetes.kubectlVersioning": "user-provided",
    "vs-kubernetes.outputFormat": "yaml",
    "vs-kubernetes.kubeconfig": "",
    "vs-kubernetes.knownKubeconfigs": [],
    "vs-kubernetes.autoCleanupOnDebugTerminate": false,
    "vs-kubernetes.nodejs-autodetect-remote-root": true,
    "vs-kubernetes.nodejs-remote-root": "",
    "vs-kubernetes.nodejs-debug-port": 9229,
    "vs-kubernetes.dotnet-vsdbg-path": "~/vsdbg/vsdbg",
    "vs-kubernetes.local-tunnel-debug-provider": "",
    "checkForMinikubeUpgrade": true,
    "imageBuildTool": "Docker"
  },
  "ansible.python.interpreterPath": "/usr/bin/python"
}

View File

@ -1,7 +0,0 @@
Ansible for configuring base hosts
Packer for creating VM templates.
Terraform for deploying VMs based on those templates.
Ansible for configuring deployed VMs into clusters.

View File

@ -1,11 +1,11 @@
 [defaults]
-inventory = ./inventory/inventory.yaml
+inventory = ./inventory/
 jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n
 library = modules
 module_utils = module_utils
 display_skipped_hosts = false
 interpreter_python = auto_silent
-collections_paths = ./collections
 collections_path = ./collections
 roles_path = ./roles
 vault_password_file = ./vault_password
+playbook_dir = ./playbooks/

View File

@ -1,6 +0,0 @@
---
# connection
ansible_connection: ssh
ansible_become_method: sudo

View File

@ -0,0 +1,6 @@
acme_certificate_csr_organization: Balsillie Family
acme_certificate_csr_locality: Queenstown
acme_certificate_csr_state: Otago
acme_certificate_csr_country: NZ
acme_certificate_csr_email: admin@balsillie.net
acme_certificate_directory: https://acme-v02.api.letsencrypt.org/directory

View File

@ -0,0 +1,3 @@
nut_client_admin_username: nut-admin
nut_client_primary_username: nut-primary
nut_client_secondary_username: nut-secondary

View File

@ -0,0 +1,3 @@
rfc2136_key_algorithm: hmac-sha256
rfc2136_key_name: rndc-house
rfc2136_server_address: 10.208.240.1

View File

@ -0,0 +1,23 @@
# code: language=ansible
aur_repo_packager_name: "Balsillie Family"
aur_repo_packager_email: "admin@balsillie.net"
aur_repo_dir: "/aur"
aur_repo_build_account: "aur-builder"
aur_repo_host_packages:
- pikaur
- jellyfin-media-player # If you get errors relating to icu, check 'icu' package version and perform a system update
- git-credential-keepassxc
- docker-credential-secretservice-bin
- ventoy-bin
- debtap
- aurutils
- ipmiview
- powershell-bin
- visual-studio-code-bin
- ttf-ms-fonts
- brave-bin
- teamviewer
- vmware-horizon-client

View File

@ -0,0 +1,25 @@
---
zfs_pools:
  - name: ssd
    ashift: 16
    recordsize: 64k
    type: ""
    disks: /dev/vde
    compression: "off"
    datasets:
      - name: ssd/data
        encrypt: false
      - name: ssd/data/open-ebs
        encrypt: false
  - name: hdd
    ashift: 12
    recordsize: 64k
    type: mirror
    disks: /dev/sda /dev/sdb
    compression: "off"
    datasets:
      - name: hdd/data
        encrypt: true
      - name: hdd/data/open-ebs
        encrypt: false
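The role that consumes zfs_pools is not part of this diff, but each entry maps onto roughly the following zpool/zfs commands (a sketch; property placement follows the vars above):

zpool create -o ashift=16 -O compression=off -O recordsize=64k ssd /dev/vde
zfs create ssd/data
zfs create ssd/data/open-ebs
zpool create -o ashift=12 -O compression=off -O recordsize=64k hdd mirror /dev/sda /dev/sdb
zfs create -o encryption=on -o keyformat=passphrase hdd/data   # encrypt: true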

View File

@ -0,0 +1,17 @@
# code: language=ansible
# Connection (SSH)
ansible_connection: ansible.builtin.ssh
ansible_ssh_host: dev.balsillie.house
ansible_ssh_port: 22
ansible_ssh_host_key_checking: false
ansible_ssh_pipelining: false
ansible_ssh_user: ladmin
ansible_ssh_private_key_file: ~/.ssh/conf.d/home/dev.balsillie.house.key
# Become (sudo)
ansible_become_method: ansible.builtin.sudo
ansible_become_user: root
ansible_become_password: "{{ lookup('community.hashi_vault.vault_kv1_get', 'ansible/host_vars/dev.balsillie.house/ansible_connection').secret.ansible_become_password }}" # noqa yaml[line-length]
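Because ansible_become_password is resolved from Vault's KV v1 engine at runtime, a failed lookup breaks every privileged task against this host; the lookup can be exercised on its own first (assumes VAULT_ADDR and a valid token are already in the environment):

ansible localhost -m ansible.builtin.debug \
  -a "msg={{ lookup('community.hashi_vault.vault_kv1_get', 'ansible/host_vars/dev.balsillie.house/ansible_connection') }}"

Note this prints the secret to the terminal, so it is only suitable as a throwaway connectivity test.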

View File

@ -0,0 +1,17 @@
# code: language=ansible
certbot_rfc2136_server: '10.208.240.1'
certbot_rfc2136_key_name: 'rndc-house'
certbot_rfc2136_key_algorithm: 'hmac-sha256'
certbot_cloudflare_api_token: "{{ lookup('community.hashi_vault.vault_kv1_get', 'cloudflare/balsillie.house/dns').secret.api_token }}" # noqa yaml[line-length]
certbot_dns_propagation_seconds: 15
certbot_webserver_type: 'nginx' # 'nginx' or 'apache'
certbot_dns_plugin: 'cloudflare'
certbot_email: "certbot.dev@balsillie.email"
certbot_acme_server: "acme-v02.api.letsencrypt.org"
certbot_domains:
- repo.balsillie.house
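These variables line up with a certbot invocation along these lines (a sketch, since the role template itself is not in this diff; the credentials file path is an assumption):

certbot certonly \
  --dns-cloudflare \
  --dns-cloudflare-credentials /etc/letsencrypt/cloudflare.ini \
  --dns-cloudflare-propagation-seconds 15 \
  --server https://acme-v02.api.letsencrypt.org/directory \
  -m certbot.dev@balsillie.email \
  -d repo.balsillie.house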

View File

@ -0,0 +1,9 @@
# code: language=ansible
nginx_sites:
  - name: repo.balsillie.house
    type: site
    autoindex: 'on'
    root: /var/www/aur
nginx_user: "http"

View File

@ -0,0 +1 @@
acme_certificate_account_email: acme.hv00@balsillie.email

View File

@ -0,0 +1,9 @@
ansible_connection: ssh
ansible_host: hv00.balsillie.house
ansible_fqdn: hv00.balsillie.house
ansible_remote_addr: 10.192.110.100
ansible_port: 22
ansible_user: ladmin
# ansible_become_user: root
ansible_become_method: ansible.builtin.sudo
static_fqdn: hv00.balsillie.house

View File

@ -0,0 +1,8 @@
certbot_rfc2136_server: '10.208.240.1'
certbot_rfc2136_key_name: 'rndc-house'
certbot_rfc2136_key_algorithm: 'hmac-sha256'
certbot_webserver_type: 'nginx' # 'nginx' or 'apache'
certbot_dns_plugin: 'rfc2136'
certbot_email: "certbot.hv00@balsillie.email"
certbot_acme_server: "acme-v02.api.letsencrypt.org"

View File

@ -0,0 +1,5 @@
hypervisor:
  storage: dir
  qemu_bridges:
    - br0

View File

@ -0,0 +1,17 @@
nginx_sites:
  - name: repo.balsillie.house
    type: site
    autoindex: 'on'
    root: /var/www/aur
  - name: unifi.balsillie.house
    type: proxy
    upstream:
      host: 127.0.0.1
      port: 8989
  - name: hv00.balsillie.house
    type: proxy
    upstream:
      host: 127.0.0.1
      port: 9443
nginx_user: "http"

View File

@ -0,0 +1,38 @@
nut_client_local_server: true
nut_client_shutdown_cmd: /usr/bin/poweroff
nut_client_shutdown_exit: "true"
nut_client_hostsync: 240
nut_client_notify_cmd: /scripts/notify.sh
nut_client_min_supplies: 1
nut_client_ups_devices:
  - name: ups0
    host: hv00.balsillie.house
    type: primary
    port: 3493
    powervalue: 1
nut_client_notify_messages:
  - name: SHUTDOWN
    message: "UPSMON shutdown triggered for HV00."
  - name: LOWBATT
    message: "UPS has reached low battery condition."
nut_client_notify_flags:
  - name: LOWBATT
    flags: SYSLOG+WALL+EXEC
  - name: FSD
    flags: SYSLOG+WALL+EXEC
  - name: COMMOK
    flags: SYSLOG+WALL+EXEC
  - name: COMMBAD
    flags: SYSLOG+WALL+EXEC
  - name: SHUTDOWN
    flags: SYSLOG+WALL+EXEC
  - name: REPLBATT
    flags: SYSLOG+WALL+EXEC
  - name: NOCOMM
    flags: SYSLOG+WALL+EXEC
  - name: NOPARENT
    flags: SYSLOG+WALL+EXEC
  - name: BYPASS
    flags: SYSLOG+WALL+EXEC
  - name: NOTBYPASS
    flags: SYSLOG+WALL+EXEC

View File

@ -0,0 +1,7 @@
nut_server_listen_address: 10.192.110.100
nut_server_listen_port: 3493
nut_server_certificate_file: /etc/ssl/private/hv00.balsillie.house.plain.combined.pem
nut_server_ups_devices:
  - name: ups0
    driver: usbhid-ups
    port: auto
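With upsd listening on 10.192.110.100:3493, any client can confirm reachability and driver health with the stock NUT CLI before upsmon is even configured:

upsc -l hv00.balsillie.house       # list UPS names the server exposes (expect ups0)
upsc ups0@hv00.balsillie.house     # dump battery charge, status and other variables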

View File

@ -0,0 +1 @@
console_device: ttyS0

View File

@ -0,0 +1,16 @@
sshd:
  config_path: home
  auth:
    pubkey: 'yes'
    password: 'no'
    empty: 'no'
  listen:
    port: '22'
    family: inet
    ipv4:
      - '192.168.1.250'
      - '10.192.110.100'
  forwarding:
    agent: 'no'
    x11: 'no'
  nickname: vault

View File

@ -0,0 +1,82 @@
systemd_networkd_configs:
  - name: 00-eth0.link
    src: ethernet.link.j2
    mac_address: 64-62-66-21-e9-c3
  - name: 00-eth1.link
    src: ethernet.link.j2
    mac_address: 64-62-66-21-e9-c4
  - name: 00-eth2.link
    src: ethernet.link.j2
    mac_address: 64-62-66-21-e9-c5
  - name: 00-wan.link
    src: ethernet.link.j2
    mac_address: 64-62-66-21-e9-c6
  - name: 01-eth0.network
    src: ethernet.network.j2
    mac_address: 64-62-66-21-e9-c3
    arp: false
    lldp: true
    dhcp: false
    bridge:
      name: br0
      vlans:
        - 110
        - 210
      pvid: 210
  - name: 01-eth1.network
    src: ethernet.network.j2
    mac_address: 64-62-66-21-e9-c4
    arp: false
    lldp: true
    dhcp: false
    bridge:
      name: br0
      vlans:
        - 210
      pvid: 210
  - name: 01-eth2.network
    src: ethernet.network.j2
    mac_address: 64-62-66-21-e9-c5
    arp: false
    lldp: true
    dhcp: false
    bridge:
      name: br0
      vlans:
        - 30
        - 210
        - 220
        - 230
  - name: 01-wan.network
    src: ethernet.network.j2
    mac_address: 64-62-66-21-e9-c6
    arp: true
    lldp: false
    dhcp: true
  - name: 10-br0.netdev
    src: bridge.netdev.j2
    vlan_filtering: true
    stp: true
  - name: 11-br0.network
    src: bridge.network.j2
    arp: false
    dhcp: false
    lldp: true
    vlans:
      - 110
  - name: 20-vlan110.netdev
    src: vlan.netdev.j2
    vlan_id: 110
  - name: 21-vlan110.network
    src: vlan.network.j2
    arp: true
    lldp: true
    dhcp: false
    address:
      ipv4:
        - 10.192.110.100/24
    gateway:
      ipv4: 10.192.110.254
    nameserver:
      ipv4:
        - 10.192.110.254
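Once these render into /etc/systemd/network and systemd-networkd restarts, the intended topology (three NICs enslaved to br0, a DHCP WAN port, and VLAN 110 carrying the management address) can be sanity-checked with networkctl:

networkctl list               # br0, vlan110 and the renamed eth*/wan links should show up
networkctl status vlan110     # expect 10.192.110.100/24 with gateway 10.192.110.254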

View File

@ -0,0 +1,9 @@
ansible_connection: ssh
ansible_host: kodi00.balsillie.house
ansible_fqdn: kodi00.balsillie.house
ansible_remote_addr: 10.192.210.169
ansible_port: 22
ansible_user: ladmin
ansible_become_user: root
ansible_become_method: sudo
static_fqdn: kodi00.balsillie.house

View File

@ -0,0 +1,8 @@
certbot_rfc2136_server: '10.208.240.1'
certbot_rfc2136_key_name: 'rndc-house'
certbot_rfc2136_key_algorithm: 'hmac-sha256'
certbot_webserver_type: 'nginx' # 'nginx' or 'apache'
certbot_dns_plugin: 'rfc2136'
certbot_email: "certbot.kodi00@balsillie.email"
certbot_acme_server: "acme-v02.api.letsencrypt.org"

View File

@ -0,0 +1,81 @@
---
docker_users:
  - ladmin
docker_networks:
  - name: torrent
    driver: bridge
    driver_options:
      # com.docker.network.bridge.name: docker-torrent
      com.docker.network.bridge.enable_ip_masquerade: true
      com.docker.network.bridge.enable_icc: true
      # com.docker.network.container_iface_prefix: container-torrent
    attachable: true
    enable_ipv6: false
    internal: false
    ipam:
      - subnet: 192.168.99.0/24
        gateway: 192.168.99.254
docker_volumes:
  - name: torrent-data
    driver: local
    driver_options:
      type: none
      device: /downloads
      o: bind
  - name: torrent-config
    driver: local
    driver_options:
      type: none
      device: /etc/qbittorrent
      o: bind
docker_images:
  - name: hotio/qbittorrent
    tag: release
docker_containers:
  - name: qbittorrent
    image: hotio/qbittorrent:release
    auto_remove: false
    capabilities:
      - NET_ADMIN
    domainname: balsillie.house
    env:
      PUID: '968'
      PGID: '968'
      UMASK: '002'
      TZ: Pacific/Auckland
      WEBUI_PORTS: 8080/tcp
      VPN_ENABLED: 'true'
      VPN_CONF: 'wg0'
      VPN_PROVIDER: 'proton'
      VPN_LAN_NETWORK: ''
      VPN_LAN_LEAK_ENABLED: 'false'
      VPN_EXPOSE_PORTS_ON_LAN: ''
      VPN_AUTO_PORT_FORWARD: 'true'
      VPN_AUTO_PORT_FORWARD_TO_PORTS: ''
      VPN_KEEP_LOCAL_DNS: 'false'
      VPN_FIREWALL_TYPE: 'nftables'
      VPN_HEALTHCHECK_ENABLED: 'true'
      PRIVOXY_ENABLED: 'false'
      UNBOUND_ENABLED: 'false'
    etc_hosts:
      tv.balsillie.house: 192.168.99.254
      movies.balsillie.house: 192.168.99.254
    hostname: torrent
    networks:
      - name: torrent
        aliases:
          - torrent
          - qbittorrent
        ipv4_address: 192.168.99.1
    restart_policy: 'unless-stopped'
    sysctls:
      net.ipv4.conf.all.src_valid_mark: 1
      net.ipv6.conf.all.disable_ipv6: 1
    volumes:
      - torrent-config:/config:rw
      - torrent-data:/downloads:rw
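Two quick post-run spot checks (hedged, since the hotio image's internals aren't shown here): the container should hold the static bridge address, and its health check should pass once the WireGuard tunnel is up:

docker inspect -f '{{ .NetworkSettings.Networks.torrent.IPAddress }}' qbittorrent   # expect 192.168.99.1
docker inspect -f '{{ .State.Health.Status }}' qbittorrent                          # 'healthy' once the VPN check passes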

View File

@ -0,0 +1,43 @@
nginx_sites:
  - name: tv.balsillie.house
    type: proxy
    upstream:
      host: 127.0.0.1
      port: 8989
  - name: movies.balsillie.house
    type: proxy
    upstream:
      host: 127.0.0.1
      port: 7878
  - name: music.balsillie.house
    type: proxy
    upstream:
      host: 127.0.0.1
      port: 8686
  - name: subs.balsillie.house
    type: proxy
    upstream:
      host: 127.0.0.1
      port: 6767
  - name: index.balsillie.house
    type: proxy
    upstream:
      host: 127.0.0.1
      port: 9696
  - name: torrent.balsillie.house
    type: proxy
    upstream:
      host: 192.168.99.1
      port: 8080
  - name: jellyfin.balsillie.house
    type: proxy
    upstream:
      host: 127.0.0.1
      port: 8096
  - name: kodi.balsillie.house
    type: proxy
    upstream:
      host: 127.0.0.1
      port: 8082
nginx_user: "http"

View File

@ -0,0 +1,3 @@
---
sonarr_var: "sonarr_value"

View File

@ -0,0 +1,4 @@
sshd:
  auth:
    password: 'no'
    pubkey: 'yes'

View File

@ -0,0 +1,7 @@
torrent_user: kodi
torrent_downloads_dir: /downloads
torrent_wireguard_address: 10.2.0.2
torrent_wireguard_dns: 10.2.0.1
torrent_wireguard_peer_endpoint: 103.75.11.18
torrent_wireguard_peer_public_key: 8Rm0uoG0H9BcSuA67/5gBv8tJgFZXNLm4sqEtkB9Nmw=

View File

@ -0,0 +1,21 @@
ufw_enabled: true
ufw_rules:
  - name: "SSH from Local Subnet"
    port: "22"
    protocol: "tcp"
    action: "allow"
    source: "10.192.210.0/24"
    destination: "10.192.210.169"
  - name: "HTTP from Local Subnet"
    port: "80"
    protocol: "tcp"
    action: "allow"
    source: "10.192.210.0/24"
    destination: "10.192.210.169"
  - name: "HTTPS from Local Subnet"
    port: "443"
    protocol: "tcp"
    action: "allow"
    source: "10.192.210.0/24"
    destination: "10.192.210.169"
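Whatever the ufw role does internally isn't shown in this diff, but each rule entry is equivalent to a ufw command of this shape:

ufw allow proto tcp from 10.192.210.0/24 to 10.192.210.169 port 22   # 'SSH from Local Subnet'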

View File

@ -0,0 +1 @@
acme_certificate_account_email: acme.kube00@balsillie.email

View File

@ -0,0 +1,9 @@
ansible_connection: ssh
ansible_host: kube00.balsillie.house
ansible_fqdn: kube00.balsillie.house
ansible_remote_addr: 10.192.110.110
ansible_port: 22
ansible_user: ladmin
ansible_become_user: root
ansible_become_method: sudo
static_fqdn: hv00.balsillie.house

View File

@ -0,0 +1,18 @@
nut_client_local_server: false
nut_client_shutdown_cmd: /scripts/shutdown.sh
nut_client_shutdown_exit: "false"
nut_client_hostsync: 15
nut_client_notify_cmd: /scripts/notify.sh
nut_client_min_supplies: 1
nut_client_ups_devices:
  - name: ups0
    host: hv00.balsillie.house
    type: secondary
    port: 3493
    powervalue: 1
nut_client_notify_messages:
  - name: SHUTDOWN
    message: "UPSMON shutdown triggered for KUBE00."
nut_client_notify_flags:
  - name: SHUTDOWN
    flags: SYSLOG+WALL+EXEC

View File

@ -1,13 +0,0 @@
---
# connection
ansible_user: ladmin
ansible_become_user: root
ansible_become_pass: "{{ lab_become_password }}"
ansible_os_family: Arch
# sshd
authorized_keys_file: lab_authorized_keys
openssh_configuration_src: sshd_config_arch

View File

@ -0,0 +1 @@
synapse_host_address: matrix.balsillie.net

View File

@ -0,0 +1,4 @@
ansible_connection: local
ansible_user: ladmin
ansible_become_user: root
ansible_become_method: sudo

View File

@ -0,0 +1,11 @@
certbot_rfc2136_server: '10.208.240.1'
certbot_rfc2136_key_name: 'rndc-house'
certbot_rfc2136_key_algorithm: 'hmac-sha256'
certbot_webserver_type: 'nginx' # 'nginx' or 'apache'
certbot_dns_plugin: 'rfc2136'
certbot_email: "certbot.kodi00@balsillie.email"
certbot_acme_server: "acme-v02.api.letsencrypt.org"
certbot_domains:
- xmr.balsillie.house

View File

@ -1,7 +0,0 @@
---
# connection
ansible_user: michael
ansible_become_user: michael
ansible_os_family: Arch

View File

@ -1,28 +1,80 @@
 all:
   children:
-    hypervisors:
-      hosts:
-        server:
-          ansible_host: server.balsillie.net
-        lab:
-          ansible_host: lab.balsillie.net
-    nodes:
-      hosts:
-        node1:
-          ansible_host: node1.balsillie.net
-        node2:
-          ansible_host: node2.balsillie.net
-        node3:
-          ansible_host: node3.balsillie.net
-    guests:
-      hosts:
-        router:
-          ansible_host: router.balsillie.net
+    aur_repo_hosts:
+      hosts:
+        dev.balsillie.house:
+    firewalls:
+      children:
+        opnsense:
+          hosts:
+            router.balsillie.house:
+    switches:
+      hosts:
+        sw00.balsillie.house:
+    waps:
+      hosts:
+        wap00.balsillie.house:
+    virtual_machines:
+      hosts:
+        fw00.balsillie.house:
+        win11.balsillie.house:
+    bare_metal:
+      hosts:
+        sw00.balsillie.house:
+        wap00.balsillie.house:
+        hv00.balsillie.house:
+        kube00.balsillie.house:
+        lat5420.balsillie.house:
+        lat7490.balsillie.house:
+        nuc.balsillie.house:
+    servers:
+      children:
+        hypervisors:
+          hosts:
+            hv00.balsillie.house: # vp2420
+        k8s:
+          children:
+            k8s_control:
+              hosts:
+                kube00.balsillie.house:
+            k8s_worker:
+              hosts:
+                kube00.balsillie.net:
+            k8s_storage:
+              hosts:
+                kube00.balsillie.net:
+        nut_servers:
+          hosts:
+            hv00.balsillie.house:
+        nut_clients:
+          hosts:
+            hv00.balsillie.house:
+            kube00.balsillie.house:
+        nas:
+          hosts:
+            nas.balsillie.house:
     workstations:
-      hosts:
-        lat5420:
-          ansible_host: lat5420.balsillie.net
-        sff:
-          ansible_host: sff.balsillie.net
-        bridie:
-          ansible_host: bridie.balsillie.net
+      children:
+        arch:
+          hosts:
+            lat5420.balsillie.house:
+            sff.balsillie.house:
+            kodi00.balsillie.house:
+            nuc.balsillie.house:
+        windows:
+          hosts:
+            lat7490.balsillie.house:
+            win11.balsillie.house:
+        laptops:
+          hosts:
+            lat5420.balsillie.house:
+            lat7490.balsillie.house:
+        desktops:
+          hosts:
+            sff.balsillie.house:
+            mp00.balsillie.house:
+            kodi00.balsillie.house:
+            nuc.balsillie.house:
+        kodi:
+          hosts:
+            kodi00.balsillie.house:
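Combined with the ansible.cfg change above (inventory = ./inventory/), every file in the inventory directory is merged into this tree; the regrouping can be verified without touching any host:

ansible-inventory --graph                          # print the group/host hierarchy
ansible-inventory --host kube00.balsillie.house    # show the merged vars for one host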

View File

@ -0,0 +1,10 @@
# code: language=ansible
- name: AUR Repo
  hosts: aur_repo_hosts
  become: true
  gather_facts: true
  roles:
    # - certbot
    - nginx
    # - aur_repo_host

View File

@ -0,0 +1,10 @@
---
- name: Setup core home router
  hosts:
    - hv00.balsillie.house
  gather_facts: true
  become: true
  roles:
    # - role: aur_repo_host
    - role: nginx

View File

@ -0,0 +1,15 @@
---
- name: Setup Kodi boxes
  hosts:
    - kodi00.balsillie.house
  gather_facts: true
  become: true
  roles:
    # - role: sshd
    # - role: ufw
    # - role: nginx
    # - role: aur_repo_client
    # - role: arr
    - role: torrent
    # - role: sonarr

View File

@ -0,0 +1,9 @@
---
- name: Setup NUC
  hosts:
    - nuc.balsillie.house
  gather_facts: true
  become: true
  roles:
    - role: certbot

View File

@ -0,0 +1,32 @@
- name: Install NUT
  hosts:
    - nut_servers
    - nut_clients
  become: true
  gather_facts: true
  tasks:
    - name: Install NUT package on Archlinux
      when: ansible_facts['os_family'] == "Archlinux"
      community.general.pacman:
        name: nut
        state: latest
        update_cache: true
- name: Setup NUT servers
  gather_facts: false
  hosts: nut_servers
  become: true
  roles:
    - role: acme_certificate
      acme_certificate_subject: "{{ ansible_host }}"
      acme_certificate_zone: balsillie.house
      acme_certificate_restart_services: ['nut-server.service']
    - role: nut_server
- name: Setup NUT clients
  gather_facts: false
  hosts: nut_clients
  become: true
  roles:
    - nut_client

View File

@ -0,0 +1 @@
../../roles/

View File

@ -0,0 +1,6 @@
---
- ansible.builtin.import_playbook: ./01_baremetal.yaml
- ansible.builtin.import_playbook: ./02_hypervisor.yaml
- ansible.builtin.import_playbook: ./03_vm_template.yaml
- ansible.builtin.import_playbook: ./04_vm_deploy.yaml
- ansible.builtin.import_playbook: ./05_k8s_deploy.yaml
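The numbered playbooks then run end-to-end through this wrapper, so a full cluster build is a single invocation (the wrapper's own file name is not shown in this diff and is assumed here):

cd ansible
ansible-playbook playbooks/k8s/main.yaml    # wrapper file name assumed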

View File

@ -0,0 +1,7 @@
---
- name: bare metal os installation
  gather_facts: false
  hosts: hv00
  become: true
  roles:
    - baremetal

View File

@ -0,0 +1,15 @@
---
- name: configure hypervisor host
  gather_facts: false
  hosts: hv00
  become: true
  roles:
    - python-install
    - sshd
    - firewall
    - pikaur
    - msmtp
    - zfs-install
    - libvirt-server
View File

@ -0,0 +1,7 @@
---
- name: create vm template
  hosts: hv00
  gather_facts: true
  become: true
  roles:
    - vm_template

View File

@ -0,0 +1,22 @@
---
- name: create the vms
  hosts: hv00
  gather_facts: true
  become: true
  roles:
    - vm_deploy
# - name: vm hardening
#   hosts: k8s
#   gather_facts: true
#   become: true
#   roles:
#     - sshd
#     - firewall
- name: configure vm disks
  hosts: k8s
  gather_facts: true
  become: true
  roles:
    - vm_disks

View File

@ -0,0 +1,66 @@
---
# - name: configure control plane
# hosts: k8s_control
# gather_facts: true
# become: true
# roles:
# - k8s_control
# - name: configure calico networking
# hosts: localhost
# gather_facts: true
# become: false
# roles:
# - k8s_network
# - name: remove control-plane taints
# hosts: localhost
# gather_facts: false
# become: false
# roles:
# - k8s_taint
# - name: configure zfs storage on nodes
# hosts: k8s_storage
# gather_facts: true
# become: true
# roles:
# - zfs_repo_install
# - name: configure open-ebs storage operator
# hosts: localhost
# gather_facts: false
# become: false
# roles:
# - k8s_storage_ebs_local_deploy
- name: configure open-ebs zfs driver
  hosts: localhost
  gather_facts: false
  become: false
  roles:
    - k8s_storage_ebs_zfs_deploy
# - name: configure smb storage provider
# hosts: localhost
# gather_facts: false
# become: false
# roles:
# - k8s_storage_smb_deploy
# - name: configure ingress controller
# hosts: localhost
# gather_facts: false
# become: false
# roles:
# - k8s_ingress_controller
# - name: configure cert manager
# hosts: localhost
# gather_facts: false
# become: false
# roles:
# - k8s_cert_manager

View File

@ -0,0 +1,7 @@
---
- name: destroy rook storage
  hosts: k8s_worker
  gather_facts: true
  become: true
  roles:
    - k8s_ebs_destroy

View File

@ -0,0 +1,7 @@
---
- name: destroy rook storage
  hosts: k8s_worker
  gather_facts: true
  become: true
  roles:
    - k8s_rook_destroy

View File

@ -0,0 +1,7 @@
---
- name: destroy the k8s cluster
  hosts: k8s
  gather_facts: true
  become: true
  roles:
    - k8s_destroy

View File

@ -0,0 +1,7 @@
---
- name: destroy the vms
  hosts: hv00
  gather_facts: true
  become: true
  roles:
    - vm_destroy

View File

@ -0,0 +1,3 @@
---
- ansible.builtin.import_playbook: ./97_k8s_destroy.yaml
- ansible.builtin.import_playbook: ./98_vm_destroy.yaml

1
ansible/playbooks/k8s/roles Symbolic link
View File

@ -0,0 +1 @@
../../roles/

View File

@ -1,10 +0,0 @@
---
- name: Configure lab host
  gather_facts: false
  hosts: lab
  become: true
  roles:
    - python
    - sshd
    - firewall

1
ansible/playbooks/roles Symbolic link
View File

@ -0,0 +1 @@
../roles

View File

@ -0,0 +1,44 @@
# code: language=ansible
- name: Clean Synapse
  hosts: localhost
  connection: local
  become: false
  gather_facts: false
  tasks:
    - name: Get room list
      ansible.builtin.uri:
        url: "https://{{ synapse_host_address }}/_synapse/admin/v1/rooms?limit=1000"
        headers:
          Authorization: "Bearer {{ synapse_admin_token }}"
      register: room_list
    - name: Set empty_rooms fact
      ansible.builtin.set_fact:
        empty_rooms: "{{ room_list.json.rooms | selectattr('joined_local_members', '==', 0) | list }}"
    - name: Debug empty room count
      ansible.builtin.debug:
        msg: "Total empty rooms to delete: {{ empty_rooms | length }}"
    - name: Delete empty rooms
      when: empty_rooms | length > 0
      ansible.builtin.uri:
        url: "https://{{ synapse_host_address }}/_synapse/admin/v2/rooms/{{ room.room_id }}"
        method: DELETE
        headers:
          Authorization: "Bearer {{ synapse_admin_token }}"
        body_format: json
        body: {}
      loop: "{{ empty_rooms }}"
      loop_control:
        loop_var: room
        label: "{{ room.room_id }}"
      register: purge_ids
    - name: Write purge_ids to file
      ansible.builtin.copy:
        dest: "{{ playbook_dir }}/purge_ids_{{ now(utc=false, fmt='%Y-%m-%d_%H-%M-%S') }}.json"
        content: "{{ purge_ids.results | map(attribute='json.delete_id') | list | to_nice_json }}"
        mode: "0664"
mode: "0664"

View File

@ -0,0 +1,28 @@
# code: language=ansible
- name: Clean Synapse
  hosts: localhost
  connection: local
  become: false
  gather_facts: false
  vars_prompt:
    - name: room_id
      prompt: "Enter the room ID to delete"
      private: false
  tasks:
    - name: Delete room
      ansible.builtin.uri:
        url: "https://{{ synapse_host_address }}/_synapse/admin/v2/rooms/{{ room_id }}"
        method: DELETE
        headers:
          Authorization: "Bearer {{ synapse_admin_token }}"
        body_format: json
        body: {}
      register: purge_id
    - name: Wait for purge to complete
      ansible.builtin.uri:
        url: "https://{{ synapse_host_address }}/_synapse/admin/v2/rooms/delete_status/{{ item }}"
        headers:
          Authorization: "Bearer {{ synapse_admin_token }}"

View File

@ -0,0 +1,19 @@
# code: language=ansible
- name: Clean Synapse
  hosts: localhost
  connection: local
  become: false
  gather_facts: false
  tasks:
    - name: Get room details
      ansible.builtin.uri:
        url: "https://{{ synapse_host_address }}/_synapse/admin/v1/rooms?limit=1000"
        headers:
          Authorization: "Bearer {{ synapse_admin_token }}"
      register: result
    - name: Print result
      ansible.builtin.debug:
        var: result.json.rooms | map(attribute='room_id') | list

View File

@ -0,0 +1,19 @@
# code: language=ansible
- name: Clean Synapse
  hosts: localhost
  connection: local
  become: false
  gather_facts: false
  tasks:
    - name: Get large rooms
      ansible.builtin.uri:
        url: "https://{{ synapse_host_address }}/_synapse/admin/v1/statistics/database/rooms"
        headers:
          Authorization: "Bearer {{ synapse_admin_token }}"
      register: result
    - name: Print result
      ansible.builtin.debug:
        var: result.json

View File

@ -0,0 +1,44 @@
# code: language=ansible
- name: Clean Synapse
  hosts: localhost
  connection: local
  become: false
  gather_facts: false
  vars_prompt:
    - name: "purge_ids_file"
      prompt: "Enter the file name containing the purge ids"
      private: false
  tasks:
    - name: Load purge ids
      ansible.builtin.slurp:
        src: "{{ playbook_dir }}/{{ purge_ids_file }}"
      register: purge_ids
    - name: Set purge_ids_list fact
      ansible.builtin.set_fact:
        purge_ids_list: "{{ purge_ids.content | b64decode | from_json }}"
    - name: Get purge status
      ansible.builtin.uri:
        url: "https://{{ synapse_host_address }}/_synapse/admin/v2/rooms/delete_status/{{ item }}"
        headers:
          Authorization: "Bearer {{ synapse_admin_token }}"
      loop: "{{ purge_ids_list }}"
      register: purge_status
    - name: Set purge_status_totals
      ansible.builtin.set_fact:
        purge_status_shutting_down: "{{ purge_status.results | selectattr('json.status', '==', 'shutting_down') | list | length }}"
        purge_status_purging: "{{ purge_status.results | selectattr('json.status', '==', 'purging') | list | length }}"
        purge_status_complete: "{{ purge_status.results | selectattr('json.status', '==', 'complete') | list | length }}"
        purge_status_failed: "{{ purge_status.results | selectattr('json.status', '==', 'failed') | list | length }}"
    - name: Print status
      ansible.builtin.debug:
        msg: |
          Shutting down: {{ purge_status_shutting_down }}
          Purging: {{ purge_status_purging }}
          Complete: {{ purge_status_complete }}
          Failed: {{ purge_status_failed }}

View File

@ -0,0 +1,23 @@
# code: language=ansible
- name: Clean Synapse
  hosts: localhost
  connection: local
  become: false
  gather_facts: false
  vars_prompt:
    - name: room_id
      prompt: "Enter the room ID to fetch"
      private: false
  tasks:
    - name: Get room details
      ansible.builtin.uri:
        url: "https://{{ synapse_host_address }}/_synapse/admin/v1/rooms/{{ room_id }}"
        headers:
          Authorization: "Bearer {{ synapse_admin_token }}"
      register: result
    - name: Print result
      ansible.builtin.debug:
        var: result.json

View File

@ -0,0 +1,23 @@
# code: language=ansible
- name: Room members
  hosts: localhost
  connection: local
  become: false
  gather_facts: false
  vars_prompt:
    - name: room_id
      prompt: "Enter the room ID to fetch"
      private: false
  tasks:
    - name: Get room details
      ansible.builtin.uri:
        url: "https://{{ synapse_host_address }}/_synapse/admin/v1/rooms/{{ room_id }}/members"
        headers:
          Authorization: "Bearer {{ synapse_admin_token }}"
      register: result
    - name: Print result
      ansible.builtin.debug:
        var: result.json

View File

@ -0,0 +1,17 @@
---
- name: Configure Truenas
  hosts: truenas
  become: false
  tasks:
    - name: Install required packages
      package:
        name: "{{ item }}"
        state: present
      with_items:
        - py37-ansible
        - py37-pip
        - py37-netifaces
        - py37-netaddr
        - py37-requests
        - py37-yaml

View File

@ -0,0 +1,218 @@
---
- name: Install required python libraries system wide
  when: ansible_facts['os_family'] == "Archlinux"
  community.general.pacman:
    name:
      - python-cryptography
      - python-dnspython
    state: latest
    update_cache: true
- name: Set certificate path facts
  ansible.builtin.set_fact:
    acme_certificate_certificate_path: "/etc/ssl/private/{{ acme_certificate_subject }}.pem"
    acme_certificate_chain_path: "/etc/ssl/private/{{ acme_certificate_subject }}.chain.pem"
    acme_certificate_combined_path: "/etc/ssl/private/{{ acme_certificate_subject }}.combined.pem"
    acme_certificate_csr_path: "/etc/ssl/private/{{ acme_certificate_subject }}.csr"
    acme_certificate_fullchain_path: "/etc/ssl/private/{{ acme_certificate_subject }}.fullchain.pem"
    acme_certificate_key_path: "/etc/ssl/private/{{ acme_certificate_subject }}.key"
    acme_certificate_plain_combined_path: "/etc/ssl/private/{{ acme_certificate_subject }}.plain.combined.pem"
    acme_certificate_plain_key_path: "/etc/ssl/private/{{ acme_certificate_subject }}.plain.key"
- name: Create ACME account key directory
  ansible.builtin.file:
    group: root
    mode: '0700'
    owner: root
    path: /etc/ssl/private/ACME
    state: directory
- name: Create ACME account key
  community.crypto.openssl_privatekey:
    cipher: auto
    curve: secp384r1
    format: auto_ignore
    group: root
    mode: '0600'
    owner: root
    passphrase: "{{ acme_certificate_account_key_passphrase }}"
    path: /etc/ssl/private/ACME/account.key
    size: 4096
    state: present
    type: RSA
- name: Generate RSA private key
  community.crypto.openssl_privatekey:
    cipher: auto
    curve: secp384r1
    format: auto_ignore
    group: root
    mode: '0600'
    owner: root
    passphrase: "{{ ssl_passphrase }}"
    path: "{{ acme_certificate_key_path }}"
    size: 4096
    state: present
    type: RSA
  register: genrsa_private_key
- name: Generate CSR
  community.crypto.openssl_csr:
    common_name: "{{ acme_certificate_subject }}"
    country_name: "{{ acme_certificate_csr_country }}"
    digest: sha256
    email_address: "{{ acme_certificate_csr_email }}"
    group: root
    locality_name: "{{ acme_certificate_csr_locality }}"
    mode: '0600'
    organization_name: "{{ acme_certificate_csr_organization }}"
    owner: root
    path: "{{ acme_certificate_csr_path }}"
    privatekey_passphrase: "{{ ssl_passphrase }}"
    privatekey_path: "{{ acme_certificate_key_path }}"
    state: present
    state_or_province_name: "{{ acme_certificate_csr_state }}"
    use_common_name_for_san: true
- name: Submit ACME certificate request
  community.crypto.acme_certificate:
    account_email: "{{ acme_certificate_account_email }}"
    account_key_passphrase: "{{ acme_certificate_account_key_passphrase }}"
    account_key_src: /etc/ssl/private/ACME/account.key
    acme_directory: "{{ acme_certificate_directory }}"
    acme_version: 2
    chain_dest: "{{ acme_certificate_chain_path }}"
    challenge: dns-01
    csr: "{{ acme_certificate_csr_path }}"
    dest: "{{ acme_certificate_certificate_path }}"
    fullchain_dest: "{{ acme_certificate_fullchain_path }}"
    modify_account: true
    select_crypto_backend: cryptography
    terms_agreed: true
    validate_certs: true
  register: challenge
- name: Debug ACME certificate challenge
  ansible.builtin.debug:
    var: challenge
- name: Proceed if challenge is changed
  when:
    - challenge is changed
    - acme_certificate_subject in challenge.challenge_data
  block:
    - name: Answer ACME certificate challenge
      community.general.nsupdate:
        key_algorithm: "{{ rfc2136_key_algorithm }}"
        key_name: "{{ rfc2136_key_name }}"
        key_secret: "{{ rfc2136_key_secret }}"
        port: 53
        protocol: tcp
        record: "{{ challenge.challenge_data[acme_certificate_subject]['dns-01'].record }}."
        server: "{{ rfc2136_server_address }}"
        state: present
        ttl: 3600
        type: TXT
        value: "{{ challenge.challenge_data[acme_certificate_subject]['dns-01'].resource_value }}"
        # zone: "{{ acme_certificate_zone }}"
      register: nsupdate_result
    - name: Debug nsupdate result
      ansible.builtin.debug:
        var: nsupdate_result
    - name: Retrieve ACME certificate
      community.crypto.acme_certificate:
        account_email: "{{ acme_certificate_account_email }}"
        account_key_passphrase: "{{ acme_certificate_account_key_passphrase }}"
        account_key_src: /etc/ssl/private/ACME/account.key
        acme_directory: "{{ acme_certificate_directory }}"
        acme_version: 2
        chain_dest: "{{ acme_certificate_chain_path }}"
        challenge: dns-01
        csr: "{{ acme_certificate_csr_path }}"
        data: "{{ challenge }}"
        dest: "{{ acme_certificate_certificate_path }}"
        fullchain_dest: "{{ acme_certificate_fullchain_path }}"
        select_crypto_backend: cryptography
        terms_agreed: true
        validate_certs: true
    - name: Cleanup ACME challenge
      community.general.nsupdate:
        key_algorithm: "{{ rfc2136_key_algorithm }}"
        key_name: "{{ rfc2136_key_name }}"
        key_secret: "{{ rfc2136_key_secret }}"
        port: 53
        protocol: tcp
        record: "{{ challenge.challenge_data[acme_certificate_subject]['dns-01'].record }}."
        server: "{{ rfc2136_server_address }}"
        state: absent
        ttl: 3600
        type: TXT
        value: "{{ challenge.challenge_data[acme_certificate_subject]['dns-01'].resource_value }}"
        zone: "{{ acme_certificate_zone }}"
- name: Slurp fullchain contents
  ansible.builtin.slurp:
    src: "{{ acme_certificate_fullchain_path }}"
  register: acme_certificate_fullchain_content
- name: Slurp private key contents
  ansible.builtin.slurp:
    src: "{{ acme_certificate_key_path }}"
  register: acme_certificate_key_content
- name: Create combined cert file
  ansible.builtin.template:
    dest: "{{ acme_certificate_combined_path }}"
    group: root
    mode: '0600'
    owner: root
    src: combined.pem.j2
- name: Check if plain key file exists
  ansible.builtin.stat:
    path: "{{ acme_certificate_plain_key_path }}"
  register: plain_key_file
- name: Create a plain text copy of the SSL private key # noqa: no-handler
  when: |
    genrsa_private_key.changed or
    not plain_key_file.stat.exists
  ansible.builtin.command:
    cmd: openssl rsa -in {{ acme_certificate_key_path }} -passin pass:{{ ssl_passphrase }} -out {{ acme_certificate_plain_key_path }}
  changed_when: true
- name: Slurp plain text private key contents
  ansible.builtin.slurp:
    src: "{{ acme_certificate_plain_key_path }}"
  register: acme_certificate_key_content
- name: Create plain text combined cert file
  ansible.builtin.template:
    dest: "{{ acme_certificate_plain_combined_path }}"
    group: root
    mode: '0600'
    owner: root
    src: combined.pem.j2
- name: Dependent services block
  when:
    - (acme_certificate_restart_services | default([]) | length) >= 1
    - challenge is changed
  block:
    - name: Check state of running services
      ansible.builtin.service_facts:
    - name: Restart dependent services
      when:
        - ansible_facts.services[item] is defined
        - ansible_facts.services[item].state in ['running','failed']
      ansible.builtin.service:
        name: "{{ item }}"
        state: restarted
      loop: "{{ acme_certificate_restart_services }}"
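After a successful run, the issued material sits under /etc/ssl/private following the path facts set at the top of the file; using hv00.balsillie.house as an example subject, the results can be inspected with openssl:

openssl x509 -in /etc/ssl/private/hv00.balsillie.house.pem -noout -subject -issuer -dates
openssl rsa -in /etc/ssl/private/hv00.balsillie.house.plain.key -noout -check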

View File

@ -0,0 +1,2 @@
{{ acme_certificate_fullchain_content['content'] | b64decode }}
{{ acme_certificate_key_content['content'] | b64decode }}

View File

@ -0,0 +1,49 @@
---
iso_source:
ntp_servers:
  - time.example.com
pacstrap:
  server: # Select from https://geo.mirror.pkgbuild.com/iso/latest/arch/pkglist.x86_64.txt
    base
    linux-lts
    linux-firmware
    intel-ucode
    e2fsprogs
    dosfstools
    exfatprogs
    nftables
    openssh
    ufw
    nano
    man-db
    man-pages
    texinfo
    curl
    which
    usbutils
    tzdata
    tpm2-tss
    tar
    sudo
    smartmontools
    shadow
    sed
    screen
    reflector
    pv
    pinentry
    pciutils
    parted
    openssl
    nbd
    kmod
    bash
    bind
    ca-certificates
    ca-certificates-mozilla
    ca-certificates-utils
    efibootmgr
    grep
    mdadm
    lvm2

View File

@ -0,0 +1,3 @@
linux-lts
linux-lts-headers
base

View File

@ -0,0 +1,136 @@
---
- name: attach installation iso as virtual media
- name: boot from installation iso
- name: detect booted ip address
- name: configure disks
# Specify root disk and part, set to type 23 (linux root x86-64), label root
# Specify efi disk and part, set to type 1 (efi system), label efi
# format efi partition
# mkfs.fat -F32 /dev/mmcblk0p1
# Encrypt root partition
# cryptsetup -y -v luksFormat /dev/sda1 # TODO add keyfile/password automatically
# cryptsetup open /dev/sda1 root
# mkfs.ext4 /dev/mapper/root
# mkdir /mnt/root
# mount /dev/mapper/root /mnt/root
# mkdir /mnt/root/efi
# mount /dev/mmcblk0p1 /mnt/root/efi
# Add cryptsetup params to kernel cmdline
# cryptdevice=UUID=device-UUID:root root=/dev/mapper/root rw
# add efi to /etc/fstab
# mkdir /mnt/mountpoint/etc
# sudo genfstab -L /mnt/mountpoint >> /mnt/mountpoint/etc/fstab
- name: sync ntp
# timedatectl set-timezone Australia/Brisbane
# timedatectl set-ntp true
# run reflector to get a list of mirrors
# reflector -c AU --save /etc/pacman.d/mirrorlist
# update dbs
# pacman -Sy
# pacstrap
# pacstrap -K /mnt/root base linux-lts linux-firmware nano openssh bind bash efibootmgr reflector screen pv pinentry sudo man-db man-pages texinfo ufw nftables intel-ucode e2fsprogs dosfstools curl cryptsetup sbctl sbsigntools fwupd fwupd-efi dmidecode udisks2 usbutils inetutils ethtool qemu-guest-agent arch-install-scripts lsof
# desktop
# pacstrap -K /mnt base linux linux-firmware nano openssh bind bash efibootmgr reflector screen pv pinentry sudo man-db man-pages texinfo ufw nftables intel-ucode e2fsprogs dosfstools curl cryptsetup sbctl sbsigntools fwupd fwupd-efi dmidecode udisks2 usbutils inetutils ethtool arch-install-scripts lsof btrfs-progs plasma-meta plasma-wayland-session kde-system dolphin-plugins
# gen fstab
# genfstab -L /mnt/root >> /mnt/root/etc/fstab
#
# chroot from here
#
# set hostname
# echo hv00 > /etc/hostname
# TODO add entries to /etc/hosts
# 127.0.0.1 localhost
# ::1 localhost
# 127.0.1.1 static_fqdn
# link timezone
# ln -sf /usr/share/zoneinfo/Australia/Brisbane /etc/localtime
# enable ntp again
# timedatectl set-ntp true # TODO move this post reboot
# sync hardware clock
# hwclock --systohc
# set locale
# sed -i 's/#en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/g' /etc/locale.gen
# locale-gen
# echo LANG=en_US.UTF-8 > /etc/locale.conf
# uncomment wheel group in /etc/sudoers
# sed -i 's/# %wheel ALL=(ALL:ALL) ALL/%wheel ALL=(ALL:ALL) ALL/g' /etc/sudoers
# add user
# useradd -u 1000 -U -m -b /home/ -G wheel -s /bin/bash ladmin
# set new user password
# disable root user
# passwd -l root
# usermod -s /sbin/nologin root
# create /etc/kernel/cmdline file
# the uuids are the DISK uuids from /dev/disk/by-uuid, NOT the partuuids
# echo 'cryptdevice=dbbb9fb2-5509-4701-a2bb-5660934a5378:root root=/dev/mapper/root rw' > /etc/kernel/cmdline
# for sd-encrypt hook
# echo 'rd.luks.name=dbbb9fb2-5509-4701-a2bb-5660934a5378=root root=/dev/mapper/root rw' > /etc/kernel/cmdline
# create a default systemd-networkd config
# enable systemd-networkd
# enable sshd
# enable ufw service
# enable ufw firewall
# create ufw config to allow ssh port 22
# modify mkinitcpio presets
# template file?
# output to default efi path ESP/efi/boot/bootx64.efi
# modify mkinitcpio.conf for encryption
# old HOOKS=(base udev autodetect modconf kms keyboard keymap consolefont block filesystems fsck)
# new HOOKS=(base systemd keyboard autodetect modconf kms block sd-encrypt filesystems fsck)
# sed -i 's/^HOOKS=(base udev autodetect modconf block filesystems keyboard fsck)/HOOKS=(base udev autodetect modconf block encrypt filesystems keyboard fsck)/g' /etc/mkinitcpio.conf
# generate sb keys with sbctl
# keys go to /usr/share/secureboot/keys/db/db.pem
# enroll sbctl keys
# add console= option to cmdline file
# create initcpio post hook /etc/initcpio/post/uki-sbsign
# make /etc/initcpio/post/uki-sbsign executable
# chmod +x /etc/initcpio/post/uki-sbsign
# make initcpio
# mkinitcpio -p linux-lts
# vfio and iommu
# add 'intel_iommu=on iommu=pt' to kernel cmdline
# add vfio binding
# vp2420 iGPU = 8086:4555
# add vfio-pci ids to /etc/kernel/cmdline
# vfio-pci.ids=8086:4555
# add vfio modules to mkinitcpio.conf
# MODULES=(vfio_pci vfio vfio_iommu_type1)
# ensure modconf hook is in mkinitcpio.conf
# HOOKS=(base systemd keyboard autodetect modconf kms block sd-encrypt filesystems fsck)
# efibootmgr: use NO backslash in -l when the loader file sits at the ESP root
# efibootmgr -c -d /dev/nvme0n1 -p 1 -L "Arch Linux" -l "archlinux.efi"

View File

@ -0,0 +1,9 @@
[Match]
MACAddress={{ mac_address }}
[Link]
ARP=no
[Network]
DHCP=no
Bond=lacp

View File

@ -0,0 +1,24 @@
---
- name: Install arr packages
  when: ansible_facts['os_family'] == "Archlinux"
  community.general.pacman:
    name: "{{ arr_packages }}"
    state: present
    update_cache: true
- name: Reload systemd
  ansible.builtin.systemd:
    daemon_reload: true
- name: Start arr services
  ansible.builtin.systemd:
    name: "{{ item }}"
    state: started
    enabled: true
  loop:
    - sonarr.service
    - radarr.service
    - lidarr.service
    - prowlarr.service
    - bazarr.service

View File

@ -0,0 +1,6 @@
arr_packages:
- sonarr
- radarr
- lidarr
- bazarr
- prowlarr

View File

@ -0,0 +1,50 @@
---
- name: Check if repo public key is in pacman keyring
  ansible.builtin.command:
    argv:
      - pacman-key
      - --list-keys
      - "{{ aur_repo_client_public_key_fingerprint }}"
  register: repo_key_check
  failed_when: repo_key_check.rc not in [0, 1]
  changed_when: false
- name: Add repo public key to pacman keyring
  when: repo_key_check.rc == 1
  block:
    - name: Import the repo public key
      ansible.builtin.command:
        argv:
          - pacman-key
          - --recv-keys
          - "{{ aur_repo_client_public_key_fingerprint }}"
          - --keyserver
          - "{{ aur_repo_client_keyserver }}"
      changed_when: true
    - name: Trust the repo public key
      ansible.builtin.command:
        argv:
          - pacman-key
          - --lsign-key
          - "{{ aur_repo_client_public_key_fingerprint }}"
      changed_when: true
- name: Add home repo block to pacman.conf
  ansible.builtin.blockinfile:
    path: /etc/pacman.conf
    block: |
      [{{ aur_repo_client_repo_name }}]
      SigLevel = Required TrustedOnly
      Server = {{ aur_repo_client_repo_address }}
    create: false
    state: present
    insertafter: EOF
  register: add_pacman_repo
- name: Update pacman database # noqa: no-handler
  when: add_pacman_repo.changed
  community.general.pacman:
    update_cache: true

View File

@ -0,0 +1,6 @@
---
aur_repo_client_repo_name: "home"
aur_repo_client_repo_address: "https://repo.balsillie.house"
aur_repo_client_public_key_fingerprint: DB529158B99DD8311D78CA2FBE6003C744F56EE2
aur_repo_client_keyserver: hkps://keyserver.ubuntu.com

View File

@ -0,0 +1,12 @@
[Unit]
Description=Sync AUR packages
Wants=aur-sync.timer
[Service]
Type=oneshot
ExecStart=/usr/bin/aur sync --no-view --upgrades --no-confirm --clean --rm-deps --sign --database home
User=aur-builder
Group=aur-builder
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,12 @@
[Unit]
Description=Timer that runs aur sync service
Requires=aur-sync.service
[Timer]
Unit=aur-sync.service
OnCalendar=*-*-* 16:00:00
RandomizedDelaySec=120
Persistent=true
[Install]
WantedBy=timers.target
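Since the service is oneshot and the timer drives it, activation means enabling the timer rather than the service; a typical bring-up on the repo host:

systemctl daemon-reload
systemctl enable --now aur-sync.timer
systemctl list-timers aur-sync.timer    # next run at 16:00 plus up to 120 s of jitter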

View File

@ -0,0 +1,270 @@
---
- name: Lookup aur_repo_host secret
delegate_to: localhost
become: false
community.hashi_vault.vault_kv1_get:
path: ansible/group_vars/aur_repo_hosts
register: aur_repo_host_secret
- name: Set aur_repo facts
ansible.builtin.set_fact:
aur_repo_private_key: "{{ aur_repo_host_secret.secret.aur_repo_private_key }}"
aur_repo_key_thumbprint: "{{ aur_repo_host_secret.secret.aur_repo_key_thumbprint }}"
- name: Create the makepkg drop-in config file
ansible.builtin.template:
dest: /etc/makepkg.conf.d/makepkg.conf
src: makepkg.conf.j2
owner: root
group: root
mode: "0644"
- name: Create the build user group
ansible.builtin.group:
name: "{{ aur_repo_build_account }}"
system: true
state: present
- name: Create the build user
ansible.builtin.user:
name: "{{ aur_repo_build_account }}"
password: '!'
group: "{{ aur_repo_build_account }}"
comment: "AUR Package Builder"
shell: /sbin/nologin
home: "{{ aur_repo_dir }}"
createhome: true
system: true
state: present
- name: Create the build user sudoer file
ansible.builtin.template:
dest: /etc/sudoers.d/{{ aur_repo_build_account }}
src: aur-sudoer.j2
owner: root
group: root
mode: "0640"
- name: Create the build dirs
ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: "{{ aur_repo_build_account }}"
group: "{{ aur_repo_build_account }}"
mode: "0775"
loop:
- "{{ aur_repo_dir }}"
- "{{ aur_repo_dir }}/packages"
- "{{ aur_repo_dir }}/sources"
- "{{ aur_repo_dir }}/srcpackages"
- /var/log/makepkg
- /tmp/build
- name: Check if the signing key is in build user's keyring
ansible.builtin.command:
cmd: gpg2 --list-secret-key --with-colons {{ aur_repo_key_thumbprint }}
failed_when: key_result.rc not in [0, 2]
changed_when: false
register: key_result
vars:
ansible_become_user: "{{ aur_repo_build_account }}"
- name: GPG key import block
when: key_result.rc == 2
block:
- name: Template out the signing private key
ansible.builtin.template:
dest: "/tmp/build/signing_key.asc"
src: signing_key.asc.j2
owner: "{{ aur_repo_build_account }}"
group: "{{ aur_repo_build_account }}"
mode: "0600"
- name: Import the signing key
ansible.builtin.command:
cmd: gpg2 --import /tmp/build/signing_key.asc
changed_when: true
vars:
ansible_become_user: "{{ aur_repo_build_account }}"
- name: Delete the signing key
ansible.builtin.file:
path: "/tmp/build/signing_key.asc"
state: absent
- name: Check if aurutils is already installed
ansible.builtin.stat:
follow: true
path: /usr/bin/aur
register: aurutils_stat
- name: Aurutils install block
when: not aurutils_stat.stat.exists
block:
- name: Install makepkg dependencies
community.general.pacman:
name:
- git
- base-devel
state: present
update_cache: true
- name: Clone aurutils
ansible.builtin.git:
depth: 1
dest: /tmp/aurutils
repo: https://aur.archlinux.org/aurutils.git
single_branch: true
version: master
- name: Slurp PKGBUILD contents
ansible.builtin.slurp:
path: /tmp/aurutils/PKGBUILD
register: aurutils_pkgbuild
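# The lookaheads below pull depends=(...), pkgver=, pkgrel= and arch=(...) out
# of the PKGBUILD so later tasks can install the dependencies and predict the
# built package filename (see the creates: guard on the makepkg task).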
- name: Parse PKGBUILD into facts
ansible.builtin.set_fact:
aurutils_dependencies: "{{ aurutils_pkgbuild['content'] | b64decode | regex_search('(?<=^depends=\\().*(?=\\)$)', multiline=True) | replace(\"'\", '') | split(' ') }}" # noqa: yaml[line-length]
aurutils_pkgver: "{{ aurutils_pkgbuild['content'] | b64decode | regex_search('(?<=^pkgver=).*(?=$)', multiline=True) }}"
aurutils_pkgrel: "{{ aurutils_pkgbuild['content'] | b64decode | regex_search('(?<=^pkgrel=).*(?=$)', multiline=True) }}"
aurutils_arch: "{{ aurutils_pkgbuild['content'] | b64decode | regex_search('(?<=^arch=\\().*(?=\\)$)', multiline=True) | replace(\"'\", '') }}"
- name: Install aurutils dependencies
community.general.pacman:
name: "{{ aurutils_dependencies }}"
state: present
reason: dependency
update_cache: false
- name: Build aurutils
ansible.builtin.command:
cmd: makepkg
chdir: /tmp/aurutils
creates: "{{ aur_repo_dir }}/packages/aurutils-{{ aurutils_pkgver }}-{{ aurutils_pkgrel }}-{{ aurutils_arch }}.pkg.tar"
vars:
ansible_become_user: "{{ aur_repo_build_account }}"
- name: Update repo database
ansible.builtin.command:
argv:
- repo-add
- --prevent-downgrade
- --remove
- --sign
- --key
- "{{ aur_repo_key_thumbprint }}"
- home.db.tar
- aurutils-{{ aurutils_pkgver }}-{{ aurutils_pkgrel }}-{{ aurutils_arch }}.pkg.tar
chdir: "{{ aur_repo_dir }}/packages"
changed_when: true
vars:
ansible_become_user: "{{ aur_repo_build_account }}"
- name: Check if the signing key is in pacman keyring
ansible.builtin.command:
argv:
- pacman-key
- -l
- "{{ aur_repo_key_thumbprint }}"
failed_when: pacman_key_result.rc not in [0, 1]
changed_when: false
register: pacman_key_result
- name: Pacman key import block
when: pacman_key_result.rc == 1
block:
- name: Import the signing public key to arch keyring
ansible.builtin.command:
argv:
- pacman-key
- -r
- "{{ aur_repo_key_thumbprint }}"
- --keyserver
- hkps://keyserver.ubuntu.com
changed_when: true
- name: Locally sign the imported pacman key
ansible.builtin.command:
argv:
- pacman-key
- --lsign-key
- "{{ aur_repo_key_thumbprint }}"
changed_when: true
- name: Add custom repo block to pacman.conf
ansible.builtin.blockinfile:
path: /etc/pacman.conf
block: |
[home]
SigLevel = Required TrustedOnly
Server = file://{{ aur_repo_dir }}/packages
create: false
state: present
insertafter: EOF
- name: Install aurutils
community.general.pacman:
name: aurutils
state: present
update_cache: true
# - name: Enable the multilib repository
# ansible.builtin.replace:
# path: /etc/pacman.conf
# backup: true
# regexp: '^[#]?\[multilib\]\n[#]?Include = \/etc\/pacman.d\/mirrorlist$'
# replace: '[multilib]\nInclude = /etc/pacman.d/mirrorlist'
# register: multilib_enable
# - name: Update the package database if multilib was enabled # noqa: no-handler
# when: multilib_enable.changed | default(false)
# community.general.pacman:
# update_cache: true
- name: Sync AUR packages
ansible.builtin.command:
cmd: aur sync --no-view -CnrS {{ item }}
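# -CnrS presumably expands to --clean --no-confirm --rm-deps --sign, mirroring
# the long options used in aur-sync.service above.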
loop: "{{ aur_repo_host_packages }}"
register: aur_sync_result
changed_when: (aur_sync_result.stderr_lines | last | replace(':','')) != "sync there is nothing to do"
failed_when: aur_sync_result.rc != 0
vars:
ansible_become_user: "{{ aur_repo_build_account }}"
- name: Add the root www folder if it doesn't exist
ansible.builtin.file:
path: /var/www
state: directory
owner: http
group: http
mode: "0775"
- name: Link the aur repo to the web root
ansible.builtin.file:
src: "{{ aur_repo_dir }}/packages"
path: /var/www{{ aur_repo_dir }}
state: link
- name: Add the aur-sync systemd unit files
ansible.builtin.copy:
src: "{{ item }}"
dest: /usr/lib/systemd/system/
owner: root
group: root
mode: "0644"
loop:
- aur-sync.service
- aur-sync.timer
register: aur_sync_unit_files
- name: Enable and start the aur-sync systemd timer # noqa: no-handler
when: aur_sync_unit_files.changed
ansible.builtin.systemd:
name: aur-sync.timer
enabled: true
state: started
daemon_reload: true
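
For reference, an illustrative set of the variables this role consumes; aur-builder matches User= in aur-sync.service, everything else is a placeholder assumption:

aur_repo_build_account: aur-builder             # matches the systemd unit above
aur_repo_dir: /srv/aur                          # assumed path
aur_repo_packager_name: Example Packager        # placeholder
aur_repo_packager_email: packager@example.com   # placeholder
aur_repo_host_packages:                         # assumed list of AUR packages to build
  - sonarr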

View File

@@ -0,0 +1 @@
{{ aur_repo_build_account }} ALL = (root) NOPASSWD: /usr/bin/pacman, /usr/bin/pacsync

View File

@@ -0,0 +1,21 @@
# Global Options
OPTIONS=(strip docs !libtool !staticlibs emptydirs zipman purge debug lto autodeps)
MAKEFLAGS="-j{{ (ansible_processor_nproc - 1) }}"
PACKAGER="{{ aur_repo_packager_name }} <{{ aur_repo_packager_email }}>"
# Build Environment
BUILDDIR=/tmp/build
BUILDENV=(!distcc color !ccache check sign)
GPGKEY={{ aur_repo_key_thumbprint }}
# Outputs
PKGDEST={{ aur_repo_dir }}/packages
SRCDEST={{ aur_repo_dir }}/sources
SRCPKGDEST={{ aur_repo_dir }}/srcpackages
LOGDEST=/var/log/makepkg
PKGEXT=".pkg.tar"
SRCEXT=".src.tar"

View File

@@ -0,0 +1 @@
{{ aur_repo_private_key }}

View File

@@ -0,0 +1,6 @@
---
- name: Restart nginx
ansible.builtin.service:
name: nginx.service
state: restarted
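
One way to wire this handler up, assuming host or group vars alongside the certbot role below: point the role's certbot_notify variable (consumed by its certificate task) at the handler name.

certbot_notify: Restart nginx   # handler defined above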

View File

@@ -0,0 +1,67 @@
---
- name: Install certbot package (Archlinux)
when: ansible_facts['os_family'] == "Archlinux"
community.general.pacman:
name:
- certbot
- certbot-dns-{{ certbot_dns_plugin }}
state: present
update_cache: true
- name: Install certbot webserver plugin (Archlinux)
when:
- ansible_facts['os_family'] == "Archlinux"
- certbot_webserver_type == 'nginx'
community.general.pacman:
name:
- certbot-nginx
state: present
update_cache: true
- name: Template out the DNS plugin credentials file
when: certbot_dns_plugin in ['rfc2136', 'cloudflare']
ansible.builtin.template:
src: "{{ certbot_dns_plugin }}.conf.j2"
dest: "/etc/letsencrypt/{{ certbot_dns_plugin }}.conf"
owner: root
group: root
mode: '0600'
- name: Template out the certbot default config
ansible.builtin.template:
src: cli.ini.j2
dest: /etc/letsencrypt/cli.ini
owner: root
group: root
mode: '0644'
- name: Request and install certificates
ansible.builtin.command:
argv:
- certbot
- certonly
- -n
- --dns-{{ certbot_dns_plugin }}
- --dns-{{ certbot_dns_plugin }}-credentials
- /etc/letsencrypt/{{ certbot_dns_plugin }}.conf
- --dns-{{ certbot_dns_plugin }}-propagation-seconds
- "{{ certbot_dns_propagation_seconds | default(10) }}"
- -d
- "{{ item }}"
creates: /etc/letsencrypt/live/{{ item }}/fullchain.pem
loop: "{{ certbot_domains }}"
notify: "{{ certbot_notify | default(omit) }}"
- name: Enable certbot renewal
ansible.builtin.service:
name: certbot-renew.timer
state: started
enabled: true
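
An illustrative variable set for this role with the cloudflare plugin; every value is a placeholder:

certbot_dns_plugin: cloudflare
certbot_webserver_type: nginx
certbot_email: admin@example.com          # placeholder
certbot_domains:
  - example.com                           # placeholder
certbot_cloudflare_api_token: "{{ vault_cloudflare_api_token }}"   # placeholder lookup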

View File

@@ -0,0 +1,3 @@
rsa-key-size = 4096
email = {{ certbot_email }}
agree-tos = true

View File

@@ -0,0 +1 @@
dns_cloudflare_api_token = {{ certbot_cloudflare_api_token }}

View File

@@ -0,0 +1,6 @@
dns_rfc2136_server = {{ certbot_rfc2136_server }}
dns_rfc2136_port = {{ certbot_rfc2136_port | default(53) }}
dns_rfc2136_name = {{ certbot_rfc2136_key_name }}
dns_rfc2136_secret = {{ certbot_rfc2136_key_secret }}
dns_rfc2136_algorithm = {{ certbot_rfc2136_key_algorithm | upper }}
dns_rfc2136_sign_query = true
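
Matching illustrative values for the rfc2136 variant; note the template upper-cases the algorithm, so a lowercase hmac-sha512 renders as HMAC-SHA512. All values are placeholders:

certbot_rfc2136_server: 192.0.2.1             # placeholder (TEST-NET address)
certbot_rfc2136_key_name: certbot.            # placeholder TSIG key name
certbot_rfc2136_key_secret: "{{ vault_tsig_secret }}"   # placeholder lookup
certbot_rfc2136_key_algorithm: hmac-sha512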

View File

@@ -0,0 +1,82 @@
---
- name: Install Docker on Archlinux
when: ansible_facts['os_family'] == "Archlinux"
community.general.pacman:
name: docker
state: present
update_cache: true
- name: Add users to docker group
ansible.builtin.user:
name: "{{ item }}"
groups: docker
append: true
loop: "{{ docker_users }}"
- name: Start and enable Docker
ansible.builtin.systemd:
name: docker
state: started
enabled: true
- name: Create Docker networks
when:
- docker_networks is defined
- docker_networks | length > 0
community.docker.docker_network:
attachable: "{{ item.attachable | default(true) }}"
driver: "{{ item.driver | default('bridge') }}"
driver_options: "{{ item.driver_options | default(omit) }}"
enable_ipv6: "{{ item.enable_ipv6 | default(false) }}"
internal: "{{ item.internal | default(false) }}"
ipam_config: "{{ item.ipam | default(omit) }}"
name: "{{ item.name }}"
state: "present"
loop: "{{ docker_networks }}"
- name: Create Docker volumes
when:
- docker_volumes is defined
- docker_volumes | length > 0
community.docker.docker_volume:
driver: "{{ item.driver | default('local') }}"
driver_options: "{{ item.driver_options | default({}) }}"
recreate: "never"
state: "present"
volume_name: "{{ item.name }}"
loop: "{{ docker_volumes }}"
- name: Pull Docker images
when:
- docker_images is defined
- docker_images | length > 0
community.docker.docker_image_pull:
name: "{{ item.name }}"
pull: "always"
tag: "{{ item.tag | default('latest') }}"
loop: "{{ docker_images }}"
- name: Create Docker containers
when:
- docker_containers is defined
- docker_containers | length > 0
community.docker.docker_container:
auto_remove: "{{ item.auto_remove | default(false) }}"
capabilities: "{{ item.capabilities | default(omit) }}"
command: "{{ item.command | default(omit) }}"
detach: true
domainname: "{{ item.domainname | default(omit) }}"
entrypoint: "{{ item.entrypoint | default(omit) }}"
env: "{{ item.env | default({}) }}"
etc_hosts: "{{ item.etc_hosts | default({}) }}"
hostname: "{{ item.hostname | default(item.name) }}"
image: "{{ item.image }}"
name: "{{ item.name }}"
networks: "{{ item.networks | default(omit) }}"
published_ports: "{{ item.ports | default([]) }}"
restart_policy: "{{ item.restart_policy | default('unless-stopped') }}"
state: 'started'
sysctls: "{{ item.sysctls | default({}) }}"
volumes: "{{ item.volumes | default([]) }}"
loop: "{{ docker_containers }}"

View File

@@ -1,2 +1,4 @@
---
firewall_package: ufw
firewall_ssh_interface: br22
firewall_spice_interface: br22

View File

@ -5,6 +5,54 @@
name: "{{ firewall_package }}" name: "{{ firewall_package }}"
state: latest state: latest
update_cache: true update_cache: true
reason: explicit
when: when:
- ansible_os_family == 'Arch' - ansible_os_family == 'Arch'
- name: Start ufw in allow mode
become: true
community.general.ufw:
policy: allow
state: enabled
- name: Start and enable ufw service
become: true
ansible.builtin.service:
name: ufw.service
state: started
enabled: true
- name: Add SSH rules
become: true
community.general.ufw:
comment: SSH access
rule: allow
to_port: '22'
proto: tcp
interface: "{{ firewall_ssh_interface }}"
direction: in
src: "{{ item }}"
loop:
- 192.168.20.0/24
- 192.168.72.0/24
- 2406:e001:a:cb20::/64
- name: Add SPICE rules
become: true
community.general.ufw:
comment: SPICE access to guests
rule: allow
to_port: 5901:5904
proto: tcp
interface: "{{ firewall_spice_interface }}"
direction: in
src: '{{ item }}'
loop:
- 192.168.20.0/24
- 192.168.72.0/24
- 2406:e001:a:cb20::/64
- name: Restore default deny policy
become: true
community.general.ufw:
policy: deny
logging: low

View File

@@ -0,0 +1,20 @@
libvirt_packages:
Archlinux:
- qemu-base
- openbsd-netcat
- swtpm
- gettext
- libvirt
- libvirt-python
- python-lxml
hypervisor:
storage: dir
device: /dev/sdb
# hypervisor:
# storage: zfs
# datasets:
# - name: tank/vhds
# compression: lz4
# encryption: 'off'

View File

@@ -0,0 +1,35 @@
---
- name: Create the libvirt storage directories
ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: libvirt-qemu
group: libvirt-qemu
mode: '0775'
loop:
- /var/lib/libvirt/vhds/
- name: Define additional libvirt storage pools
community.libvirt.virt_pool:
name: "{{ item.name }}"
command: define
xml: "{{ lookup('template', 'dir_libvirt_pool.xml.j2') }}"
loop:
- name: vhds
path: /var/lib/libvirt/vhds/
- name: Create additional libvirt storage pools
community.libvirt.virt_pool:
name: "{{ item }}"
command: build
loop:
- vhds
- name: Start additional libvirt storage pools
community.libvirt.virt_pool:
name: "{{ item }}"
state: active
autostart: true
loop:
- vhds

View File

@@ -0,0 +1,91 @@
---
- name: Configure disk partition
community.general.parted:
align: optimal
device: "{{ hypervisor.device }}"
fs_type: ext4
label: gpt
name: libvirt
number: 1
part_end: 100%
part_start: 0%
state: present
# TODO disk encryption
- name: Format filesystem
community.general.filesystem:
device: "{{ hypervisor.device }}1"
fstype: ext4
resizefs: true
state: present
- name: Get list of services
ansible.builtin.service_facts:
- name: Stop the libvirt services
when: item in ansible_facts.services
ansible.builtin.service:
name: "{{ item }}"
state: stopped
loop:
- libvirtd.service
- name: Check if libvirt storage directory exists
ansible.builtin.stat:
path: /var/lib/libvirt/
register: libvirt_storage
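# The block below migrates any existing /var/lib/libvirt contents onto the new
# partition: temp-mount the partition, copy the tree across, recreate an empty
# /var/lib/libvirt, and always clean up the temporary mount before the real
# mount below takes its place.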
- name: Temp mount and copy block
when: libvirt_storage.stat.exists
block:
- name: Temporarily mount hypervisor storage
ansible.posix.mount:
path: /mnt/libvirt_temp/
src: "{{ hypervisor.device }}1"
fstype: ext4
state: mounted
boot: false
- name: Copy libvirt contents to hypervisor storage
ansible.builtin.copy:
src: /var/lib/libvirt/
dest: /mnt/libvirt_temp/
remote_src: true
mode: preserve
- name: Remove existing libvirt storage
ansible.builtin.file:
path: /var/lib/libvirt/
state: "{{ item }}"
owner: root
group: root
mode: '0775'
loop:
- absent
- directory
always:
- name: Unmount from temporary mount point
ansible.posix.mount:
path: /mnt/libvirt_temp/
state: absent
- name: Mount hypervisor storage
ansible.posix.mount:
path: /var/lib/libvirt/
src: "{{ hypervisor.device }}1"
fstype: ext4
state: mounted
boot: true
- name: Start the libvirt service
when: item in ansible_facts.services
ansible.builtin.service:
name: "{{ item }}"
state: started
loop:
- libvirtd.service

View File

@@ -0,0 +1,40 @@
---
- name: Create libvirt zfs dataset(s)
community.general.zfs:
name: "{{ item.name }}"
state: present
extra_zfs_properties: # TODO fix property values
canmount: false
mountpoint: none
compression: false
primarycache: metadata
secondarycache: none
reservation: none
refreservation: none
dedup: false
encryption: "{{ item.encryption | default('off') }}"
volmode: dev
devices: false
atime: false
loop: "{{ hypervisor.datasets }}"
- name: Define additional libvirt storage pools
community.libvirt.virt_pool:
name: "{{ item.name | split('/') | last }}"
command: define
xml: "{{ lookup('template', 'zfs_libvirt_pool.xml.j2') }}"
loop: "{{ hypervisor.datasets }}"
- name: Create additional libvirt storage pools
community.libvirt.virt_pool:
name: "{{ item.name | split('/') | last }}"
command: build
loop: "{{ hypervisor.datasets }}"
- name: Start additional libvirt storage pools
community.libvirt.virt_pool:
name: "{{ item.name | split('/') | last }}"
state: active
autostart: true
loop: "{{ hypervisor.datasets }}"

View File

@@ -0,0 +1,136 @@
---
- name: Format and mount the libvirt disk if it is not root
when:
- hypervisor.device is defined
- hypervisor.device not in (ansible_mounts | json_query('[?mount == `/var/lib/libvirt`].device'))
ansible.builtin.include_tasks:
file: libvirt_drive_mount.yaml
- name: Install libvirt packages (Archlinux)
when: ansible_distribution == 'Archlinux'
community.general.pacman:
name: "{{ libvirt_packages['Archlinux'] }}"
state: present
update_cache: true
- name: Add user to libvirt group
ansible.builtin.user:
name: "{{ ansible_user }}"
groups:
- libvirt
- libvirt-qemu
append: true
- name: Load br_netfilter kernel module so sysctl flags can be set
community.general.modprobe:
name: br_netfilter
state: present
- name: Set required sysctl flags for bridging
ansible.posix.sysctl:
name: "{{ item.name }}"
reload: true
state: present
sysctl_file: /etc/sysctl.d/bridge.conf
sysctl_set: true
value: "{{ item.value }}"
loop:
- name: net.ipv4.ip_forward
value: 1
- name: net.bridge.bridge-nf-call-iptables
value: 0
- name: net.bridge.bridge-nf-call-ip6tables
value: 0
- name: net.bridge.bridge-nf-call-arptables
value: 0
- name: Add bridge(s) to qemu_bridge_helper
when: qemu_bridges is defined
ansible.builtin.lineinfile:
path: /etc/qemu/bridge.conf
line: "{{ item }}"
state: present
backup: false
insertafter: EOF
loop: "{{ qemu_bridges | default(['virbr0']) }}"
- name: Start and enable libvirt service
ansible.builtin.service:
name: libvirtd.service
state: started
enabled: true
- name: Stop the default libvirt network
community.libvirt.virt_net:
name: default
state: inactive
- name: Remove default libvirt network
community.libvirt.virt_net:
name: default
state: absent
- name: Remove the default libvirt storage pool
community.libvirt.virt_pool:
name: default
state: deleted
- name: Create standard libvirt storage directories
ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: libvirt-qemu
group: libvirt-qemu
mode: '0775'
loop:
- /var/lib/libvirt/isos/
- /var/lib/libvirt/nvram/
- name: Get libvirt storage pool facts
community.libvirt.virt_pool:
command: facts
- name: Define the standard libvirt storage pools # TODO add when condition against existing pools
community.libvirt.virt_pool:
name: "{{ item.name }}"
command: define
xml: "{{ lookup('template', 'dir_libvirt_pool.xml.j2') }}"
loop:
- name: isos
path: /var/lib/libvirt/isos/
- name: nvram
path: /var/lib/libvirt/nvram/
- name: Create the standard libvirt storage pools
community.libvirt.virt_pool:
name: "{{ item }}"
command: build
loop:
- isos
- nvram
- name: Start the standard libvirt storage pools
community.libvirt.virt_pool:
name: "{{ item }}"
state: active
autostart: true
loop:
- isos
- nvram
- name: Setup additional libvirt storage (dir)
when: hypervisor.storage == 'dir'
ansible.builtin.include_tasks:
file: libvirt_dir.yaml
- name: Setup additional libvirt storage (zfs)
when: hypervisor.storage == 'zfs'
ansible.builtin.include_tasks:
file: libvirt_zfs.yaml
# - name: Enroll libvirtd TLS certificate
# - name: Configure libvirtd TLS listener
# - name: Open libvirtd TLS firewall ports
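
An illustrative host_vars fragment tying the role together; the hypervisor shape mirrors the defaults shown earlier, and br22 is borrowed from the firewall vars (both are placeholders here):

qemu_bridges:
  - br22             # written to /etc/qemu/bridge.conf
hypervisor:
  storage: dir       # 'zfs' switches to the libvirt_zfs.yaml tasks instead
  device: /dev/sdb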

Some files were not shown because too many files have changed in this diff.