Compare commits

...

87 Commits

SHA1 Message Date
230d830612 enable nginx dev 2025-04-05 02:38:17 -04:00
f843c7eaa3 certbot for dev 2025-04-05 02:12:24 -04:00
076757e1f8 troubleshoot build 2025-04-05 01:08:32 -04:00
0b1c18a3a0 troubleshoot build 2025-04-05 00:15:34 -04:00
f47ad625da troubleshoot build 2025-04-05 00:13:16 -04:00
2b56d30666 aur builder become changes 2025-04-05 00:07:14 -04:00
c12dfc18ce no become for delegated task 2025-04-04 23:43:36 -04:00
814a642cc0 delegate secret lookup 2025-04-04 23:42:51 -04:00
8cad395e34 aur repo host secret lookup 2025-04-04 23:41:35 -04:00
eb360951a1 aur repo host vars 2025-04-04 23:24:58 -04:00
9601aa4937 hashi vault lookups 2025-04-04 23:08:01 -04:00
81319370b1 wip 2025-03-13 08:59:00 -04:00
76f6f78112 add backup, readarr 2025-01-20 16:31:17 -05:00
9b0edab903 add jellyfin to media compose 2025-01-14 14:18:24 -05:00
c377f1a7d1 add prowlarr and sonarr 2025-01-12 14:18:23 -05:00
7f5a35d936 add radarr service 2025-01-12 00:03:26 -05:00
7b9f0e0ca5 add gallery cname 2025-01-04 01:20:36 -05:00
a490e4ad92 unifi nearly done 2024-12-30 22:35:25 -05:00
6734d78bef adjust nginx cert 2024-12-30 20:09:09 -05:00
6722ab4138 Syncthing working on truenas 2024-12-30 20:08:30 -05:00
e76d1a1f88 more apps 2024-12-30 01:18:40 -05:00
c090cc9cbe add minio to truenas 2024-12-25 01:04:30 -05:00
8ab3783a2b Add sops config 2024-12-23 23:52:05 -05:00
cdf20ba9ef Add sops kms keys 2024-12-23 18:35:04 -05:00
f0b3388e8d aws kms 2024-12-21 01:26:55 -05:00
27e2fc6058 truenas 2024-12-21 00:17:04 -05:00
b622bb29df matrix dns 2024-12-20 00:58:24 -05:00
bde6a5f208 dns and talos 2024-12-18 15:42:45 -05:00
85d6fe5056 Merge remote-tracking branch 'refs/remotes/origin/main' 2024-12-09 23:36:16 -05:00
098f63fa5b talos 2024-12-09 23:34:13 -05:00
43fc89a966 mayastor 2024-12-09 02:08:04 -05:00
7aa2992228 talos wip 2024-12-08 01:36:48 -05:00
1775e24a45 add more dns records 2024-11-29 01:35:04 -05:00
d6983b4744 add synapse mgmt playbooks 2024-10-30 01:52:00 -04:00
29cb12a2d1 tf dns wip 2024-10-26 16:48:20 +13:00
9464737fe9 cred test 2024-10-24 18:43:21 +13:00
14fc10a10a Cloudflare DNS via TF 2024-10-24 18:39:30 +13:00
fe38bebbd5 cloudflare dns 2024-10-23 00:22:32 -04:00
bad78681c6 Merge remote-tracking branch 'refs/remotes/origin/main' 2024-05-16 21:52:18 +12:00
c8ab4633ca change qbittorrent data dir 2024-05-16 21:50:48 +12:00
627343b50f add nuc playbook 2024-05-16 21:35:45 +12:00
2d31a5524f resolve merge 2024-05-12 21:58:05 +12:00
2981bdb22f Merge remote-tracking branch 'refs/remotes/origin/main' 2024-05-12 21:57:52 +12:00
84930795b6 temp comment out roles 2024-05-12 21:55:37 +12:00
f068c9710b torrent working 2024-04-24 21:40:00 +12:00
afc0b57cfb add arr services
add music and subs nginx proxies
2024-04-23 18:03:25 +12:00
7df41b5c8d aur repo host complete 2024-04-23 15:47:14 +12:00
2cc78654fe custom remote repo working 2024-04-23 02:14:39 +12:00
a6eb508cf0 aur repo wip 2024-04-23 00:49:49 +12:00
85330c8645 aurutils install working 2024-04-22 21:46:14 +12:00
c05f3a845b certbot and nginx working 2024-04-22 01:37:46 +12:00
3d9241b475 kodi media services basic setup 2024-04-21 01:04:17 +12:00
cb4abe5722 Merge remote-tracking branch 'refs/remotes/origin/main' 2024-04-20 22:24:47 +12:00
d0c1bb8717 start to add kodi ansible 2024-04-20 22:24:20 +12:00
5b83607fe0 attempted k8s resources as tf files, not worth the trouble 2024-04-20 02:10:01 +12:00
43dbb951fe add vultr block storage mount 2024-04-19 17:31:44 +12:00
f68c6b227a add vultr k8s 2024-04-18 13:28:39 +12:00
8d049f3056 opnsense b created 2024-04-17 04:06:14 +12:00
a0997ee8ec add floating ip assignments 2024-04-17 03:36:58 +12:00
b8c2dae1fa split resources into multiple tf files 2024-04-17 02:56:53 +12:00
c2f7590b44 Add hetzner terraform project 2024-04-17 02:37:58 +12:00
d8db6ba755 refine remote hwdb file 2024-04-13 18:21:03 +12:00
d6882bd306 update hwdb readme 2024-04-13 17:25:42 +12:00
f78dd67cd5 add conf dir and hwdb file 2024-04-13 16:49:48 +12:00
2aa50b4015 add sftp creds template 2024-04-05 15:09:50 +13:00
4dfe68a54b add mount-sftp script 2024-04-05 15:07:25 +13:00
bdf04302aa backup scripts 2024-02-10 23:12:35 +13:00
39cb2b0007 begin to add node backup 2024-02-03 01:24:47 +13:00
f10ce63569 adjust ups mon alerts 2024-01-12 12:54:24 +13:00
52c455d490 merged arch-install contents 2024-01-12 00:14:09 +13:00
c6755e8d97 nut and acme working 2024-01-11 18:15:16 +13:00
ba7cda511e organize playbooks into subdirs 2024-01-11 13:03:08 +13:00
7eddbba696 Add k8s shutdown/openup scripts
Add nut ansible roles
Add acme certificate ansible role
2024-01-11 01:11:16 +13:00
92df824252 nut wip 2024-01-10 02:05:03 +13:00
9e07845208 unifi working with db init 2024-01-05 02:41:50 +13:00
8d71ff222a add matrix reservation 2023-12-25 01:55:30 +13:00
117b36842c add transmission static ip
add internal v4 ingress service
2023-12-17 19:12:27 +13:00
2b2486f2fb add mail reservations 2023-12-08 15:29:55 +13:00
2e38d3d07f reservations, ingress service 2023-12-07 02:58:14 +13:00
af13cfbb41 new cluster wip 2023-11-29 02:02:52 +13:00
cd19a7687c house cluster wip 2023-11-27 03:31:14 +13:00
0923148d8e add packages 2023-11-22 00:10:42 +13:00
dda7bc7a10 add additional sc definition 2023-10-03 23:04:20 +13:00
9c477f2094 archinstall 2023-09-26 02:52:28 +13:00
e1349b2b90 add packages 2023-08-15 03:20:21 +10:00
cffbcaea8c sshd setup 2023-08-14 22:27:29 +10:00
e1fb6b94ee fix systemd templates 2023-08-13 14:03:03 +10:00
197 changed files with 7132 additions and 253 deletions

36
.gitignore vendored
View File

@ -1,3 +1,39 @@
# Local .terraform directories
**/.terraform/*
**/.terraform
.ansible/
.vscode/
ansible/collections/**
# registry password file
distribution/htpasswd
# .tfstate files
*.tfstate
*.tfstate.*
# Terraform lock file
**/.terraform.lock.hcl
# Terraform secrets file
**/secrets.auto.tfvars
# Crash log files
crash.log
crash.*.log
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
**/vault_password
**/vault.yaml
**/*secrets.yaml

10
.sops.yaml Normal file
View File

@ -0,0 +1,10 @@
creation_rules:
- path_regex: (secret|secrets)\.(yml|yaml)$
unencrypted_regex: ^(apiVersion|kind|name|namespace|type)$
kms: 'arn:aws:kms:us-east-1:140023401248:key/c51c2cc5-4e8e-484d-b2f0-4d4ec2039938'
# kms:
# - arn: 'arn:aws:kms:us-east-1:140023401248:key/c51c2cc5-4e8e-484d-b2f0-4d4ec2039938'
# aws_profile: home
age: 'age1k5y5gj5fzpwtjgzqd4n93h4h9ek9jz8898rva5zsgj7zjet97ytq4dtzjs'
hc_vault_transit_uri: 'https://vault.balsillie.net:443/v1/sops/keys/krds'
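
As a rough usage check (file path illustrative), any file matching the path_regex above can then be encrypted in place and decrypted on demand:

sops --encrypt --in-place ansible/host_vars/example/secrets.yaml
sops --decrypt ansible/host_vars/example/secrets.yaml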

View File

@ -23,5 +23,5 @@
"checkForMinikubeUpgrade": true,
"imageBuildTool": "Docker"
},
"ansible.python.interpreterPath": "/usr/bin/python3"
"ansible.python.interpreterPath": "/usr/bin/python"
}

View File

@ -5,7 +5,6 @@ library = modules
module_utils = module_utils
display_skipped_hosts = false
interpreter_python = auto_silent
collections_paths = ./collections
collections_path = ./collections
roles_path = ./roles
vault_password_file = ./vault_password
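
The collections_paths to collections_path rename here follows ansible-core deprecating the plural spelling; a quick way to confirm the setting is being picked up:

ansible-config dump --only-changed | grep -i collections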

View File

@ -0,0 +1,6 @@
acme_certificate_csr_organization: Balsillie Family
acme_certificate_csr_locality: Queenstown
acme_certificate_csr_state: Otago
acme_certificate_csr_country: NZ
acme_certificate_csr_email: admin@balsillie.net
acme_certificate_directory: https://acme-v02.api.letsencrypt.org/directory

View File

@ -1 +0,0 @@
---

View File

@ -0,0 +1,3 @@
nut_client_admin_username: nut-admin
nut_client_primary_username: nut-primary
nut_client_secondary_username: nut-secondary

View File

@ -0,0 +1,3 @@
rfc2136_key_algorithm: hmac-sha256
rfc2136_key_name: rndc-house
rfc2136_server_address: 10.208.240.1
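
A hedged pre-flight test of this TSIG key against the BIND server (the key secret and record name are placeholders):

nsupdate -y 'hmac-sha256:rndc-house:<key-secret>' <<'EOF'
server 10.208.240.1
update add _acme-probe.balsillie.house. 60 TXT "probe"
send
EOF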

View File

@ -0,0 +1,23 @@
# code: language=ansible
aur_repo_packager_name: "Balsillie Family"
aur_repo_packager_email: "admin@balsillie.net"
aur_repo_dir: "/aur"
aur_repo_build_account: "aur-builder"
aur_repo_host_packages:
- pikaur
- jellyfin-media-player # If you get errors relating to icu, check 'icu' package version and perform a system update
- git-credential-keepassxc
- docker-credential-secretservice-bin
- ventoy-bin
- debtap
- aurutils
- ipmiview
- powershell-bin
- visual-studio-code-bin
- ttf-ms-fonts
- brave-bin
- teamviewer
- vmware-horizon-client

View File

@ -0,0 +1,17 @@
# code: language=ansible
# Connection (SSH)
ansible_connection: ansible.builtin.ssh
ansible_ssh_host: dev.balsillie.house
ansible_ssh_port: 22
ansible_ssh_host_key_checking: false
ansible_ssh_pipelining: false
ansible_ssh_user: ladmin
ansible_ssh_private_key_file: ~/.ssh/conf.d/home/dev.balsillie.house.key
# Become (sudo)
ansible_become_method: ansible.builtin.sudo
ansible_become_user: root
ansible_become_password: "{{ lookup('community.hashi_vault.vault_kv1_get', 'ansible/host_vars/dev.balsillie.house/ansible_connection').secret.ansible_become_password }}" # noqa yaml[line-length]
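
The vault_kv1_get lookup above resolves at inventory parse time, so Vault credentials must already be in the environment; a minimal sketch assuming token auth and reusing the Vault address seen in .sops.yaml (inventory path illustrative):

export VAULT_ADDR=https://vault.balsillie.net:443
export VAULT_TOKEN=<token>
ansible-inventory -i ansible/hosts.yaml --host dev.balsillie.house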

View File

@ -0,0 +1,17 @@
# code: language=ansible
certbot_rfc2136_server: '10.208.240.1'
certbot_rfc2136_key_name: 'rndc-house'
certbot_rfc2136_key_algorithm: 'hmac-sha256'
certbot_cloudflare_api_token: "{{ lookup('community.hashi_vault.vault_kv1_get', 'cloudflare/balsillie.house/dns').secret.api_token }}" # noqa yaml[line-length]
certbot_dns_propagation_seconds: 15
certbot_webserver_type: 'nginx' # 'nginx' or 'apache'
certbot_dns_plugin: 'cloudflare'
certbot_email: "certbot.dev@balsillie.email"
certbot_acme_server: "acme-v02.api.letsencrypt.org"
certbot_domains:
- repo.balsillie.house

View File

@ -0,0 +1,9 @@
# code: language=ansible
nginx_sites:
- name: repo.balsillie.house
type: site
autoindex: 'on'
root: /var/www/aur
nginx_user: "http"

View File

@ -0,0 +1 @@
acme_certificate_account_email: acme.hv00@balsillie.email

View File

@ -1,16 +1,9 @@
$ANSIBLE_VAULT;1.1;AES256
30653030376238643536303332376530306565363333613230303263653935626332383862646539
3739623265323837613333343363343461353837643637650a616637656563313265636366616134
61636335613330393239656262663735316365613435303766643964353964666537353338646666
3536363034316632390a363234343466363937613631316130333566313037306636386130303137
33366462303461393866633233643033356231343232313832636335336232383234626163623533
64656339346264306265353839373362373034306261316238346365373639326566313866363263
62613639313566373233303734666331633038383638316361353838313634383163626563333137
62393835663963646431353431396238663062363031613735623937373835383630653165373634
32356365363162333661323765333236363934636461366664666431333338326362656439366339
62313265616666386164343336623032386536343134336232613164363236656236646332356335
36643362613832656666376233363436313030626566356134306533643862333536336662653630
32663936333434346530343639383330633538306536346432333136393765316366356362353735
30636536333436346166616232643238373964306139313265623934616636663234336162306338
34343934613136623837353436353462303036643837656636386533333266663265643538633333
373133383866666465383332373336343739
ansible_connection: ssh
ansible_host: hv00.balsillie.house
ansible_fqdn: hv00.balsillie.house
ansible_remote_addr: 10.192.110.100
ansible_port: 22
ansible_user: ladmin
# ansible_become_user: root
ansible_become_method: ansible.builtin.sudo
static_fqdn: hv00.balsillie.house

View File

@ -0,0 +1,8 @@
certbot_rfc2136_server: '10.208.240.1'
certbot_rfc2136_key_name: 'rndc-house'
certbot_rfc2136_key_algorithm: 'hmac-sha256'
certbot_webserver_type: 'nginx' # 'nginx' or 'apache'
certbot_dns_plugin: 'rfc2136'
certbot_email: "certbot.hv00@balsillie.email"
certbot_acme_server: "acme-v02.api.letsencrypt.org"

View File

@ -1,6 +1,5 @@
hypervisor:
storage: dir
device: /dev/sda
qemu_bridges:
- br0

View File

@ -0,0 +1,17 @@
nginx_sites:
- name: repo.balsillie.house
type: site
autoindex: 'on'
root: /var/www/aur
- name: unifi.balsillie.house
type: proxy
upstream:
host: 127.0.0.1
port: 8989
- name: hv00.balsillie.house
type: proxy
upstream:
host: 127.0.0.1
port: 9443
nginx_user: "http"

View File

@ -0,0 +1,38 @@
nut_client_local_server: true
nut_client_shutdown_cmd: /usr/bin/poweroff
nut_client_shutdown_exit: "true"
nut_client_hostsync: 240
nut_client_notify_cmd: /scripts/notify.sh
nut_client_min_supplies: 1
nut_client_ups_devices:
- name: ups0
host: hv00.balsillie.house
type: primary
port: 3493
powervalue: 1
nut_client_notify_messages:
- name: SHUTDOWN
message: "UPSMON shutdown triggered for HV00."
- name: LOWBATT
message: "UPS has reached low battery condition."
nut_client_notify_flags:
- name: LOWBATT
flags: SYSLOG+WALL+EXEC
- name: FSD
flags: SYSLOG+WALL+EXEC
- name: COMMOK
flags: SYSLOG+WALL+EXEC
- name: COMMBAD
flags: SYSLOG+WALL+EXEC
- name: SHUTDOWN
flags: SYSLOG+WALL+EXEC
- name: REPLBATT
flags: SYSLOG+WALL+EXEC
- name: NOCOMM
flags: SYSLOG+WALL+EXEC
- name: NOPARENT
flags: SYSLOG+WALL+EXEC
- name: BYPASS
flags: SYSLOG+WALL+EXEC
- name: NOTBYPASS
flags: SYSLOG+WALL+EXEC

View File

@ -0,0 +1,7 @@
nut_server_listen_address: 10.192.110.100
nut_server_listen_port: 3493
nut_server_certificate_file: /etc/ssl/private/hv00.balsillie.house.plain.combined.pem
nut_server_ups_devices:
- name: ups0
driver: usbhid-ups
port: auto

View File

@ -0,0 +1,16 @@
sshd:
config_path: home
auth:
pubkey: 'yes'
password: 'no'
empty: 'no'
listen:
port: '22'
family: inet
ipv4:
- '192.168.1.250'
- '10.192.110.100'
forwarding:
agent: 'no'
x11: 'no'
nickname: vault
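
A quick on-host check that the rendered sshd_config matches these vars:

sshd -t
sshd -T | grep -Ei 'passwordauthentication|pubkeyauthentication|^port'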

View File

@ -8,7 +8,7 @@ systemd_networkd_configs:
- name: 00-eth2.link
src: ethernet.link.j2
mac_address: 64-62-66-21-e9-c5
- name: 00-eth3.link
- name: 00-wan.link
src: ethernet.link.j2
mac_address: 64-62-66-21-e9-c6
- name: 01-eth0.network
@ -47,10 +47,10 @@ systemd_networkd_configs:
- 210
- 220
- 230
- name: 01-eth3.network
- name: 01-wan.network
src: ethernet.network.j2
mac_address: 64-62-66-21-e9-c6
arp: false
arp: true
lldp: false
dhcp: true
- name: 10-br0.netdev
@ -63,7 +63,7 @@ systemd_networkd_configs:
dhcp: false
lldp: true
vlans:
- vlan110
- 110
- name: 20-vlan110.netdev
src: vlan.netdev.j2
vlan_id: 110
@ -74,7 +74,7 @@ systemd_networkd_configs:
dhcp: false
address:
ipv4:
- 10.192.110.1/24
- 10.192.110.100/24
gateway:
ipv4: 10.192.110.254
nameserver:
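
After the networkd units are applied, the renamed wan link and the bridge/VLAN layout can be verified with:

networkctl list
networkctl status br0 vlan110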

View File

@ -1,2 +0,0 @@
---
ansible_host: hv00.balsillie.net

View File

@ -0,0 +1,9 @@
ansible_connection: ssh
ansible_host: kodi00.balsillie.house
ansible_fqdn: kodi00.balsillie.house
ansible_remote_addr: 10.192.210.169
ansible_port: 22
ansible_user: ladmin
ansible_become_user: root
ansible_become_method: sudo
static_fqdn: kodi00.balsillie.house

View File

@ -0,0 +1,8 @@
certbot_rfc2136_server: '10.208.240.1'
certbot_rfc2136_key_name: 'rndc-house'
certbot_rfc2136_key_algorithm: 'hmac-sha256'
certbot_webserver_type: 'nginx' # 'nginx' or 'apache'
certbot_dns_plugin: 'rfc2136'
certbot_email: "certbot.kodi00@balsillie.email"
certbot_acme_server: "acme-v02.api.letsencrypt.org"

View File

@ -0,0 +1,81 @@
---
docker_users:
- ladmin
docker_networks:
- name: torrent
driver: bridge
driver_options:
# com.docker.network.bridge.name: docker-torrent
com.docker.network.bridge.enable_ip_masquerade: true
com.docker.network.bridge.enable_icc: true
# com.docker.network.container_iface_prefix: container-torrent
attachable: true
enable_ipv6: false
internal: false
ipam:
- subnet: 192.168.99.0/24
gateway: 192.168.99.254
docker_volumes:
- name: torrent-data
driver: local
driver_options:
type: none
device: /downloads
o: bind
- name: torrent-config
driver: local
driver_options:
type: none
device: /etc/qbittorrent
o: bind
docker_images:
- name: hotio/qbittorrent
tag: release
docker_containers:
- name: qbittorrent
image: hotio/qbittorrent:release
auto_remove: false
capabilities:
- NET_ADMIN
domainname: balsillie.house
env:
PUID: '968'
PGID: '968'
UMASK: '002'
TZ: Pacific/Auckland
WEBUI_PORTS: 8080/tcp
VPN_ENABLED: 'true'
VPN_CONF: 'wg0'
VPN_PROVIDER: 'proton'
VPN_LAN_NETWORK: ''
VPN_LAN_LEAK_ENABLED: 'false'
VPN_EXPOSE_PORTS_ON_LAN: ''
VPN_AUTO_PORT_FORWARD: 'true'
VPN_AUTO_PORT_FORWARD_TO_PORTS: ''
VPN_KEEP_LOCAL_DNS: 'false'
VPN_FIREWALL_TYPE: 'nftables'
VPN_HEALTHCHECK_ENABLED: 'true'
PRIVOXY_ENABLED: 'false'
UNBOUND_ENABLED: 'false'
etc_hosts:
tv.balsillie.house: 192.168.99.254
movies.balsillie.house: 192.168.99.254
hostname: torrent
networks:
- name: torrent
aliases:
- torrent
- qbittorrent
ipv4_address: 192.168.99.1
restart_policy: 'unless-stopped'
sysctls:
net.ipv4.conf.all.src_valid_mark: 1
net.ipv6.conf.all.disable_ipv6: 1
volumes:
- torrent-config:/config:rw
- torrent-data:/downloads:rw
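
A hedged smoke test for the container and its VPN kill switch (whether the wg tool ships inside the hotio image is an assumption):

docker inspect --format '{{json .State.Health}}' qbittorrent
docker exec qbittorrent wg show wg0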

View File

@ -0,0 +1,43 @@
nginx_sites:
- name: tv.balsillie.house
type: proxy
upstream:
host: 127.0.0.1
port: 8989
- name: movies.balsillie.house
type: proxy
upstream:
host: 127.0.0.1
port: 7878
- name: music.balsillie.house
type: proxy
upstream:
host: 127.0.0.1
port: 8686
- name: subs.balsillie.house
type: proxy
upstream:
host: 127.0.0.1
port: 6767
- name: index.balsillie.house
type: proxy
upstream:
host: 127.0.0.1
port: 9696
- name: torrent.balsillie.house
type: proxy
upstream:
host: 192.168.99.1
port: 8080
- name: jellyfin.balsillie.house
type: proxy
upstream:
host: 127.0.0.1
port: 8096
- name: kodi.balsillie.house
type: proxy
upstream:
host: 127.0.0.1
port: 8082
nginx_user: "http"
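
A minimal sanity check of the rendered proxy sites:

nginx -t && systemctl reload nginx
curl -sI https://tv.balsillie.house | head -n 1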

View File

@ -0,0 +1,3 @@
---
sonarr_var: "sonarr_value"

View File

@ -0,0 +1,4 @@
sshd:
auth:
password: 'no'
pubkey: 'yes'

View File

@ -0,0 +1,7 @@
torrent_user: kodi
torrent_downloads_dir: /downloads
torrent_wireguard_address: 10.2.0.2
torrent_wireguard_dns: 10.2.0.1
torrent_wireguard_peer_endpoint: 103.75.11.18
torrent_wireguard_peer_public_key: 8Rm0uoG0H9BcSuA67/5gBv8tJgFZXNLm4sqEtkB9Nmw=

View File

@ -0,0 +1,21 @@
ufw_enabled: true
ufw_rules:
- name: "SSH from Local Subnet"
port: "22"
protocol: "tcp"
action: "allow"
source: "10.192.210.0/24"
destination: "10.192.210.169"
- name: "HTTP from Local Subnet"
port: "80"
protocol: "tcp"
action: "allow"
source: "10.192.210.0/24"
destination: "10.192.210.169"
- name: "HTTPS from Local Subnet"
port: "443"
protocol: "tcp"
action: "allow"
source: "10.192.210.0/24"
destination: "10.192.210.169"
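
After the role runs, the resulting ruleset can be reviewed with:

ufw status numbered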

View File

@ -0,0 +1 @@
acme_certificate_account_email: acme.kube00@balsillie.email

View File

@ -0,0 +1,9 @@
ansible_connection: ssh
ansible_host: kube00.balsillie.house
ansible_fqdn: kube00.balsillie.house
ansible_remote_addr: 10.192.110.110
ansible_port: 22
ansible_user: ladmin
ansible_become_user: root
ansible_become_method: sudo
static_fqdn: kube00.balsillie.house

View File

@ -0,0 +1,18 @@
nut_client_local_server: false
nut_client_shutdown_cmd: /scripts/shutdown.sh
nut_client_shutdown_exit: "false"
nut_client_hostsync: 15
nut_client_notify_cmd: /scripts/notify.sh
nut_client_min_supplies: 1
nut_client_ups_devices:
- name: ups0
host: hv00.balsillie.house
type: secondary
port: 3493
powervalue: 1
nut_client_notify_messages:
- name: SHUTDOWN
message: "UPSMON shutdown triggered for KUBE00."
nut_client_notify_flags:
- name: SHUTDOWN
flags: SYSLOG+WALL+EXEC

View File

@ -1,4 +0,0 @@
---
ansible_host: kube01.balsillie.net
ssh_public_key_string: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGtk+mk1+J3sZ3CA/yS7XV2wH913IdJj0gznmb/nI2nV ladmin@kube01.balsillie.net
k8s_remove_control_plane_taint: true

View File

@ -1,4 +0,0 @@
---
ansible_host: kube02.balsillie.net
ssh_public_key_string: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGOfsOJJJ34VT9mHv9moHQAQNTAok8sOr49rVTkIfDn9 ladmin@kube02.balsillie.net
k8s_remove_control_plane_taint: true

View File

@ -1,4 +0,0 @@
---
ansible_host: kube03.balsillie.net
ssh_public_key_string: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINFqYq3CcxziLbWfp/0VpO5uD/HyjiKTXD8t/dAS01Oq ladmin@kube00.balsillie.net
k8s_remove_control_plane_taint: true

View File

@ -1,3 +0,0 @@
ebs_zfs_storage_classes:
- name: ssd-zfs
dataset: ssd/data/open-ebs

View File

@ -0,0 +1 @@
synapse_host_address: matrix.balsillie.net

View File

@ -1,13 +0,0 @@
$ANSIBLE_VAULT;1.1;AES256
32663239363537353936346439323334373561303531343365356338626336626237386562376335
3637303166393236323236623637613632313831373065620a646639336130613534666633643633
33393032356261393764646166643465366164356236666464333439333039633934643732616666
6537396433663666650a316266393334656534323135643939336662626563646461363131336437
32383963366163323065376230633366383830626539396563323661643266643139316334616237
35633264626637346635613262383236396530313335346139653239316433646338613339303638
65326134306438333265636337376538313337356164663865653036343666353335663336376463
61616465333461656461313464623635336533363132626534373230633139373064636634613136
33633134313538326662323534386533363833326337383837393036653637663561323837373162
32613733353637313862323837653663343134323761363339333032383239643633666632663563
39366362663334316634346339663337386439386162636639393137306138303163333538616664
64333366663134356435

View File

@ -0,0 +1,4 @@
ansible_connection: local
ansible_user: ladmin
ansible_become_user: root
ansible_become_method: sudo

View File

@ -0,0 +1,11 @@
certbot_rfc2136_server: '10.208.240.1'
certbot_rfc2136_key_name: 'rndc-house'
certbot_rfc2136_key_algorithm: 'hmac-sha256'
certbot_webserver_type: 'nginx' # 'nginx' or 'apache'
certbot_dns_plugin: 'rfc2136'
certbot_email: "certbot.kodi00@balsillie.email"
certbot_acme_server: "acme-v02.api.letsencrypt.org"
certbot_domains:
- xmr.balsillie.house

View File

@ -1,16 +1,13 @@
all:
children:
ups:
aur_repo_hosts:
hosts:
ups00.balsillie.house:
dev.balsillie.house:
firewalls:
children:
opnsense:
hosts:
fw00.balsillie.net:
openwrt:
hosts:
fw00.balsillie.house:
router.balsillie.house:
switches:
hosts:
sw00.balsillie.house:
@ -19,63 +16,55 @@ all:
wap00.balsillie.house:
virtual_machines:
hosts:
kube01.balsillie.net:
kube02.balsillie.net:
kube03.balsillie.net:
fw00.balsillie.net:
fw00.balsillie.house:
mp00.balsillie.house:
win11.balsillie.house:
bare_metal:
hosts:
ups00.balsillie.house:
sw00.balsillie.house:
wap00.balsillie.house:
hv00.balsillie.house:
hv00.balsillie.net:
kube00.balsillie.house:
lat5420.balsillie.house:
lat7490.balsillie.house:
sff.balsillie.house:
nuc.balsillie.house:
servers:
children:
hypervisors:
hosts:
hv00.balsillie.net: # Hetzner
hv00.balsillie.house: # vp2420
hv01.balsillie.net: # 4U Rosewill
k8s:
children:
k8s_control:
hosts:
kube01.balsillie.net:
kube02.balsillie.net:
kube03.balsillie.net:
k8s_taint:
hosts:
kube01.balsillie.net:
kube02.balsillie.net:
kube03.balsillie.net:
kube00.balsillie.house:
k8s_worker:
hosts:
kube01.balsillie.net:
kube02.balsillie.net:
kube03.balsillie.net:
kube00.balsillie.net:
k8s_storage:
hosts:
kube01.balsillie.net:
kube02.balsillie.net:
kube03.balsillie.net:
kube00.balsillie.net:
nut_servers:
hosts:
hv00.balsillie.house:
nut_clients:
hosts:
hv00.balsillie.house:
kube00.balsillie.house:
nas:
hosts:
nas.balsillie.house:
workstations:
children:
arch:
hosts:
lat5420.balsillie.house:
sff.balsillie.house:
mp00.balsillie.house:
kodi00.balsillie.house:
nuc.balsillie.house:
windows:
hosts:
lat7490.balsillie.house:
win11.balsillie.house:
win11.balsillie.house:
laptops:
hosts:
lat5420.balsillie.house:
@ -83,19 +72,9 @@ all:
desktops:
hosts:
sff.balsillie.house:
mp00.balsillie.house:
hetzner:
hosts:
fw00.balsillie.net:
hv00.balsillie.net:
kube01.balsillie.net:
kube02.balsillie.net:
kube03.balsillie.net:
house:
hosts:
hv00.balsillie.house:
fw00.balsillie.house:
mp00.balsillie.house:
win11.balsillie.house:
lat5420.balsillie.house:
sff.balsillie.house:
mp00.balsillie.house:
kodi00.balsillie.house:
nuc.balsillie.house:
kodi:
hosts:
kodi00.balsillie.house:
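
With an inventory this nested, group membership is easiest to eyeball with (inventory filename illustrative):

ansible-inventory -i ansible/hosts.yaml --graph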

View File

@ -0,0 +1,10 @@
# code: language=ansible
- name: AUR Repo
hosts: aur_repo_hosts
become: true
gather_facts: true
roles:
# - certbot
- nginx
# - aur_repo_host

View File

@ -0,0 +1,10 @@
---
- name: Setup core home router
hosts:
- hv00.balsillie.house
gather_facts: true
become: true
roles:
# - role: aur_repo_host
- role: nginx

View File

@ -0,0 +1,15 @@
---
- name: Setup Kodi boxes
hosts:
- kodi00.balsillie.house
gather_facts: true
become: true
roles:
# - role: sshd
# - role: ufw
# - role: nginx
# - role: aur_repo_client
# - role: arr
- role: torrent
# - role: sonarr

View File

@ -0,0 +1,9 @@
---
- name: Setup NUC
hosts:
- nuc.balsillie.house
gather_facts: true
become: true
roles:
- role: certbot

View File

@ -0,0 +1,32 @@
- name: Install NUT
hosts:
- nut_servers
- nut_clients
become: true
gather_facts: true
tasks:
- name: Install NUT package on Archlinux
when: ansible_facts['os_family'] == "Archlinux"
community.general.pacman:
name: nut
state: latest
update_cache: true
- name: Setup NUT servers
gather_facts: false
hosts: nut_servers
become: true
roles:
- role: acme_certificate
acme_certificate_subject: "{{ ansible_host }}"
acme_certificate_zone: balsillie.house
acme_certificate_restart_services: ['nut-server.service']
- role: nut_server
- name: Setup NUT clients
gather_facts: false
hosts: nut_clients
become: true
roles:
- nut_client
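
A typical invocation for this playbook (paths illustrative):

ansible-playbook -i ansible/hosts.yaml ansible/playbooks/nut.yaml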

View File

@ -0,0 +1 @@
../../roles/

1
ansible/playbooks/k8s/roles Symbolic link
View File

@ -0,0 +1 @@
../../roles/

1
ansible/playbooks/roles Symbolic link
View File

@ -0,0 +1 @@
../roles

View File

@ -0,0 +1,44 @@
# code: language=ansible
- name: Clean Synapse
hosts: localhost
connection: local
become: false
gather_facts: false
tasks:
- name: Get room list
ansible.builtin.uri:
url: "https://{{ synapse_host_address }}/_synapse/admin/v1/rooms?limit=1000"
headers:
Authorization: "Bearer {{ synapse_admin_token }}"
register: room_list
- name: Set empty_rooms fact
ansible.builtin.set_fact:
empty_rooms: "{{ room_list.json.rooms | selectattr('joined_local_members', '==', 0) | list }}"
- name: Debug empty room count
ansible.builtin.debug:
msg: "Total empty rooms to delete: {{ empty_rooms | length }}"
- name: Delete empty rooms
when: empty_rooms | length > 0
ansible.builtin.uri:
url: "https://{{ synapse_host_address }}/_synapse/admin/v2/rooms/{{ room.room_id }}"
method: DELETE
headers:
Authorization: "Bearer {{ synapse_admin_token }}"
body_format: json
body: {}
loop: "{{ empty_rooms }}"
loop_control:
loop_var: room
label: "{{ room.room_id }}"
register: purge_ids
- name: Write purge_ids to file
ansible.builtin.copy:
dest: "{{ playbook_dir }}/purge_ids_{{ now(utc=false, fmt='%Y-%m-%d_%H-%M-%S') }}.json"
content: "{{ purge_ids.results | map(attribute='json.delete_id') | list | to_nice_json }}"
mode: "0664"
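
This play expects synapse_host_address (set in group vars above) plus an admin token, and drops a purge_ids_*.json file beside the playbook for the status play to consume; a hedged run, path illustrative:

ansible-playbook ansible/playbooks/synapse/clean_rooms.yaml -e synapse_admin_token=<admin-token>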

View File

@ -0,0 +1,28 @@
# code: language=ansible
- name: Clean Synapse
hosts: localhost
connection: local
become: false
gather_facts: false
vars_prompt:
- name: room_id
prompt: "Enter the room ID to delete"
private: false
tasks:
- name: Delete room
ansible.builtin.uri:
url: "https://{{ synapse_host_address }}/_synapse/admin/v2/rooms/{{ room_id }}"
method: DELETE
headers:
Authorization: "Bearer {{ synapse_admin_token }}"
body_format: json
body: {}
register: purge_id
- name: Wait for purge to complete
ansible.builtin.uri:
url: "https://{{ synapse_host_address }}/_synapse/admin/v2/rooms/delete_status/{{ item }}"
headers:
Authorization: "Bearer {{ synapse_admin_token }}"

View File

@ -0,0 +1,19 @@
# code: language=ansible
- name: Clean Synapse
hosts: localhost
connection: local
become: false
gather_facts: false
tasks:
- name: Get room details
ansible.builtin.uri:
url: "https://{{ synapse_host_address }}/_synapse/admin/v1/rooms?limit=1000"
headers:
Authorization: "Bearer {{ synapse_admin_token }}"
register: result
- name: Print result
ansible.builtin.debug:
var: result.json.rooms | map(attribute='room_id') | list

View File

@ -0,0 +1,19 @@
# code: language=ansible
- name: Clean Synapse
hosts: localhost
connection: local
become: false
gather_facts: false
tasks:
- name: Get large rooms
ansible.builtin.uri:
url: "https://{{ synapse_host_address }}/_synapse/admin/v1/statistics/database/rooms"
headers:
Authorization: "Bearer {{ synapse_admin_token }}"
register: result
- name: Print result
ansible.builtin.debug:
var: result.json

View File

@ -0,0 +1,44 @@
# code: language=ansible
- name: Clean Synapse
hosts: localhost
connection: local
become: false
gather_facts: false
vars_prompt:
- name: "purge_ids_file"
prompt: "Enter the file name containing the purge ids"
private: false
tasks:
- name: Load purge ids
ansible.builtin.slurp:
src: "{{ playbook_dir }}/{{ purge_ids_file }}"
register: purge_ids
- name: Set purge_ids_list fact
ansible.builtin.set_fact:
purge_ids_list: "{{ purge_ids.content | b64decode | from_json }}"
- name: Get purge status
ansible.builtin.uri:
url: "https://{{ synapse_host_address }}/_synapse/admin/v2/rooms/delete_status/{{ item }}"
headers:
Authorization: "Bearer {{ synapse_admin_token }}"
loop: "{{ purge_ids_list }}"
register: purge_status
- name: Set purge_status_totals
ansible.builtin.set_fact:
purge_status_shutting_down: "{{ purge_status.results | selectattr('json.status', '==', 'shutting_down') | list | length }}"
purge_status_purging: "{{ purge_status.results | selectattr('json.status', '==', 'purging') | list | length }}"
purge_status_complete: "{{ purge_status.results | selectattr('json.status', '==', 'complete') | list | length }}"
purge_status_failed: "{{ purge_status.results | selectattr('json.status', '==', 'failed') | list | length }}"
- name: Print status
ansible.builtin.debug:
msg: |
Shutting down: {{ purge_status_shutting_down }}
Purging: {{ purge_status_purging }}
Complete: {{ purge_status_complete }}
Failed: {{ purge_status_failed }}

View File

@ -0,0 +1,23 @@
# code: language=ansible
- name: Clean Synapse
hosts: localhost
connection: local
become: false
gather_facts: false
vars_prompt:
- name: room_id
prompt: "Enter the room ID to fetch"
private: false
tasks:
- name: Get room details
ansible.builtin.uri:
url: "https://{{ synapse_host_address }}/_synapse/admin/v1/rooms/{{ room_id }}"
headers:
Authorization: "Bearer {{ synapse_admin_token }}"
register: result
- name: Print result
ansible.builtin.debug:
var: result.json

View File

@ -0,0 +1,23 @@
# code: language=ansible
- name: Room members
hosts: localhost
connection: local
become: false
gather_facts: false
vars_prompt:
- name: room_id
prompt: "Enter the room ID to fetch"
private: false
tasks:
- name: Get room details
ansible.builtin.uri:
url: "https://{{ synapse_host_address }}/_synapse/admin/v1/rooms/{{ room_id }}/members"
headers:
Authorization: "Bearer {{ synapse_admin_token }}"
register: result
- name: Print result
ansible.builtin.debug:
var: result.json

View File

@ -0,0 +1,17 @@
---
- name: Configure Truenas
hosts: truenas
become: false
tasks:
- name: Install required packages
package:
name: "{{ item }}"
state: present
with_items:
- py37-ansible
- py37-pip
- py37-netifaces
- py37-netaddr
- py37-requests
- py37-yaml

View File

@ -1,25 +0,0 @@
---
# Arch install bare metal
# Systemd networking
- name: Setup systemd-networkd
hosts: hv00.balsillie.house
become: true
roles:
- name: systemd_networkd
vars:
ansible_host: 192.168.1.106
# Serial console
# - name: Setup serial console
# hosts: hv00_balsillie_house
# become: true
# roles:
# - name: serial_console
# Hypervisor setup
# VM setup

View File

@ -0,0 +1,218 @@
---
- name: Install required python libraries system wide
when: ansible_facts['os_family'] == "Archlinux"
community.general.pacman:
name:
- python-cryptography
- python-dnspython
state: latest
update_cache: true
- name: Set certificate path facts
ansible.builtin.set_fact:
acme_certificate_certificate_path: "/etc/ssl/private/{{ acme_certificate_subject }}.pem"
acme_certificate_chain_path: "/etc/ssl/private/{{ acme_certificate_subject }}.chain.pem"
acme_certificate_combined_path: "/etc/ssl/private/{{ acme_certificate_subject }}.combined.pem"
acme_certificate_csr_path: "/etc/ssl/private/{{ acme_certificate_subject }}.csr"
acme_certificate_fullchain_path: "/etc/ssl/private/{{ acme_certificate_subject }}.fullchain.pem"
acme_certificate_key_path: "/etc/ssl/private/{{ acme_certificate_subject }}.key"
acme_certificate_plain_combined_path: "/etc/ssl/private/{{ acme_certificate_subject }}.plain.combined.pem"
acme_certificate_plain_key_path: "/etc/ssl/private/{{ acme_certificate_subject }}.plain.key"
- name: Create ACME account key directory
ansible.builtin.file:
group: root
mode: '0700'
owner: root
path: /etc/ssl/private/ACME
state: directory
- name: Create ACME account key
community.crypto.openssl_privatekey:
cipher: auto
curve: secp384r1
format: auto_ignore
group: root
mode: '0600'
owner: root
passphrase: "{{ acme_certificate_account_key_passphrase }}"
path: /etc/ssl/private/ACME/account.key
size: 4096
state: present
type: RSA
- name: Generate RSA private key
community.crypto.openssl_privatekey:
cipher: auto
curve: secp384r1
format: auto_ignore
group: root
mode: '0600'
owner: root
passphrase: "{{ ssl_passphrase }}"
path: "{{ acme_certificate_key_path }}"
size: 4096
state: present
type: RSA
register: genrsa_private_key
- name: Generate CSR
community.crypto.openssl_csr:
common_name: "{{ acme_certificate_subject }}"
country_name: "{{ acme_certificate_csr_country }}"
digest: sha256
email_address: "{{ acme_certificate_csr_email }}"
group: root
locality_name: "{{ acme_certificate_csr_locality }}"
mode: '0600'
organization_name: "{{ acme_certificate_csr_organization }}"
owner: root
path: "{{ acme_certificate_csr_path }}"
privatekey_passphrase: "{{ ssl_passphrase }}"
privatekey_path: "{{ acme_certificate_key_path }}"
state: present
state_or_province_name: "{{ acme_certificate_csr_state }}"
use_common_name_for_san: true
- name: Submit ACME certificate request
community.crypto.acme_certificate:
account_email: "{{ acme_certificate_account_email }}"
account_key_passphrase: "{{ acme_certificate_account_key_passphrase }}"
account_key_src: /etc/ssl/private/ACME/account.key
acme_directory: "{{ acme_certificate_directory }}"
acme_version: 2
chain_dest: "{{ acme_certificate_chain_path }}"
challenge: dns-01
csr: "{{ acme_certificate_csr_path }}"
dest: "{{ acme_certificate_certificate_path }}"
fullchain_dest: "{{ acme_certificate_fullchain_path }}"
modify_account: true
select_crypto_backend: cryptography
terms_agreed: true
validate_certs: true
register: challenge
- name: Debug ACME certificate challenge
ansible.builtin.debug:
var: challenge
- name: Proceed if challenge is changed
when:
- challenge is changed
- acme_certificate_subject in challenge.challenge_data
block:
- name: Answer ACME certificate challenge
community.general.nsupdate:
key_algorithm: "{{ rfc2136_key_algorithm }}"
key_name: "{{ rfc2136_key_name }}"
key_secret: "{{ rfc2136_key_secret }}"
port: 53
protocol: tcp
record: "{{ challenge.challenge_data[acme_certificate_subject]['dns-01'].record }}."
server: "{{ rfc2136_server_address }}"
state: present
ttl: 3600
type: TXT
value: "{{ challenge.challenge_data[acme_certificate_subject]['dns-01'].resource_value }}"
# zone: "{{ acme_certificate_zone }}"
register: nsupdate_result
- name: Debug nsupdate result
ansible.builtin.debug:
var: nsupdate_result
- name: Retrieve ACME certificate
community.crypto.acme_certificate:
account_email: "{{ acme_certificate_account_email }}"
account_key_passphrase: "{{ acme_certificate_account_key_passphrase }}"
account_key_src: /etc/ssl/private/ACME/account.key
acme_directory: "{{ acme_certificate_directory }}"
acme_version: 2
chain_dest: "{{ acme_certificate_chain_path }}"
challenge: dns-01
csr: "{{ acme_certificate_csr_path }}"
data: "{{ challenge }}"
dest: "{{ acme_certificate_certificate_path }}"
fullchain_dest: "{{ acme_certificate_fullchain_path }}"
select_crypto_backend: cryptography
terms_agreed: true
validate_certs: true
- name: Cleanup ACME challenge
community.general.nsupdate:
key_algorithm: "{{ rfc2136_key_algorithm }}"
key_name: "{{ rfc2136_key_name }}"
key_secret: "{{ rfc2136_key_secret }}"
port: 53
protocol: tcp
record: "{{ challenge.challenge_data[acme_certificate_subject]['dns-01'].record }}."
server: "{{ rfc2136_server_address }}"
state: absent
ttl: 3600
type: TXT
value: "{{ challenge.challenge_data[acme_certificate_subject]['dns-01'].resource_value }}"
zone: "{{ acme_certificate_zone }}"
- name: Slurp fullchain contents
ansible.builtin.slurp:
src: "{{ acme_certificate_fullchain_path }}"
register: acme_certificate_fullchain_content
- name: Slurp private key contents
ansible.builtin.slurp:
src: "{{ acme_certificate_key_path }}"
register: acme_certificate_key_content
- name: Create combined cert file
ansible.builtin.template:
dest: "{{ acme_certificate_combined_path }}"
group: root
mode: '0600'
owner: root
src: combined.pem.j2
- name: Check if plain key file exists
ansible.builtin.stat:
path: "{{ acme_certificate_plain_key_path }}"
register: plain_key_file
- name: Create a plain text copy of the SSL private key # noqa: no-handler
when: |
genrsa_private_key.changed or
not plain_key_file.stat.exists
ansible.builtin.command:
cmd: openssl rsa -in {{ acme_certificate_key_path }} -passin pass:{{ ssl_passphrase }} -out {{ acme_certificate_plain_key_path }}
changed_when: true
- name: Slurp plain text private key contents
ansible.builtin.slurp:
src: "{{ acme_certificate_plain_key_path }}"
register: acme_certificate_key_content
- name: Create plain text combined cert file
ansible.builtin.template:
dest: "{{ acme_certificate_plain_combined_path }}"
group: root
mode: '0600'
owner: root
src: combined.pem.j2
- name: Dependent services block
when:
- (acme_certificate_restart_services | default([]) | length) >= 1
- challenge is changed
block:
- name: Check state of running services
ansible.builtin.service_facts:
- name: Restart dependent services
when:
- ansible_facts.services[item] is defined
- ansible_facts.services[item].state in ['running','failed']
ansible.builtin.service:
name: "{{ item }}"
state: restarted
loop: "{{ acme_certificate_restart_services }}"

View File

@ -0,0 +1,2 @@
{{ acme_certificate_fullchain_content['content'] | b64decode }}
{{ acme_certificate_key_content['content'] | b64decode }}
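
A quick expiry check on the resulting bundle (openssl reads the first certificate in the file; path taken from the nut_server vars above):

openssl x509 -in /etc/ssl/private/hv00.balsillie.house.plain.combined.pem -noout -subject -enddate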

View File

@ -44,4 +44,6 @@ pacstrap:
ca-certificates-mozilla
ca-certificates-utils
efibootmgr
grep
grep
mdadm
lvm2

View File

@ -23,6 +23,8 @@
# cryptdevice=UUID=device-UUID:root root=/dev/mapper/root rw
# add efi to /etc/fstab
# mkdir /mnt/mountpoint/etc
# sudo genfstab -L /mnt/mountpoint >> /mnt/mountpoint/etc/fstab
@ -38,8 +40,9 @@
# pacstrap
# pacstrap -K /mnt/root base linux-lts linux-firmware nano openssh bind bash efibootmgr reflector screen pv pinentry sudo man-db man-pages texinfo ufw nftables intel-ucode e2fsprogs dosfstools curl cryptsetup
# sbctl fwupd fwupd-efi dmidecode udisks2
# pacstrap -K /mnt/root base linux-lts linux-firmware nano openssh bind bash efibootmgr reflector screen pv pinentry sudo man-db man-pages texinfo ufw nftables intel-ucode e2fsprogs dosfstools curl cryptsetup sbctl sbsigntools fwupd fwupd-efi dmidecode udisks2 usbutils inetutils ethtool qemu-guest-agent arch-install-scripts lsof
# desktop
# pacstrap -K /mnt base linux linux-firmware nano openssh bind bash efibootmgr reflector screen pv pinentry sudo man-db man-pages texinfo ufw nftables intel-ucode e2fsprogs dosfstools curl cryptsetup sbctl sbsigntools fwupd fwupd-efi dmidecode udisks2 usbutils inetutils ethtool arch-install-scripts lsof btrfs-progs plasma-meta plasma-wayland-session kde-system dolphin-plugins
# gen fstab
# genfstab -L /mnt/root >> /mnt/root/etc/fstab
@ -51,6 +54,11 @@
# set hostname
# echo hv00 > /etc/hostname
# TODO add entries to /etc/hosts
# 127.0.0.1 localhost
# ::1 localhost
# 127.0.1.1 static_fqdn
# link timezone
# ln -sf /usr/share/zoneinfo/Australia/Brisbane /etc/localtime
@ -72,15 +80,57 @@
# useradd -u 1000 -U -m -b /home/ -G wheel -s /bin/bash ladmin
# set new user password
# disable root password
# disable root user
# passwd -l root
# usermod -s /sbin/nologin root
# create /etc/kernel/cmdline file
# the uuids are the DISK uuids from /dev/disk/by-uuid, NOT the partuuids
# echo 'cryptdevice=dbbb9fb2-5509-4701-a2bb-5660934a5378:root root=/dev/mapper/root rw' > /etc/kernel/cmdline
# for sd-encrypt hook
# echo 'rd.luks.name=dbbb9fb2-5509-4701-a2bb-5660934a5378=root root=/dev/mapper/root rw' > /etc/kernel/cmdline
# modify mkinitcpio for encryption
# create a default systemd-networkd config
# enable systemd-networkd
# enable sshd
# enable ufw service
# enable ufw firewall
# create ufw config to allow ssh port 22
# modify mkinitcpio presets
# template file?
# output to default efi path ESP/efi/boot/bootx64.efi
# modify mkinitcpio.conf for encryption
# old HOOKS=(base udev autodetect modconf kms keyboard keymap consolefont block filesystems fsck)
# new HOOKS=(base systemd keyboard autodetect modconf kms block sd-encrypt filesystems fsck)
# sed -i 's/^HOOKS=(base udev autodetect modconf block filesystems keyboard fsck)/HOOKS=(base udev autodetect modconf block encrypt filesystems keyboard fsck)/g' /etc/mkinitcpio.conf
# sed -i 's/^HOOKS=(base udev autodetect modconf block filesystems keyboard fsck)/HOOKS=(base udev autodetect modconf block encrypt filesystems keyboard fsck)/g' /etc/mkinitcpio.conf
# generate sb keys with sbctl
# keys go to /usr/share/secureboot/keys/db/db.pem
# enroll sbctl keys
# add console= option to cmdline file
# create initcpio post hook /etc/initcpio/post/uki-sbsign
# make /etc/initcpio/post/uki-sbsign executable
# chmod +x /etc/initcpio/post/uki-sbsign
# make initcpio
# mkinitcpio -p linux-lts
# vfio and iommu
# add 'intel_iommu=on iommu=pt' to kernel cmdline
# add vfio binding
# vp2420 iGPU = 8086:4555
# add vfio-pci ids to /etc/kernel/cmdline
# vfio-pci.ids=8086:4555
# add vfio modules to mkinitcpio.conf
# MODULES=(vfio_pci vfio vfio_iommu_type1)
# ensure modconf hook is in mkinitcpio.conf
# HOOKS=(base systemd keyboard autodetect modconf kms block sd-encrypt filesystems fsck)
# efibootmgr NO BACKSLASH ON A ROOT FILE
# efibootmgr -c -d /dev/nvme0n1 -p 1 -L "Arch Linux" -l "archlinux.efi"

View File

@ -0,0 +1,24 @@
---
- name: Install arr packages
when: ansible_facts['os_family'] == "Archlinux"
community.general.pacman:
name: "{{ arr_packages }}"
state: present
update_cache: true
- name: Reload systemd
ansible.builtin.systemd:
daemon_reload: true
- name: Start arr services
ansible.builtin.systemd:
name: "{{ item }}"
state: started
enabled: true
loop:
- sonarr.service
- radarr.service
- lidarr.service
- prowlarr.service
- bazarr.service

View File

@ -0,0 +1,6 @@
arr_packages:
- sonarr
- radarr
- lidarr
- bazarr
- prowlarr

View File

@ -0,0 +1,50 @@
---
- name: Check if repo public key is in pacman keyring
ansible.builtin.command:
argv:
- pacman-key
- --list-keys
- "{{ aur_repo_client_public_key_fingerprint }}"
register: repo_key_check
failed_when: repo_key_check.rc not in [0, 1]
changed_when: false
- name: Add repo public key to pacman keyring
when: repo_key_check.rc == 1
block:
- name: Import the repo public key
ansible.builtin.command:
argv:
- pacman-key
- --recv-keys
- "{{ aur_repo_client_public_key_fingerprint }}"
- --keyserver
- "{{ aur_repo_client_keyserver }}"
changed_when: true
- name: Trust the repo public key
ansible.builtin.command:
argv:
- pacman-key
- --lsign-key
- "{{ aur_repo_client_public_key_fingerprint }}"
changed_when: true
- name: Add home repo block to pacman.conf
ansible.builtin.blockinfile:
path: /etc/pacman.conf
block: |
[{{ aur_repo_client_repo_name }}]
SigLevel = Required TrustedOnly
Server = {{ aur_repo_client_repo_address }}
create: false
state: present
insertafter: EOF
register: add_pacman_repo
- name: Update pacman database # noqa: no-handler
when: add_pacman_repo.changed
community.general.pacman:
update_cache: true

View File

@ -0,0 +1,6 @@
---
aur_repo_client_repo_name: "home"
aur_repo_client_repo_address: "https://repo.balsillie.house"
aur_repo_client_public_key_fingerprint: DB529158B99DD8311D78CA2FBE6003C744F56EE2
aur_repo_client_keyserver: hkps://keyserver.ubuntu.com
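
On a configured client these defaults should make the repo listable and installable, roughly:

pacman -Sy
pacman -Sl home
pacman -S home/pikaur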

View File

@ -0,0 +1,12 @@
[Unit]
Description=Sync AUR packages
Wants=aur-sync.timer
[Service]
Type=oneshot
ExecStart=/usr/bin/aur sync --no-view --upgrades --no-confirm --clean --rm-deps --sign --database home
User=aur-builder
Group=aur-builder
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,12 @@
[Unit]
Description=Timer that runs aur sync service
Requires=aur-sync.service
[Timer]
Unit=aur-sync.service
OnCalendar=*-*-* 16:00:00
RandomizedDelaySec=120
Persistent=true
[Install]
WantedBy=timers.target
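
The equivalent manual enablement and a check of the next scheduled run:

systemctl enable --now aur-sync.timer
systemctl list-timers 'aur-sync*'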

View File

@ -0,0 +1,270 @@
---
- name: Lookup aur_repo_host secret
delegate_to: localhost
become: false
community.hashi_vault.vault_kv1_get:
path: ansible/group_vars/aur_repo_hosts
register: aur_repo_host_secret
- name: Set aur_repo facts
ansible.builtin.set_fact:
aur_repo_private_key: "{{ aur_repo_host_secret.secret.aur_repo_private_key }}"
aur_repo_key_thumbprint: "{{ aur_repo_host_secret.secret.aur_repo_key_thumbprint }}"
- name: Create the makepkg drop-in config file
ansible.builtin.template:
dest: /etc/makepkg.conf.d/makepkg.conf
src: makepkg.conf.j2
owner: root
group: root
mode: "0644"
- name: Create the build user group
ansible.builtin.group:
name: "{{ aur_repo_build_account }}"
system: true
state: present
- name: Create the build user
ansible.builtin.user:
name: "{{ aur_repo_build_account }}"
password: '!'
group: "{{ aur_repo_build_account }}"
comment: "AUR Package Builder"
shell: /sbin/nologin
home: "{{ aur_repo_dir }}"
createhome: true
system: true
state: present
- name: Create the build user sudoer file
ansible.builtin.template:
dest: /etc/sudoers.d/{{ aur_repo_build_account }}
src: aur-sudoer.j2
owner: root
group: root
mode: "0640"
- name: Create the build dirs
ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: "{{ aur_repo_build_account }}"
group: "{{ aur_repo_build_account }}"
mode: "0775"
loop:
- "{{ aur_repo_dir }}"
- "{{ aur_repo_dir }}/packages"
- "{{ aur_repo_dir }}/sources"
- "{{ aur_repo_dir }}/srcpackages"
- /var/log/makepkg
- /tmp/build
- name: Check if the signing key is in build user's keyring
ansible.builtin.command:
cmd: gpg2 --list-secret-key --with-colons {{ aur_repo_key_thumbprint }}
failed_when: key_result.rc not in [0, 2]
changed_when: false
register: key_result
vars:
ansible_become_user: "{{ aur_repo_build_account }}"
- name: GPG key import block
when: key_result.rc == 2
block:
- name: Template out the signing private key
ansible.builtin.template:
dest: "/tmp/build/signing_key.asc"
src: signing_key.asc.j2
owner: "{{ aur_repo_build_account }}"
group: "{{ aur_repo_build_account }}"
mode: "0600"
- name: Import the signing key
ansible.builtin.command:
cmd: gpg2 --import /tmp/build/signing_key.asc
changed_when: true
vars:
ansible_become_user: "{{ aur_repo_build_account }}"
- name: Delete the signing key
ansible.builtin.file:
path: "/tmp/build/signing_key.asc"
state: absent
- name: Check if aurutils is already installed
ansible.builtin.stat:
follow: true
path: /usr/bin/aur
register: aurutils_stat
- name: Aurutils install block
when: not aurutils_stat.stat.exists
block:
- name: Install makepkg dependencies
community.general.pacman:
name:
- git
- base-devel
state: present
update_cache: true
- name: Clone aurutils
ansible.builtin.git:
depth: 1
dest: /tmp/aurutils
repo: https://aur.archlinux.org/aurutils.git
single_branch: true
version: master
- name: Slurp PKGBUILD contents
ansible.builtin.slurp:
path: /tmp/aurutils/PKGBUILD
register: aurutils_pkgbuild
- name: Parse PKGBUILD into facts
ansible.builtin.set_fact:
aurutils_dependencies: "{{ aurutils_pkgbuild['content'] | b64decode | regex_search('(?<=^depends=\\().*(?=\\)$)', multiline=True) | replace(\"'\", '') | split(' ') }}" # noqa: yaml[line-length]
aurutils_pkgver: "{{ aurutils_pkgbuild['content'] | b64decode | regex_search('(?<=^pkgver=).*(?=$)', multiline=True) }}"
aurutils_pkgrel: "{{ aurutils_pkgbuild['content'] | b64decode | regex_search('(?<=^pkgrel=).*(?=$)', multiline=True) }}"
aurutils_arch: "{{ aurutils_pkgbuild['content'] | b64decode | regex_search('(?<=^arch=\\().*(?=\\)$)', multiline=True) | replace(\"'\", '') }}"
- name: Install aurutils dependencies
community.general.pacman:
name: "{{ aurutils_dependencies }}"
state: present
reason: dependency
update_cache: false
- name: Build aurutils
ansible.builtin.command:
cmd: makepkg
chdir: /tmp/aurutils
creates: "{{ aur_repo_dir }}/packages/aurutils-{{ aurutils_pkgver }}-{{ aurutils_pkgrel }}-{{ aurutils_arch }}.pkg.tar"
vars:
ansible_become_user: "{{ aur_repo_build_account }}"
- name: Update repo database
ansible.builtin.command:
argv:
- repo-add
- --prevent-downgrade
- --remove
- --sign
- --key
- "{{ aur_repo_key_thumbprint }}"
- home.db.tar
- aurutils-{{ aurutils_pkgver }}-{{ aurutils_pkgrel }}-{{ aurutils_arch }}.pkg.tar
chdir: "{{ aur_repo_dir }}/packages"
changed_when: true
vars:
ansible_become_user: "{{ aur_repo_build_account }}"
- name: Check if the signing key is in pacman keyring
ansible.builtin.command:
argv:
- pacman-key
- -l
- "{{ aur_repo_key_thumbprint }}"
failed_when: pacman_key_result.rc not in [0, 1]
changed_when: false
register: pacman_key_result
- name: Pacman key import block
when: pacman_key_result.rc == 1
block:
- name: Import the signing public key to arch keyring
ansible.builtin.command:
argv:
- pacman-key
- -r
- "{{ aur_repo_key_thumbprint }}"
- --keyserver
- hkps://keyserver.ubuntu.com
changed_when: true
- name: Locally sign the imported pacman key
ansible.builtin.command:
argv:
- pacman-key
- --lsign-key
- "{{ aur_repo_key_thumbprint }}"
changed_when: true
- name: Add custom repo block to pacman.conf
ansible.builtin.blockinfile:
path: /etc/pacman.conf
block: |
[home]
SigLevel = Required TrustedOnly
Server = file://{{ aur_repo_dir }}/packages
create: false
state: present
insertafter: EOF
- name: Install aurutils
community.general.pacman:
name: aurutils
state: present
update_cache: true
# - name: Enable the multilib repository
# ansible.builtin.replace:
# path: /etc/pacman.conf
# backup: true
# regexp: '^[#]?\[multilib\]\n[#]?Include = \/etc\/pacman.d\/mirrorlist$'
# replace: '[multilib]\nInclude = /etc/pacman.d/mirrorlist'
# register: multilib_enable
# - name: Update the package database if multilib was enabled # noqa: no-handler
# when: multilib_enable.changed | default(false)
# community.general.pacman:
# update_cache: true
- name: Sync AUR packages
ansible.builtin.command:
cmd: aur sync --no-view -CnrS {{ item }}
loop: "{{ aur_repo_host_packages }}"
register: aur_sync_result
changed_when: (aur_sync_result.stderr_lines | last | replace(':','')) != "sync there is nothing to do"
failed_when: aur_sync_result.rc != 0
vars:
ansible_become_user: "{{ aur_repo_build_account }}"
- name: Add the root www folder if it doesn't exist
ansible.builtin.file:
path: /var/www
state: directory
owner: http
group: http
mode: "0775"
- name: Link the aur repo to the web root
ansible.builtin.file:
src: "{{ aur_repo_dir }}/packages"
path: /var/www{{ aur_repo_dir }}
state: link
- name: Add the aur-sync systemd unit files
ansible.builtin.copy:
src: "{{ item }}"
dest: /usr/lib/systemd/system/
owner: root
group: root
mode: "0644"
loop:
- aur-sync.service
- aur-sync.timer
register: aur_sync_unit_files
- name: Enable and start the aur-sync systemd timer # noqa: no-handler
when: aur_sync_unit_files.changed
ansible.builtin.systemd:
name: aur-sync.timer
enabled: true
state: started
daemon_reload: true
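
For a one-off sync outside the timer schedule, with live logs:

systemctl start aur-sync.service
journalctl -u aur-sync.service -f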

View File

@ -0,0 +1 @@
{{ aur_repo_build_account }} ALL = (root) NOPASSWD: /usr/bin/pacman, /usr/bin/pacsync
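
Sudoers drop-ins are worth validating before first use; with aur_repo_build_account rendered to aur-builder this is:

visudo -cf /etc/sudoers.d/aur-builder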

View File

@ -0,0 +1,21 @@
# Global Options
OPTIONS=(strip docs !libtool !staticlibs emptydirs zipman purge debug lto autodeps)
MAKEFLAGS="-j{{ (ansible_processor_nproc - 1) }}"
PACKAGER="{{ aur_repo_packager_name }} <{{ aur_repo_packager_email }}>"
# Build Environment
BUILDDIR=/tmp/build
BUILDENV=(!distcc color !ccache check sign)
GPGKEY={{ aur_repo_key_thumbprint }}
# Outputs
PKGDEST={{ aur_repo_dir }}/packages
SRCDEST={{ aur_repo_dir }}/sources
SRCPKGDEST={{ aur_repo_dir }}/srcpackages
LOGDEST=/var/log/makepkg
PKGEXT=".pkg.tar"
SRCEXT=".src.tar"

View File

@ -0,0 +1 @@
{{ aur_repo_private_key }}

View File

@ -0,0 +1,6 @@
---
- name: Restart nginx
ansible.builtin.service:
name: nginx.service
state: restarted

View File

@ -0,0 +1,67 @@
- name: Install certbot package (Archlinux)
when: ansible_facts['os_family'] == "Archlinux"
community.general.pacman:
name:
- certbot
- certbot-dns-{{ certbot_dns_plugin }}
state: present
update_cache: true
- name: Install certbot webserver plugin (Archlinux)
when:
- ansible_facts['os_family'] == "Archlinux"
- certbot_webserver_type == 'nginx'
community.general.pacman:
name:
- certbot-nginx
state: present
update_cache: true
- name: Template out the rfc2136 credentials file
when: certbot_dns_plugin == 'rfc2136'
ansible.builtin.template:
src: "{{ certbot_dns_plugin }}.conf.j2"
dest: "/etc/letsencrypt/{{ certbot_dns_plugin }}.conf"
owner: root
group: root
mode: '0600'
- name: Template out cloudflare credentials file
when: certbot_dns_plugin == 'cloudflare'
ansible.builtin.template:
src: "{{ certbot_dns_plugin }}.conf.j2"
dest: "/etc/letsencrypt/{{ certbot_dns_plugin }}.conf"
owner: root
group: root
mode: '0600'
- name: Template out the certbot default config
ansible.builtin.template:
src: cli.ini.j2
dest: /etc/letsencrypt/cli.ini
owner: root
group: root
mode: '0644'
- name: Request and install certificates
ansible.builtin.command:
argv:
- certbot
- certonly
- -n
- --dns-{{ certbot_dns_plugin }}
- --dns-{{ certbot_dns_plugin }}-credentials
- /etc/letsencrypt/{{ certbot_dns_plugin }}.conf
- --dns-{{ certbot_dns_plugin }}-propagation-seconds
- "{{ certbot_dns_propagation_seconds | default(10) }}"
- -d
- "{{ item }}"
creates: /etc/letsencrypt/live/{{ item }}/fullchain.pem
loop: "{{ certbot_domains }}"
notify: "{{ certbot_notify | default(omit) }}"
- name: Enable certbot renewal
ansible.builtin.service:
name: certbot-renew.timer
state: started
enabled: true
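
For the rfc2136 hosts the request task renders to roughly the following per domain (xmr.balsillie.house taken from the host vars above), and renewal can be rehearsed afterwards:

certbot certonly -n --dns-rfc2136 --dns-rfc2136-credentials /etc/letsencrypt/rfc2136.conf --dns-rfc2136-propagation-seconds 10 -d xmr.balsillie.house
certbot renew --dry-run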

View File

@ -0,0 +1,3 @@
rsa-key-size = 4096
email = {{ certbot_email }}
agree-tos = true

View File

@ -0,0 +1 @@
dns_cloudflare_api_token = {{ certbot_cloudflare_api_token }}

View File

@ -0,0 +1,6 @@
dns_rfc2136_server = {{ certbot_rfc2136_server }}
dns_rfc2136_port = {{ certbot_rfc2136_port | default(53) }}
dns_rfc2136_name = {{ certbot_rfc2136_key_name }}
dns_rfc2136_secret = {{ certbot_rfc2136_key_secret }}
dns_rfc2136_algorithm = {{ certbot_rfc2136_key_algorithm | upper }}
dns_rfc2136_sign_query = true

View File

@ -0,0 +1,82 @@
---
- name: Install Docker on Archlinux
when: ansible_facts['os_family'] == "Archlinux"
community.general.pacman:
name: docker
state: present
update_cache: true
- name: Add users to docker group
ansible.builtin.user:
name: "{{ item }}"
groups: docker
append: true
loop: "{{ docker_users }}"
- name: Start and enable Docker
ansible.builtin.systemd:
name: docker
state: started
enabled: true
- name: Create Docker networks
when:
- docker_networks is defined
- docker_networks | length > 0
community.docker.docker_network:
attachable: "{{ item.attachable | default(true) }}"
driver: "{{ item.driver | default('bridge') }}"
driver_options: "{{ item.driver_options | default(omit) }}"
enable_ipv6: "{{ item.enable_ipv6 | default(false) }}"
internal: "{{ item.internal | default(false) }}"
ipam_config: "{{ item.ipam | default(omit) }}"
name: "{{ item.name }}"
state: "present"
loop: "{{ docker_networks }}"
- name: Create Docker volumes
when:
- docker_volumes is defined
- docker_volumes | length > 0
community.docker.docker_volume:
driver: "{{ item.driver | default('local') }}"
driver_options: "{{ item.driver_options | default({}) }}"
recreate: "never"
state: "present"
volume_name: "{{ item.name }}"
loop: "{{ docker_volumes }}"
- name: Pull Docker images
when:
- docker_images is defined
- docker_images | length > 0
community.docker.docker_image_pull:
name: "{{ item.name }}"
pull: "always"
tag: "{{ item.tag | default('latest') }}"
loop: "{{ docker_images }}"
- name: Create Docker containers
when:
- docker_containers is defined
- docker_containers | length > 0
community.docker.docker_container:
auto_remove: "{{ item.auto_remove | default(false) }}"
capabilities: "{{ item.capabilities | default(omit) }}"
command: "{{ item.command | default(omit) }}"
detach: true
domainname: "{{ item.domainname | default(omit) }}"
entrypoint: "{{ item.entrypoint | default(omit) }}"
env: "{{ item.env | default({}) }}"
etc_hosts: "{{ item.etc_hosts | default({}) }}"
hostname: "{{ item.hostname | default(item.name) }}"
image: "{{ item.image }}"
name: "{{ item.name }}"
networks: "{{ item.networks | default(omit) }}"
published_ports: "{{ item.ports | default([]) }}"
restart_policy: "{{ item.restart_policy | default('unless-stopped') }}"
state: 'started'
sysctls: "{{ item.sysctls | default({}) }}"
volumes: "{{ item.volumes | default([]) }}"
loop: "{{ docker_containers }}"

View File

@ -1,16 +1,20 @@
libvirt_packages:
arch:
qemu-base
openbsd-netcat
swtpm
gettext
libvirt
libvirt-python
Archlinux:
- qemu-base
- openbsd-netcat
- swtpm
- gettext
- libvirt
- libvirt-python
- python-lxml
hypervisor:
storage: dir
device: /dev/sda
datasets:
- name: tank/vhds
compression: lz4
encryption: 'off'
device: /dev/sdb
# hypervisor:
# storage: zfs
# datasets:
# - name: tank/vhds
# compression: lz4
# encryption: 'off'

View File

@ -1,12 +1,5 @@
---
- name: Format and mount the libvirt disk if it is not root
when:
- hypervisor.device not in (ansible_mounts | json_query('[?mount == `/`].device'))
- hypervisor.device not in (ansible_mounts | json_query('[?mount == `/var/lib/libvirt`].device'))
ansible.builtin.include_tasks:
file: libvirt_dir_mount.yaml
- name: Create the libvirt storage directories
ansible.builtin.file:
path: "{{ item }}"

View File

@ -12,6 +12,8 @@
part_start: 0%
state: present
# TODO disk encryption
- name: Format filesystem
community.general.filesystem:
device: "{{ hypervisor.device }}1"
@ -19,12 +21,24 @@
resizefs: true
state: present
- name: Stop the libvirt service
- name: Get list of services
ansible.builtin.service_facts:
- name: Stop the libvirt services
when: item in ansible_facts.services
ansible.builtin.service:
name: libvirtd
name: "{{ item }}"
state: stopped
loop:
- libvirtd.service
- name: Check if libvirt storage directory exists
ansible.builtin.stat:
path: /var/lib/libvirt/
register: libvirt_storage
- name: Temp mount and copy block
when: libvirt_storage.stat.exists
block:
- name: Temporarily mount hypervisor storage
@ -42,6 +56,17 @@
remote_src: true
mode: preserve
- name: Remove existing libvirt storage
ansible.builtin.file:
path: /var/lib/libvirt/
state: "{{ item }}"
owner: root
group: root
mode: '0775'
loop:
- absent
- directory
always:
- name: Unmount from temporary mount point
@ -49,17 +74,6 @@
path: /mnt/libvirt_temp/
state: absent
- name: Remove existing libvirt storage
ansible.builtin.file:
path: /var/lib/libvirt/
state: "{{ item }}"
owner: root
group: root
mode: '0775'
loop:
- absent
- directory
- name: Mount hypervisor storage
ansible.posix.mount:
path: /var/lib/libvirt/
@ -69,6 +83,9 @@
boot: true
- name: Start the libvirt service
when: item in ansible_facts.services
ansible.builtin.service:
name: libvirtd
name: "{{ item }}"
state: started
loop:
- libvirtd.service

View File

@ -1,18 +1,32 @@
---
- name: Install libvirt packages (Arch)
when: ansible_os_distribution == 'Archlinux'
- name: Format and mount the libvirt disk if it is not root
when:
- hypervisor.device is defined
- hypervisor.device not in (ansible_mounts | json_query('[?mount == `/var/lib/libvirt`].device'))
ansible.builtin.include_tasks:
file: libvirt_drive_mount.yaml
- name: Install libvirt packages (Archlinux)
when: ansible_distribution == 'Archlinux'
community.general.pacman:
name: "{{ libvirt_packages['Arch'] }}"
name: "{{ libvirt_packages['Archlinux'] }}"
state: present
update_cache: true
- name: Add user to libvirt group
ansible.builtin.user:
name: "{{ ansible_user }}"
groups: libvirt
groups:
- libvirt
- libvirt-qemu
append: true
- name: Load br_netfilter kernel module so sysctl flags can be set
community.general.modprobe:
name: br_netfilter
state: present
- name: Set required sysctl flags for bridging
ansible.posix.sysctl:
name: "{{ item.name }}"
@ -20,7 +34,7 @@
state: present
sysctl_file: /etc/sysctl.d/bridge.conf
sysctl_set: true
value: "{{ item.value }}}}"
value: "{{ item.value }}"
loop:
- name: net.ipv4.ip_forward
value: 1
@ -77,11 +91,11 @@
community.libvirt.virt_pool:
command: facts
- name: Define the standard libvirt storage pools
- name: Define the standard libvirt storage pools # TODO add when condition against existing pools
community.libvirt.virt_pool:
name: "{{ item.name }}"
command: define
xml: "{{ lookup('template', 'dir_pool.xml.j2') }}"
xml: "{{ lookup('template', 'dir_libvirt_pool.xml.j2') }}"
loop:
- name: isos
path: /var/lib/libvirt/isos/

View File

@ -0,0 +1,41 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: le-prod-balsillie-house
spec:
acme:
email: le-prod.balsillie-house@balsillie.email
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: account-le-prod-balsillie-house
solvers:
- dns01:
rfc2136:
nameserver: 10.208.240.1:53
tsigKeyName: rndc-house
tsigAlgorithm: HMACSHA256
tsigSecretSecretRef:
name: tsig-keys
key: rndc-house
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: le-prod-balsillie-net
spec:
acme:
email: le-prod.balsillie-net@balsillie.email
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: account-le-prod-balsillie-net
solvers:
- dns01:
rfc2136:
nameserver: 10.208.240.1:53
tsigKeyName: rndc-net
tsigAlgorithm: HMACSHA256
tsigSecretSecretRef:
name: tsig-keys
key: rndc-net
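Both issuers point tsigSecretSecretRef at a Secret named tsig-keys; for a ClusterIssuer, cert-manager resolves that reference in its cluster-resource namespace (cert-manager by default). A minimal sketch of the expected Secret, with placeholder key material:

apiVersion: v1
kind: Secret
metadata:
  name: tsig-keys
  namespace: cert-manager   # cluster-resource namespace; adjust if overridden
type: Opaque
stringData:
  rndc-house: "REPLACE_WITH_TSIG_KEY"   # placeholder, base64 TSIG secret for balsillie.house
  rndc-net: "REPLACE_WITH_TSIG_KEY"     # placeholder, base64 TSIG secret for balsillie.net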

View File

@ -0,0 +1 @@
sudo kubeadm init --control-plane-endpoint=api.cluster.balsillie.house --cri-socket=unix:///run/containerd/containerd.sock --pod-network-cidr="10.208.0.0/16,2400:8907:e002:7c10::/64" --service-cidr="10.224.0.0/16,2400:8907:e002:7c11::/108" --apiserver-advertise-address="10.192.110.91" --apiserver-bind-port=6443 --apiserver-cert-extra-sans="api.cluster.balsillie.house" --apiserver-cert-extra-sans="10.192.110.90" --apiserver-cert-extra-sans="10.192.110.91" --apiserver-cert-extra-sans="2400:8907:e002:7c02::90" --apiserver-cert-extra-sans="2400:8907:e002:7c02::91" --node-name="kube00" --service-dns-domain="cluster.balsillie.house"
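Note that kubeadm accepts only a single --apiserver-advertise-address (repeating the flag just overrides the earlier value), and invocations this long are easier to review as a config file passed via kubeadm init --config. A rough declarative equivalent — a sketch, not tested against this cluster:

apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.192.110.91
  bindPort: 6443
nodeRegistration:
  name: kube00
  criSocket: unix:///run/containerd/containerd.sock
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
controlPlaneEndpoint: api.cluster.balsillie.house
networking:
  dnsDomain: cluster.balsillie.house
  podSubnet: 10.208.0.0/16,2400:8907:e002:7c10::/64
  serviceSubnet: 10.224.0.0/16,2400:8907:e002:7c11::/108
apiServer:
  certSANs:
    - api.cluster.balsillie.house
    - 10.192.110.90
    - 10.192.110.91
    - 2400:8907:e002:7c02::90
    - 2400:8907:e002:7c02::91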

View File

@ -0,0 +1,991 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
name: ingress-nginx
---
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
name: ingress-nginx-admission
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resourceNames:
- ingress-nginx-leader
resources:
- leases
verbs:
- get
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
name: ingress-nginx-admission
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
name: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
- namespaces
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
name: ingress-nginx-admission
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
name: ingress-nginx-admission
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
name: ingress-nginx-admission
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
apiVersion: v1
data:
allow-snippet-annotations: "true"
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
name: ingress-nginx-controller
namespace: ingress-nginx
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/access: external
name: ingress-controller-external
namespace: ingress-nginx
spec:
clusterIP: None
ipFamilies:
- IPv4
- IPv6
ipFamilyPolicy: RequireDualStack
ports:
- appProtocol: http
name: http
port: 80
protocol: TCP
targetPort: http
- appProtocol: https
name: https
port: 443
protocol: TCP
targetPort: https
selector:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/access: external
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/access: internal
name: ingress-controller-internal
namespace: ingress-nginx
spec:
clusterIP: None
ipFamilies:
- IPv4
- IPv6
ipFamilyPolicy: RequireDualStack
ports:
- appProtocol: http
name: http
port: 80
protocol: TCP
targetPort: http
- appProtocol: https
name: https
port: 443
protocol: TCP
targetPort: https
selector:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/access: internal
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/access: external
name: ingress-controller-external-admission
namespace: ingress-nginx
spec:
ports:
- appProtocol: https
name: https-webhook
port: 443
targetPort: webhook
selector:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/access: external
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/access: internal
name: ingress-controller-internal-admission
namespace: ingress-nginx
spec:
ports:
- appProtocol: https
name: https-webhook
port: 443
targetPort: webhook
selector:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/access: internal
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/access: external
name: ingress-controller-external
namespace: ingress-nginx
spec:
minReadySeconds: 0
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/access: external
strategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/access: external
spec:
containers:
- args:
- /nginx-ingress-controller
- --election-id=ingress-external
- --controller-class=k8s.io/ingress-external
- --ingress-class=external
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
image: registry.k8s.io/ingress-nginx/controller:v1.8.2@sha256:74834d3d25b336b62cabeb8bf7f1d788706e2cf1cfd64022de4137ade8881ff2
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: controller
ports:
- containerPort: 80
name: http
protocol: TCP
- containerPort: 443
name: https
protocol: TCP
- containerPort: 8443
name: webhook
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources:
requests:
cpu: 100m
memory: 90Mi
securityContext:
allowPrivilegeEscalation: true
capabilities:
add:
- NET_BIND_SERVICE
drop:
- ALL
runAsUser: 101
volumeMounts:
- mountPath: /usr/local/certificates/
name: webhook-cert
readOnly: true
dnsPolicy: ClusterFirst
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
volumes:
- name: webhook-cert
secret:
secretName: ingress-admission-external
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/access: internal
name: ingress-controller-internal
namespace: ingress-nginx
spec:
minReadySeconds: 0
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/access: internal
strategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/access: internal
spec:
containers:
- args:
- /nginx-ingress-controller
- --election-id=ingress-internal
- --controller-class=k8s.io/ingress-internal
- --ingress-class=internal
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
image: registry.k8s.io/ingress-nginx/controller:v1.8.2@sha256:74834d3d25b336b62cabeb8bf7f1d788706e2cf1cfd64022de4137ade8881ff2
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: controller
ports:
- containerPort: 80
name: http
protocol: TCP
- containerPort: 443
name: https
protocol: TCP
- containerPort: 8443
name: webhook
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources:
requests:
cpu: 100m
memory: 90Mi
securityContext:
allowPrivilegeEscalation: true
capabilities:
add:
- NET_BIND_SERVICE
drop:
- ALL
runAsUser: 101
volumeMounts:
- mountPath: /usr/local/certificates/
name: webhook-cert
readOnly: true
dnsPolicy: ClusterFirst
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
volumes:
- name: webhook-cert
secret:
secretName: ingress-admission-internal
---
apiVersion: batch/v1
kind: Job
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/access: external
name: ingress-admission-external-create
namespace: ingress-nginx
spec:
template:
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/access: external
name: ingress-admission-external-create
spec:
containers:
- args:
- create
- --host=ingress-controller-external-admission,ingress-controller-external-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=ingress-admission-external
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20230407@sha256:543c40fd093964bc9ab509d3e791f9989963021f1e9e4c9c7b6700b02bfb227b
imagePullPolicy: IfNotPresent
name: create
securityContext:
allowPrivilegeEscalation: false
nodeSelector:
kubernetes.io/os: linux
restartPolicy: OnFailure
securityContext:
fsGroup: 2000
runAsNonRoot: true
runAsUser: 2000
serviceAccountName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/access: external
name: ingress-admission-external-patch
namespace: ingress-nginx
spec:
template:
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/access: external
name: ingress-admission-external-patch
spec:
containers:
- args:
- patch
- --webhook-name=ingress-external-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=ingress-admission-external
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20230407@sha256:543c40fd093964bc9ab509d3e791f9989963021f1e9e4c9c7b6700b02bfb227b
imagePullPolicy: IfNotPresent
name: patch
securityContext:
allowPrivilegeEscalation: false
nodeSelector:
kubernetes.io/os: linux
restartPolicy: OnFailure
securityContext:
fsGroup: 2000
runAsNonRoot: true
runAsUser: 2000
serviceAccountName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/access: internal
name: ingress-admission-internal-create
namespace: ingress-nginx
spec:
template:
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/access: internal
name: ingress-admission-internal-create
spec:
containers:
- args:
- create
- --host=ingress-controller-internal-admission,ingress-controller-internal-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=ingress-admission-internal
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20230407@sha256:543c40fd093964bc9ab509d3e791f9989963021f1e9e4c9c7b6700b02bfb227b
imagePullPolicy: IfNotPresent
name: create
securityContext:
allowPrivilegeEscalation: false
nodeSelector:
kubernetes.io/os: linux
restartPolicy: OnFailure
securityContext:
fsGroup: 2000
runAsNonRoot: true
runAsUser: 2000
serviceAccountName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/access: internal
name: ingress-admission-internal-patch
namespace: ingress-nginx
spec:
template:
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/access: internal
name: ingress-admission-internal-patch
spec:
containers:
- args:
- patch
- --webhook-name=ingress-internal-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=ingress-admission-internal
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20230407@sha256:543c40fd093964bc9ab509d3e791f9989963021f1e9e4c9c7b6700b02bfb227b
imagePullPolicy: IfNotPresent
name: patch
securityContext:
allowPrivilegeEscalation: false
nodeSelector:
kubernetes.io/os: linux
restartPolicy: OnFailure
securityContext:
fsGroup: 2000
runAsNonRoot: true
runAsUser: 2000
serviceAccountName: ingress-nginx-admission
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/access: external
name: external
spec:
controller: k8s.io/ingress-external
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/access: internal
name: internal
spec:
controller: k8s.io/ingress-internal
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/access: external
name: ingress-external-admission
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: ingress-controller-external-admission
namespace: ingress-nginx
path: /networking/v1/ingresses
failurePolicy: Fail
matchPolicy: Equivalent
name: validate.nginx.ingress.kubernetes.io
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- ingresses
sideEffects: None
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/access: internal
name: ingress-internal-admission
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: ingress-controller-internal-admission
namespace: ingress-nginx
path: /networking/v1/ingresses
failurePolicy: Fail
matchPolicy: Equivalent
name: validate.nginx.ingress.kubernetes.io
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- ingresses
sideEffects: None
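With two controllers split by ingress class, a workload selects one via spec.ingressClassName. A minimal sketch of a consuming Ingress — the name, host, and backend service are hypothetical; the class names and issuer match the manifests above:

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: gallery                  # hypothetical
  namespace: default
  annotations:
    cert-manager.io/cluster-issuer: le-prod-balsillie-net
spec:
  ingressClassName: external     # or "internal" for LAN-only services
  rules:
    - host: gallery.balsillie.net    # hypothetical host
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: gallery
                port:
                  number: 80
  tls:
    - hosts:
        - gallery.balsillie.net
      secretName: gallery-tls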

View File

@ -0,0 +1,49 @@
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/access: external
name: ingress-controller-external-v4
namespace: ingress-nginx
spec:
clusterIP: None
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
selector:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/access: external
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/access: internal
name: ingress-controller-internal-v4
namespace: ingress-nginx
spec:
clusterIP: None
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
selector:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/access: internal
type: ClusterIP

Some files were not shown because too many files have changed in this diff.