Compare commits: 5b83607fe0...main

54 Commits:
230d830612
f843c7eaa3
076757e1f8
0b1c18a3a0
f47ad625da
2b56d30666
c12dfc18ce
814a642cc0
8cad395e34
eb360951a1
9601aa4937
81319370b1
76f6f78112
9b0edab903
c377f1a7d1
7f5a35d936
7b9f0e0ca5
a490e4ad92
6734d78bef
6722ab4138
e76d1a1f88
c090cc9cbe
8ab3783a2b
cdf20ba9ef
f0b3388e8d
27e2fc6058
b622bb29df
bde6a5f208
85d6fe5056
098f63fa5b
43fc89a966
7aa2992228
1775e24a45
d6983b4744
29cb12a2d1
9464737fe9
14fc10a10a
fe38bebbd5
bad78681c6
c8ab4633ca
627343b50f
2d31a5524f
2981bdb22f
84930795b6
f068c9710b
afc0b57cfb
7df41b5c8d
2cc78654fe
a6eb508cf0
85330c8645
c05f3a845b
3d9241b475
cb4abe5722
d0c1bb8717
11  .gitignore  (vendored)

@@ -2,6 +2,14 @@
**/.terraform/*
**/.terraform

.ansible/
.vscode/

ansible/collections/**

# registry password file
distribution/htpasswd

# .tfstate files
*.tfstate
*.tfstate.*

@@ -9,6 +17,9 @@
# Terraform lock file
**/.terraform.lock.hcl

# Terraform secrets file
**/secrets.auto.tfvars

# Crash log files
crash.log
crash.*.log
10  .sops.yaml  (new file)

@@ -0,0 +1,10 @@
creation_rules:
  - path_regex: (secret|secrets)\.(yml|yaml)$
    unencrypted_regex: ^(apiVersion|kind|name|namespace|type)$
    kms: 'arn:aws:kms:us-east-1:140023401248:key/c51c2cc5-4e8e-484d-b2f0-4d4ec2039938'
    # kms:
    #   - arn: 'arn:aws:kms:us-east-1:140023401248:key/c51c2cc5-4e8e-484d-b2f0-4d4ec2039938'
    #     aws_profile: home
    age: 'age1k5y5gj5fzpwtjgzqd4n93h4h9ek9jz8898rva5zsgj7zjet97ytq4dtzjs'
    hc_vault_transit_uri: 'https://vault.balsillie.net:443/v1/sops/keys/krds'
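A minimal sketch of a secrets file this creation rule would match, assuming Kubernetes-style secrets are kept alongside the code: keys matching unencrypted_regex stay in plaintext, while the remaining values are encrypted by sops.

```yaml
# Hypothetical example file: secrets.yaml (matches the path_regex above)
apiVersion: v1           # left unencrypted by unencrypted_regex
kind: Secret             # left unencrypted
type: Opaque             # left unencrypted
metadata:
  name: example-secret   # left unencrypted
  namespace: default     # left unencrypted
stringData:
  password: "changeme"   # this value is what sops encrypts
```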
2  .vscode/settings.json  (vendored)

@@ -23,5 +23,5 @@
        "checkForMinikubeUpgrade": true,
        "imageBuildTool": "Docker"
    },
    "ansible.python.interpreterPath": "/usr/bin/python3"
    "ansible.python.interpreterPath": "/usr/bin/python"
}
@@ -0,0 +1,23 @@
# code: language=ansible

aur_repo_packager_name: "Balsillie Family"
aur_repo_packager_email: "admin@balsillie.net"
aur_repo_dir: "/aur"

aur_repo_build_account: "aur-builder"

aur_repo_host_packages:
  - pikaur
  - jellyfin-media-player # If you get errors relating to icu, check 'icu' package version and perform a system update
  - git-credential-keepassxc
  - docker-credential-secretservice-bin
  - ventoy-bin
  - debtap
  - aurutils
  - ipmiview
  - powershell-bin
  - visual-studio-code-bin
  - ttf-ms-fonts
  - brave-bin
  - teamviewer
  - vmware-horizon-client
@@ -0,0 +1,17 @@
# code: language=ansible

# Connection (SSH)

ansible_connection: ansible.builtin.ssh
ansible_ssh_host: dev.balsillie.house
ansible_ssh_port: 22
ansible_ssh_host_key_checking: false
ansible_ssh_pipelining: false
ansible_ssh_user: ladmin
ansible_ssh_private_key_file: ~/.ssh/conf.d/home/dev.balsillie.house.key

# Become (sudo)

ansible_become_method: ansible.builtin.sudo
ansible_become_user: root
ansible_become_password: "{{ lookup('community.hashi_vault.vault_kv1_get', 'ansible/host_vars/dev.balsillie.house/ansible_connection').secret.ansible_become_password }}" # noqa yaml[line-length]
17
ansible/inventory/host_vars/dev.balsillie.house/certbot.yaml
Normal file
17
ansible/inventory/host_vars/dev.balsillie.house/certbot.yaml
Normal file
@ -0,0 +1,17 @@
|
||||
# code: language=ansible
|
||||
|
||||
certbot_rfc2136_server: '10.208.240.1'
|
||||
certbot_rfc2136_key_name: 'rndc-house'
|
||||
certbot_rfc2136_key_algorithm: 'hmac-sha256'
|
||||
|
||||
certbot_cloudflare_api_token: "{{ lookup('community.hashi_vault.vault_kv1_get', 'cloudflare/balsillie.house/dns').secret.api_token }}" # noqa yaml[line-length]
|
||||
|
||||
certbot_dns_propagation_seconds: 15
|
||||
|
||||
certbot_webserver_type: 'nginx' # 'nginx' or 'apache'
|
||||
certbot_dns_plugin: 'cloudflare'
|
||||
certbot_email: "certbot.dev@balsillie.email"
|
||||
certbot_acme_server: "acme-v02.api.letsencrypt.org"
|
||||
|
||||
certbot_domains:
|
||||
- repo.balsillie.house
|
@ -0,0 +1,9 @@
|
||||
# code: language=ansible
|
||||
|
||||
nginx_sites:
|
||||
- name: repo.balsillie.house
|
||||
type: site
|
||||
autoindex: 'on'
|
||||
root: /var/www/aur
|
||||
|
||||
nginx_user: "http"
|
@ -4,6 +4,6 @@ ansible_fqdn: hv00.balsillie.house
|
||||
ansible_remote_addr: 10.192.110.100
|
||||
ansible_port: 22
|
||||
ansible_user: ladmin
|
||||
ansible_become_user: root
|
||||
ansible_become_method: sudo
|
||||
static_fqdn: hv00.balsillie.house
|
||||
# ansible_become_user: root
|
||||
ansible_become_method: ansible.builtin.sudo
|
||||
static_fqdn: hv00.balsillie.house
|
||||
|
@ -0,0 +1,8 @@
|
||||
certbot_rfc2136_server: '10.208.240.1'
|
||||
certbot_rfc2136_key_name: 'rndc-house'
|
||||
certbot_rfc2136_key_algorithm: 'hmac-sha256'
|
||||
|
||||
certbot_webserver_type: 'nginx' # 'nginx' or 'apache'
|
||||
certbot_dns_plugin: 'rfc2136'
|
||||
certbot_email: "certbot.hv00@balsillie.email"
|
||||
certbot_acme_server: "acme-v02.api.letsencrypt.org"
|
17
ansible/inventory/host_vars/hv00.balsillie.house/nginx.yaml
Normal file
17
ansible/inventory/host_vars/hv00.balsillie.house/nginx.yaml
Normal file
@ -0,0 +1,17 @@
|
||||
nginx_sites:
|
||||
- name: repo.balsillie.house
|
||||
type: site
|
||||
autoindex: 'on'
|
||||
root: /var/www/aur
|
||||
- name: unifi.balsillie.house
|
||||
type: proxy
|
||||
upstream:
|
||||
host: 127.0.0.1
|
||||
port: 8989
|
||||
- name: hv00.balsillie.house
|
||||
type: proxy
|
||||
upstream:
|
||||
host: 127.0.0.1
|
||||
port: 9443
|
||||
|
||||
nginx_user: "http"
|
@ -1,2 +0,0 @@
|
||||
---
|
||||
ansible_host: hv00.balsillie.net
|
@ -0,0 +1,9 @@
|
||||
ansible_connection: ssh
|
||||
ansible_host: kodi00.balsillie.house
|
||||
ansible_fqdn: kodi00.balsillie.house
|
||||
ansible_remote_addr: 10.192.210.169
|
||||
ansible_port: 22
|
||||
ansible_user: ladmin
|
||||
ansible_become_user: root
|
||||
ansible_become_method: sudo
|
||||
static_fqdn: kodi00.balsillie.house
|
@ -0,0 +1,8 @@
|
||||
certbot_rfc2136_server: '10.208.240.1'
|
||||
certbot_rfc2136_key_name: 'rndc-house'
|
||||
certbot_rfc2136_key_algorithm: 'hmac-sha256'
|
||||
|
||||
certbot_webserver_type: 'nginx' # 'nginx' or 'apache'
|
||||
certbot_dns_plugin: 'rfc2136'
|
||||
certbot_email: "certbot.kodi00@balsillie.email"
|
||||
certbot_acme_server: "acme-v02.api.letsencrypt.org"
|
@ -0,0 +1,81 @@
|
||||
---
|
||||
|
||||
docker_users:
|
||||
- ladmin
|
||||
|
||||
docker_networks:
|
||||
- name: torrent
|
||||
driver: bridge
|
||||
driver_options:
|
||||
# com.docker.network.bridge.name: docker-torrent
|
||||
com.docker.network.bridge.enable_ip_masquerade: true
|
||||
com.docker.network.bridge.enable_icc: true
|
||||
# com.docker.network.container_iface_prefix: container-torrent
|
||||
attachable: true
|
||||
enable_ipv6: false
|
||||
internal: false
|
||||
ipam:
|
||||
- subnet: 192.168.99.0/24
|
||||
gateway: 192.168.99.254
|
||||
|
||||
docker_volumes:
|
||||
- name: torrent-data
|
||||
driver: local
|
||||
driver_options:
|
||||
type: none
|
||||
device: /downloads
|
||||
o: bind
|
||||
- name: torrent-config
|
||||
driver: local
|
||||
driver_options:
|
||||
type: none
|
||||
device: /etc/qbittorrent
|
||||
o: bind
|
||||
|
||||
docker_images:
|
||||
- name: hotio/qbittorrent
|
||||
tag: release
|
||||
|
||||
docker_containers:
|
||||
- name: qbittorrent
|
||||
image: hotio/qbittorrent:release
|
||||
auto_remove: false
|
||||
capabilities:
|
||||
- NET_ADMIN
|
||||
domainname: balsillie.house
|
||||
env:
|
||||
PUID: '968'
|
||||
PGID: '968'
|
||||
UMASK: '002'
|
||||
TZ: Pacific/Auckland
|
||||
WEBUI_PORTS: 8080/tcp
|
||||
VPN_ENABLED: 'true'
|
||||
VPN_CONF: 'wg0'
|
||||
VPN_PROVIDER: 'proton'
|
||||
VPN_LAN_NETWORK: ''
|
||||
VPN_LAN_LEAK_ENABLED: 'false'
|
||||
VPN_EXPOSE_PORTS_ON_LAN: ''
|
||||
VPN_AUTO_PORT_FORWARD: 'true'
|
||||
VPN_AUTO_PORT_FORWARD_TO_PORTS: ''
|
||||
VPN_KEEP_LOCAL_DNS: 'false'
|
||||
VPN_FIREWALL_TYPE: 'nftables'
|
||||
VPN_HEALTHCHECK_ENABLED: 'true'
|
||||
PRIVOXY_ENABLED: 'false'
|
||||
UNBOUND_ENABLED: 'false'
|
||||
etc_hosts:
|
||||
tv.balsillie.house: 192.168.99.254
|
||||
movies.balsillie.house: 192.168.99.254
|
||||
hostname: torrent
|
||||
networks:
|
||||
- name: torrent
|
||||
aliases:
|
||||
- torrent
|
||||
- qbittorrent
|
||||
ipv4_address: 192.168.99.1
|
||||
restart_policy: 'unless-stopped'
|
||||
sysctls:
|
||||
net.ipv4.conf.all.src_valid_mark: 1
|
||||
net.ipv6.conf.all.disable_ipv6: 1
|
||||
volumes:
|
||||
- torrent-config:/config:rw
|
||||
- torrent-data:/downloads:rw
|
@ -0,0 +1,43 @@
|
||||
nginx_sites:
|
||||
- name: tv.balsillie.house
|
||||
type: proxy
|
||||
upstream:
|
||||
host: 127.0.0.1
|
||||
port: 8989
|
||||
- name: movies.balsillie.house
|
||||
type: proxy
|
||||
upstream:
|
||||
host: 127.0.0.1
|
||||
port: 7878
|
||||
- name: music.balsillie.house
|
||||
type: proxy
|
||||
upstream:
|
||||
host: 127.0.0.1
|
||||
port: 8686
|
||||
- name: subs.balsillie.house
|
||||
type: proxy
|
||||
upstream:
|
||||
host: 127.0.0.1
|
||||
port: 6767
|
||||
- name: index.balsillie.house
|
||||
type: proxy
|
||||
upstream:
|
||||
host: 127.0.0.1
|
||||
port: 9696
|
||||
- name: torrent.balsillie.house
|
||||
type: proxy
|
||||
upstream:
|
||||
host: 192.168.99.1
|
||||
port: 8080
|
||||
- name: jellyfin.balsillie.house
|
||||
type: proxy
|
||||
upstream:
|
||||
host: 127.0.0.1
|
||||
port: 8096
|
||||
- name: kodi.balsillie.house
|
||||
type: proxy
|
||||
upstream:
|
||||
host: 127.0.0.1
|
||||
port: 8082
|
||||
|
||||
nginx_user: "http"
|
@@ -0,0 +1,3 @@
---

sonarr_var: "sonarr_value"
@@ -0,0 +1,4 @@
sshd:
  auth:
    password: 'no'
    pubkey: 'yes'
@@ -0,0 +1,7 @@
torrent_user: kodi
torrent_downloads_dir: /downloads

torrent_wireguard_address: 10.2.0.2
torrent_wireguard_dns: 10.2.0.1
torrent_wireguard_peer_endpoint: 103.75.11.18
torrent_wireguard_peer_public_key: 8Rm0uoG0H9BcSuA67/5gBv8tJgFZXNLm4sqEtkB9Nmw=
21
ansible/inventory/host_vars/kodi00.balsillie.house/ufw.yaml
Normal file
21
ansible/inventory/host_vars/kodi00.balsillie.house/ufw.yaml
Normal file
@ -0,0 +1,21 @@
|
||||
ufw_enabled: true
|
||||
|
||||
ufw_rules:
|
||||
- name: "SSH from Local Subnet"
|
||||
port: "22"
|
||||
protocol: "tcp"
|
||||
action: "allow"
|
||||
source: "10.192.210.0/24"
|
||||
destination: "10.192.210.169"
|
||||
- name: "HTTP from Local Subnet"
|
||||
port: "80"
|
||||
protocol: "tcp"
|
||||
action: "allow"
|
||||
source: "10.192.210.0/24"
|
||||
destination: "10.192.210.169"
|
||||
- name: "HTTPS from Local Subnet"
|
||||
port: "443"
|
||||
protocol: "tcp"
|
||||
action: "allow"
|
||||
source: "10.192.210.0/24"
|
||||
destination: "10.192.210.169"
|
1  ansible/inventory/host_vars/localhost/synapse.yaml  (new file)

@@ -0,0 +1 @@
synapse_host_address: matrix.balsillie.net
@ -0,0 +1,4 @@
|
||||
ansible_connection: local
|
||||
ansible_user: ladmin
|
||||
ansible_become_user: root
|
||||
ansible_become_method: sudo
|
11
ansible/inventory/host_vars/nuc.balsillie.house/certbot.yaml
Normal file
11
ansible/inventory/host_vars/nuc.balsillie.house/certbot.yaml
Normal file
@ -0,0 +1,11 @@
|
||||
certbot_rfc2136_server: '10.208.240.1'
|
||||
certbot_rfc2136_key_name: 'rndc-house'
|
||||
certbot_rfc2136_key_algorithm: 'hmac-sha256'
|
||||
|
||||
certbot_webserver_type: 'nginx' # 'nginx' or 'apache'
|
||||
certbot_dns_plugin: 'rfc2136'
|
||||
certbot_email: "certbot.kodi00@balsillie.email"
|
||||
certbot_acme_server: "acme-v02.api.letsencrypt.org"
|
||||
|
||||
certbot_domains:
|
||||
- xmr.balsillie.house
|
@ -1,5 +1,8 @@
|
||||
all:
|
||||
children:
|
||||
aur_repo_hosts:
|
||||
hosts:
|
||||
dev.balsillie.house:
|
||||
firewalls:
|
||||
children:
|
||||
opnsense:
|
||||
@ -23,6 +26,7 @@ all:
|
||||
kube00.balsillie.house:
|
||||
lat5420.balsillie.house:
|
||||
lat7490.balsillie.house:
|
||||
nuc.balsillie.house:
|
||||
servers:
|
||||
children:
|
||||
hypervisors:
|
||||
@ -46,16 +50,21 @@ all:
|
||||
hosts:
|
||||
hv00.balsillie.house:
|
||||
kube00.balsillie.house:
|
||||
nas:
|
||||
hosts:
|
||||
nas.balsillie.house:
|
||||
workstations:
|
||||
children:
|
||||
arch:
|
||||
hosts:
|
||||
lat5420.balsillie.house:
|
||||
sff.balsillie.house:
|
||||
kodi00.balsillie.house:
|
||||
nuc.balsillie.house:
|
||||
windows:
|
||||
hosts:
|
||||
lat7490.balsillie.house:
|
||||
win11.balsillie.house:
|
||||
win11.balsillie.house:
|
||||
laptops:
|
||||
hosts:
|
||||
lat5420.balsillie.house:
|
||||
@ -64,3 +73,8 @@ all:
|
||||
hosts:
|
||||
sff.balsillie.house:
|
||||
mp00.balsillie.house:
|
||||
kodi00.balsillie.house:
|
||||
nuc.balsillie.house:
|
||||
kodi:
|
||||
hosts:
|
||||
kodi00.balsillie.house:
|
||||
|
10
ansible/playbooks/home.yml
Normal file
10
ansible/playbooks/home.yml
Normal file
@ -0,0 +1,10 @@
|
||||
# code: language=ansible
|
||||
|
||||
- name: AUR Repo
|
||||
hosts: aur_repo_hosts
|
||||
become: true
|
||||
gather_facts: true
|
||||
roles:
|
||||
# - certbot
|
||||
- nginx
|
||||
# - aur_repo_host
|
10
ansible/playbooks/infra/hv00.yaml
Normal file
10
ansible/playbooks/infra/hv00.yaml
Normal file
@ -0,0 +1,10 @@
|
||||
---
|
||||
|
||||
- name: Setup core home router
|
||||
hosts:
|
||||
- hv00.balsillie.house
|
||||
gather_facts: true
|
||||
become: true
|
||||
roles:
|
||||
# - role: aur_repo_host
|
||||
- role: nginx
|
15
ansible/playbooks/infra/kodi.yaml
Normal file
15
ansible/playbooks/infra/kodi.yaml
Normal file
@ -0,0 +1,15 @@
|
||||
---
|
||||
|
||||
- name: Setup Kodi boxes
|
||||
hosts:
|
||||
- kodi00.balsillie.house
|
||||
gather_facts: true
|
||||
become: true
|
||||
roles:
|
||||
# - role: sshd
|
||||
# - role: ufw
|
||||
# - role: nginx
|
||||
# - role: aur_repo_client
|
||||
# - role: arr
|
||||
- role: torrent
|
||||
# - role: sonarr
|
9
ansible/playbooks/infra/nuc.yaml
Normal file
9
ansible/playbooks/infra/nuc.yaml
Normal file
@ -0,0 +1,9 @@
|
||||
---
|
||||
|
||||
- name: Setup NUC
|
||||
hosts:
|
||||
- nuc.balsillie.house
|
||||
gather_facts: true
|
||||
become: true
|
||||
roles:
|
||||
- role: certbot
|
@ -1,41 +0,0 @@
|
||||
---
|
||||
|
||||
# Arch install bare metal
|
||||
|
||||
# Systemd networking
|
||||
|
||||
# - name: Setup systemd-networkd
|
||||
# hosts: hv00.balsillie.house
|
||||
# become: true
|
||||
# roles:
|
||||
# - name: systemd_networkd
|
||||
# vars:
|
||||
# ansible_host: 192.168.1.106
|
||||
|
||||
# Serial console
|
||||
|
||||
# - name: Setup serial console
|
||||
# hosts: hv00.balsillie.house
|
||||
# become: true
|
||||
# roles:
|
||||
# - name: serial_console
|
||||
|
||||
# Hypervisor setup
|
||||
|
||||
# - name: Configure hypervisor
|
||||
# hosts: hv00.balsillie.house
|
||||
# gather_facts: true
|
||||
# become: true
|
||||
# roles:
|
||||
# - name: hypervisor
|
||||
|
||||
# SSHd setup
|
||||
|
||||
- name: Configure sshd
|
||||
hosts: hv00.balsillie.house
|
||||
gather_facts: true
|
||||
become: true
|
||||
roles:
|
||||
- name: sshd_setup
|
||||
|
||||
# VM setup
|
1  ansible/playbooks/roles  (symbolic link)

@@ -0,0 +1 @@
../roles
44
ansible/playbooks/synapse_delete_empty_rooms.yaml
Normal file
44
ansible/playbooks/synapse_delete_empty_rooms.yaml
Normal file
@ -0,0 +1,44 @@
|
||||
# code: language=ansible
|
||||
|
||||
- name: Clean Synapse
|
||||
hosts: localhost
|
||||
connection: local
|
||||
become: false
|
||||
gather_facts: false
|
||||
tasks:
|
||||
|
||||
- name: Get room list
|
||||
ansible.builtin.uri:
|
||||
url: "https://{{ synapse_host_address }}/_synapse/admin/v1/rooms?limit=1000"
|
||||
headers:
|
||||
Authorization: "Bearer {{ synapse_admin_token }}"
|
||||
register: room_list
|
||||
|
||||
- name: Set empty_rooms fact
|
||||
ansible.builtin.set_fact:
|
||||
empty_rooms: "{{ room_list.json.rooms | selectattr('joined_local_members', '==', 0) | list }}"
|
||||
|
||||
- name: Debug empty room count
|
||||
ansible.builtin.debug:
|
||||
msg: "Total empty rooms to delete: {{ empty_rooms | length }}"
|
||||
|
||||
- name: Delete empty rooms
|
||||
when: empty_rooms | length > 0
|
||||
ansible.builtin.uri:
|
||||
url: "https://{{ synapse_host_address }}/_synapse/admin/v2/rooms/{{ room.room_id }}"
|
||||
method: DELETE
|
||||
headers:
|
||||
Authorization: "Bearer {{ synapse_admin_token }}"
|
||||
body_format: json
|
||||
body: {}
|
||||
loop: "{{ empty_rooms }}"
|
||||
loop_control:
|
||||
loop_var: room
|
||||
label: "{{ room.room_id }}"
|
||||
register: purge_ids
|
||||
|
||||
- name: Write purge_ids to file
|
||||
ansible.builtin.copy:
|
||||
dest: "{{ playbook_dir }}/purge_ids_{{ now(utc=false, fmt='%Y-%m-%d_%H-%M-%S') }}.json"
|
||||
content: "{{ purge_ids.results | map(attribute='json.delete_id') | list | to_nice_json }}"
|
||||
mode: "0664"
|
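The Synapse maintenance playbooks in this change all run against localhost and rely on two variables: synapse_host_address, defined in ansible/inventory/host_vars/localhost/synapse.yaml, and synapse_admin_token, which is not defined anywhere in this diff and presumably comes from an encrypted source. A minimal sketch of the variables they consume, with a purely hypothetical placeholder token:

```yaml
# Hypothetical vars sketch for the synapse_* playbooks; the token value below
# is a placeholder and would normally be supplied from a secret store.
synapse_host_address: matrix.balsillie.net    # as set in host_vars/localhost/synapse.yaml
synapse_admin_token: "syt_xxxxxxxxxxxxxxxx"   # placeholder for a real Synapse admin access token
```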
28
ansible/playbooks/synapse_delete_room.yaml
Normal file
28
ansible/playbooks/synapse_delete_room.yaml
Normal file
@ -0,0 +1,28 @@
|
||||
# code: language=ansible
|
||||
|
||||
- name: Clean Synapse
|
||||
hosts: localhost
|
||||
connection: local
|
||||
become: false
|
||||
gather_facts: false
|
||||
vars_prompt:
|
||||
- name: room_id
|
||||
prompt: "Enter the room ID to delete"
|
||||
private: false
|
||||
tasks:
|
||||
|
||||
- name: Delete room
|
||||
ansible.builtin.uri:
|
||||
url: "https://{{ synapse_host_address }}/_synapse/admin/v2/rooms/{{ room_id }}"
|
||||
method: DELETE
|
||||
headers:
|
||||
Authorization: "Bearer {{ synapse_admin_token }}"
|
||||
body_format: json
|
||||
body: {}
|
||||
register: purge_id
|
||||
|
||||
- name: Wait for purge to complete
|
||||
ansible.builtin.uri:
|
||||
url: "https://{{ synapse_host_address }}/_synapse/admin/v2/rooms/delete_status/{{ item }}"
|
||||
headers:
|
||||
Authorization: "Bearer {{ synapse_admin_token }}"
|
19
ansible/playbooks/synapse_get_all_rooms.yaml
Normal file
19
ansible/playbooks/synapse_get_all_rooms.yaml
Normal file
@ -0,0 +1,19 @@
|
||||
# code: language=ansible
|
||||
|
||||
- name: Clean Synapse
|
||||
hosts: localhost
|
||||
connection: local
|
||||
become: false
|
||||
gather_facts: false
|
||||
tasks:
|
||||
|
||||
- name: Get room details
|
||||
ansible.builtin.uri:
|
||||
url: "https://{{ synapse_host_address }}/_synapse/admin/v1/rooms?limit=1000"
|
||||
headers:
|
||||
Authorization: "Bearer {{ synapse_admin_token }}"
|
||||
register: result
|
||||
|
||||
- name: Print result
|
||||
ansible.builtin.debug:
|
||||
var: result.json.rooms | map(attribute='room_id') | list
|
19
ansible/playbooks/synapse_get_large_rooms.yaml
Normal file
19
ansible/playbooks/synapse_get_large_rooms.yaml
Normal file
@ -0,0 +1,19 @@
|
||||
# code: language=ansible
|
||||
|
||||
- name: Clean Synapse
|
||||
hosts: localhost
|
||||
connection: local
|
||||
become: false
|
||||
gather_facts: false
|
||||
tasks:
|
||||
|
||||
- name: Get large rooms
|
||||
ansible.builtin.uri:
|
||||
url: "https://{{ synapse_host_address }}/_synapse/admin/v1/statistics/database/rooms"
|
||||
headers:
|
||||
Authorization: "Bearer {{ synapse_admin_token }}"
|
||||
register: result
|
||||
|
||||
- name: Print result
|
||||
ansible.builtin.debug:
|
||||
var: result.json
|
44
ansible/playbooks/synapse_get_purge_status.yaml
Normal file
44
ansible/playbooks/synapse_get_purge_status.yaml
Normal file
@ -0,0 +1,44 @@
|
||||
# code: language=ansible
|
||||
|
||||
- name: Clean Synapse
|
||||
hosts: localhost
|
||||
connection: local
|
||||
become: false
|
||||
gather_facts: false
|
||||
vars_prompt:
|
||||
- name: "purge_ids_file"
|
||||
prompt: "Enter the file name containing the purge ids"
|
||||
private: false
|
||||
tasks:
|
||||
|
||||
- name: Load purge ids
|
||||
ansible.builtin.slurp:
|
||||
src: "{{ playbook_dir }}/{{ purge_ids_file }}"
|
||||
register: purge_ids
|
||||
|
||||
- name: Set purge_ids_list fact
|
||||
ansible.builtin.set_fact:
|
||||
purge_ids_list: "{{ purge_ids.content | b64decode | from_json }}"
|
||||
|
||||
- name: Get purge status
|
||||
ansible.builtin.uri:
|
||||
url: "https://{{ synapse_host_address }}/_synapse/admin/v2/rooms/delete_status/{{ item }}"
|
||||
headers:
|
||||
Authorization: "Bearer {{ synapse_admin_token }}"
|
||||
loop: "{{ purge_ids_list }}"
|
||||
register: purge_status
|
||||
|
||||
- name: Set purge_status_totals
|
||||
ansible.builtin.set_fact:
|
||||
purge_status_shutting_down: "{{ purge_status.results | selectattr('json.status', '==', 'shutting_down') | list | length }}"
|
||||
purge_status_purging: "{{ purge_status.results | selectattr('json.status', '==', 'purging') | list | length }}"
|
||||
purge_status_complete: "{{ purge_status.results | selectattr('json.status', '==', 'complete') | list | length }}"
|
||||
purge_status_failed: "{{ purge_status.results | selectattr('json.status', '==', 'failed') | list | length }}"
|
||||
|
||||
- name: Print status
|
||||
ansible.builtin.debug:
|
||||
msg: |
|
||||
Shutting down: {{ purge_status_shutting_down }}
|
||||
Purging: {{ purge_status_purging }}
|
||||
Complete: {{ purge_status_complete }}
|
||||
Failed: {{ purge_status_failed }}
|
23
ansible/playbooks/synapse_get_room_details.yaml
Normal file
23
ansible/playbooks/synapse_get_room_details.yaml
Normal file
@ -0,0 +1,23 @@
|
||||
# code: language=ansible
|
||||
|
||||
- name: Clean Synapse
|
||||
hosts: localhost
|
||||
connection: local
|
||||
become: false
|
||||
gather_facts: false
|
||||
vars_prompt:
|
||||
- name: room_id
|
||||
prompt: "Enter the room ID to fetch"
|
||||
private: false
|
||||
tasks:
|
||||
|
||||
- name: Get room details
|
||||
ansible.builtin.uri:
|
||||
url: "https://{{ synapse_host_address }}/_synapse/admin/v1/rooms/{{ room_id }}"
|
||||
headers:
|
||||
Authorization: "Bearer {{ synapse_admin_token }}"
|
||||
register: result
|
||||
|
||||
- name: Print result
|
||||
ansible.builtin.debug:
|
||||
var: result.json
|
23
ansible/playbooks/synapse_get_room_members.yaml
Normal file
23
ansible/playbooks/synapse_get_room_members.yaml
Normal file
@ -0,0 +1,23 @@
|
||||
# code: language=ansible
|
||||
|
||||
- name: Room members
|
||||
hosts: localhost
|
||||
connection: local
|
||||
become: false
|
||||
gather_facts: false
|
||||
vars_prompt:
|
||||
- name: room_id
|
||||
prompt: "Enter the room ID to fetch"
|
||||
private: false
|
||||
tasks:
|
||||
|
||||
- name: Get room details
|
||||
ansible.builtin.uri:
|
||||
url: "https://{{ synapse_host_address }}/_synapse/admin/v1/rooms/{{ room_id }}/members"
|
||||
headers:
|
||||
Authorization: "Bearer {{ synapse_admin_token }}"
|
||||
register: result
|
||||
|
||||
- name: Print result
|
||||
ansible.builtin.debug:
|
||||
var: result.json
|
17
ansible/playbooks/truenas.yml
Normal file
17
ansible/playbooks/truenas.yml
Normal file
@ -0,0 +1,17 @@
|
||||
---
|
||||
|
||||
- name: Configure Truenas
|
||||
hosts: truenas
|
||||
become: false
|
||||
tasks:
|
||||
- name: Install required packages
|
||||
package:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
with_items:
|
||||
- py37-ansible
|
||||
- py37-pip
|
||||
- py37-netifaces
|
||||
- py37-netaddr
|
||||
- py37-requests
|
||||
- py37-yaml
|
24
ansible/roles/arr/tasks/main.yaml
Normal file
24
ansible/roles/arr/tasks/main.yaml
Normal file
@ -0,0 +1,24 @@
|
||||
---
|
||||
|
||||
- name: Install arr packages
|
||||
when: ansible_facts['os_family'] == "Archlinux"
|
||||
community.general.pacman:
|
||||
name: "{{ arr_packages }}"
|
||||
state: present
|
||||
update_cache: true
|
||||
|
||||
- name: Reload systemd
|
||||
ansible.builtin.systemd:
|
||||
daemon_reload: true
|
||||
|
||||
- name: Start arr services
|
||||
ansible.builtin.systemd:
|
||||
name: "{{ item }}"
|
||||
state: started
|
||||
enabled: true
|
||||
loop:
|
||||
- sonarr.service
|
||||
- radarr.service
|
||||
- lidarr.service
|
||||
- prowlarr.service
|
||||
- bazarr.service
|
6  ansible/roles/arr/vars/main.yaml  (new file)

@@ -0,0 +1,6 @@
arr_packages:
  - sonarr
  - radarr
  - lidarr
  - bazarr
  - prowlarr
50
ansible/roles/aur_repo_client/tasks/main.yaml
Normal file
50
ansible/roles/aur_repo_client/tasks/main.yaml
Normal file
@ -0,0 +1,50 @@
|
||||
---
|
||||
|
||||
- name: Check if repo public key is in pacman keyring
|
||||
ansible.builtin.command:
|
||||
argv:
|
||||
- pacman-key
|
||||
- --list-keys
|
||||
- "{{ aur_repo_client_public_key_fingerprint }}"
|
||||
register: repo_key_check
|
||||
failed_when: repo_key_check.rc not in [0, 1]
|
||||
changed_when: false
|
||||
|
||||
- name: Add repo public key to pacman keyring
|
||||
when: repo_key_check.rc == 1
|
||||
block:
|
||||
|
||||
- name: Import the repo public key
|
||||
ansible.builtin.command:
|
||||
argv:
|
||||
- pacman-key
|
||||
- --recv-keys
|
||||
- "{{ aur_repo_client_public_key_fingerprint }}"
|
||||
- --keyserver
|
||||
- "{{ aur_repo_client_keyserver }}"
|
||||
changed_when: true
|
||||
|
||||
- name: Trust the repo public key
|
||||
ansible.builtin.command:
|
||||
argv:
|
||||
- pacman-key
|
||||
- --lsign-key
|
||||
- "{{ aur_repo_client_public_key_fingerprint }}"
|
||||
changed_when: true
|
||||
|
||||
- name: Add home repo block to pacman.conf
|
||||
ansible.builtin.blockinfile:
|
||||
path: /etc/pacman.conf
|
||||
block: |
|
||||
[{{ aur_repo_client_repo_name }}]
|
||||
SigLevel = Required TrustedOnly
|
||||
Server = {{ aur_repo_client_repo_address }}
|
||||
create: false
|
||||
state: present
|
||||
insertafter: EOF
|
||||
register: add_pacman_repo
|
||||
|
||||
- name: Update pacman database # noqa: no-handler
|
||||
when: add_pacman_repo.changed
|
||||
community.general.pacman:
|
||||
update_cache: true
|
6  ansible/roles/aur_repo_client/vars/main.yaml  (new file)

@@ -0,0 +1,6 @@
---

aur_repo_client_repo_name: "home"
aur_repo_client_repo_address: "https://repo.balsillie.house"
aur_repo_client_public_key_fingerprint: DB529158B99DD8311D78CA2FBE6003C744F56EE2
aur_repo_client_keyserver: hkps://keyserver.ubuntu.com
12
ansible/roles/aur_repo_host/files/aur-sync.service
Normal file
12
ansible/roles/aur_repo_host/files/aur-sync.service
Normal file
@ -0,0 +1,12 @@
|
||||
[Unit]
|
||||
Description=Sync AUR packages
|
||||
Wants=aur-sync.timer
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart=/usr/bin/aur sync --no-view --upgrades --no-confirm --clean --rm-deps --sign --database home
|
||||
User=aur-builder
|
||||
Group=aur-builder
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
12
ansible/roles/aur_repo_host/files/aur-sync.timer
Normal file
12
ansible/roles/aur_repo_host/files/aur-sync.timer
Normal file
@ -0,0 +1,12 @@
|
||||
[Unit]
|
||||
Description=Timer that runs aur sync service
|
||||
Requires=aur-sync.service
|
||||
|
||||
[Timer]
|
||||
Unit=aur-sync.service
|
||||
OnCalendar=*-*-* 16:00:00
|
||||
RandomizedDelaySec=120
|
||||
Persistent=true
|
||||
|
||||
[Install]
|
||||
WantedBy=timers.target
|
270
ansible/roles/aur_repo_host/tasks/main.yaml
Normal file
270
ansible/roles/aur_repo_host/tasks/main.yaml
Normal file
@ -0,0 +1,270 @@
|
||||
---
|
||||
|
||||
- name: Lookup aur_repo_host secret
|
||||
delegate_to: localhost
|
||||
become: false
|
||||
community.hashi_vault.vault_kv1_get:
|
||||
path: ansible/group_vars/aur_repo_hosts
|
||||
register: aur_repo_host_secret
|
||||
|
||||
- name: Set aur_repo facts
|
||||
ansible.builtin.set_fact:
|
||||
aur_repo_private_key: "{{ aur_repo_host_secret.secret.aur_repo_private_key }}"
|
||||
aur_repo_key_thumbprint: "{{ aur_repo_host_secret.secret.aur_repo_key_thumbprint }}"
|
||||
|
||||
- name: Create the makepkg drop-in config file
|
||||
ansible.builtin.template:
|
||||
dest: /etc/makepkg.conf.d/makepkg.conf
|
||||
src: makepkg.conf.j2
|
||||
owner: root
|
||||
group: root
|
||||
mode: "0644"
|
||||
|
||||
- name: Create the build user group
|
||||
ansible.builtin.group:
|
||||
name: "{{ aur_repo_build_account }}"
|
||||
system: true
|
||||
state: present
|
||||
|
||||
- name: Create the build user
|
||||
ansible.builtin.user:
|
||||
name: "{{ aur_repo_build_account }}"
|
||||
password: '!'
|
||||
group: "{{ aur_repo_build_account }}"
|
||||
comment: "AUR Package Builder"
|
||||
shell: /sbin/nologin
|
||||
home: "{{ aur_repo_dir }}"
|
||||
createhome: true
|
||||
system: true
|
||||
state: present
|
||||
|
||||
- name: Create the build user sudoer file
|
||||
ansible.builtin.template:
|
||||
dest: /etc/sudoers.d/{{ aur_repo_build_account }}
|
||||
src: aur-sudoer.j2
|
||||
owner: root
|
||||
group: root
|
||||
mode: "0640"
|
||||
|
||||
- name: Create the build dirs
|
||||
ansible.builtin.file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
owner: "{{ aur_repo_build_account }}"
|
||||
group: "{{ aur_repo_build_account }}"
|
||||
mode: "0775"
|
||||
loop:
|
||||
- "{{ aur_repo_dir }}"
|
||||
- "{{ aur_repo_dir }}/packages"
|
||||
- "{{ aur_repo_dir }}/sources"
|
||||
- "{{ aur_repo_dir }}/srcpackages"
|
||||
- /var/log/makepkg
|
||||
- /tmp/build
|
||||
|
||||
- name: Check if the signing key is in build user's keyring
|
||||
ansible.builtin.command:
|
||||
cmd: gpg2 --list-secret-key --with-colons {{ aur_repo_key_thumbprint }}
|
||||
failed_when: key_result.rc not in [0, 2]
|
||||
changed_when: false
|
||||
register: key_result
|
||||
vars:
|
||||
ansible_become_user: "{{ aur_repo_build_account }}"
|
||||
|
||||
- name: GPG key import block
|
||||
when: key_result.rc == 2
|
||||
block:
|
||||
|
||||
- name: Template out the signing private key
|
||||
ansible.builtin.template:
|
||||
dest: "/tmp/build/signing_key.asc"
|
||||
src: signing_key.asc.j2
|
||||
owner: "{{ aur_repo_build_account }}"
|
||||
group: "{{ aur_repo_build_account }}"
|
||||
mode: "0600"
|
||||
|
||||
- name: Import the signing key
|
||||
ansible.builtin.command:
|
||||
cmd: gpg2 --import /tmp/build/signing_key.asc
|
||||
changed_when: true
|
||||
vars:
|
||||
ansible_become_user: "{{ aur_repo_build_account }}"
|
||||
|
||||
- name: Delete the signing key
|
||||
ansible.builtin.file:
|
||||
path: "/tmp/build/signing_key.asc"
|
||||
state: absent
|
||||
|
||||
- name: Check if aurutils is already installed
|
||||
ansible.builtin.stat:
|
||||
follow: true
|
||||
path: /usr/bin/aur
|
||||
register: aurutils_stat
|
||||
|
||||
- name: Aurutils install block
|
||||
when: not aurutils_stat.stat.exists
|
||||
block:
|
||||
|
||||
- name: Install makepkg dependencies
|
||||
community.general.pacman:
|
||||
name:
|
||||
- git
|
||||
- base-devel
|
||||
state: present
|
||||
update_cache: true
|
||||
|
||||
- name: Clone aurutils
|
||||
ansible.builtin.git:
|
||||
depth: 1
|
||||
dest: /tmp/aurutils
|
||||
repo: https://aur.archlinux.org/aurutils.git
|
||||
single_branch: true
|
||||
version: master
|
||||
|
||||
- name: Slurp PKGBUILD contents
|
||||
ansible.builtin.slurp:
|
||||
path: /tmp/aurutils/PKGBUILD
|
||||
register: aurutils_pkgbuild
|
||||
|
||||
- name: Parse PKGBUILD into facts
|
||||
ansible.builtin.set_fact:
|
||||
aurutils_dependencies: "{{ aurutils_pkgbuild['content'] | b64decode | regex_search('(?<=^depends=\\().*(?=\\)$)', multiline=True) | replace(\"'\", '') | split(' ') }}" # noqa: yaml[line-length]
|
||||
aurutils_pkgver: "{{ aurutils_pkgbuild['content'] | b64decode | regex_search('(?<=^pkgver=).*(?=$)', multiline=True) }}"
|
||||
aurutils_pkgrel: "{{ aurutils_pkgbuild['content'] | b64decode | regex_search('(?<=^pkgrel=).*(?=$)', multiline=True) }}"
|
||||
aurutils_arch: "{{ aurutils_pkgbuild['content'] | b64decode | regex_search('(?<=^arch=\\().*(?=\\)$)', multiline=True) | replace(\"'\", '') }}"
|
||||
|
||||
- name: Install aurutils dependencies
|
||||
community.general.pacman:
|
||||
name: "{{ aurutils_dependencies }}"
|
||||
state: present
|
||||
reason: dependency
|
||||
update_cache: false
|
||||
|
||||
- name: Build aurutils
|
||||
ansible.builtin.command:
|
||||
cmd: makepkg
|
||||
chdir: /tmp/aurutils
|
||||
creates: "{{ aur_repo_dir }}/packages/aurutils-{{ aurutils_pkgver }}-{{ aurutils_pkgrel }}-{{ aurutils_arch }}.pkg.tar"
|
||||
vars:
|
||||
ansible_become_user: "{{ aur_repo_build_account }}"
|
||||
|
||||
- name: Update repo database
|
||||
ansible.builtin.command:
|
||||
argv:
|
||||
- repo-add
|
||||
- --prevent-downgrade
|
||||
- --remove
|
||||
- --sign
|
||||
- --key
|
||||
- "{{ aur_repo_key_thumbprint }}"
|
||||
- home.db.tar
|
||||
- aurutils-{{ aurutils_pkgver }}-{{ aurutils_pkgrel }}-{{ aurutils_arch }}.pkg.tar
|
||||
chdir: "{{ aur_repo_dir }}/packages"
|
||||
changed_when: true
|
||||
vars:
|
||||
ansible_become_user: "{{ aur_repo_build_account }}"
|
||||
|
||||
- name: Check if the signing key is in pacman keyring
|
||||
ansible.builtin.command:
|
||||
argv:
|
||||
- pacman-key
|
||||
- -l
|
||||
- "{{ aur_repo_key_thumbprint }}"
|
||||
failed_when: pacman_key_result.rc not in [0, 1]
|
||||
changed_when: false
|
||||
register: pacman_key_result
|
||||
|
||||
- name: Pacman key import block
|
||||
when: pacman_key_result.rc == 1
|
||||
block:
|
||||
|
||||
- name: Import the signing public key to arch keyring
|
||||
ansible.builtin.command:
|
||||
argv:
|
||||
- pacman-key
|
||||
- -r
|
||||
- "{{ aur_repo_key_thumbprint }}"
|
||||
- --keyserver
|
||||
- hkps://keyserver.ubuntu.com
|
||||
changed_when: true
|
||||
|
||||
- name: Locally sign the imported pacman key
|
||||
ansible.builtin.command:
|
||||
argv:
|
||||
- pacman-key
|
||||
- --lsign-key
|
||||
- "{{ aur_repo_key_thumbprint }}"
|
||||
changed_when: true
|
||||
|
||||
- name: Add custom repo block to pacman.conf
|
||||
ansible.builtin.blockinfile:
|
||||
path: /etc/pacman.conf
|
||||
block: |
|
||||
[home]
|
||||
SigLevel = Required TrustedOnly
|
||||
Server = file://{{ aur_repo_dir }}/packages
|
||||
create: false
|
||||
state: present
|
||||
insertafter: EOF
|
||||
|
||||
- name: Install aurutils
|
||||
community.general.pacman:
|
||||
name: aurutils
|
||||
state: present
|
||||
update_cache: true
|
||||
|
||||
# - name: Enable the multilib repository
|
||||
# ansible.builtin.replace:
|
||||
# path: /etc/pacman.conf
|
||||
# backup: true
|
||||
# regexp: '^[#]?\[multilib\]\n[#]?Include = \/etc\/pacman.d\/mirrorlist$'
|
||||
# replace: '[multilib]\nInclude = /etc/pacman.d/mirrorlist'
|
||||
# register: multilib_enable
|
||||
|
||||
# - name: Update the package database if multilib was enabled # noqa: no-handler
|
||||
# when: multilib_enable.changed | default(false)
|
||||
# community.general.pacman:
|
||||
# update_cache: true
|
||||
|
||||
- name: Sync AUR packages
|
||||
ansible.builtin.command:
|
||||
cmd: aur sync --no-view -CnrS {{ item }}
|
||||
loop: "{{ aur_repo_host_packages }}"
|
||||
register: aur_sync_result
|
||||
changed_when: (aur_sync_result.stderr_lines | last | replace(':','')) != "sync there is nothing to do"
|
||||
failed_when: aur_sync_result.rc != 0
|
||||
vars:
|
||||
ansible_become_user: "{{ aur_repo_build_account }}"
|
||||
|
||||
- name: Add the root www folder if it doesn't exist
|
||||
ansible.builtin.file:
|
||||
path: /var/www
|
||||
state: directory
|
||||
owner: http
|
||||
group: http
|
||||
mode: "0775"
|
||||
|
||||
- name: Link the aur repo to the web root
|
||||
ansible.builtin.file:
|
||||
src: "{{ aur_repo_dir }}/packages"
|
||||
path: /var/www{{ aur_repo_dir }}
|
||||
state: link
|
||||
|
||||
- name: Add the aur-sync systemd unit files
|
||||
ansible.builtin.copy:
|
||||
src: "{{ item }}"
|
||||
dest: /usr/lib/systemd/system/
|
||||
owner: root
|
||||
group: root
|
||||
mode: "0644"
|
||||
loop:
|
||||
- aur-sync.service
|
||||
- aur-sync.timer
|
||||
register: aur_sync_unit_files
|
||||
|
||||
- name: Enable and start the aur-sync systemd timer # noqa: no-handler
|
||||
when: aur_sync_unit_files.changed
|
||||
ansible.builtin.systemd:
|
||||
name: aur-sync.timer
|
||||
enabled: true
|
||||
state: started
|
||||
daemon_reload: true
|
1  ansible/roles/aur_repo_host/templates/aur-sudoer.j2  (new file)

@@ -0,0 +1 @@
{{ aur_repo_build_account }} ALL = (root) NOPASSWD: /usr/bin/pacman, /usr/bin/pacsync
21
ansible/roles/aur_repo_host/templates/makepkg.conf.j2
Normal file
21
ansible/roles/aur_repo_host/templates/makepkg.conf.j2
Normal file
@ -0,0 +1,21 @@
|
||||
|
||||
# Global Options
|
||||
|
||||
OPTIONS=(strip docs !libtool !staticlibs emptydirs zipman purge debug lto autodeps)
|
||||
MAKEFLAGS="-j{{ (ansible_processor_nproc - 1) }}"
|
||||
PACKAGER="{{ aur_repo_packager_name }} <{{ aur_repo_packager_email }}>"
|
||||
|
||||
# Build Environment
|
||||
|
||||
BUILDDIR=/tmp/build
|
||||
BUILDENV=(!distcc color !ccache check sign)
|
||||
GPGKEY={{ aur_repo_key_thumbprint }}
|
||||
|
||||
# Outputs
|
||||
|
||||
PKGDEST={{ aur_repo_dir }}/packages
|
||||
SRCDEST={{ aur_repo_dir }}/sources
|
||||
SRCPKGDEST={{ aur_repo_dir }}/srcpackages
|
||||
LOGDEST=/var/log/makepkg
|
||||
PKGEXT=".pkg.tar"
|
||||
SRCEXT=".src.tar"
|
1  ansible/roles/aur_repo_host/templates/signing_key.asc.j2  (new file)

@@ -0,0 +1 @@
{{ aur_repo_private_key }}
0  ansible/roles/aur_repo_host/vars/main.yml  (new empty file)
6  ansible/roles/certbot/handlers/main.yaml  (new file)

@@ -0,0 +1,6 @@
---

- name: Restart nginx
  ansible.builtin.service:
    name: nginx.service
    state: restarted
67
ansible/roles/certbot/tasks/main.yaml
Normal file
67
ansible/roles/certbot/tasks/main.yaml
Normal file
@ -0,0 +1,67 @@
|
||||
- name: Install certbot package (Archlinux)
|
||||
when: ansible_facts['os_family'] == "Archlinux"
|
||||
community.general.pacman:
|
||||
name:
|
||||
- certbot
|
||||
- certbot-dns-{{ certbot_dns_plugin }}
|
||||
state: present
|
||||
update_cache: true
|
||||
|
||||
- name: Install certbot webserver plugin (Archlinux)
|
||||
when:
|
||||
- ansible_facts['os_family'] == "Archlinux"
|
||||
- certbot_webserver_type == 'nginx'
|
||||
community.general.pacman:
|
||||
name:
|
||||
- certbot-nginx
|
||||
state: present
|
||||
update_cache: true
|
||||
|
||||
- name: Template out the rfc2136 credentials file
|
||||
when: certbot_dns_plugin == 'rfc2136'
|
||||
ansible.builtin.template:
|
||||
src: "{{ certbot_dns_plugin }}.conf.j2"
|
||||
dest: "/etc/letsencrypt/{{ certbot_dns_plugin }}.conf"
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0600'
|
||||
|
||||
- name: Template out cloudflare credentials file
|
||||
when: certbot_dns_plugin == 'cloudflare'
|
||||
ansible.builtin.template:
|
||||
src: "{{ certbot_dns_plugin }}.conf.j2"
|
||||
dest: "/etc/letsencrypt/{{ certbot_dns_plugin }}.conf"
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0600'
|
||||
|
||||
- name: Template out the certbot default config
|
||||
ansible.builtin.template:
|
||||
src: cli.ini.j2
|
||||
dest: /etc/letsencrypt/cli.ini
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0644'
|
||||
|
||||
- name: Request and install certificates
|
||||
ansible.builtin.command:
|
||||
argv:
|
||||
- certbot
|
||||
- certonly
|
||||
- -n
|
||||
- --dns-{{ certbot_dns_plugin }}
|
||||
- --dns-{{ certbot_dns_plugin }}-credentials
|
||||
- /etc/letsencrypt/{{ certbot_dns_plugin }}.conf
|
||||
- --dns-{{ certbot_dns_plugin }}-propagation-seconds
|
||||
- "{{ certbot_dns_propagation_seconds | default(10) }}"
|
||||
- -d
|
||||
- "{{ item }}"
|
||||
creates: /etc/letsencrypt/live/{{ item }}/fullchain.pem
|
||||
loop: "{{ certbot_domains }}"
|
||||
notify: "{{ certbot_notify | default(omit) }}"
|
||||
|
||||
- name: Enable certbot renewal
|
||||
ansible.builtin.service:
|
||||
name: certbot-renew.timer
|
||||
state: started
|
||||
enabled: true
|
3  ansible/roles/certbot/templates/cli.ini.j2  (new file)

@@ -0,0 +1,3 @@
rsa-key-size = 4096
email = {{ certbot_email }}
agree-tos = true
1  ansible/roles/certbot/templates/cloudflare.conf.j2  (new file)

@@ -0,0 +1 @@
dns_cloudflare_api_token = {{ certbot_cloudflare_api_token }}
6  ansible/roles/certbot/templates/rfc2136.conf.j2  (new file)

@@ -0,0 +1,6 @@
dns_rfc2136_server = {{ certbot_rfc2136_server }}
dns_rfc2136_port = {{ certbot_rfc2136_port | default(53) }}
dns_rfc2136_name = {{ certbot_rfc2136_key_name }}
dns_rfc2136_secret = {{ certbot_rfc2136_key_secret }}
dns_rfc2136_algorithm = {{ certbot_rfc2136_key_algorithm | upper }}
dns_rfc2136_sign_query = true
82
ansible/roles/docker/tasks/main.yaml
Normal file
82
ansible/roles/docker/tasks/main.yaml
Normal file
@ -0,0 +1,82 @@
|
||||
---
|
||||
|
||||
- name: Install Docker on Archlinux
|
||||
when: ansible_facts['os_family'] == "Archlinux"
|
||||
community.general.pacman:
|
||||
name: docker
|
||||
state: present
|
||||
update_cache: true
|
||||
|
||||
- name: Add users to docker group
|
||||
ansible.builtin.user:
|
||||
name: "{{ item }}"
|
||||
groups: docker
|
||||
append: true
|
||||
loop: "{{ docker_users }}"
|
||||
|
||||
- name: Start and enable Docker
|
||||
ansible.builtin.systemd:
|
||||
name: docker
|
||||
state: started
|
||||
enabled: true
|
||||
|
||||
- name: Create Docker networks
|
||||
when:
|
||||
- docker_networks is defined
|
||||
- docker_networks | length > 0
|
||||
community.docker.docker_network:
|
||||
attachable: "{{ item.attachable | default(true) }}"
|
||||
driver: "{{ item.driver | default('bridge') }}"
|
||||
driver_options: "{{ item.driver_options | default(omit) }}"
|
||||
enable_ipv6: "{{ item.enable_ipv6 | default(false) }}"
|
||||
internal: "{{ item.internal | default(false) }}"
|
||||
ipam_config: "{{ item.ipam | default(omit) }}"
|
||||
name: "{{ item.name }}"
|
||||
state: "present"
|
||||
loop: "{{ docker_networks }}"
|
||||
|
||||
- name: Create Docker volumes
|
||||
when:
|
||||
- docker_volumes is defined
|
||||
- docker_volumes | length > 0
|
||||
community.general.docker_volume:
|
||||
driver: "{{ item.driver | default('local') }}"
|
||||
driver_options: "{{ item.driver_options | default({}) }}"
|
||||
recreate: "never"
|
||||
state: "present"
|
||||
volume_name: "{{ item.name }}"
|
||||
loop: "{{ docker_volumes }}"
|
||||
|
||||
- name: Pull Docker images
|
||||
when:
|
||||
- docker_images is defined
|
||||
- docker_images | length > 0
|
||||
community.docker.docker_image_pull:
|
||||
name: "{{ item.name }}"
|
||||
pull: "always"
|
||||
tag: "{{ item.tag | default('latest') }}"
|
||||
loop: "{{ docker_images }}"
|
||||
|
||||
- name: Create Docker containers
|
||||
when:
|
||||
- docker_containers is defined
|
||||
- docker_containers | length > 0
|
||||
community.general.docker_container:
|
||||
auto_remove: "{{ item.auto_remove | default(false) }}"
|
||||
capabilities: "{{ item.capabilities | default(omit) }}"
|
||||
command: "{{ item.command | default(omit) }}"
|
||||
detach: true
|
||||
domainname: "{{ item.domainname | default(omit) }}"
|
||||
entrypoint: "{{ item.entrypoint | default(omit) }}"
|
||||
env: "{{ item.env | default({}) }}"
|
||||
etc_hosts: "{{ item.etc_hosts | default({}) }}"
|
||||
hostname: "{{ item.hostname | default(item.name) }}"
|
||||
image: "{{ item.image }}"
|
||||
name: "{{ item.name }}"
|
||||
networks: "{{ item.networks | default(omit) }}"
|
||||
published_ports: "{{ item.ports | default([]) }}"
|
||||
restart_policy: "{{ item.restart_policy | default('unless-stopped') }}"
|
||||
state: 'started'
|
||||
sysctls: "{{ item.sysctls | default({}) }}"
|
||||
volumes: "{{ item.volumes | default([]) }}"
|
||||
loop: "{{ docker_containers }}"
|
7
ansible/roles/nginx/files/ssl.conf
Normal file
7
ansible/roles/nginx/files/ssl.conf
Normal file
@ -0,0 +1,7 @@
|
||||
ssl_session_timeout 1d;
|
||||
ssl_session_cache shared:MozSSL:1m;
|
||||
ssl_session_tickets off;
|
||||
ssl_dhparam ssl/dhparams.pem;
|
||||
ssl_protocols TLSv1.2 TLSv1.3;
|
||||
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
|
||||
ssl_prefer_server_ciphers off;
|
6  ansible/roles/nginx/handlers/main.yaml  (new file)

@@ -0,0 +1,6 @@
---

- name: Restart nginx
  ansible.builtin.service:
    name: nginx.service
    state: restarted
112
ansible/roles/nginx/tasks/main.yaml
Normal file
112
ansible/roles/nginx/tasks/main.yaml
Normal file
@ -0,0 +1,112 @@
|
||||
- name: Install nginx package (Archlinux)
|
||||
when: ansible_facts['os_family'] == "Archlinux"
|
||||
community.general.pacman:
|
||||
name:
|
||||
- nginx
|
||||
state: present
|
||||
update_cache: true
|
||||
|
||||
- name: Create config dirs
|
||||
ansible.builtin.file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
owner: root
|
||||
group: root
|
||||
mode: "0775"
|
||||
loop:
|
||||
- /etc/nginx/sites-available
|
||||
- /etc/nginx/sites-enabled
|
||||
- /etc/nginx/conf.d
|
||||
- /etc/nginx/ssl
|
||||
|
||||
- name: Copy the ssl configuration
|
||||
ansible.builtin.copy:
|
||||
src: ssl.conf
|
||||
dest: /etc/nginx/ssl/ssl.conf
|
||||
owner: root
|
||||
group: root
|
||||
mode: "0644"
|
||||
notify: Restart nginx
|
||||
|
||||
- name: Generate dhparams
|
||||
ansible.builtin.command:
|
||||
argv:
|
||||
- openssl
|
||||
- dhparam
|
||||
- -dsaparam
|
||||
- -outform
|
||||
- PEM
|
||||
- -out
|
||||
- /etc/nginx/ssl/dhparams.pem
|
||||
- 4096
|
||||
creates: /etc/nginx/ssl/dhparams.pem
|
||||
notify: Restart nginx
|
||||
|
||||
# - name: Generate dhparams (alternative)
|
||||
# community.crypto.openssl_dhparam:
|
||||
# group: root
|
||||
# mode: "0644"
|
||||
# owner: root
|
||||
# path: /etc/nginx/ssl/dhparams.pem
|
||||
# size: 4096
|
||||
# state: present
|
||||
|
||||
- name: Set permissions on dhparams
|
||||
ansible.builtin.file:
|
||||
path: /etc/nginx/ssl/dhparams.pem
|
||||
owner: root
|
||||
group: root
|
||||
mode: "0644"
|
||||
notify: Restart nginx
|
||||
|
||||
- name: Template out nginx base config
|
||||
ansible.builtin.template:
|
||||
src: nginx.conf.j2
|
||||
dest: /etc/nginx/nginx.conf
|
||||
owner: root
|
||||
group: root
|
||||
mode: "0644"
|
||||
notify: Restart nginx
|
||||
|
||||
- name: Template out nginx reverse proxy configs
|
||||
when: item.type == "proxy"
|
||||
ansible.builtin.template:
|
||||
src: nginx-proxy.conf.j2
|
||||
dest: /etc/nginx/sites-available/{{ item.name }}.conf
|
||||
owner: root
|
||||
group: root
|
||||
mode: "0644"
|
||||
loop: "{{ nginx_sites }}"
|
||||
notify: Restart nginx
|
||||
|
||||
- name: Template out nginx site configs
|
||||
when: item.type == "site"
|
||||
ansible.builtin.template:
|
||||
src: nginx-site.conf.j2
|
||||
dest: /etc/nginx/sites-available/{{ item.name }}.conf
|
||||
owner: root
|
||||
group: root
|
||||
mode: "0644"
|
||||
loop: "{{ nginx_sites }}"
|
||||
notify: Restart nginx
|
||||
|
||||
- name: Enable site configs
|
||||
ansible.builtin.file:
|
||||
path: /etc/nginx/sites-enabled/{{ item.name }}.conf
|
||||
src: /etc/nginx/sites-available/{{ item.name }}.conf
|
||||
state: link
|
||||
loop: "{{ nginx_sites }}"
|
||||
notify: Restart nginx
|
||||
|
||||
# - name: Run certbot role to install certificates
|
||||
# ansible.builtin.include_role:
|
||||
# name: certbot
|
||||
# vars:
|
||||
# certbot_domains: "{{ nginx_sites | map(attribute='name') }}"
|
||||
# certbot_notify: "Restart nginx"
|
||||
|
||||
- name: Start and enable nginx
|
||||
ansible.builtin.service:
|
||||
name: nginx
|
||||
state: started
|
||||
enabled: true
|
17
ansible/roles/nginx/templates/nginx-proxy.conf.j2
Normal file
17
ansible/roles/nginx/templates/nginx-proxy.conf.j2
Normal file
@ -0,0 +1,17 @@
|
||||
server {
|
||||
listen 80;
|
||||
server_name {{ item.name }};
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
listen 443 ssl http2;
|
||||
server_name {{ item.name }};
|
||||
ssl_certificate /etc/letsencrypt/live/{{ item.name }}/fullchain.pem;
|
||||
ssl_certificate_key /etc/letsencrypt/live/{{ item.name }}/privkey.pem;
|
||||
location / {
|
||||
proxy_pass http://{{ item.upstream.host }}:{{ item.upstream.port }};
|
||||
}
|
||||
}
|
19
ansible/roles/nginx/templates/nginx-site.conf.j2
Normal file
19
ansible/roles/nginx/templates/nginx-site.conf.j2
Normal file
@ -0,0 +1,19 @@
|
||||
server {
|
||||
listen 80;
|
||||
server_name {{ item.name }};
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
listen 443 ssl http2;
|
||||
server_name {{ item.name }};
|
||||
ssl_certificate /etc/letsencrypt/live/{{ item.name }}/fullchain.pem;
|
||||
ssl_certificate_key /etc/letsencrypt/live/{{ item.name }}/privkey.pem;
|
||||
index index.html;
|
||||
autoindex {{ item.autoindex | default('off') }};
|
||||
root {{ item.root }};
|
||||
location / {
|
||||
}
|
||||
}
|
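As a reading aid, a minimal sketch of the two nginx_sites entry shapes these templates consume: type: proxy entries feed nginx-proxy.conf.j2 (name plus upstream.host and upstream.port), while type: site entries feed nginx-site.conf.j2 (name, root, optional autoindex). The entries below are copied from the hv00 host_vars in this change; the shape itself is inferred from the templates.

```yaml
nginx_sites:
  - name: repo.balsillie.house      # rendered by nginx-site.conf.j2
    type: site
    autoindex: 'on'                 # optional; the template defaults to 'off'
    root: /var/www/aur
  - name: unifi.balsillie.house     # rendered by nginx-proxy.conf.j2
    type: proxy
    upstream:
      host: 127.0.0.1
      port: 8989
```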
48
ansible/roles/nginx/templates/nginx.conf.j2
Normal file
48
ansible/roles/nginx/templates/nginx.conf.j2
Normal file
@ -0,0 +1,48 @@
|
||||
user {{ nginx_user }};
|
||||
worker_processes auto;
|
||||
worker_cpu_affinity auto;
|
||||
|
||||
# include extra config
|
||||
include /etc/nginx/conf.d/*.conf;
|
||||
|
||||
events {
|
||||
multi_accept on;
|
||||
worker_connections 1024;
|
||||
}
|
||||
|
||||
http {
|
||||
charset utf-8;
|
||||
sendfile on;
|
||||
tcp_nopush on;
|
||||
tcp_nodelay on;
|
||||
server_tokens off;
|
||||
log_not_found off;
|
||||
types_hash_max_size 4096;
|
||||
client_max_body_size 16M;
|
||||
|
||||
# MIME
|
||||
include mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
# Include SSL config
|
||||
include ssl/ssl.conf;
|
||||
|
||||
server {
|
||||
listen 80 default_server;
|
||||
server_name "_";
|
||||
return 444;
|
||||
}
|
||||
|
||||
server {
|
||||
listen 443 ssl http2 default_server;
|
||||
server_name "_";
|
||||
ssl_reject_handshake on;
|
||||
}
|
||||
|
||||
# logging
|
||||
access_log /var/log/nginx/access.log;
|
||||
error_log /var/log/nginx/error.log warn;
|
||||
|
||||
# include sites
|
||||
include /etc/nginx/sites-enabled/*.conf;
|
||||
}
|
14
ansible/roles/sshd/tasks/main.yaml
Normal file
14
ansible/roles/sshd/tasks/main.yaml
Normal file
@ -0,0 +1,14 @@
|
||||
---
|
||||
|
||||
- name: Template out sshd_config
|
||||
ansible.builtin.template:
|
||||
src: sshd_config.j2
|
||||
dest: /etc/ssh/sshd_config
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0644'
|
||||
notify:
|
||||
- Restart sshd
|
||||
|
||||
- name: Flush handlers for immediate sshd restart
|
||||
ansible.builtin.meta: flush_handlers
|
@ -1,13 +1,5 @@
|
||||
---
|
||||
|
||||
# - name: Debug ansible facts
|
||||
# ansible.builtin.debug:
|
||||
# msg: "{{ ansible_facts }}"
|
||||
|
||||
# - name: Debug host vars
|
||||
# ansible.builtin.debug:
|
||||
# msg: "{{ hostvars[inventory_hostname]['ansible_fqdn'] }}"
|
||||
|
||||
- name: Ensure ssh config dir exists
|
||||
delegate_to: localhost
|
||||
become: false
|
||||
@ -39,19 +31,6 @@
|
||||
user: "{{ ansible_user }}"
|
||||
state: present
|
||||
|
||||
- name: Template out sshd_config
|
||||
ansible.builtin.template:
|
||||
src: sshd_config.j2
|
||||
dest: /etc/ssh/sshd_config
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0644'
|
||||
notify:
|
||||
- Restart sshd
|
||||
|
||||
- name: Flush handlers for immediate sshd restart
|
||||
ansible.builtin.meta: flush_handlers
|
||||
|
||||
- name: Add local ssh client config
|
||||
delegate_to: localhost
|
||||
become: false
|
35
ansible/roles/torrent/tasks/main.yaml
Normal file
35
ansible/roles/torrent/tasks/main.yaml
Normal file
@ -0,0 +1,35 @@
|
||||
---
|
||||
|
||||
- name: Create downloads directory
|
||||
ansible.builtin.file:
|
||||
path: "{{ torrent_downloads_dir }}"
|
||||
state: directory
|
||||
owner: "{{ torrent_user }}"
|
||||
group: "{{ torrent_user }}"
|
||||
mode: "0775"
|
||||
|
||||
- name: Create qbittorrent config directory
|
||||
ansible.builtin.file:
|
||||
path: /etc/qbittorrent
|
||||
state: directory
|
||||
owner: "{{ torrent_user }}"
|
||||
group: "{{ torrent_user }}"
|
||||
mode: "0775"
|
||||
|
||||
- name: Template out the wireguard config
|
||||
ansible.builtin.template:
|
||||
dest: /etc/qbittorrent/wg0.conf
|
||||
src: wireguard.conf.j2
|
||||
owner: root
|
||||
group: root
|
||||
mode: "0600"
|
||||
|
||||
- name: Modprobe the wireguard module
|
||||
community.general.modprobe:
|
||||
name: wireguard
|
||||
persistent: present
|
||||
state: present
|
||||
|
||||
- name: Branch to Docker role
|
||||
ansible.builtin.include_role:
|
||||
name: docker
|
11
ansible/roles/torrent/templates/wireguard.conf.j2
Normal file
11
ansible/roles/torrent/templates/wireguard.conf.j2
Normal file
@ -0,0 +1,11 @@
|
||||
[Interface]
|
||||
PrivateKey = {{ torrent_wireguard_private_key }}
|
||||
Address = {{ torrent_wireguard_address }}/32
|
||||
DNS = {{ torrent_wireguard_dns }}
|
||||
MTU = 1420
|
||||
|
||||
[Peer]
|
||||
PublicKey = {{ torrent_wireguard_peer_public_key }}
|
||||
AllowedIPs = 0.0.0.0/0
|
||||
Endpoint = {{ torrent_wireguard_peer_endpoint }}:51820
|
||||
PersistentKeepalive = 25
|
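The [Interface] PrivateKey line above references torrent_wireguard_private_key, which is not defined in the kodi00 host_vars added in this change (those only set the address, DNS, peer endpoint and peer public key), so it presumably comes from an encrypted source. A minimal, hypothetical sketch of the full variable set the template consumes:

```yaml
# Hypothetical sketch of the variables wireguard.conf.j2 expects; the private
# key value is a placeholder and would normally come from a secret store.
torrent_wireguard_private_key: "REPLACE_WITH_REAL_WG_PRIVATE_KEY"   # placeholder, not in this diff
torrent_wireguard_address: 10.2.0.2            # from kodi00 host_vars
torrent_wireguard_dns: 10.2.0.1                # from kodi00 host_vars
torrent_wireguard_peer_endpoint: 103.75.11.18  # from kodi00 host_vars
torrent_wireguard_peer_public_key: 8Rm0uoG0H9BcSuA67/5gBv8tJgFZXNLm4sqEtkB9Nmw=  # from kodi00 host_vars
```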
33
ansible/roles/ufw/tasks/main.yaml
Normal file
33
ansible/roles/ufw/tasks/main.yaml
Normal file
@ -0,0 +1,33 @@
|
||||
- name: Install ufw package (Archlinux)
|
||||
when: ansible_facts['os_family'] == "Archlinux"
|
||||
community.general.pacman:
|
||||
name:
|
||||
- ufw
|
||||
state: present
|
||||
update_cache: true
|
||||
|
||||
- name: Add ufw rules
|
||||
community.general.ufw:
|
||||
comment: "{{ item.name }}"
|
||||
direction: 'in'
|
||||
from_ip: "{{ item.source }}"
|
||||
proto: "{{ item.protocol }}"
|
||||
rule: "{{ item.action }}"
|
||||
to_ip: "{{ item.destination }}"
|
||||
to_port: "{{ item.port }}"
|
||||
loop: "{{ ufw_rules }}"
|
||||
|
||||
- name: Enable ufw
|
||||
when: ufw_enabled
|
||||
community.general.ufw:
|
||||
default: "deny"
|
||||
direction: "incoming"
|
||||
logging: "low"
|
||||
state: enabled
|
||||
|
||||
- name: Enable the ufw service
|
||||
when: ufw_enabled
|
||||
ansible.builtin.service:
|
||||
name: ufw
|
||||
state: restarted
|
||||
enabled: true
|
72
compose/backup.yaml
Normal file
@ -0,0 +1,72 @@
name: backup

networks:
  backup:
    attachable: true
    driver: macvlan
    driver_opts:
      macvlan_mode: bridge
      parent: enp1s0
    enable_ipv6: false
    external: false
    internal: false
    ipam:
      config:
        - subnet: "10.96.30.0/24"
          ip_range: "10.96.30.224/28"
          gateway: "10.96.30.254"
    name: backup

services:

  certbot:
    container_name: certbot
    image: certbot/dns-cloudflare
    pull_policy: always
    restart: "no"
    networks:
      backup:
        ipv4_address: 10.96.30.11
        link_local_ips: []
    command: >-
      certonly --dns-cloudflare
      --dns-cloudflare-credentials /etc/letsencrypt/credentials.ini
      --dns-cloudflare-propagation-seconds 20
      --email certbot-backup@balsillie.email
      --non-interactive
      --expand
      --no-eff-email
      --agree-tos
      -d backup.balsillie.house
    volumes:
      - /mnt/md/backup/letsencrypt/etc:/etc/letsencrypt
      - /mnt/md/backup/letsencrypt/var:/var/lib/letsencrypt

  backup:
    container_name: backup
    image: restic/rest-server:latest
    pull_policy: always
    depends_on:
      certbot:
        condition: service_completed_successfully
        required: true
        restart: true
    hostname: backup
    domainname: balsillie.house
    restart: unless-stopped
    networks:
      backup:
        ipv4_address: 10.96.30.12
        link_local_ips: []
    entrypoint: /usr/bin/rest-server
    command:
      - --htpasswd-file "/htpasswd"
      - --path "/backup"
      - --listen "10.96.30.12:443"
      - --tls
      - --tls-cert "/etc/letsencrypt/live/backup.balsillie.house/fullchain.pem"
      - --tls-key "/etc/letsencrypt/live/backup.balsillie.house/privkey.pem"
    volumes:
      - /mnt/md/backup/letsencrypt/etc:/etc/letsencrypt
      - /mnt/md/backup/restic:/backup
      - /mnt/md/backup/restic.htpasswd:/htpasswd
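For context, a client would talk to this rest-server through restic's rest: backend over the certbot-issued certificate. The repository name and credentials below are placeholders, and the user/password pair has to exist in /mnt/md/backup/restic.htpasswd:

export RESTIC_REPOSITORY='rest:https://backupuser:backuppass@backup.balsillie.house/laptop'
export RESTIC_PASSWORD='repository-encryption-passphrase'
restic init                    # once per repository path under /mnt/md/backup/restic
restic backup ~/Documents      # push a snapshot over HTTPS
restic snapshots               # verify the snapshot landed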
209
compose/truenas-apps.yaml
Normal file
@ -0,0 +1,209 @@
# Docker compose file for truenas apps

name: "apps"

# Network

networks:

  apps:
    attachable: true
    driver: macvlan
    driver_opts:
      macvlan_mode: bridge
      parent: eno1
    enable_ipv6: false
    external: false
    internal: false
    ipam:
      config:
        - subnet: "10.96.10.0/24"
          ip_range: "10.96.10.224/28"
          gateway: "10.96.10.254"
    name: apps

services:

  certbot:
    container_name: certbot
    image: certbot/dns-cloudflare
    pull_policy: always
    restart: "no"
    command: >-
      certonly --dns-cloudflare
      --dns-cloudflare-credentials /etc/letsencrypt/credentials.ini
      --dns-cloudflare-propagation-seconds 40
      --email certbot-apps@balsillie.email
      --non-interactive
      --expand
      --no-eff-email
      --agree-tos
      -d adguard.balsillie.house
      -d unifi.balsillie.house
      -d s3.balsillie.house
      -d registry.balsillie.house
      -d sync-admin.balsillie.house
      -d jellyfin.balsillie.house
      -d torrent.balsillie.house
      -d tv.balsillie.house
      -d movies.balsillie.house
      -d music.balsillie.house
      -d books.balsillie.house
      -d subs.balsillie.house
      -d index.balsillie.house
      -d books.balsillie.house
      -d library.balsillie.house
    dns:
      - 1.1.1.1
      - 9.9.9.9
    networks:
      apps:
        ipv4_address: 10.96.10.11
        link_local_ips: []
    volumes:
      - /mnt/hdd/apps/certbot/etc:/etc/letsencrypt
      - /mnt/hdd/apps/certbot/var:/var/lib/letsencrypt

  adguard:
    container_name: adguard
    image: adguard/adguardhome
    pull_policy: always
    hostname: adguard
    domainname: balsillie.house
    restart: unless-stopped
    depends_on:
      certbot:
        condition: service_completed_successfully
        required: true
        restart: true
    networks:
      apps:
        ipv4_address: 10.96.10.53
        link_local_ips: []
    volumes:
      - /mnt/hdd/apps/adguard/work:/opt/adguardhome/work
      - /mnt/hdd/apps/adguard/conf:/opt/adguardhome/conf
      - /mnt/hdd/apps/certbot/etc:/etc/letsencrypt

  minio:
    container_name: minio
    image: minio/minio
    pull_policy: always
    hostname: s3
    domainname: balsillie.house
    command:
      - "server"
      - "--address"
      - "s3.balsillie.house:9000"
      - "--console-address"
      - "s3.balsillie.house:443"
      - "--certs-dir"
      - "/etc/letsencrypt/live/adguard.balsillie.house-0002"
      - "/data"
    restart: unless-stopped
    depends_on:
      certbot:
        condition: service_completed_successfully
        required: true
        restart: true
    networks:
      apps:
        ipv4_address: 10.96.10.12
        link_local_ips: []
    volumes:
      - /mnt/hdd/s3:/data
      - /mnt/hdd/apps/certbot/etc:/etc/letsencrypt

  registry:
    container_name: registry
    image: distribution/distribution:edge
    pull_policy: always
    hostname: registry
    domainname: balsillie.house
    restart: unless-stopped
    depends_on:
      certbot:
        condition: service_completed_successfully
        required: true
        restart: true
    environment:
      REGISTRY_HTTP_SECRET: VfOVfkMqoeZGpfTJqe82rz4clqrR6Wid
    networks:
      apps:
        ipv4_address: 10.96.10.13
        link_local_ips: []
    volumes:
      - /mnt/hdd/apps/registry:/etc/distribution
      - /mnt/hdd/registry:/var/lib/registry
      - /mnt/hdd/apps/certbot/etc:/etc/letsencrypt

  nginx:
    container_name: nginx
    image: nginx
    pull_policy: always
    depends_on:
      certbot:
        condition: service_completed_successfully
        required: true
        restart: true
    restart: unless-stopped
    networks:
      apps:
        ipv4_address: 10.96.10.16
        link_local_ips: []
    volumes:
      - /mnt/hdd/apps/nginx/default.conf:/etc/nginx/conf.d/default.conf
      - /mnt/hdd/apps/certbot/etc:/etc/letsencrypt

  sync:
    container_name: sync
    image: code.balsillie.net/michael/containers/syncthing:latest
    pull_policy: always
    hostname: sync
    domainname: balsillie.house
    restart: unless-stopped
    command:
      - serve
      - --home=/config
      - --gui-address=http://10.96.10.15:8080
    networks:
      apps:
        ipv4_address: 10.96.10.15
        link_local_ips: []
    volumes:
      - /mnt/hdd/apps/syncthing:/config
      - /mnt/hdd/gallery/sync:/sync

  unifi:
    container_name: unifi
    image: goofball222/unifi:8.6.9
    pull_policy: always
    hostname: unifi
    domainname: balsillie.house
    environment:
      READENV: 'true'
      DB_MONGO_LOCAL: 'false'
      TZ: America/New_York
      RUN_CHOWN: 'false'
      DEBIAN_FRONTEND: noninteractive
      DB_MONGO_URI: mongodb://10.96.10.18:27017/unifi
      STATDB_MONGO_URI: mongodb://10.96.10.18:27017/unifi_stat
      UNIFI_DB_NAME: unifi
    restart: unless-stopped
    networks:
      apps:
        ipv4_address: 10.96.10.17
        link_local_ips: []
    volumes:
      - /mnt/hdd/apps/unifi:/usr/lib/unifi/data

  mongodb:
    image: mongo:5.0
    container_name: mongodb
    restart: unless-stopped
    networks:
      apps:
        ipv4_address: 10.96.10.18
        link_local_ips: []
    volumes:
      - /mnt/hdd/apps/mongodb:/data/db
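One way to drive this stack by hand, assuming the file lives on the TrueNAS host at compose/truenas-apps.yaml. Note that with a macvlan network the host itself generally cannot reach the container IPs directly, so connectivity checks are best run from another machine on the 10.96.10.0/24 VLAN:

docker compose -f compose/truenas-apps.yaml up -d
docker compose -f compose/truenas-apps.yaml run --rm certbot                        # re-run the one-shot issuance when certificates need renewing
docker compose -f compose/truenas-apps.yaml restart nginx adguard minio registry    # pick up the renewed certificate files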
161
compose/truenas-media.yaml
Normal file
@ -0,0 +1,161 @@
# Docker compose file for truenas apps

name: "media"

# Network

networks:
  apps:
    external: true
    name: apps

services:

  cert-sorter:
    container_name: cert-sorter
    image: code.balsillie.net/michael/containers/cert-sorter:latest
    pull_policy: always
    restart: 'no'
    volumes:
      - /mnt/hdd/apps/certbot/etc:/etc/letsencrypt
      - /mnt/hdd/apps/cert-1000:/dest/1000
    network_mode: none

  jellyfin:
    container_name: jellyfin
    image: code.balsillie.net/michael/containers/jellyfin:latest
    pull_policy: always
    user: jellyfin:jellyfin
    depends_on:
      cert-sorter:
        condition: service_completed_successfully
        required: true
        restart: true
    restart: unless-stopped
    hostname: jellyfin
    domainname: balsillie.house
    devices:
      - /dev/dri/card0:/dev/dri/card0
      - /dev/dri/renderD128:/dev/dri/renderD128
    environment:
      - JELLYFIN_PublishedServerUrl=https://jellyfin.balsillie.house
    networks:
      apps:
        ipv4_address: 10.96.10.101
        link_local_ips: []
    volumes:
      - /mnt/hdd/apps/jellyfin/config:/config
      - /mnt/hdd/apps/jellyfin/cache:/cache
      - /mnt/hdd/media:/media
      - /mnt/hdd/apps/cert-1000:/cert

  qbittorrent:
    container_name: torrent
    image: code.balsillie.net/michael/containers/qbittorrent:latest
    pull_policy: always
    depends_on:
      cert-sorter:
        condition: service_completed_successfully
        required: true
        restart: true
    restart: unless-stopped
    hostname: torrent
    domainname: balsillie.house
    dns: 10.2.0.1
    user: ubuntu
    networks:
      apps:
        ipv4_address: 10.96.10.102
        link_local_ips: []
    volumes:
      - /mnt/hdd/apps/torrent:/config
      - /mnt/hdd/media:/media
      - /mnt/hdd/apps/cert-1000:/cert

  sonarr:
    container_name: sonarr
    image: code.balsillie.net/michael/containers/sonarr:latest
    pull_policy: always
    depends_on:
      cert-sorter:
        condition: service_completed_successfully
        required: true
        restart: true
    restart: unless-stopped
    hostname: tv
    domainname: balsillie.house
    user: ubuntu
    networks:
      apps:
        ipv4_address: 10.96.10.103
        link_local_ips: []
    volumes:
      - /mnt/hdd/apps/sonarr:/config
      - /mnt/hdd/media:/media
      - /mnt/hdd/apps/cert-1000:/cert

  radarr:
    container_name: radarr
    image: code.balsillie.net/michael/containers/radarr:latest
    pull_policy: always
    depends_on:
      cert-sorter:
        condition: service_completed_successfully
        required: true
        restart: true
    restart: unless-stopped
    hostname: movies
    domainname: balsillie.house
    user: ubuntu
    networks:
      apps:
        ipv4_address: 10.96.10.104
        link_local_ips: []
    volumes:
      - /mnt/hdd/apps/radarr:/config
      - /mnt/hdd/media:/media
      - /mnt/hdd/apps/cert-1000:/cert

  prowlarr:
    container_name: prowlarr
    image: code.balsillie.net/michael/containers/prowlarr:latest
    pull_policy: always
    depends_on:
      cert-sorter:
        condition: service_completed_successfully
        required: true
        restart: true
    restart: unless-stopped
    hostname: index
    domainname: balsillie.house
    user: ubuntu
    networks:
      apps:
        ipv4_address: 10.96.10.105
        link_local_ips: []
    volumes:
      - /mnt/hdd/apps/prowlarr:/config
      - /mnt/hdd/media:/media
      - /mnt/hdd/apps/cert-1000:/cert

  readarr:
    container_name: readarr
    image: code.balsillie.net/michael/containers/readarr:latest
    pull_policy: always
    depends_on:
      cert-sorter:
        condition: service_completed_successfully
        required: true
        restart: true
    restart: unless-stopped
    hostname: books
    domainname: balsillie.house
    user: ubuntu
    networks:
      apps:
        ipv4_address: 10.96.10.106
        link_local_ips: []
    volumes:
      - /mnt/hdd/apps/readarr:/config
      - /mnt/hdd/media:/media
      - /mnt/hdd/apps/cert-1000:/cert
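A short operational sketch for the media stack; the /dev/dri check assumes the Intel render devices are passed through for VAAPI/QSV transcoding in Jellyfin:

docker compose -f compose/truenas-media.yaml up -d
docker exec jellyfin ls -l /dev/dri                                   # card0 and renderD128 should be visible inside the container
docker compose -f compose/truenas-media.yaml run --rm cert-sorter     # re-copy renewed certificates into /mnt/hdd/apps/cert-1000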
52
distribution/config.yml
Normal file
@ -0,0 +1,52 @@
version: 0.1
log:
  accesslog:
    disabled: false
  level: info
  formatter: text
  fields: {}
storage:
  cache:
    blobdescriptor: inmemory
    blobdescriptorsize: 5000
  delete:
    enabled: true
  filesystem:
    rootdirectory: /var/lib/registry
  maintenance:
    uploadpurging:
      enabled: true
      age: 168h
      interval: 24h
      dryrun: false
    readonly:
      enabled: false
  tag:
    concurrencylimit: 8
http:
  http2:
    disabled: false
  h2c:
    enabled: false
  addr: 10.96.10.13:443
  net: tcp
  host: https://registry.balsillie.house
  prefix: ''
  headers:
    X-Content-Type-Options:
      - nosniff
  relativeurls: false
  draintimeout: 30s
  tls:
    certificate: /etc/letsencrypt/live/adguard.balsillie.house-0002/fullchain.pem
    key: /etc/letsencrypt/live/adguard.balsillie.house-0002/privkey.pem
    minimumtls: tls1.2
  prometheus:
    enabled: false
# auth:
#   htpasswd:
#     realm: BALSILLIE
#     path: /etc/distribution/htpasswd
health:
  storagedriver:
    enabled: false
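With the registry listening on 10.96.10.13:443 behind the Let's Encrypt certificate, a quick push/pull smoke test could look like the following. The image name and namespace are placeholders, and docker login only becomes relevant once the commented-out htpasswd auth block is enabled:

docker tag alpine:latest registry.balsillie.house/test/alpine:latest
docker push registry.balsillie.house/test/alpine:latest
curl -s https://registry.balsillie.house/v2/_catalog                # distribution V2 API: list repositories
curl -s https://registry.balsillie.house/v2/test/alpine/tags/list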
46
nginx/truenas.conf
Normal file
@ -0,0 +1,46 @@
server {
    listen 80 default_server;
    server_name _;
    return 301 https://$host$request_uri;
}


ssl_certificate /etc/letsencrypt/live/adguard.balsillie.house-0002/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/adguard.balsillie.house-0002/privkey.pem;

server {
    listen 443 ssl;
    server_name sync-admin.balsillie.house;
    http2 on;
    location / {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_pass http://10.96.10.15:8080/;
        proxy_read_timeout 600s;
        proxy_send_timeout 600s;
    }
}

server {
    listen 443 ssl;
    server_name unifi.balsillie.house;
    http2 on;
    proxy_ssl_verify off;
    location /wss/ {
        proxy_pass https://10.96.10.17:8443;
        proxy_http_version 1.1;
        proxy_buffering off;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
        proxy_read_timeout 86400;
    }

    location / {
        proxy_pass https://10.96.10.17:8443/; # The Unifi Controller Port
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}
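Since this file is bind-mounted into the nginx container of the apps stack as default.conf, edits can be validated and reloaded in place rather than recreating the container:

docker exec nginx nginx -t          # syntax-check the mounted configuration
docker exec nginx nginx -s reload   # gracefully reload workers with the new config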
11
talos/calico/BGPConfiguration.yml
Normal file
@ -0,0 +1,11 @@
# For pfsense check 'Disable eBGP Require Policy' under BGP advanced
# https://geek-cookbook.funkypenguin.co.nz/kubernetes/loadbalancer/metallb/pfsense/#configure-frr-bgp-advanced

apiVersion: crd.projectcalico.org/v1
kind: BGPConfiguration
metadata:
  name: default
spec:
  asNumber: 64624
  serviceClusterIPs:
    - cidr: 10.80.0.0/12
7
talos/calico/BGPPeers.yml
Normal file
@ -0,0 +1,7 @@
apiVersion: crd.projectcalico.org/v1
kind: BGPPeer
metadata:
  name: router-balsillie-house
spec:
  asNumber: 64625
  peerIP: 192.168.1.11:179
7
talos/calico/apiserver.yml
Normal file
@ -0,0 +1,7 @@
# This section configures the Calico API server.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}
11
talos/calico/calicoNodeStatus.yml
Normal file
@ -0,0 +1,11 @@
apiVersion: projectcalico.org/v3
kind: CalicoNodeStatus
metadata:
  name: node00
spec:
  classes:
    - Agent
    - BGP
    - Routes
  node: node00
  updatePeriodSeconds: 10
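Because Talos has no host shell for calicoctl node status, the CalicoNodeStatus object above is the practical way to inspect BGP state. Applying the BGP objects and reading the status back might look like this; the projectcalico.org/v3 resource assumes the Calico API server from apiserver.yml is running:

kubectl apply -f talos/calico/BGPConfiguration.yml -f talos/calico/BGPPeers.yml
kubectl apply -f talos/calico/calicoNodeStatus.yml
kubectl get caliconodestatus node00 -o yaml    # the session to 192.168.1.11 (AS 64625) should report Established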
8
talos/calico/configMap.yml
Normal file
@ -0,0 +1,8 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: kubernetes-services-endpoint
  namespace: tigera-operator
data:
  KUBERNETES_SERVICE_HOST: "192.168.1.15"
  KUBERNETES_SERVICE_PORT: '6443'
6
talos/calico/felixConfiguration.yml
Normal file
@ -0,0 +1,6 @@
apiVersion: crd.projectcalico.org/v1
kind: FelixConfiguration
metadata:
  name: default
spec:
  CgroupV2Path: /sys/fs/cgroup
25
talos/calico/installation.yml
Normal file
@ -0,0 +1,25 @@
# This section includes base Calico installation configuration.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  variant: Calico
  cni:
    type: Calico
    ipam:
      type: Calico
  serviceCIDRs:
    - 10.80.0.0/12
  calicoNetwork:
    bgp: Enabled
    linuxDataplane: Nftables
    hostPorts: Enabled
    ipPools:
      - name: default-ipv4-ippool
        blockSize: 24
        cidr: 10.64.0.0/12
        encapsulation: None
        natOutgoing: Disabled
        nodeSelector: all()
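For reference, a rough order of operations if these manifests were applied by hand rather than through the Talos inline manifests further down; the operator URL matches the one pinned in talos-cluster.yml:

kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.29.1/manifests/tigera-operator.yaml
kubectl apply -f talos/calico/configMap.yml        # point the operator at the Kubernetes API endpoint before any CNI exists
kubectl apply -f talos/calico/installation.yml -f talos/calico/apiserver.yml
kubectl get tigerastatus                           # calico and apiserver should both reach Available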
16
talos/gen-config.sh
Executable file
@ -0,0 +1,16 @@
#!/bin/bash

talosctl gen config \
    cluster00 https://cp00.balsillie.house:6443 \
    --with-secrets secrets.yaml \
    --config-patch @patches/talos-cluster.yml \
    --config-patch @patches/mc-all.yml \
    --config-patch @patches/mc-node00.yml \
    --output rendered/ \
    --force

mkdir -p ~/.talos
cp rendered/talosconfig ~/.talos/config

talosctl config endpoint 192.168.1.15
talosctl config node 192.168.1.15
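After gen-config.sh has produced rendered/, the usual Talos bring-up sequence would be roughly the following; the node IP is taken from the script, and --insecure applies only to the first push to a machine still in maintenance mode:

talosctl apply-config --insecure --nodes 192.168.1.15 --file rendered/controlplane.yaml
talosctl bootstrap --nodes 192.168.1.15      # run once, against the first control-plane node
talosctl kubeconfig --nodes 192.168.1.15     # merge the cluster credentials into ~/.kube/config
kubectl get nodes -o wide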
32
talos/patches/mc-all.yml
Normal file
@ -0,0 +1,32 @@
machine:
  features:
    hostDNS:
      enabled: true
      forwardKubeDNSToHost: false
    kubePrism:
      enabled: false
  install:
    wipe: true
    legacyBIOSSupport: false
    diskSelector:
      type: ssd
  kubelet:
    extraArgs:
      rotate-server-certificates: true
    extraMounts:
      - destination: /var/local/openebs
        type: bind
        source: /var/local/openebs
        options:
          - bind
          - rshared
          - rw
  nodeLabels:
    openebs.io/engine: mayastor
  sysctls:
    vm.nr_hugepages: "1024"
  time:
    disabled: false
    servers:
      - 192.168.1.11
      - 10.96.10.254
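The rotate-server-certificates kubelet flag only works smoothly because the kubelet-serving-cert-approver and metrics-server manifests are pulled in by the cluster patch. A quick way to confirm that chain is healthy (namespace name as per the upstream standalone install):

kubectl get csr --sort-by=.metadata.creationTimestamp | tail -n 5    # kubelet serving CSRs should show Approved,Issued
kubectl -n kubelet-serving-cert-approver get pods
kubectl top nodes                                                    # metrics-server only works once serving certs are issued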
19
talos/patches/mc-node00.yml
Normal file
@ -0,0 +1,19 @@
---
machine:
  network:
    hostname: node00.balsillie.house
    nameservers:
      - 192.168.1.11
      - 10.96.10.254
    interfaces:
      - deviceSelector:
          hardwareAddr: 'f4:4d:30:6e:62:a7'
        dhcp: false
        routes:
          - network: 0.0.0.0/0
            gateway: 192.168.1.11
          - network: 0.0.0.0/0
            gateway: 10.96.10.254
        addresses:
          - 192.168.1.15/24
          - 10.96.10.30/24
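Node-scoped patches like this one can also be applied to a running machine without regenerating the full config, for example:

talosctl patch machineconfig --nodes 192.168.1.15 --patch @talos/patches/mc-node00.yml
talosctl get addresses --nodes 192.168.1.15     # both 192.168.1.15/24 and 10.96.10.30/24 should be listed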
88
talos/patches/talos-cluster.yml
Normal file
@ -0,0 +1,88 @@
---
cluster:
  allowSchedulingOnControlPlanes: true
  apiServer:
    admissionControl:
      - name: PodSecurity
        configuration:
          apiVersion: pod-security.admission.config.k8s.io/v1beta1
          kind: PodSecurityConfiguration
          exemptions:
            namespaces:
              - openebs
              - democratic-csi
  controlPlane:
    endpoint: https://cp00.balsillie.house:6443
    localAPIServerPort: 6443
  clusterName: cluster00.balsillie.house
  extraManifests:
    - https://raw.githubusercontent.com/alex1989hu/kubelet-serving-cert-approver/main/deploy/standalone-install.yaml
    - https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
    - https://raw.githubusercontent.com/kubernetes/ingress-nginx/refs/tags/controller-v1.11.3/deploy/static/provider/baremetal/deploy.yaml
  inlineManifests:
    - name: calico-installation
      contents: |
        apiVersion: operator.tigera.io/v1
        kind: Installation
        metadata:
          name: default
        spec:
          variant: Calico
          cni:
            type: Calico
            ipam:
              type: Calico
          serviceCIDRs:
            - 10.80.0.0/12
          calicoNetwork:
            bgp: Enabled
            linuxDataplane: Nftables
            hostPorts: Enabled
            ipPools:
              - name: default-ipv4-ippool
                blockSize: 24
                cidr: 10.64.0.0/12
                encapsulation: None
                natOutgoing: Disabled
                nodeSelector: all()
    - name: calico-apiserver
      contents: |
        apiVersion: operator.tigera.io/v1
        kind: APIServer
        metadata:
          name: default
        spec: {}
    - name: calico-bgpconfig
      contents: |
        apiVersion: crd.projectcalico.org/v1
        kind: BGPConfiguration
        metadata:
          name: default
        spec:
          asNumber: 64624
          serviceClusterIPs:
            - cidr: 10.80.0.0/12
    - name: calico-bgppeer
      contents: |
        apiVersion: crd.projectcalico.org/v1
        kind: BGPPeer
        metadata:
          name: router-balsillie-house
        spec:
          asNumber: 64625
          peerIP: 192.168.1.11:179
  network:
    cni:
      name: custom
      urls:
        - https://raw.githubusercontent.com/projectcalico/calico/v3.29.1/manifests/tigera-operator.yaml
    dnsDomain: cluster00.balsillie.house
    podSubnets:
      - 10.64.0.0/12
    serviceSubnets:
      - 10.80.0.0/12
  proxy:
    mode: nftables
    disabled: false
    extraArgs:
      proxy-mode: nftables
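Once the cluster is up, the settings in this patch can be spot-checked from kubectl. These are read-only queries and assume the Tigera operator has already reconciled the inline Installation object:

kubectl -n tigera-operator get pods                                                        # operator delivered via the custom CNI URL
kubectl get installation default -o jsonpath='{.spec.calicoNetwork.linuxDataplane}{"\n"}'  # should print Nftables
kubectl -n kube-system get ds kube-proxy -o yaml | grep proxy-mode                         # kube-proxy running in nftables mode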
583
talos/rendered/controlplane.yaml
Normal file
@ -0,0 +1,583 @@
|
||||
version: v1alpha1 # Indicates the schema used to decode the contents.
|
||||
debug: false # Enable verbose logging to the console.
|
||||
persist: true
|
||||
# Provides machine specific configuration options.
|
||||
machine:
|
||||
type: controlplane # Defines the role of the machine within the cluster.
|
||||
token: ubp3st.gmb0565erkwo722t # The `token` is used by a machine to join the PKI of the cluster.
|
||||
# The root certificate authority of the PKI.
|
||||
ca:
|
||||
crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJQekNCOHFBREFnRUNBaEVBc3QvY3BtNEliZnhzS3d3VGxHMFNtREFGQmdNclpYQXdFREVPTUF3R0ExVUUKQ2hNRmRHRnNiM013SGhjTk1qUXhNakEwTVRneU1qQTFXaGNOTXpReE1qQXlNVGd5TWpBMVdqQVFNUTR3REFZRApWUVFLRXdWMFlXeHZjekFxTUFVR0F5dGxjQU1oQU9aVVU3Vzh3OXcwR0l4cmVxVitNQ3JrUENmQS9keWdJMGtVCkJDQTkyTjhNbzJFd1h6QU9CZ05WSFE4QkFmOEVCQU1DQW9Rd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUcKQ0NzR0FRVUZCd01DTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3SFFZRFZSME9CQllFRksvei83WWVqamt0VFAwcgppcFFNa2hxK3hNU1pNQVVHQXl0bGNBTkJBTDNJTDk4b3NkeDVPTGpQeEZFcXRTK0NOeWhPS2RFMGU5S3ZnQi9VCmpaT3VEMWE3Zmx3Q1grTVVrdk1qMnBEUER1eTN1Tko5Ym41ZFdPMldmdWFwdEFrPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||
key: LS0tLS1CRUdJTiBFRDI1NTE5IFBSSVZBVEUgS0VZLS0tLS0KTUM0Q0FRQXdCUVlESzJWd0JDSUVJR1NncnJIR3JZSThYeGlRa0dxOUtJd3hzWkdha1BTb21GczJSQlV4Y2hRRgotLS0tLUVORCBFRDI1NTE5IFBSSVZBVEUgS0VZLS0tLS0K
|
||||
# Extra certificate subject alternative names for the machine's certificate.
|
||||
certSANs: []
|
||||
# # Uncomment this to enable SANs.
|
||||
# - 10.0.0.10
|
||||
# - 172.16.0.10
|
||||
# - 192.168.0.10
|
||||
|
||||
# Used to provide additional options to the kubelet.
|
||||
kubelet:
|
||||
image: ghcr.io/siderolabs/kubelet:v1.31.2 # The `image` field is an optional reference to an alternative kubelet image.
|
||||
# The `extraArgs` field is used to provide additional flags to the kubelet.
|
||||
extraArgs:
|
||||
rotate-server-certificates: "true"
|
||||
# The `extraMounts` field is used to add additional mounts to the kubelet container.
|
||||
extraMounts:
|
||||
- destination: /var/local/openebs # Destination is the absolute path where the mount will be placed in the container.
|
||||
type: bind # Type specifies the mount kind.
|
||||
source: /var/local/openebs # Source specifies the source path of the mount.
|
||||
# Options are fstab style mount options.
|
||||
options:
|
||||
- bind
|
||||
- rshared
|
||||
- rw
|
||||
defaultRuntimeSeccompProfileEnabled: true # Enable container runtime default Seccomp profile.
|
||||
disableManifestsDirectory: true # The `disableManifestsDirectory` field configures the kubelet to get static pod manifests from the /etc/kubernetes/manifests directory.
|
||||
|
||||
# # The `ClusterDNS` field is an optional reference to an alternative kubelet clusterDNS ip list.
|
||||
# clusterDNS:
|
||||
# - 10.96.0.10
|
||||
# - 169.254.2.53
|
||||
|
||||
# # The `extraConfig` field is used to provide kubelet configuration overrides.
|
||||
# extraConfig:
|
||||
# serverTLSBootstrap: true
|
||||
|
||||
# # The `KubeletCredentialProviderConfig` field is used to provide kubelet credential configuration.
|
||||
# credentialProviderConfig:
|
||||
# apiVersion: kubelet.config.k8s.io/v1
|
||||
# kind: CredentialProviderConfig
|
||||
# providers:
|
||||
# - apiVersion: credentialprovider.kubelet.k8s.io/v1
|
||||
# defaultCacheDuration: 12h
|
||||
# matchImages:
|
||||
# - '*.dkr.ecr.*.amazonaws.com'
|
||||
# - '*.dkr.ecr.*.amazonaws.com.cn'
|
||||
# - '*.dkr.ecr-fips.*.amazonaws.com'
|
||||
# - '*.dkr.ecr.us-iso-east-1.c2s.ic.gov'
|
||||
# - '*.dkr.ecr.us-isob-east-1.sc2s.sgov.gov'
|
||||
# name: ecr-credential-provider
|
||||
|
||||
# # The `nodeIP` field is used to configure `--node-ip` flag for the kubelet.
|
||||
# nodeIP:
|
||||
# # The `validSubnets` field configures the networks to pick kubelet node IP from.
|
||||
# validSubnets:
|
||||
# - 10.0.0.0/8
|
||||
# - '!10.0.0.3/32'
|
||||
# - fdc7::/16
|
||||
# Provides machine specific network configuration options.
|
||||
network:
|
||||
hostname: node00.balsillie.house # Used to statically set the hostname for the machine.
|
||||
# `interfaces` is used to define the network interface configuration.
|
||||
interfaces:
|
||||
- # Picks a network device using the selector.
|
||||
deviceSelector:
|
||||
hardwareAddr: f4:4d:30:6e:62:a7 # Device hardware address, supports matching by wildcard.
|
||||
# Assigns static IP addresses to the interface.
|
||||
addresses:
|
||||
- 192.168.1.15/24
|
||||
# A list of routes associated with the interface.
|
||||
routes:
|
||||
- network: 0.0.0.0/0 # The route's network (destination).
|
||||
gateway: 192.168.1.11 # The route's gateway (if empty, creates link scope route).
|
||||
dhcp: false # Indicates if DHCP should be used to configure the interface.
|
||||
|
||||
# # The interface name.
|
||||
# interface: enp0s3
|
||||
|
||||
# # Bond specific options.
|
||||
# bond:
|
||||
# # The interfaces that make up the bond.
|
||||
# interfaces:
|
||||
# - enp2s0
|
||||
# - enp2s1
|
||||
# # Picks a network device using the selector.
|
||||
# deviceSelectors:
|
||||
# - busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard.
|
||||
# - hardwareAddr: '*:f0:ab' # Device hardware address, supports matching by wildcard.
|
||||
# driver: virtio # Kernel driver, supports matching by wildcard.
|
||||
# mode: 802.3ad # A bond option.
|
||||
# lacpRate: fast # A bond option.
|
||||
|
||||
# # Bridge specific options.
|
||||
# bridge:
|
||||
# # The interfaces that make up the bridge.
|
||||
# interfaces:
|
||||
# - enxda4042ca9a51
|
||||
# - enxae2a6774c259
|
||||
# # A bridge option.
|
||||
# stp:
|
||||
# enabled: true # Whether Spanning Tree Protocol (STP) is enabled.
|
||||
|
||||
# # DHCP specific options.
|
||||
# dhcpOptions:
|
||||
# routeMetric: 1024 # The priority of all routes received via DHCP.
|
||||
|
||||
# # Wireguard specific configuration.
|
||||
|
||||
# # wireguard server example
|
||||
# wireguard:
|
||||
# privateKey: ABCDEF... # Specifies a private key configuration (base64 encoded).
|
||||
# listenPort: 51111 # Specifies a device's listening port.
|
||||
# # Specifies a list of peer configurations to apply to a device.
|
||||
# peers:
|
||||
# - publicKey: ABCDEF... # Specifies the public key of this peer.
|
||||
# endpoint: 192.168.1.3 # Specifies the endpoint of this peer entry.
|
||||
# # AllowedIPs specifies a list of allowed IP addresses in CIDR notation for this peer.
|
||||
# allowedIPs:
|
||||
# - 192.168.1.0/24
|
||||
# # wireguard peer example
|
||||
# wireguard:
|
||||
# privateKey: ABCDEF... # Specifies a private key configuration (base64 encoded).
|
||||
# # Specifies a list of peer configurations to apply to a device.
|
||||
# peers:
|
||||
# - publicKey: ABCDEF... # Specifies the public key of this peer.
|
||||
# endpoint: 192.168.1.2:51822 # Specifies the endpoint of this peer entry.
|
||||
# persistentKeepaliveInterval: 10s # Specifies the persistent keepalive interval for this peer.
|
||||
# # AllowedIPs specifies a list of allowed IP addresses in CIDR notation for this peer.
|
||||
# allowedIPs:
|
||||
# - 192.168.1.0/24
|
||||
|
||||
# # Virtual (shared) IP address configuration.
|
||||
|
||||
# # layer2 vip example
|
||||
# vip:
|
||||
# ip: 172.16.199.55 # Specifies the IP address to be used.
|
||||
# Used to statically set the nameservers for the machine.
|
||||
nameservers:
|
||||
- 192.168.1.11
|
||||
|
||||
# # Allows for extra entries to be added to the `/etc/hosts` file
|
||||
# extraHostEntries:
|
||||
# - ip: 192.168.1.100 # The IP of the host.
|
||||
# # The host alias.
|
||||
# aliases:
|
||||
# - example
|
||||
# - example.domain.tld
|
||||
|
||||
# # Configures KubeSpan feature.
|
||||
# kubespan:
|
||||
# enabled: true # Enable the KubeSpan feature.
|
||||
# Used to provide instructions for installations.
|
||||
install:
|
||||
disk: /dev/sda # The disk used for installations.
|
||||
# Look up disk using disk attributes like model, size, serial and others.
|
||||
diskSelector:
|
||||
type: ssd # Disk Type.
|
||||
|
||||
# # Disk size.
|
||||
|
||||
# # Select a disk which size is equal to 4GB.
|
||||
# size: 4GB
|
||||
# # Select a disk which size is greater than 1TB.
|
||||
# size: '> 1TB'
|
||||
# # Select a disk which size is less or equal than 2TB.
|
||||
# size: <= 2TB
|
||||
|
||||
# # Disk bus path.
|
||||
# busPath: /pci0000:00/0000:00:17.0/ata1/host0/target0:0:0/0:0:0:0
|
||||
# busPath: /pci0000:00/*
|
||||
image: ghcr.io/siderolabs/installer:v1.8.3 # Allows for supplying the image used to perform the installation.
|
||||
wipe: true # Indicates if the installation disk should be wiped at installation time.
|
||||
legacyBIOSSupport: false # Indicates if MBR partition should be marked as bootable (active).
|
||||
|
||||
# # Allows for supplying extra kernel args via the bootloader.
|
||||
# extraKernelArgs:
|
||||
# - talos.platform=metal
|
||||
# - reboot=k
|
||||
|
||||
# # Allows for supplying additional system extension images to install on top of base Talos image.
|
||||
# extensions:
|
||||
# - image: ghcr.io/siderolabs/gvisor:20220117.0-v1.0.0 # System extension image.
|
||||
# Used to configure the machine's time settings.
|
||||
time:
|
||||
disabled: false # Indicates if the time service is disabled for the machine.
|
||||
# description: |
|
||||
servers:
|
||||
- 192.168.1.11
|
||||
# Used to configure the machine's sysctls.
|
||||
sysctls:
|
||||
vm.nr_hugepages: "1024"
|
||||
# Used to configure the machine's container image registry mirrors.
|
||||
registries: {}
|
||||
# # Specifies mirror configuration for each registry host namespace.
|
||||
# mirrors:
|
||||
# ghcr.io:
|
||||
# # List of endpoints (URLs) for registry mirrors to use.
|
||||
# endpoints:
|
||||
# - https://registry.insecure
|
||||
# - https://ghcr.io/v2/
|
||||
|
||||
# # Specifies TLS & auth configuration for HTTPS image registries.
|
||||
# config:
|
||||
# registry.insecure:
|
||||
# # The TLS configuration for the registry.
|
||||
# tls:
|
||||
# insecureSkipVerify: true # Skip TLS server certificate verification (not recommended).
|
||||
#
|
||||
# # # Enable mutual TLS authentication with the registry.
|
||||
# # clientIdentity:
|
||||
# # crt: LS0tIEVYQU1QTEUgQ0VSVElGSUNBVEUgLS0t
|
||||
# # key: LS0tIEVYQU1QTEUgS0VZIC0tLQ==
|
||||
#
|
||||
# # # The auth configuration for this registry.
|
||||
# # auth:
|
||||
# # username: username # Optional registry authentication.
|
||||
# # password: password # Optional registry authentication.
|
||||
|
||||
# Features describe individual Talos features that can be switched on or off.
|
||||
features:
|
||||
rbac: true # Enable role-based access control (RBAC).
|
||||
stableHostname: true # Enable stable default hostname.
|
||||
apidCheckExtKeyUsage: true # Enable checks for extended key usage of client certificates in apid.
|
||||
diskQuotaSupport: true # Enable XFS project quota support for EPHEMERAL partition and user disks.
|
||||
# KubePrism - local proxy/load balancer on defined port that will distribute
|
||||
kubePrism:
|
||||
enabled: false # Enable KubePrism support - will start local load balancing proxy.
|
||||
port: 7445 # KubePrism port.
|
||||
# Configures host DNS caching resolver.
|
||||
hostDNS:
|
||||
enabled: true # Enable host DNS caching resolver.
|
||||
forwardKubeDNSToHost: false # Use the host DNS resolver as upstream for Kubernetes CoreDNS pods.
|
||||
|
||||
# # Configure Talos API access from Kubernetes pods.
|
||||
# kubernetesTalosAPIAccess:
|
||||
# enabled: true # Enable Talos API access from Kubernetes pods.
|
||||
# # The list of Talos API roles which can be granted for access from Kubernetes pods.
|
||||
# allowedRoles:
|
||||
# - os:reader
|
||||
# # The list of Kubernetes namespaces Talos API access is available from.
|
||||
# allowedKubernetesNamespaces:
|
||||
# - kube-system
|
||||
# Configures the node labels for the machine.
|
||||
nodeLabels:
|
||||
node.kubernetes.io/exclude-from-external-load-balancers: ""
|
||||
openebs.io/engine: mayastor
|
||||
|
||||
# # Provides machine specific control plane configuration options.
|
||||
|
||||
# # ControlPlane definition example.
|
||||
# controlPlane:
|
||||
# # Controller manager machine specific configuration options.
|
||||
# controllerManager:
|
||||
# disabled: false # Disable kube-controller-manager on the node.
|
||||
# # Scheduler machine specific configuration options.
|
||||
# scheduler:
|
||||
# disabled: true # Disable kube-scheduler on the node.
|
||||
|
||||
# # Used to provide static pod definitions to be run by the kubelet directly bypassing the kube-apiserver.
|
||||
|
||||
# # nginx static pod.
|
||||
# pods:
|
||||
# - apiVersion: v1
|
||||
# kind: pod
|
||||
# metadata:
|
||||
# name: nginx
|
||||
# spec:
|
||||
# containers:
|
||||
# - image: nginx
|
||||
# name: nginx
|
||||
|
||||
# # Used to partition, format and mount additional disks.
|
||||
|
||||
# # MachineDisks list example.
|
||||
# disks:
|
||||
# - device: /dev/sdb # The name of the disk to use.
|
||||
# # A list of partitions to create on the disk.
|
||||
# partitions:
|
||||
# - mountpoint: /var/mnt/extra # Where to mount the partition.
|
||||
#
|
||||
# # # The size of partition: either bytes or human readable representation. If `size:` is omitted, the partition is sized to occupy the full disk.
|
||||
|
||||
# # # Human readable representation.
|
||||
# # size: 100 MB
|
||||
# # # Precise value in bytes.
|
||||
# # size: 1073741824
|
||||
|
||||
# # Allows the addition of user specified files.
|
||||
|
||||
# # MachineFiles usage example.
|
||||
# files:
|
||||
# - content: '...' # The contents of the file.
|
||||
# permissions: 0o666 # The file's permissions in octal.
|
||||
# path: /tmp/file.txt # The path of the file.
|
||||
# op: append # The operation to use
|
||||
|
||||
# # The `env` field allows for the addition of environment variables.
|
||||
|
||||
# # Environment variables definition examples.
|
||||
# env:
|
||||
# GRPC_GO_LOG_SEVERITY_LEVEL: info
|
||||
# GRPC_GO_LOG_VERBOSITY_LEVEL: "99"
|
||||
# https_proxy: http://SERVER:PORT/
|
||||
# env:
|
||||
# GRPC_GO_LOG_SEVERITY_LEVEL: error
|
||||
# https_proxy: https://USERNAME:PASSWORD@SERVER:PORT/
|
||||
# env:
|
||||
# https_proxy: http://DOMAIN\USERNAME:PASSWORD@SERVER:PORT/
|
||||
|
||||
# # Used to configure the machine's sysfs.
|
||||
|
||||
# # MachineSysfs usage example.
|
||||
# sysfs:
|
||||
# devices.system.cpu.cpu0.cpufreq.scaling_governor: performance
|
||||
|
||||
# # Machine system disk encryption configuration.
|
||||
# systemDiskEncryption:
|
||||
# # Ephemeral partition encryption.
|
||||
# ephemeral:
|
||||
# provider: luks2 # Encryption provider to use for the encryption.
|
||||
# # Defines the encryption keys generation and storage method.
|
||||
# keys:
|
||||
# - # Deterministically generated key from the node UUID and PartitionLabel.
|
||||
# nodeID: {}
|
||||
# slot: 0 # Key slot number for LUKS2 encryption.
|
||||
#
|
||||
# # # KMS managed encryption key.
|
||||
# # kms:
|
||||
# # endpoint: https://192.168.88.21:4443 # KMS endpoint to Seal/Unseal the key.
|
||||
#
|
||||
# # # Cipher kind to use for the encryption. Depends on the encryption provider.
|
||||
# # cipher: aes-xts-plain64
|
||||
|
||||
# # # Defines the encryption sector size.
|
||||
# # blockSize: 4096
|
||||
|
||||
# # # Additional --perf parameters for the LUKS2 encryption.
|
||||
# # options:
|
||||
# # - no_read_workqueue
|
||||
# # - no_write_workqueue
|
||||
|
||||
# # Configures the udev system.
|
||||
# udev:
|
||||
# # List of udev rules to apply to the udev system
|
||||
# rules:
|
||||
# - SUBSYSTEM=="drm", KERNEL=="renderD*", GROUP="44", MODE="0660"
|
||||
|
||||
# # Configures the logging system.
|
||||
# logging:
|
||||
# # Logging destination.
|
||||
# destinations:
|
||||
# - endpoint: tcp://1.2.3.4:12345 # Where to send logs. Supported protocols are "tcp" and "udp".
|
||||
# format: json_lines # Logs format.
|
||||
|
||||
# # Configures the kernel.
|
||||
# kernel:
|
||||
# # Kernel modules to load.
|
||||
# modules:
|
||||
# - name: brtfs # Module name.
|
||||
|
||||
# # Configures the seccomp profiles for the machine.
|
||||
# seccompProfiles:
|
||||
# - name: audit.json # The `name` field is used to provide the file name of the seccomp profile.
|
||||
# # The `value` field is used to provide the seccomp profile.
|
||||
# value:
|
||||
# defaultAction: SCMP_ACT_LOG
|
||||
|
||||
# # Configures the node annotations for the machine.
|
||||
|
||||
# # node annotations example.
|
||||
# nodeAnnotations:
|
||||
# customer.io/rack: r13a25
|
||||
|
||||
# # Configures the node taints for the machine. Effect is optional.
|
||||
|
||||
# # node taints example.
|
||||
# nodeTaints:
|
||||
# exampleTaint: exampleTaintValue:NoSchedule
|
||||
# Provides cluster specific configuration options.
|
||||
cluster:
|
||||
id: OmdWk7fWVxSMf_1pjy_vG3LD_LpzBcJJ4gfyg7Du-1A= # Globally unique identifier for this cluster (base64 encoded random 32 bytes).
|
||||
secret: XVz/kRfKSE9ID7nb2QLW+DafhGHaLj+cXs9DlADVUQc= # Shared secret of cluster (base64 encoded random 32 bytes).
|
||||
# Provides control plane specific configuration options.
|
||||
controlPlane:
|
||||
endpoint: https://cp00.balsillie.house:6443 # Endpoint is the canonical controlplane endpoint, which can be an IP address or a DNS hostname.
|
||||
localAPIServerPort: 6443 # The port that the API server listens on internally.
|
||||
clusterName: cluster00.balsillie.house # Configures the cluster's name.
|
||||
# Provides cluster specific network configuration options.
|
||||
network:
|
||||
# The CNI used.
|
||||
cni:
|
||||
name: custom # Name of CNI to use.
|
||||
# URLs containing manifests to apply for the CNI.
|
||||
urls:
|
||||
- https://raw.githubusercontent.com/projectcalico/calico/v3.29.1/manifests/tigera-operator.yaml
|
||||
dnsDomain: cluster00.balsillie.house # The domain used by Kubernetes DNS.
|
||||
# The pod subnet CIDR.
|
||||
podSubnets:
|
||||
- 10.64.0.0/12
|
||||
# The service subnet CIDR.
|
||||
serviceSubnets:
|
||||
- 10.80.0.0/12
|
||||
token: cpn9u3.wyqt1zpotvuczv27 # The [bootstrap token](https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/) used to join the cluster.
|
||||
secretboxEncryptionSecret: jDt8ma1yKNwghesliMWeVUvMKfbd8B6P5F7n5sogO4k= # A key used for the [encryption of secret data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).
|
||||
# The base64 encoded root certificate authority used by Kubernetes.
|
||||
ca:
|
||||
crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJpekNDQVRDZ0F3SUJBZ0lSQVBoRnFMK05xZ3JWSnprWkJUOUliSUl3Q2dZSUtvWkl6ajBFQXdJd0ZURVQKTUJFR0ExVUVDaE1LYTNWaVpYSnVaWFJsY3pBZUZ3MHlOREV5TURReE9ESXlNRFZhRncwek5ERXlNREl4T0RJeQpNRFZhTUJVeEV6QVJCZ05WQkFvVENtdDFZbVZ5Ym1WMFpYTXdXVEFUQmdjcWhrak9QUUlCQmdncWhrak9QUU1CCkJ3TkNBQVFMbS8vMjJTVWp5elFFMVhzSEY3dGpWR1plME9UQnNTWUE1VjdxSkFEaWlNZEhRL1pnZTlpMGY4SzkKbzI2UmFxazBXaXdaMmVxUWo2bzhOMWN6Mmdwd28yRXdYekFPQmdOVkhROEJBZjhFQkFNQ0FvUXdIUVlEVlIwbApCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0hRWURWUjBPCkJCWUVGQkgvbkVibWgwWFg4aGhaSWI4L0JLRjZYMUN6TUFvR0NDcUdTTTQ5QkFNQ0Ewa0FNRVlDSVFEcGdrQkoKN2wzMlpjQmZXYlNzMEd3UU1FSWtjQjBlSXhOMDVtbjZVYlFHUEFJaEFJZlY1MG43Qi9nT1dtYjFVSExPNUMwTgpwaTFwS0lGU0p3aWFwYkxFeGYzOAotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||
key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSURwelhDRFhkWERTZHdHN2Jpb3h1Q3RWK1FmK09XSE40RmtOckg0eUpjOUxvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFQzV2Lzl0a2xJOHMwQk5WN0J4ZTdZMVJtWHREa3diRW1BT1ZlNmlRQTRvakhSMFAyWUh2WQp0SC9DdmFOdWtXcXBORm9zR2RucWtJK3FQRGRYTTlvS2NBPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
|
||||
# The base64 encoded aggregator certificate authority used by Kubernetes for front-proxy certificate generation.
|
||||
aggregatorCA:
|
||||
crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJYekNDQVFhZ0F3SUJBZ0lSQUppZ1ZCUXNkN3ZTdVJYWVgyWHExQW93Q2dZSUtvWkl6ajBFQXdJd0FEQWUKRncweU5ERXlNRFF4T0RJeU1EVmFGdzB6TkRFeU1ESXhPREl5TURWYU1BQXdXVEFUQmdjcWhrak9QUUlCQmdncQpoa2pPUFFNQkJ3TkNBQVRvQWg5ckl6cDRjdiszc2RGOStTUlo0RnJJMFRXU3dOTlZFZXNsUUhTQ01PZElYNDFpCnYwRWtVVUJpeEhNS29ENUowcWJWNlpxbmVQblYvdG14SWpiYm8yRXdYekFPQmdOVkhROEJBZjhFQkFNQ0FvUXcKSFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUZNQU1CQWY4dwpIUVlEVlIwT0JCWUVGSW84M1ZlODlWQVk0aVVXbUs3cE95WGdJa25LTUFvR0NDcUdTTTQ5QkFNQ0EwY0FNRVFDCklIV092bnNTREpnY1pMaitZdmkrdUM1MGk2N1RPWnl0VzQ2bDZvaHUyMVJCQWlCUGdLZDh1a0N0NWpVdmdnSmMKaXcwQmVmVGxDTEZNeGQ5cnBGSGRhUVhtMXc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||
key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUttT0p3eG8rbnUyWEZGaGUvQUVxUXJrcjRtKy9ZMFB6Wkl5YXRvOE1XbTlvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFNkFJZmF5TTZlSEwvdDdIUmZma2tXZUJheU5FMWtzRFRWUkhySlVCMGdqRG5TRitOWXI5QgpKRkZBWXNSekNxQStTZEttMWVtYXAzajUxZjdac1NJMjJ3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
|
||||
# The base64 encoded private key for service account token generation.
|
||||
serviceAccount:
|
||||
key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS1FJQkFBS0NBZ0VBenFEcnFHbk54RHhENTRTRk5PcEpGYy9qaGszSHBsUUkweEg3SDdFdkV5WjZFRUowCkYzQWZJRVo5MkdCSHRPK0QvNk9WZEFabEdBaTFSYTZNU2k1V3lhNFZHUVFwNHBxbG1zZFc1TFJmQXVRdmQyUk4KVVAxMUhVTG5TZ3RHeThYLzFnR25Td2gyVGVDbmJwYWI3NUMxNlBZSnBYQ1pROXFqYjhLM2FHREpXZXhBS1NCQQpGUXNjM3ZUc0JKTlQ0MlRyYmI1TGpKMWhKbDJQbWxEYWM1OGtxUXQxNHlLdnZ5L1J5ekNpQkN3bXRmUldWWkxBCjBubGFSMVZsWlZWZUQ1YnlvN0IrejRQMS9BVUQ2L3pMMUdWWjVzMTU2NGRVNDhNV0VFZHNpaUdrK2tlSityU2UKTjg3OWdaOXQvNlUrcTRWRjVneWpLcHNPMkkxdHk0N3JZcjUySEFGNXBjNWZDRXdJN1VlVWFRbmNIRmFrM09MSApuVzRuNWVmYjlmWWJTT0RkMG1qNjV0anRwTVYxRWoveWhHOFZBd0RQeDlSRUJhbTZ3V25EZkhoR1U5OTQ0Mm5DCnJPaDUwcUVPbE40aTdxWkRlVzNjazNCNGdBR1dnSEY4eFBnTGpVWGZnV1J0RWJacitRU0xKZ0JVcmhYM2FvS3UKaEJObDBMWmd3bHg2Nm5MeHF4UFRrazNnaWQ4OVlzaUhpaURUbDRBZER6b2ZKY2Znc095RmRuYm5MbHRxTmg5RwpRUFhZVEoyWFdkTlRveEhZditsRDRDYmVyUU1wdm9VUkQxOThkdDJaVTlLQ0s2MFh2enhwa3g3WURmVm5UUWM0CjlaMGtmeUd2ZkpJUzF1M3RFYWI2enJZYlltR2ErZUVXRFBPalZYUDg4UVFkMEFGQTdhamRablliejNFQ0F3RUEKQVFLQ0FnRUF4eTU2WElzNWJrd0Z6VHlDeCtKOGQwbEI4Nm93cGRabDkwbGpBVWJrc1JvckdsZGszMThXc2g4aAptcDRkSVg2dGlKVmhNVW9rMDRrNWJLVjQyTjFZamV1QXJPL0ZqSW8wejBJUE1rSHBRTGxFUkZ2czZBQ2JqNHg1CnRuZXVWQ3BXbHk1YTNNcVllRFB5VW5sZWNnM1dQVzVIR0RLMW02SThZRFNjWmtLK2RpOTJIZTdYdHpGU01GNTYKMFdKUHdHajlSWG5tQy9OOXZlVDgwKzNTMXhmMjEvbVROMU90aEtnTkhVaVJOV0oySW5vWmF6NGZJQVVaRUtCeQpNTCtjLzN3c0E3elVVU2ZNekpidEUrUkpmRUlDNnFqeU9TZm9uZkhsNStWaDdMbkg5ZmRXTzlVMm0yZXVHZkRjCnNVM1NYcmNIRFY3clB0dmY4TUpNcjZmaW9mb2xrTllPNDFnYmIyUTZBSzg5TGRmZTF4bGRwYzJhMTBYcm12b3QKK0wzODNWcWFaamVnc1FXNmpzc3BFWWtRWjBVbCs3Rzd0dkVMUVpMMjhiUmRXK0JPcTlBOXYyd3d6RHhiNFFESApuc0k5MWpEMUt5czBtZjFBcXpqdU1yMWRGMEx5czhBcXR2N2ZGTjlhbS9DWjhFY2ROUXBMSkdLVTRNNjhGRE5xCmVwVUZUSW9PWFdMaytieVBCYi9sU3lVVW04MSs4aXJmQXFuTEFka3NpYytDZHRxYU8vZnBXRzlwdzJVVi8rN3cKTjAwbjJrVTRLdEUyd1pLVmlQUnlQNkgvUjFKY2pEejJBbVMxZ3NXdGRvcmxXbm51OXdWL0s5Z0ZCTXMwRUh4VQpnSzdGeGRFdlhFYVdKZExsTk9QaEZtSldwU0o1Q2tlT0R2UDBDaDY0cWNhWVJ0cTgySkVDZ2dFQkFOQndYU21ZCm9yWmJlMEFQVHQrZzNVVElSR3BueFV3YWNmVkRQY3FUYmkxZzh6ZzBtbWVwNVY0YkJTZkR3RDhUNmNDNVl6ZFkKUlJHekdkNGphZEs0Yk94ZWlIL2JPTmlNRFVRTVRnS2c5a3VqbXZMcGM3OVVMZFVmNUFIQzQrNWNvamQ5OTMwegp0eWNLY04zODNSbTF5RG94M3poNVY0VGVaMnlGMUNmMU1NVzlkcUl6aVozdjUyTm9OSk1VUWRDUXdhRFk0WUhmCnFlbHppaDlaendrNk8rQi9VTkUyRkhOR1F3S1EzTHlRbTBDdXFhS2d1T1d3b3N3S29tMW8wdGNFcTY2MzFzS0oKeXY0SFFES2FnMlBDbDcvb2l6Z1lQc01Sd0xLWU9MelJ4MmhtMWdFV2p5THNpZHlxdHhNYUJqWlhPNFhJSGE4bAo2QVdJNjNzeWpLY2toeDBDZ2dFQkFQM0d6eXQzMktRa2RMWUNmNUZpN1EwN0pFUnNEKy8waTFOcnYzNGF6SlRNCkVjaFhPM2RHbXo4cm5CWE41R3F4R1p1cm56bTV0WER6dkNwK01PMGxvU2lIaHh5V3JhMGdjRXJFYXM4NENoVlgKaWtvYzh6OWttSHRLTGYyUElCeFlEa0VpSlJUS2hyRHpxWkdNSjE1UFEwR2w3eFRUbURucUtBdWxoYU9sdGh6OApPSzk4TDIvSElXVmlGOEQ3WVorVW40SVJkM3pIV1VNVUN2KzFlaWtlbHY4TUs5amtXSDFVVGtFM0RMWDNLQ0lpCmFHclJRYWxoOVlJckNQOU51R3BhTFhHWWg5ZXNRemdpYVFpQklxeUFpY2lQNmJDL3JSYlFFTXRHZGhpSkhvQzcKMytmV0FkS2U0UG5ZZWpGaXZaQk4rWFFXT0pHaFlPVVZlS2dYNC83bnRXVUNnZ0VCQUpnN3lMOE5uS0VsTldJMwpDL3NtYVJUSVU3ZUhMTUVGZTMrTzZiMWZhaE80Z0Vybm1ISG4wdjZsSzJHOGhWZE1ldWZvUG45NGQ1R1N4bXpJClhveXBaOXhHdXRqdXlwalZ5UWFQR2hhdmF0TVpuWnlXUnBSUnJkdS9tKzV4WWJtQWJIV3RDYW1tc0xqUjFsYzcKVEZ4dnVOcFAvR0VwdG5MellJUVJSajhjdzg5WUxpSVBGQTZHM2U5cmR5S0lvL3pwREpJbWRLVS9Mc0N6UXdqYwp0ZXBlWHNiN0ZaS0hOZTV2UFpmTkozcU0vdWNCNmlFOTAyem1VakJHQUJWNmZxck4zWG9SQ01neWpWQjFDVDBzClBwUUkxbjR5SUNRTENTTzlmb2l4eHBhWmZGTlM4REFCUXU5VjVPYUd4SWt0ODZXU2xvV0IvN1FoTWxHQWk4UUIKeWxwMHdqMENnZ0VBSFRWZnJOb2JaakVIK2RicHhkc0QrNnpkN0w0ZlMzZnkvVVArZzA0a1U4LzFFQUR5U0RVcApNeWtheXV1cXFaaitvSXN0UldDWmdJbWRFNnI0aUtMM0hJb1V3L2FYd01CbURFVDFJaXFLQnBEUHl0M3dKVkMzCnVDRCtrQnVFRDU4VzdPbzRLRjl6QnpUVVJIdEVJTG
V3dndLUk5PMXhyT0RGZ1NtbzZ2L3NxUU5pcHRmOVFibEMKbjcrZGlrbWtuOUJrVjMwWktwUnJNcXhBNDlPSVh1azFhZCtGd0czdnZXVVJxTEhrVmFFL3prWDFqSzNQQitiTgpjaER2OVNxRjJqL00wVEZFR1UrcllPK2M5U3lmeUFqM1VzMDFrWEZPUTd6bzRleVhOUlc3SEFnRHFsUllXMXlMCitsV3ZJcllCcVhWQUM5dHU0VndZb2VWSndOY1hGOFNsZlFLQ0FRQTcyS2lJc3AySkNLVEVMU1oyVVRqWkVjaXEKWEp1Q0VJTE0yNTlZalh1MVJXYlMvQkFaTWc5ZVRiUnQ3ZGpCYTRIWVRBQ2ZybzI0RjRjWjluM1FYQXRnWUIxOAp5Q0NnRzFIbUxadkRyUmlrWXpPTXRNaTFReGp6L3lGOUc5ZVJHeUxlYzRNNnR5TUhVVEpMRnA1TnhLODZPYnpZClV3VTRlYlJwK1I2aVg4dVFyY1RxOHFCYzA5Yk1NN1R5cmx5bk1lZXZoNFBZdXorNGpIWnBUZG1aMHY2UGxXN2MKSkthUjQxc0QrWVBIM24zU085MXMvVGdwQXJ1OG9QQklkVzVaMHN5VVViRWpTYytzbGRGSUQxejBUNDJ6bTdNWApHZGF6bUZaU0Y3REphL3NNU1lZTVhlR0NHMWdMTUhUb3g1NldiMHF0UzluR3dYaEkrNGFYUTFJWnpIS1EKLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0K
|
||||
# API server specific configuration options.
|
||||
apiServer:
|
||||
image: registry.k8s.io/kube-apiserver:v1.31.2 # The container image used in the API server manifest.
|
||||
# Extra certificate subject alternative names for the API server's certificate.
|
||||
certSANs:
|
||||
- cp00.balsillie.house
|
||||
disablePodSecurityPolicy: true # Disable PodSecurityPolicy in the API server and default manifests.
|
||||
# Configure the API server admission plugins.
|
||||
admissionControl:
|
||||
- name: PodSecurity # Name is the name of the admission controller.
|
||||
# Configuration is an embedded configuration object to be used as the plugin's
|
||||
configuration:
|
||||
apiVersion: pod-security.admission.config.k8s.io/v1beta1
|
||||
defaults:
|
||||
audit: restricted
|
||||
audit-version: latest
|
||||
enforce: baseline
|
||||
enforce-version: latest
|
||||
warn: restricted
|
||||
warn-version: latest
|
||||
exemptions:
|
||||
namespaces:
|
||||
- kube-system
|
||||
- openebs
|
||||
- democratic-csi
|
||||
runtimeClasses: []
|
||||
usernames: []
|
||||
kind: PodSecurityConfiguration
|
||||
# Configure the API server audit policy.
|
||||
auditPolicy:
|
||||
apiVersion: audit.k8s.io/v1
|
||||
kind: Policy
|
||||
rules:
|
||||
- level: Metadata
|
||||
# Controller manager server specific configuration options.
|
||||
controllerManager:
|
||||
image: registry.k8s.io/kube-controller-manager:v1.31.2 # The container image used in the controller manager manifest.
|
||||
# Kube-proxy server-specific configuration options
|
||||
proxy:
|
||||
disabled: false # Disable kube-proxy deployment on cluster bootstrap.
|
||||
image: registry.k8s.io/kube-proxy:v1.31.2 # The container image used in the kube-proxy manifest.
|
||||
mode: nftables # proxy mode of kube-proxy.
|
||||
# Extra arguments to supply to kube-proxy.
|
||||
extraArgs:
|
||||
proxy-mode: nftables
|
||||
# Scheduler server specific configuration options.
|
||||
scheduler:
|
||||
image: registry.k8s.io/kube-scheduler:v1.31.2 # The container image used in the scheduler manifest.
|
||||
# Configures cluster member discovery.
|
||||
discovery:
|
||||
enabled: true # Enable the cluster membership discovery feature.
|
||||
# Configure registries used for cluster member discovery.
|
||||
registries:
|
||||
# Kubernetes registry uses Kubernetes API server to discover cluster members and stores additional information
|
||||
kubernetes:
|
||||
disabled: true # Disable Kubernetes discovery registry.
|
||||
# Service registry is using an external service to push and pull information about cluster members.
|
||||
service: {}
|
||||
# # External service endpoint.
|
||||
# endpoint: https://discovery.talos.dev/
|
||||
# Etcd specific configuration options.
|
||||
etcd:
|
||||
# The `ca` is the root certificate authority of the PKI.
|
||||
ca:
|
||||
crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJmakNDQVNPZ0F3SUJBZ0lRQWVTcWJmNXNDa2QrdHA0OXJpdktVVEFLQmdncWhrak9QUVFEQWpBUE1RMHcKQ3dZRFZRUUtFd1JsZEdOa01CNFhEVEkwTVRJd05ERTRNakl3TlZvWERUTTBNVEl3TWpFNE1qSXdOVm93RHpFTgpNQXNHQTFVRUNoTUVaWFJqWkRCWk1CTUdCeXFHU000OUFnRUdDQ3FHU000OUF3RUhBMElBQkcrZXJpaCtQNGNvCjhqZTJqcjZRUDcrVW5xb1lPaTcxNkZaVXViNU5TVlFnTDlON0tPc3J2d3RTY0hPV0JYV1RjUTRQc2krV1lRbEYKRWRGTFFSMTR2NkdqWVRCZk1BNEdBMVVkRHdFQi93UUVBd0lDaERBZEJnTlZIU1VFRmpBVUJnZ3JCZ0VGQlFjRApBUVlJS3dZQkJRVUhBd0l3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVTU0rS251d1FHN2ZUCnE5c2VKQVhBdlBEaDNKd3dDZ1lJS29aSXpqMEVBd0lEU1FBd1JnSWhBSXdyQWlxd05sRy9ZNjludk52cmUrcW4KVDR1Zzl2WTZrLzF6K1RiTC85a01BaUVBMGxaK21obWVDTHlDU2hKUitUUjAwQUVRdkliQmxJUXRxVjMwSS9jdQoyN3c9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
|
||||
key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUY2cUdkQy9kc2wxRStYSnllVTBPazZrUTY4a3pWeERoQ0tHWFN4QjNMSDFvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFYjU2dUtINC9oeWp5TjdhT3ZwQS92NVNlcWhnNkx2WG9WbFM1dmsxSlZDQXYwM3NvNnl1LwpDMUp3YzVZRmRaTnhEZyt5TDVaaENVVVIwVXRCSFhpL29RPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
|
||||
|
||||
# # The container image used to create the etcd service.
|
||||
# image: gcr.io/etcd-development/etcd:v3.5.16
|
||||
|
||||
# # The `advertisedSubnets` field configures the networks to pick etcd advertised IP from.
|
||||
# advertisedSubnets:
|
||||
# - 10.0.0.0/8
|
||||
# A list of urls that point to additional manifests.
|
||||
extraManifests:
|
||||
- https://raw.githubusercontent.com/alex1989hu/kubelet-serving-cert-approver/main/deploy/standalone-install.yaml
|
||||
- https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
|
||||
- https://raw.githubusercontent.com/kubernetes/ingress-nginx/refs/tags/controller-v1.11.3/deploy/static/provider/baremetal/deploy.yaml
|
||||
# A list of inline Kubernetes manifests.
|
||||
inlineManifests:
|
||||
- name: calico-installation # Name of the manifest.
|
||||
contents: | # Manifest contents as a string.
|
||||
apiVersion: operator.tigera.io/v1
|
||||
kind: Installation
|
||||
metadata:
|
||||
name: default
|
||||
spec:
|
||||
variant: Calico
|
||||
cni:
|
||||
type: Calico
|
||||
ipam:
|
||||
type: Calico
|
||||
serviceCIDRs:
|
||||
- 10.80.0.0/12
|
||||
calicoNetwork:
|
||||
bgp: Enabled
|
||||
linuxDataplane: Nftables
|
||||
hostPorts: Enabled
|
||||
ipPools:
|
||||
- name: default-ipv4-ippool
|
||||
blockSize: 24
|
||||
cidr: 10.64.0.0/12
|
||||
encapsulation: None
|
||||
natOutgoing: Disabled
|
||||
nodeSelector: all()
|
||||
- name: calico-apiserver # Name of the manifest.
|
||||
contents: | # Manifest contents as a string.
|
||||
apiVersion: operator.tigera.io/v1
|
||||
kind: APIServer
|
||||
metadata:
|
||||
name: default
|
||||
spec: {}
|
||||
- name: calico-bgpconfig # Name of the manifest.
|
||||
contents: | # Manifest contents as a string.
|
||||
apiVersion: crd.projectcalico.org/v1
|
||||
kind: BGPConfiguration
|
||||
metadata:
|
||||
name: default
|
||||
spec:
|
||||
asNumber: 64624
|
||||
serviceClusterIPs:
|
||||
- cidr: 10.80.0.0/12
|
||||
- name: calico-bgppeer # Name of the manifest.
|
||||
contents: | # Manifest contents as a string.
|
||||
apiVersion: crd.projectcalico.org/v1
|
||||
kind: BGPPeer
|
||||
metadata:
|
||||
name: router-balsillie-house
|
||||
spec:
|
||||
asNumber: 64625
|
||||
peerIP: 192.168.1.11:179
|
||||
allowSchedulingOnControlPlanes: true # Allows running workload on control-plane nodes.
|
||||
|
||||
# # A key used for the [encryption of secret data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).
|
||||
|
||||
# # Decryption secret example (do not use in production!).
|
||||
# aescbcEncryptionSecret: z01mye6j16bspJYtTB/5SFX8j7Ph4JXxM2Xuu4vsBPM=
|
||||
|
||||
# # Core DNS specific configuration options.
|
||||
# coreDNS:
|
||||
# image: registry.k8s.io/coredns/coredns:v1.11.3 # The `image` field is an override to the default coredns image.
|
||||
|
||||
# # External cloud provider configuration.
|
||||
# externalCloudProvider:
|
||||
# enabled: true # Enable external cloud provider.
|
||||
# # A list of urls that point to additional manifests for an external cloud provider.
|
||||
# manifests:
|
||||
# - https://raw.githubusercontent.com/kubernetes/cloud-provider-aws/v1.20.0-alpha.0/manifests/rbac.yaml
|
||||
# - https://raw.githubusercontent.com/kubernetes/cloud-provider-aws/v1.20.0-alpha.0/manifests/aws-cloud-controller-manager-daemonset.yaml
|
||||
|
||||
# # A map of key value pairs that will be added while fetching the extraManifests.
|
||||
# extraManifestHeaders:
|
||||
# Token: "1234567"
|
||||
# X-ExtraInfo: info
|
||||
|
||||
# # Settings for admin kubeconfig generation.
|
||||
# adminKubeconfig:
|
||||
# certLifetime: 1h0m0s # Admin kubeconfig certificate lifetime (default is 1 year).
|
7
talos/rendered/talosconfig
Normal file
@ -0,0 +1,7 @@
|
||||
context: cluster00
|
||||
contexts:
|
||||
cluster00:
|
||||
endpoints: []
|
||||
ca: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJQekNCOHFBREFnRUNBaEVBc3QvY3BtNEliZnhzS3d3VGxHMFNtREFGQmdNclpYQXdFREVPTUF3R0ExVUUKQ2hNRmRHRnNiM013SGhjTk1qUXhNakEwTVRneU1qQTFXaGNOTXpReE1qQXlNVGd5TWpBMVdqQVFNUTR3REFZRApWUVFLRXdWMFlXeHZjekFxTUFVR0F5dGxjQU1oQU9aVVU3Vzh3OXcwR0l4cmVxVitNQ3JrUENmQS9keWdJMGtVCkJDQTkyTjhNbzJFd1h6QU9CZ05WSFE4QkFmOEVCQU1DQW9Rd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUcKQ0NzR0FRVUZCd01DTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3SFFZRFZSME9CQllFRksvei83WWVqamt0VFAwcgppcFFNa2hxK3hNU1pNQVVHQXl0bGNBTkJBTDNJTDk4b3NkeDVPTGpQeEZFcXRTK0NOeWhPS2RFMGU5S3ZnQi9VCmpaT3VEMWE3Zmx3Q1grTVVrdk1qMnBEUER1eTN1Tko5Ym41ZFdPMldmdWFwdEFrPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||
crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJLVENCM0tBREFnRUNBaEVBMytSdmZzZW5qSDk2cldNSUJsRFhQREFGQmdNclpYQXdFREVPTUF3R0ExVUUKQ2hNRmRHRnNiM013SGhjTk1qUXhNakE1TWpJd056TTBXaGNOTWpVeE1qQTVNakl3TnpNMFdqQVRNUkV3RHdZRApWUVFLRXdodmN6cGhaRzFwYmpBcU1BVUdBeXRsY0FNaEFNZXVSaHNsRHhRMVpHTmNRQmlldWVRQmhJNG8vNkFOClIxQW1tSm80MVlwZm8wZ3dSakFPQmdOVkhROEJBZjhFQkFNQ0I0QXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0h3WURWUjBqQkJnd0ZvQVVyL1AvdGg2T09TMU0vU3VLbEF5U0dyN0V4Smt3QlFZREsyVndBMEVBaHVSRwp4VzJOSmlaMVJidG1RY0hVY3dWYW5BOUtETzV4aGZObHFpRXZRSWl3UmdlblJBRjFobkp2ejZ0d2lXT2tXZFF0CjNNTWprMDVVQ3BXV2g3NUJDdz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
|
||||
key: LS0tLS1CRUdJTiBFRDI1NTE5IFBSSVZBVEUgS0VZLS0tLS0KTUM0Q0FRQXdCUVlESzJWd0JDSUVJTVBtcWV2cnBuZm1hVGNMM3JhRU8rK0JDdGNkTFhoMmRMdy9ZSGVqY0xBSAotLS0tLUVORCBFRDI1NTE5IFBSSVZBVEUgS0VZLS0tLS0K
|
592
talos/rendered/worker.yaml
Normal file
@ -0,0 +1,592 @@
|
||||
version: v1alpha1 # Indicates the schema used to decode the contents.
|
||||
debug: false # Enable verbose logging to the console.
|
||||
persist: true
|
||||
# Provides machine specific configuration options.
|
||||
machine:
|
||||
type: worker # Defines the role of the machine within the cluster.
|
||||
token: ubp3st.gmb0565erkwo722t # The `token` is used by a machine to join the PKI of the cluster.
|
||||
# The root certificate authority of the PKI.
|
||||
ca:
|
||||
crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJQekNCOHFBREFnRUNBaEVBc3QvY3BtNEliZnhzS3d3VGxHMFNtREFGQmdNclpYQXdFREVPTUF3R0ExVUUKQ2hNRmRHRnNiM013SGhjTk1qUXhNakEwTVRneU1qQTFXaGNOTXpReE1qQXlNVGd5TWpBMVdqQVFNUTR3REFZRApWUVFLRXdWMFlXeHZjekFxTUFVR0F5dGxjQU1oQU9aVVU3Vzh3OXcwR0l4cmVxVitNQ3JrUENmQS9keWdJMGtVCkJDQTkyTjhNbzJFd1h6QU9CZ05WSFE4QkFmOEVCQU1DQW9Rd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUcKQ0NzR0FRVUZCd01DTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3SFFZRFZSME9CQllFRksvei83WWVqamt0VFAwcgppcFFNa2hxK3hNU1pNQVVHQXl0bGNBTkJBTDNJTDk4b3NkeDVPTGpQeEZFcXRTK0NOeWhPS2RFMGU5S3ZnQi9VCmpaT3VEMWE3Zmx3Q1grTVVrdk1qMnBEUER1eTN1Tko5Ym41ZFdPMldmdWFwdEFrPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||
key: ""
|
||||
# Extra certificate subject alternative names for the machine's certificate.
|
||||
certSANs: []
|
||||
# # Uncomment this to enable SANs.
|
||||
# - 10.0.0.10
|
||||
# - 172.16.0.10
|
||||
# - 192.168.0.10
|
||||
|
||||
# Used to provide additional options to the kubelet.
|
||||
kubelet:
|
||||
image: ghcr.io/siderolabs/kubelet:v1.31.2 # The `image` field is an optional reference to an alternative kubelet image.
|
||||
# The `extraArgs` field is used to provide additional flags to the kubelet.
|
||||
extraArgs:
|
||||
rotate-server-certificates: "true"
|
||||
# The `extraMounts` field is used to add additional mounts to the kubelet container.
|
||||
extraMounts:
|
||||
- destination: /var/local/openebs # Destination is the absolute path where the mount will be placed in the container.
|
||||
type: bind # Type specifies the mount kind.
|
||||
source: /var/local/openebs # Source specifies the source path of the mount.
|
||||
# Options are fstab style mount options.
|
||||
options:
|
||||
- bind
|
||||
- rshared
|
||||
- rw
|
||||
defaultRuntimeSeccompProfileEnabled: true # Enable container runtime default Seccomp profile.
|
||||
disableManifestsDirectory: true # The `disableManifestsDirectory` field configures the kubelet to get static pod manifests from the /etc/kubernetes/manifests directory.
|
||||
|
||||
# # The `ClusterDNS` field is an optional reference to an alternative kubelet clusterDNS ip list.
|
||||
# clusterDNS:
|
||||
# - 10.96.0.10
|
||||
# - 169.254.2.53
|
||||
|
||||
# # The `extraConfig` field is used to provide kubelet configuration overrides.
|
||||
# extraConfig:
|
||||
# serverTLSBootstrap: true
|
||||
|
||||
# # The `KubeletCredentialProviderConfig` field is used to provide kubelet credential configuration.
|
||||
# credentialProviderConfig:
|
||||
# apiVersion: kubelet.config.k8s.io/v1
|
||||
# kind: CredentialProviderConfig
|
||||
# providers:
|
||||
# - apiVersion: credentialprovider.kubelet.k8s.io/v1
|
||||
# defaultCacheDuration: 12h
|
||||
# matchImages:
|
||||
# - '*.dkr.ecr.*.amazonaws.com'
|
||||
# - '*.dkr.ecr.*.amazonaws.com.cn'
|
||||
# - '*.dkr.ecr-fips.*.amazonaws.com'
|
||||
# - '*.dkr.ecr.us-iso-east-1.c2s.ic.gov'
|
||||
# - '*.dkr.ecr.us-isob-east-1.sc2s.sgov.gov'
|
||||
# name: ecr-credential-provider
|
||||
|
||||
# # The `nodeIP` field is used to configure `--node-ip` flag for the kubelet.
|
||||
# nodeIP:
|
||||
# # The `validSubnets` field configures the networks to pick kubelet node IP from.
|
||||
# validSubnets:
|
||||
# - 10.0.0.0/8
|
||||
# - '!10.0.0.3/32'
|
||||
# - fdc7::/16
|
||||
# Provides machine specific network configuration options.
|
||||
network:
|
||||
hostname: node00.balsillie.house # Used to statically set the hostname for the machine.
|
||||
# `interfaces` is used to define the network interface configuration.
|
||||
interfaces:
|
||||
- # Picks a network device using the selector.
|
||||
deviceSelector:
|
||||
hardwareAddr: f4:4d:30:6e:62:a7 # Device hardware address, supports matching by wildcard.
|
||||
# Assigns static IP addresses to the interface.
|
||||
addresses:
|
||||
- 192.168.1.15/24
|
||||
# A list of routes associated with the interface.
|
||||
routes:
|
||||
- network: 0.0.0.0/0 # The route's network (destination).
|
||||
gateway: 192.168.1.11 # The route's gateway (if empty, creates link scope route).
|
||||
dhcp: false # Indicates if DHCP should be used to configure the interface.
|
||||
|
||||
# # The interface name.
|
||||
# interface: enp0s3
|
||||
|
||||
# # Bond specific options.
|
||||
# bond:
|
||||
# # The interfaces that make up the bond.
|
||||
# interfaces:
|
||||
# - enp2s0
|
||||
# - enp2s1
|
||||
# # Picks a network device using the selector.
|
||||
# deviceSelectors:
|
||||
# - busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard.
|
||||
# - hardwareAddr: '*:f0:ab' # Device hardware address, supports matching by wildcard.
|
||||
# driver: virtio # Kernel driver, supports matching by wildcard.
|
||||
# mode: 802.3ad # A bond option.
|
||||
# lacpRate: fast # A bond option.
|
||||
|
||||
# # Bridge specific options.
|
||||
# bridge:
|
||||
# # The interfaces that make up the bridge.
|
||||
# interfaces:
|
||||
# - enxda4042ca9a51
|
||||
# - enxae2a6774c259
|
||||
# # A bridge option.
|
||||
# stp:
|
||||
# enabled: true # Whether Spanning Tree Protocol (STP) is enabled.
|
||||
|
||||
# # DHCP specific options.
|
||||
# dhcpOptions:
|
||||
# routeMetric: 1024 # The priority of all routes received via DHCP.
|
||||
|
||||
# # Wireguard specific configuration.
|
||||
|
||||
# # wireguard server example
|
||||
# wireguard:
|
||||
# privateKey: ABCDEF... # Specifies a private key configuration (base64 encoded).
|
||||
# listenPort: 51111 # Specifies a device's listening port.
|
||||
# # Specifies a list of peer configurations to apply to a device.
|
||||
# peers:
|
||||
# - publicKey: ABCDEF... # Specifies the public key of this peer.
|
||||
# endpoint: 192.168.1.3 # Specifies the endpoint of this peer entry.
|
||||
# # AllowedIPs specifies a list of allowed IP addresses in CIDR notation for this peer.
|
||||
# allowedIPs:
|
||||
# - 192.168.1.0/24
|
||||
# # wireguard peer example
|
||||
# wireguard:
|
||||
# privateKey: ABCDEF... # Specifies a private key configuration (base64 encoded).
|
||||
# # Specifies a list of peer configurations to apply to a device.
|
||||
# peers:
|
||||
# - publicKey: ABCDEF... # Specifies the public key of this peer.
|
||||
# endpoint: 192.168.1.2:51822 # Specifies the endpoint of this peer entry.
|
||||
# persistentKeepaliveInterval: 10s # Specifies the persistent keepalive interval for this peer.
|
||||
# # AllowedIPs specifies a list of allowed IP addresses in CIDR notation for this peer.
|
||||
# allowedIPs:
|
||||
# - 192.168.1.0/24
|
||||
|
||||
# # Virtual (shared) IP address configuration.
|
||||
|
||||
# # layer2 vip example
|
||||
# vip:
|
||||
# ip: 172.16.199.55 # Specifies the IP address to be used.
|
||||
# Used to statically set the nameservers for the machine.
|
||||
nameservers:
|
||||
- 192.168.1.11
|
||||
|
||||
# # Allows for extra entries to be added to the `/etc/hosts` file
|
||||
# extraHostEntries:
|
||||
# - ip: 192.168.1.100 # The IP of the host.
|
||||
# # The host alias.
|
||||
# aliases:
|
||||
# - example
|
||||
# - example.domain.tld
|
||||
|
||||
# # Configures KubeSpan feature.
|
||||
# kubespan:
|
||||
# enabled: true # Enable the KubeSpan feature.
|
||||
# Used to provide instructions for installations.
|
||||
install:
|
||||
disk: /dev/sda # The disk used for installations.
|
||||
# Look up disk using disk attributes like model, size, serial and others.
|
||||
diskSelector:
|
||||
type: ssd # Disk Type.
|
||||
|
||||
# # Disk size.
|
||||
|
||||
# # Select a disk which size is equal to 4GB.
|
||||
# size: 4GB
|
||||
# # Select a disk which size is greater than 1TB.
|
||||
# size: '> 1TB'
|
||||
# # Select a disk which size is less or equal than 2TB.
|
||||
# size: <= 2TB
|
||||
|
||||
# # Disk bus path.
|
||||
# busPath: /pci0000:00/0000:00:17.0/ata1/host0/target0:0:0/0:0:0:0
|
||||
# busPath: /pci0000:00/*
|
||||
image: ghcr.io/siderolabs/installer:v1.8.3 # Allows for supplying the image used to perform the installation.
|
||||
wipe: true # Indicates if the installation disk should be wiped at installation time.
|
||||
legacyBIOSSupport: false # Indicates if MBR partition should be marked as bootable (active).
|
||||
|
||||
# # Allows for supplying extra kernel args via the bootloader.
|
||||
# extraKernelArgs:
|
||||
# - talos.platform=metal
|
||||
# - reboot=k
|
||||
|
||||
# # Allows for supplying additional system extension images to install on top of base Talos image.
|
||||
# extensions:
|
||||
# - image: ghcr.io/siderolabs/gvisor:20220117.0-v1.0.0 # System extension image.
|
||||
# Used to configure the machine's time settings.
|
||||
time:
|
||||
disabled: false # Indicates if the time service is disabled for the machine.
|
||||
# description: |
|
||||
servers:
|
||||
- 192.168.1.11
|
||||
# Used to configure the machine's sysctls.
|
||||
sysctls:
|
||||
vm.nr_hugepages: "1024"
|
||||
# Used to configure the machine's container image registry mirrors.
|
||||
registries: {}
|
||||
# # Specifies mirror configuration for each registry host namespace.
|
||||
# mirrors:
|
||||
# ghcr.io:
|
||||
# # List of endpoints (URLs) for registry mirrors to use.
|
||||
# endpoints:
|
||||
# - https://registry.insecure
|
||||
# - https://ghcr.io/v2/
|
||||
|
||||
# # Specifies TLS & auth configuration for HTTPS image registries.
|
||||
# config:
|
||||
# registry.insecure:
|
||||
# # The TLS configuration for the registry.
|
||||
# tls:
|
||||
# insecureSkipVerify: true # Skip TLS server certificate verification (not recommended).
|
||||
#
|
||||
# # # Enable mutual TLS authentication with the registry.
|
||||
# # clientIdentity:
|
||||
# # crt: LS0tIEVYQU1QTEUgQ0VSVElGSUNBVEUgLS0t
|
||||
# # key: LS0tIEVYQU1QTEUgS0VZIC0tLQ==
|
||||
#
|
||||
# # # The auth configuration for this registry.
|
||||
# # auth:
|
||||
# # username: username # Optional registry authentication.
|
||||
# # password: password # Optional registry authentication.
|
||||
|
||||
# Features describe individual Talos features that can be switched on or off.
|
||||
features:
|
||||
rbac: true # Enable role-based access control (RBAC).
|
||||
stableHostname: true # Enable stable default hostname.
|
||||
apidCheckExtKeyUsage: true # Enable checks for extended key usage of client certificates in apid.
|
||||
diskQuotaSupport: true # Enable XFS project quota support for EPHEMERAL partition and user disks.
|
||||
# KubePrism - local proxy/load balancer on defined port that will distribute
|
||||
kubePrism:
|
||||
enabled: false # Enable KubePrism support - will start local load balancing proxy.
|
||||
port: 7445 # KubePrism port.
|
||||
# Configures host DNS caching resolver.
|
||||
hostDNS:
|
||||
enabled: true # Enable host DNS caching resolver.
|
||||
forwardKubeDNSToHost: false # Use the host DNS resolver as upstream for Kubernetes CoreDNS pods.
|
||||
|
||||
# # Configure Talos API access from Kubernetes pods.
|
||||
# kubernetesTalosAPIAccess:
|
||||
# enabled: true # Enable Talos API access from Kubernetes pods.
|
||||
# # The list of Talos API roles which can be granted for access from Kubernetes pods.
|
||||
# allowedRoles:
|
||||
# - os:reader
|
||||
# # The list of Kubernetes namespaces Talos API access is available from.
|
||||
# allowedKubernetesNamespaces:
|
||||
# - kube-system
|
||||
# Configures the node labels for the machine.
|
||||
nodeLabels:
|
||||
openebs.io/engine: mayastor
|
||||
|
||||
# # Provides machine specific control plane configuration options.
|
||||
|
||||
# # ControlPlane definition example.
|
||||
# controlPlane:
|
||||
# # Controller manager machine specific configuration options.
|
||||
# controllerManager:
|
||||
# disabled: false # Disable kube-controller-manager on the node.
|
||||
# # Scheduler machine specific configuration options.
|
||||
# scheduler:
|
||||
# disabled: true # Disable kube-scheduler on the node.
|
||||
|
||||
# # Used to provide static pod definitions to be run by the kubelet directly bypassing the kube-apiserver.
|
||||
|
||||
# # nginx static pod.
|
||||
# pods:
|
||||
# - apiVersion: v1
|
||||
# kind: Pod
|
||||
# metadata:
|
||||
# name: nginx
|
||||
# spec:
|
||||
# containers:
|
||||
# - image: nginx
|
||||
# name: nginx
|
||||
|
||||
# # Used to partition, format and mount additional disks.
|
||||
|
||||
# # MachineDisks list example.
|
||||
# disks:
|
||||
# - device: /dev/sdb # The name of the disk to use.
|
||||
# # A list of partitions to create on the disk.
|
||||
# partitions:
|
||||
# - mountpoint: /var/mnt/extra # Where to mount the partition.
|
||||
#
|
||||
# # # The size of partition: either bytes or human readable representation. If `size:` is omitted, the partition is sized to occupy the full disk.
|
||||
|
||||
# # # Human readable representation.
|
||||
# # size: 100 MB
|
||||
# # # Precise value in bytes.
|
||||
# # size: 1073741824
|
||||
|
||||
# # Allows the addition of user specified files.
|
||||
|
||||
# # MachineFiles usage example.
|
||||
# files:
|
||||
# - content: '...' # The contents of the file.
|
||||
# permissions: 0o666 # The file's permissions in octal.
|
||||
# path: /tmp/file.txt # The path of the file.
|
||||
# op: append # The operation to use
|
||||
|
||||
# # The `env` field allows for the addition of environment variables.
|
||||
|
||||
# # Environment variables definition examples.
|
||||
# env:
|
||||
# GRPC_GO_LOG_SEVERITY_LEVEL: info
|
||||
# GRPC_GO_LOG_VERBOSITY_LEVEL: "99"
|
||||
# https_proxy: http://SERVER:PORT/
|
||||
# env:
|
||||
# GRPC_GO_LOG_SEVERITY_LEVEL: error
|
||||
# https_proxy: https://USERNAME:PASSWORD@SERVER:PORT/
|
||||
# env:
|
||||
# https_proxy: http://DOMAIN\USERNAME:PASSWORD@SERVER:PORT/
|
||||
|
||||
# # Used to configure the machine's sysfs.
|
||||
|
||||
# # MachineSysfs usage example.
|
||||
# sysfs:
|
||||
# devices.system.cpu.cpu0.cpufreq.scaling_governor: performance
|
||||
|
||||
# # Machine system disk encryption configuration.
|
||||
# systemDiskEncryption:
|
||||
# # Ephemeral partition encryption.
|
||||
# ephemeral:
|
||||
# provider: luks2 # Encryption provider to use for the encryption.
|
||||
# # Defines the encryption keys generation and storage method.
|
||||
# keys:
|
||||
# - # Deterministically generated key from the node UUID and PartitionLabel.
|
||||
# nodeID: {}
|
||||
# slot: 0 # Key slot number for LUKS2 encryption.
|
||||
#
|
||||
# # # KMS managed encryption key.
|
||||
# # kms:
|
||||
# # endpoint: https://192.168.88.21:4443 # KMS endpoint to Seal/Unseal the key.
|
||||
#
|
||||
# # # Cipher kind to use for the encryption. Depends on the encryption provider.
|
||||
# # cipher: aes-xts-plain64
|
||||
|
||||
# # # Defines the encryption sector size.
|
||||
# # blockSize: 4096
|
||||
|
||||
# # # Additional --perf parameters for the LUKS2 encryption.
|
||||
# # options:
|
||||
# # - no_read_workqueue
|
||||
# # - no_write_workqueue
|
||||
|
||||
# # Configures the udev system.
|
||||
# udev:
|
||||
# # List of udev rules to apply to the udev system
|
||||
# rules:
|
||||
# - SUBSYSTEM=="drm", KERNEL=="renderD*", GROUP="44", MODE="0660"
|
||||
|
||||
# # Configures the logging system.
|
||||
# logging:
|
||||
# # Logging destination.
|
||||
# destinations:
|
||||
# - endpoint: tcp://1.2.3.4:12345 # Where to send logs. Supported protocols are "tcp" and "udp".
|
||||
# format: json_lines # Logs format.
|
||||
|
||||
# # Configures the kernel.
|
||||
# kernel:
|
||||
# # Kernel modules to load.
|
||||
# modules:
|
||||
# - name: btrfs # Module name.
|
||||
|
||||
# # Configures the seccomp profiles for the machine.
|
||||
# seccompProfiles:
|
||||
# - name: audit.json # The `name` field is used to provide the file name of the seccomp profile.
|
||||
# # The `value` field is used to provide the seccomp profile.
|
||||
# value:
|
||||
# defaultAction: SCMP_ACT_LOG
|
||||
|
||||
# # Configures the node annotations for the machine.
|
||||
|
||||
# # node annotations example.
|
||||
# nodeAnnotations:
|
||||
# customer.io/rack: r13a25
|
||||
|
||||
# # Configures the node taints for the machine. Effect is optional.
|
||||
|
||||
# # node taints example.
|
||||
# nodeTaints:
|
||||
# exampleTaint: exampleTaintValue:NoSchedule
|
||||
# Provides cluster specific configuration options.
|
||||
cluster:
|
||||
id: OmdWk7fWVxSMf_1pjy_vG3LD_LpzBcJJ4gfyg7Du-1A= # Globally unique identifier for this cluster (base64 encoded random 32 bytes).
|
||||
secret: XVz/kRfKSE9ID7nb2QLW+DafhGHaLj+cXs9DlADVUQc= # Shared secret of cluster (base64 encoded random 32 bytes).
|
||||
# Provides control plane specific configuration options.
|
||||
controlPlane:
|
||||
endpoint: https://cp00.balsillie.house:6443 # Endpoint is the canonical controlplane endpoint, which can be an IP address or a DNS hostname.
|
||||
localAPIServerPort: 6443 # The port that the API server listens on internally.
|
||||
clusterName: cluster00.balsillie.house # Configures the cluster's name.
|
||||
# Provides cluster specific network configuration options.
|
||||
network:
|
||||
# The CNI used.
|
||||
cni:
|
||||
name: custom # Name of CNI to use.
|
||||
# URLs containing manifests to apply for the CNI.
|
||||
urls:
|
||||
- https://raw.githubusercontent.com/projectcalico/calico/v3.29.1/manifests/tigera-operator.yaml
|
||||
dnsDomain: cluster00.balsillie.house # The domain used by Kubernetes DNS.
|
||||
# The pod subnet CIDR.
|
||||
podSubnets:
|
||||
- 10.64.0.0/12
|
||||
# The service subnet CIDR.
|
||||
serviceSubnets:
|
||||
- 10.80.0.0/12
|
||||
token: cpn9u3.wyqt1zpotvuczv27 # The [bootstrap token](https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/) used to join the cluster.
|
||||
# The base64 encoded root certificate authority used by Kubernetes.
|
||||
ca:
|
||||
crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJpekNDQVRDZ0F3SUJBZ0lSQVBoRnFMK05xZ3JWSnprWkJUOUliSUl3Q2dZSUtvWkl6ajBFQXdJd0ZURVQKTUJFR0ExVUVDaE1LYTNWaVpYSnVaWFJsY3pBZUZ3MHlOREV5TURReE9ESXlNRFZhRncwek5ERXlNREl4T0RJeQpNRFZhTUJVeEV6QVJCZ05WQkFvVENtdDFZbVZ5Ym1WMFpYTXdXVEFUQmdjcWhrak9QUUlCQmdncWhrak9QUU1CCkJ3TkNBQVFMbS8vMjJTVWp5elFFMVhzSEY3dGpWR1plME9UQnNTWUE1VjdxSkFEaWlNZEhRL1pnZTlpMGY4SzkKbzI2UmFxazBXaXdaMmVxUWo2bzhOMWN6Mmdwd28yRXdYekFPQmdOVkhROEJBZjhFQkFNQ0FvUXdIUVlEVlIwbApCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0hRWURWUjBPCkJCWUVGQkgvbkVibWgwWFg4aGhaSWI4L0JLRjZYMUN6TUFvR0NDcUdTTTQ5QkFNQ0Ewa0FNRVlDSVFEcGdrQkoKN2wzMlpjQmZXYlNzMEd3UU1FSWtjQjBlSXhOMDVtbjZVYlFHUEFJaEFJZlY1MG43Qi9nT1dtYjFVSExPNUMwTgpwaTFwS0lGU0p3aWFwYkxFeGYzOAotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||
key: ""
|
||||
# API server specific configuration options.
|
||||
apiServer:
|
||||
# Configure the API server admission plugins.
|
||||
admissionControl:
|
||||
- name: PodSecurity # Name is the name of the admission controller.
|
||||
# Configuration is an embedded configuration object to be used as the plugin's
|
||||
configuration:
|
||||
apiVersion: pod-security.admission.config.k8s.io/v1beta1
|
||||
exemptions:
|
||||
namespaces:
|
||||
- openebs
|
||||
- democratic-csi
|
||||
kind: PodSecurityConfiguration
|
||||
|
||||
# # The container image used in the API server manifest.
|
||||
# image: registry.k8s.io/kube-apiserver:v1.31.2
|
||||
|
||||
# # Configure the API server audit policy.
|
||||
# auditPolicy:
|
||||
# apiVersion: audit.k8s.io/v1
|
||||
# kind: Policy
|
||||
# rules:
|
||||
# - level: Metadata
|
||||
# Kube-proxy server-specific configuration options
|
||||
proxy:
|
||||
disabled: false # Disable kube-proxy deployment on cluster bootstrap.
|
||||
mode: nftables # proxy mode of kube-proxy.
|
||||
# Extra arguments to supply to kube-proxy.
|
||||
extraArgs:
|
||||
proxy-mode: nftables
|
||||
|
||||
# # The container image used in the kube-proxy manifest.
|
||||
# image: registry.k8s.io/kube-proxy:v1.31.2
|
||||
# Configures cluster member discovery.
|
||||
discovery:
|
||||
enabled: true # Enable the cluster membership discovery feature.
|
||||
# Configure registries used for cluster member discovery.
|
||||
registries:
|
||||
# Kubernetes registry uses Kubernetes API server to discover cluster members and stores additional information
|
||||
kubernetes:
|
||||
disabled: true # Disable Kubernetes discovery registry.
|
||||
# Service registry is using an external service to push and pull information about cluster members.
|
||||
service: {}
|
||||
# # External service endpoint.
|
||||
# endpoint: https://discovery.talos.dev/
|
||||
# A list of urls that point to additional manifests.
|
||||
extraManifests:
|
||||
- https://raw.githubusercontent.com/alex1989hu/kubelet-serving-cert-approver/main/deploy/standalone-install.yaml
|
||||
- https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
|
||||
- https://raw.githubusercontent.com/kubernetes/ingress-nginx/refs/tags/controller-v1.11.3/deploy/static/provider/baremetal/deploy.yaml
|
||||
# A list of inline Kubernetes manifests.
|
||||
inlineManifests:
|
||||
- name: calico-installation # Name of the manifest.
|
||||
contents: | # Manifest contents as a string.
|
||||
apiVersion: operator.tigera.io/v1
|
||||
kind: Installation
|
||||
metadata:
|
||||
name: default
|
||||
spec:
|
||||
variant: Calico
|
||||
cni:
|
||||
type: Calico
|
||||
ipam:
|
||||
type: Calico
|
||||
serviceCIDRs:
|
||||
- 10.80.0.0/12
|
||||
calicoNetwork:
|
||||
bgp: Enabled
|
||||
linuxDataplane: Nftables
|
||||
hostPorts: Enabled
|
||||
ipPools:
|
||||
- name: default-ipv4-ippool
|
||||
blockSize: 24
|
||||
cidr: 10.64.0.0/12
|
||||
encapsulation: None
|
||||
natOutgoing: Disabled
|
||||
nodeSelector: all()
|
||||
- name: calico-apiserver # Name of the manifest.
|
||||
contents: | # Manifest contents as a string.
|
||||
apiVersion: operator.tigera.io/v1
|
||||
kind: APIServer
|
||||
metadata:
|
||||
name: default
|
||||
spec: {}
|
||||
- name: calico-bgpconfig # Name of the manifest.
|
||||
contents: | # Manifest contents as a string.
|
||||
apiVersion: crd.projectcalico.org/v1
|
||||
kind: BGPConfiguration
|
||||
metadata:
|
||||
name: default
|
||||
spec:
|
||||
asNumber: 64624
|
||||
serviceClusterIPs:
|
||||
- cidr: 10.80.0.0/12
|
||||
- name: calico-bgppeer # Name of the manifest.
|
||||
contents: | # Manifest contents as a string.
|
||||
apiVersion: crd.projectcalico.org/v1
|
||||
kind: BGPPeer
|
||||
metadata:
|
||||
name: router-balsillie-house
|
||||
spec:
|
||||
asNumber: 64625
|
||||
peerIP: 192.168.1.11:179
|
||||
allowSchedulingOnControlPlanes: true # Allows running workload on control-plane nodes.
|
||||
|
||||
# # A key used for the [encryption of secret data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).
|
||||
|
||||
# # Decryption secret example (do not use in production!).
|
||||
# aescbcEncryptionSecret: z01mye6j16bspJYtTB/5SFX8j7Ph4JXxM2Xuu4vsBPM=
|
||||
|
||||
# # A key used for the [encryption of secret data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).
|
||||
|
||||
# # Decryption secret example (do not use in production!).
|
||||
# secretboxEncryptionSecret: z01mye6j16bspJYtTB/5SFX8j7Ph4JXxM2Xuu4vsBPM=
|
||||
|
||||
# # The base64 encoded aggregator certificate authority used by Kubernetes for front-proxy certificate generation.
|
||||
|
||||
# # AggregatorCA example.
|
||||
# aggregatorCA:
|
||||
# crt: LS0tIEVYQU1QTEUgQ0VSVElGSUNBVEUgLS0t
|
||||
# key: LS0tIEVYQU1QTEUgS0VZIC0tLQ==
|
||||
|
||||
# # The base64 encoded private key for service account token generation.
|
||||
|
||||
# # AggregatorCA example.
|
||||
# serviceAccount:
|
||||
# key: LS0tIEVYQU1QTEUgS0VZIC0tLQ==
|
||||
|
||||
# # Controller manager server specific configuration options.
|
||||
# controllerManager:
|
||||
# image: registry.k8s.io/kube-controller-manager:v1.31.2 # The container image used in the controller manager manifest.
|
||||
# # Extra arguments to supply to the controller manager.
|
||||
# extraArgs:
|
||||
# feature-gates: ServerSideApply=true
|
||||
|
||||
# # Scheduler server specific configuration options.
|
||||
# scheduler:
|
||||
# image: registry.k8s.io/kube-scheduler:v1.31.2 # The container image used in the scheduler manifest.
|
||||
# # Extra arguments to supply to the scheduler.
|
||||
# extraArgs:
|
||||
# feature-gates: AllBeta=true
|
||||
|
||||
# # Etcd specific configuration options.
|
||||
# etcd:
|
||||
# image: gcr.io/etcd-development/etcd:v3.5.16 # The container image used to create the etcd service.
|
||||
# # The `ca` is the root certificate authority of the PKI.
|
||||
# ca:
|
||||
# crt: LS0tIEVYQU1QTEUgQ0VSVElGSUNBVEUgLS0t
|
||||
# key: LS0tIEVYQU1QTEUgS0VZIC0tLQ==
|
||||
# # Extra arguments to supply to etcd.
|
||||
# extraArgs:
|
||||
# election-timeout: "5000"
|
||||
# # The `advertisedSubnets` field configures the networks to pick etcd advertised IP from.
|
||||
# advertisedSubnets:
|
||||
# - 10.0.0.0/8
|
||||
|
||||
# # Core DNS specific configuration options.
|
||||
# coreDNS:
|
||||
# image: registry.k8s.io/coredns/coredns:v1.11.3 # The `image` field is an override to the default coredns image.
|
||||
|
||||
# # External cloud provider configuration.
|
||||
# externalCloudProvider:
|
||||
# enabled: true # Enable external cloud provider.
|
||||
# # A list of urls that point to additional manifests for an external cloud provider.
|
||||
# manifests:
|
||||
# - https://raw.githubusercontent.com/kubernetes/cloud-provider-aws/v1.20.0-alpha.0/manifests/rbac.yaml
|
||||
# - https://raw.githubusercontent.com/kubernetes/cloud-provider-aws/v1.20.0-alpha.0/manifests/aws-cloud-controller-manager-daemonset.yaml
|
||||
|
||||
# # A map of key value pairs that will be added while fetching the extraManifests.
|
||||
# extraManifestHeaders:
|
||||
# Token: "1234567"
|
||||
# X-ExtraInfo: info
|
||||
|
||||
# # Settings for admin kubeconfig generation.
|
||||
# adminKubeconfig:
|
||||
# certLifetime: 1h0m0s # Admin kubeconfig certificate lifetime (default is 1 year).
|
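A minimal sketch of pushing the rendered worker config to the node it targets (node00 / 192.168.1.15 from the file above); --insecure is only appropriate while the node is still booted into maintenance mode:

    talosctl apply-config --insecure \
      --nodes 192.168.1.15 \
      --file talos/rendered/worker.yaml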
136
talos/talos-patch.yaml
Normal file
136
talos/talos-patch.yaml
Normal file
@ -0,0 +1,136 @@
|
||||
---
|
||||
|
||||
cluster:
|
||||
allowSchedulingOnControlPlanes: true
|
||||
controlPlane:
|
||||
endpoint: https://cp00.balsillie.house:6443
|
||||
localAPIServerPort: 6443
|
||||
clusterName: cluster00.balsillie.house
|
||||
extraManifests:
|
||||
- https://raw.githubusercontent.com/alex1989hu/kubelet-serving-cert-approver/main/deploy/standalone-install.yaml
|
||||
- https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
|
||||
# - https://github.com/kubernetes-csi/csi-driver-nfs/blob/v4.9.0/deploy/example/pv-nfs-csi.yaml # TODO follow link and add individual manifests
|
||||
# - https://raw.githubusercontent.com/kubernetes/ingress-nginx/refs/tags/controller-v1.11.3/deploy/static/provider/baremetal/deploy.yaml
|
||||
inlineManifests:
|
||||
- name: calico-installation
|
||||
contents: |
|
||||
apiVersion: operator.tigera.io/v1
|
||||
kind: Installation
|
||||
metadata:
|
||||
name: default
|
||||
spec:
|
||||
variant: Calico
|
||||
cni:
|
||||
type: Calico
|
||||
ipam:
|
||||
type: Calico
|
||||
serviceCIDRs:
|
||||
- 10.80.0.0/12
|
||||
calicoNetwork:
|
||||
bgp: Enabled
|
||||
linuxDataplane: Nftables
|
||||
hostPorts: Enabled
|
||||
ipPools:
|
||||
- name: default-ipv4-ippool
|
||||
blockSize: 24
|
||||
cidr: 10.64.0.0/12
|
||||
encapsulation: None
|
||||
natOutgoing: Disabled
|
||||
nodeSelector: all()
|
||||
- name: calico-apiserver
|
||||
contents: |
|
||||
apiVersion: operator.tigera.io/v1
|
||||
kind: APIServer
|
||||
metadata:
|
||||
name: default
|
||||
spec: {}
|
||||
- name: calico-bgpconfig
|
||||
contents: |
|
||||
apiVersion: crd.projectcalico.org/v1
|
||||
kind: BGPConfiguration
|
||||
metadata:
|
||||
name: default
|
||||
spec:
|
||||
asNumber: 64624
|
||||
serviceClusterIPs:
|
||||
- cidr: 10.80.0.0/12
|
||||
serviceExternalIPs:
|
||||
- cidr: 10.96.20.0/24
|
||||
- name: calico-bgppeer
|
||||
contents: |
|
||||
apiVersion: crd.projectcalico.org/v1
|
||||
kind: BGPPeer
|
||||
metadata:
|
||||
name: router-balsillie-house
|
||||
spec:
|
||||
asNumber: 64625
|
||||
peerIP: 192.168.1.11:179
|
||||
network:
|
||||
cni:
|
||||
name: custom
|
||||
urls:
|
||||
- https://raw.githubusercontent.com/projectcalico/calico/v3.29.1/manifests/tigera-operator.yaml
|
||||
dnsDomain: cluster00.balsillie.house
|
||||
podSubnets:
|
||||
- 10.64.0.0/12
|
||||
serviceSubnets:
|
||||
- 10.80.0.0/12
|
||||
proxy:
|
||||
mode: nftables
|
||||
disabled: false
|
||||
extraArgs:
|
||||
proxy-mode: nftables
|
||||
machine:
|
||||
disks:
|
||||
- device: /dev/nvme0n1
|
||||
# partitions:
|
||||
# - mountpoint: /var/mnt/storage
|
||||
features:
|
||||
hostDNS:
|
||||
enabled: true
|
||||
forwardKubeDNSToHost: false
|
||||
install:
|
||||
wipe: true
|
||||
legacyBIOSSupport: false
|
||||
diskSelector:
|
||||
type: ssd
|
||||
kubelet:
|
||||
extraArgs:
|
||||
rotate-server-certificates: true
|
||||
extraMounts:
|
||||
- destination: /var/local/openebs
|
||||
type: bind
|
||||
source: /var/local/openebs
|
||||
options:
|
||||
- rbind
|
||||
- rshared
|
||||
- rw
|
||||
# - destination: /var/dev/nvme0n1
|
||||
# type: bind
|
||||
# source: /dev/nvme0n1
|
||||
# options:
|
||||
# - bind
|
||||
# - rshared
|
||||
# - rw
|
||||
network:
|
||||
hostname: node00.balsillie.house
|
||||
nameservers:
|
||||
- 192.168.1.11
|
||||
interfaces:
|
||||
- deviceSelector:
|
||||
hardwareAddr: 'f4:4d:30:6e:62:a7'
|
||||
dhcp: false
|
||||
routes:
|
||||
- network: 0.0.0.0/0
|
||||
gateway: 192.168.1.11
|
||||
addresses:
|
||||
- 192.168.1.15/24
|
||||
nodeLabels:
|
||||
openebs.io/engine: mayastor
|
||||
sysctls:
|
||||
vm.nr_hugepages: "1024"
|
||||
time:
|
||||
disabled: false
|
||||
servers:
|
||||
- 192.168.1.11
|
||||
|
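The rendered worker.yaml and talosconfig shown above can be regenerated from this patch. A minimal sketch, assuming talosctl is run from the repository root and the generated files are then moved into talos/rendered:

    talosctl gen config cluster00.balsillie.house https://cp00.balsillie.house:6443 \
      --config-patch @talos/talos-patch.yaml
    talosctl validate --config worker.yaml --mode metal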
9
talos/volume-config.yaml
Normal file
9
talos/volume-config.yaml
Normal file
@ -0,0 +1,9 @@
|
||||
apiVersion: v1alpha1
|
||||
kind: VolumeConfig
|
||||
name: mayastor # Name of the volume.
|
||||
# The provisioning describes how the volume is provisioned.
|
||||
provisioning:
|
||||
# The disk selector expression.
|
||||
diskSelector:
|
||||
match: disk.transport == "nvme" # The Common Expression Language (CEL) expression to match the disk.
|
||||
maxSize: 501GiB
|
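One way to deliver this VolumeConfig document to a node is as a machine config patch. A minimal sketch, assuming the node address used elsewhere in this change:

    talosctl patch machineconfig \
      --nodes 192.168.1.15 \
      --patch @talos/volume-config.yaml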
128
terraform/aws/kms/main.tf
Normal file
128
terraform/aws/kms/main.tf
Normal file
@ -0,0 +1,128 @@
|
||||
terraform {
|
||||
required_version = ">= 1.8.7"
|
||||
required_providers {
|
||||
aws = {
|
||||
source = "hashicorp/aws"
|
||||
version = ">= 5.82.2"
|
||||
}
|
||||
}
|
||||
backend "local" {
|
||||
# path = pathexpand("~/Backups/tfstate/cloudflare.tfstate")
|
||||
}
|
||||
}
|
||||
|
||||
provider "aws" {
|
||||
region = "us-east-1"
|
||||
}
|
||||
|
||||
resource "aws_iam_user" "vault_user" {
|
||||
name = "vault-unseal-user"
|
||||
}
|
||||
|
||||
resource "aws_iam_user" "sops_user" {
|
||||
name = "sops-user"
|
||||
}
|
||||
|
||||
resource "aws_iam_access_key" "vault_user_key" {
|
||||
user = aws_iam_user.vault_user.name
|
||||
}
|
||||
|
||||
resource "aws_iam_access_key" "sops_user_key" {
|
||||
user = aws_iam_user.sops_user.name
|
||||
}
|
||||
|
||||
resource "aws_kms_key" "vault" {
|
||||
description = "Hashicorp Vault auto unseal key"
|
||||
key_usage = "ENCRYPT_DECRYPT"
|
||||
customer_master_key_spec = "SYMMETRIC_DEFAULT"
|
||||
deletion_window_in_days = 30
|
||||
is_enabled = true
|
||||
multi_region = false
|
||||
enable_key_rotation = false
|
||||
}
|
||||
|
||||
resource "aws_kms_key" "sops" {
|
||||
description = "SOPS operational key"
|
||||
key_usage = "ENCRYPT_DECRYPT"
|
||||
customer_master_key_spec = "SYMMETRIC_DEFAULT"
|
||||
deletion_window_in_days = 30
|
||||
is_enabled = true
|
||||
multi_region = false
|
||||
enable_key_rotation = false
|
||||
}
|
||||
|
||||
resource "aws_kms_alias" "vault" {
|
||||
name = "alias/hashicorp-vault-unseal"
|
||||
target_key_id = aws_kms_key.vault.key_id
|
||||
}
|
||||
resource "aws_kms_alias" "sops" {
|
||||
name = "alias/sops"
|
||||
target_key_id = aws_kms_key.sops.key_id
|
||||
}
|
||||
|
||||
resource "aws_iam_user_policy" "vault_policy" {
|
||||
name = "vault-unseal-policy"
|
||||
user = aws_iam_user.vault_user.name
|
||||
policy = jsonencode(
|
||||
{
|
||||
Version = "2012-10-17"
|
||||
Statement = [
|
||||
{
|
||||
Effect = "Allow"
|
||||
Action = [
|
||||
"kms:Decrypt",
|
||||
"kms:DescribeKey",
|
||||
"kms:Encrypt"
|
||||
]
|
||||
Resource = aws_kms_key.vault.arn
|
||||
}
|
||||
]
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
resource "aws_iam_user_policy" "sops_policy" {
|
||||
name = "sops-policy"
|
||||
user = aws_iam_user.sops_user.name
|
||||
policy = jsonencode(
|
||||
{
|
||||
Version = "2012-10-17"
|
||||
Statement = [
|
||||
{
|
||||
Effect = "Allow"
|
||||
Action = [
|
||||
"kms:Decrypt",
|
||||
"kms:DescribeKey",
|
||||
"kms:Encrypt"
|
||||
]
|
||||
Resource = aws_kms_key.sops.arn
|
||||
}
|
||||
]
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
output "vault_access_key_id" {
|
||||
value = aws_iam_access_key.vault_user_key.id
|
||||
}
|
||||
|
||||
output "vault_secret_access_key" {
|
||||
value = nonsensitive(aws_iam_access_key.vault_user_key.secret)
|
||||
}
|
||||
|
||||
output "vault_kms_key_id" {
|
||||
value = aws_kms_key.vault.key_id
|
||||
}
|
||||
|
||||
output "sops_access_key_id" {
|
||||
value = aws_iam_access_key.sops_user_key.id
|
||||
}
|
||||
|
||||
output "sops_secret_access_key" {
|
||||
value = nonsensitive(aws_iam_access_key.sops_user_key.secret)
|
||||
}
|
||||
|
||||
output "sops_kms_key_id" {
|
||||
value = aws_kms_key.sops.key_id
|
||||
}
|
||||
|
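A minimal sketch of the workflow for this module, using only standard Terraform CLI commands; the output names come from the file above, and note the secret values will land in local state since the backend is "local":

    cd terraform/aws/kms
    terraform init
    terraform apply
    terraform output -raw sops_access_key_id
    terraform output -raw sops_secret_access_key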
86
terraform/cloudflare/main.tf
Normal file
86
terraform/cloudflare/main.tf
Normal file
@ -0,0 +1,86 @@
|
||||
# terraform file to create dns resource in cloudflare
|
||||
terraform {
|
||||
required_version = ">= 1.8.0"
|
||||
required_providers {
|
||||
cloudflare = {
|
||||
source = "cloudflare/cloudflare"
|
||||
version = "~> 4.44"
|
||||
}
|
||||
}
|
||||
backend "s3" {
|
||||
endpoints = {
|
||||
s3 = "https://s3.balsillie.house:9000"
|
||||
}
|
||||
region = "home"
|
||||
bucket = "terraform"
|
||||
use_path_style = true
|
||||
key = "cloudflare/terraform.tfstate"
|
||||
shared_credentials_files = ["~/.aws/credentials"]
|
||||
profile = "terraform"
|
||||
skip_credentials_validation = true
|
||||
skip_region_validation = true
|
||||
skip_requesting_account_id = true
|
||||
}
|
||||
}
|
||||
|
||||
provider "cloudflare" {
|
||||
api_token = var.api_token
|
||||
}
|
||||
|
||||
data "cloudflare_accounts" "default" {}
|
||||
|
||||
locals {
|
||||
dns_records = {
|
||||
for index, record in distinct(var.dns_records) : # 'distinct' removes duplicate values from the list
|
||||
tostring(index) => record
|
||||
}
|
||||
mx_records = {
|
||||
for index, record in distinct(var.mx_records) : # 'distinct' removes duplicate values from the list
|
||||
tostring(index) => record
|
||||
}
|
||||
}
|
||||
|
||||
import {
|
||||
to = cloudflare_zone.balsillie_net
|
||||
id = var.zone_id
|
||||
}
|
||||
|
||||
import {
|
||||
to = cloudflare_zone_dnssec.balsillie_net
|
||||
id = var.zone_id
|
||||
}
|
||||
|
||||
resource "cloudflare_zone" "balsillie_net" {
|
||||
account_id = data.cloudflare_accounts.default.accounts[0].id
|
||||
zone = "balsillie.net"
|
||||
paused = false
|
||||
plan = "free"
|
||||
type = "full"
|
||||
}
|
||||
|
||||
resource "cloudflare_zone_dnssec" "balsillie_net" {
|
||||
zone_id = cloudflare_zone.balsillie_net.id
|
||||
}
|
||||
|
||||
resource "cloudflare_record" "dns_records" {
|
||||
for_each = local.dns_records
|
||||
zone_id = cloudflare_zone.balsillie_net.id
|
||||
proxied = false
|
||||
name = each.value.name
|
||||
type = each.value.type
|
||||
content = each.value.content
|
||||
ttl = each.value.ttl
|
||||
}
|
||||
|
||||
resource "cloudflare_record" "mx_records" {
|
||||
for_each = local.mx_records
|
||||
zone_id = cloudflare_zone.balsillie_net.id
|
||||
proxied = false
|
||||
name = each.value.name
|
||||
type = each.value.type
|
||||
content = each.value.content
|
||||
priority = each.value.priority
|
||||
ttl = each.value.ttl
|
||||
}
|
||||
|
||||
# TODO: update the SOA record when the dns_records resource changes
|
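Because the configuration uses import blocks for the existing zone, the first plan should report the zone and its DNSSEC resource as imports rather than creations. A minimal sketch, assuming secrets.auto.tfvars has already been created from the example file that follows:

    cd terraform/cloudflare
    terraform init
    terraform plan    # expect cloudflare_zone.balsillie_net to be imported, not created
    terraform apply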
4
terraform/cloudflare/secrets.auto.example
Normal file
4
terraform/cloudflare/secrets.auto.example
Normal file
@ -0,0 +1,4 @@
|
||||
# Rename this file to "secrets.auto.tfvars"
|
||||
|
||||
api_token = "CHANGE ME"
|
||||
zone_id = "CHANGE ME"
|
48
terraform/cloudflare/variable_definitions.tf
Normal file
48
terraform/cloudflare/variable_definitions.tf
Normal file
@ -0,0 +1,48 @@
|
||||
variable "api_token" {
|
||||
description = "Cloudflare account API token"
|
||||
type = string
|
||||
default = ""
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
variable "zone_id" {
|
||||
description = "The DNS zone id as siplayed in cloudflare dashboard."
|
||||
type = string
|
||||
default = ""
|
||||
sensitive = false
|
||||
}
|
||||
|
||||
variable "dns_records" {
|
||||
description = "DNS A records to create"
|
||||
type = list(object({
|
||||
name = string
|
||||
type = string
|
||||
content = string
|
||||
ttl = number
|
||||
}))
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "mx_records" {
|
||||
description = "DNS MX records to create"
|
||||
type = list(object({
|
||||
name = string
|
||||
type = string
|
||||
content = string
|
||||
priority = number
|
||||
ttl = number
|
||||
}))
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "caa_records" {
|
||||
description = "DNS CAA records to create"
|
||||
type = list(object({
|
||||
name = string
|
||||
type = string
|
||||
content = string
|
||||
# priority = number
|
||||
ttl = number
|
||||
}))
|
||||
default = []
|
||||
}
|
38
terraform/cloudflare/variables.auto.tfvars
Normal file
38
terraform/cloudflare/variables.auto.tfvars
Normal file
@ -0,0 +1,38 @@
|
||||
zone_id = "affce43bd72967adbe9ac9cc32c4532b"
|
||||
|
||||
dns_records = [
|
||||
{ name = "@", type = "TXT", content = "\"v=spf1 +a:wan.balsillie.house -all\"", ttl = 60 },
|
||||
{ name = "@", type = "TXT", content = "\"openpgp4fpr:2362b71cc210e435244d63dae81ed7810d966cd4\"", ttl = 60 },
|
||||
{ name = "_dmarc", type = "TXT", content = "\"v=DMARC1; p=reject; rua=mailto:postmaster@balsillie.net; ruf=mailto:postmaster@balsillie.net; sp=reject; fo=1; aspf=s; adkim=s; ri=259200\"", ttl = 60 },
|
||||
{ name = "_mta-sts", type = "TXT", content = "\"v=STSv1; id=1734552187\"", ttl = 60 },
|
||||
{ name = "_smtp._tls", type = "TXT", content = "\"v=TLSRPTv1; rua=mailto:postmaster@balsillie.net\"", ttl = 60 },
|
||||
{ name = "mail._domainkey", type = "TXT", content = "\"v=DKIM1; k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC+URc62p2hXgTgt+1NEo8tCm1SWYPXlnsO9vQdz3SqM6SUXyV/nuLzHQBriJwEnL7sXlmMvfu7JkY4wx/q4nZUVqJ6P8tV4qqRTlPYf9EOtzdPetvz24NVcI8Jh1qo06K/JXTPwGssSDnacfC6B14Q06JPC+1Kx28pOu8XLZSJpwIDAQAB\"", ttl = 60 },
|
||||
{ name = "@", type = "CNAME", content = "wan.balsillie.house", ttl = 60 },
|
||||
{ name = "account", type = "CNAME", content = "wan.balsillie.house", ttl = 60 },
|
||||
{ name = "auth", type = "CNAME", content = "wan.balsillie.house", ttl = 60 },
|
||||
{ name = "auth-admin", type = "CNAME", content = "wan.balsillie.house", ttl = 60 },
|
||||
{ name = "autoconfig", type = "CNAME", content = "wan.balsillie.house", ttl = 60 },
|
||||
{ name = "cloud", type = "CNAME", content = "wan.balsillie.house", ttl = 60 },
|
||||
{ name = "code", type = "CNAME", content = "wan.balsillie.house", ttl = 60 },
|
||||
{ name = "im", type = "CNAME", content = "wan.balsillie.house", ttl = 60 },
|
||||
{ name = "imap", type = "CNAME", content = "wan.balsillie.house", ttl = 60 },
|
||||
{ name = "matrix", type = "CNAME", content = "wan.balsillie.house", ttl = 60 },
|
||||
{ name = "matrix-auth", type = "CNAME", content = "wan.balsillie.house", ttl = 60 },
|
||||
{ name = "matrix-federation", type = "CNAME", content = "wan.balsillie.house", ttl = 60 },
|
||||
{ name = "mta-sts", type = "CNAME", content = "wan.balsillie.house", ttl = 60 },
|
||||
{ name = "notify", type = "CNAME", content = "wan.balsillie.house", ttl = 60 },
|
||||
{ name = "office", type = "CNAME", content = "wan.balsillie.house", ttl = 60 },
|
||||
{ name = "openpgpkey", type = "CNAME", content = "wan.balsillie.house", ttl = 60 },
|
||||
{ name = "smtp", type = "CNAME", content = "wan.balsillie.house", ttl = 60 },
|
||||
{ name = "social", type = "CNAME", content = "wan.balsillie.house", ttl = 60 },
|
||||
{ name = "gallery", type = "CNAME", content = "wan.balsillie.house", ttl = 60 }
|
||||
]
|
||||
|
||||
mx_records = [
|
||||
{ name = "@", type = "MX", content = "smtp.balsillie.net.", priority = 0, ttl = 60 }
|
||||
]
|
||||
|
||||
caa_records = [
|
||||
{ name = "@", type = "CAA", content = "0 issuewild ';'", ttl = 60 },
|
||||
{ name = "@", type = "CAA", content = "0 issue \"letsencrypt.org\"", ttl = 60 }
|
||||
]
|
21
todo/aur_repo.todo
Normal file
21
todo/aur_repo.todo
Normal file
@ -0,0 +1,21 @@
|
||||
✔ add aur-builder sudoers entry @done(24-04-23 15:22)
|
||||
|
||||
aur-builder ALL = (root) NOPASSWD: /usr/bin/pacman, /usr/bin/pacsync
|
||||
|
||||
✔ add aur sync command @done(24-04-23 14:09)
|
||||
|
||||
sudo -u aur-builder aur sync --no-view -CnrS sonarr
|
||||
|
||||
✔ enable aur sync service/timer @done(24-04-23 15:22)
|
||||
|
||||
|
||||
aur sync switch breakdown:
|
||||
--no-view Do not present build files for inspection
|
||||
--upgrades -u Update all obsolete packages
|
||||
--no-confirm -n Do not wait for user input when installing or removing build dependencies
|
||||
--clean -C Clean up build files after building
|
||||
--rm-deps -r Remove dependencies installed by makepkg
|
||||
--sign -S Sign built packages
|
||||
--database -d Use the specified repository, per its name in pacman.conf
|
||||
|
||||
aur sync --no-view --upgrades --no-confirm --clean --rm-deps --sign --database home
|
1
todo/torrent.todo
Normal file
1
todo/torrent.todo
Normal file
@ -0,0 +1 @@
|
||||
☐ Fix nginx reverse proxy config
|