
zfs node work in prep for open-ebs zfs

This commit is contained in:
Michael Balsillie 2023-01-10 02:19:24 +10:00
parent 5eb52e7adb
commit 14a126afa0
9 changed files with 248 additions and 1 deletion

View File

@ -5,4 +5,34 @@
ansible_connection: ssh
ansible_become_method: sudo
ansible_become_user: root
ansible_port: 22
zfs_packages:
  - zfs-utils
  - zfs-dkms
  - mbuffer
  - smartmontools
  - linux-lts-headers
zfs_key_id: DDF7DB817396A49B2A2723F7403BD972F75D9D76
zfs_pools:
  - name: ssd
    ashift: 13
    type: ""
    disks: /dev/vde
    compression: lz4 # the create zpools task references item.compression
    datasets:
      - name: ssd
        encrypt: false
      - name: ssd/data
        encrypt: false
      - name: ssd/data/open-ebs
        encrypt: false
  # - name: hdd
  #   ashift: 12
  #   type: mirror
  #   disks: /dev/sda /dev/sdb
  #   compression: lz4
  #   datasets:
  #     - name: hdd
  #       encrypt: false
  #     - name: hdd/data
  #       encrypt: true
  #     - name: hdd/data/open-ebs
  #       encrypt: false

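For reference, with the ssd pool vars above (single-disk pool, since type is empty) the create zpools task in the role below expands to roughly the following command. This is a sketch of the rendered template, not Ansible's literal output:

zpool create \
  -o ashift=13 \
  -o autotrim=on \
  -o cachefile=/etc/zfs/zpool.cache \
  -O acltype=posixacl -O atime=off -O xattr=sa \
  -O mountpoint=none -O canmount=off -O devices=off \
  -O compression=lz4 \
  ssd /dev/vde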
View File

@ -0,0 +1,15 @@
---
aur_zfs_key_fingerprint: '4F3BA9AB6D1F8D683DC2DFB56AD860EED4598027'
zfs_prereq_packages:
  - gnupg
  - linux-lts-headers
zfs_arc_min: '1073741824'
zfs_arc_max: '4294967296'
zfs_zpool_ashift: '12'
zfs_zpool_name: ssd
zfs_zpool_compression: lz4
zfs_zpool_type: mirror
zfs_zpool_disk_a: /dev/disk/by-id/ata-Samsung_SSD_850_PRO_2TB_S3D4NX0J503633V
zfs_zpool_disk_b: /dev/disk/by-id/ata-Samsung_SSD_850_PRO_2TB_S3D4NX0J708201E
zfs_backup_dataset: ssd/backup

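zfs_arc_min and zfs_arc_max are raw byte counts: 1073741824 is 1 GiB and 4294967296 is 4 GiB. The arithmetic, as a quick shell check:

echo $((1 * 1024**3))   # 1073741824 -> zfs_arc_min (1 GiB)
echo $((4 * 1024**3))   # 4294967296 -> zfs_arc_max (4 GiB)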
View File

@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE+6ruP8XcCD3nWS9z0hp+Hnf6pxoL1nF4I0L9g9/3Sr zfs-recv@lab.balsillie.net

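Given the zfs-recv comment on the key and the ssd/backup dataset in the vars above, this key looks intended for zfs send/receive backups. A hedged sketch of how such a key is often locked down in authorized_keys on the receiving host; the forced command and target dataset are assumptions, not something this commit configures:

command="zfs receive -u ssd/backup",restrict ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE+6ruP8XcCD3nWS9z0hp+Hnf6pxoL1nF4I0L9g9/3Sr zfs-recv@lab.balsillie.net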
View File

@ -0,0 +1,147 @@
---
- name: check if zfs kernel module exists
  ansible.builtin.stat:
    path: /lib/modules/{{ ansible_kernel }}/updates/dkms/zfs.ko.zst
  register: zfs_module

- name: zfs install block
  when: not zfs_module.stat.exists
  block:
    - name: add zfs repo to pacman.conf
      ansible.builtin.blockinfile:
        path: /etc/pacman.conf
        state: present
        insertafter: "EOF"
        block: |
          [archzfs]
          Server = http://mirror.sum7.eu/archlinux/archzfs/$repo/x86_64
          Server = https://mirror.biocrafting.net/archlinux/archzfs/$repo/x86_64

    - name: download archzfs repo key
      ansible.builtin.uri:
        dest: "/tmp/{{ zfs_key_id }}"
        url: https://archzfs.com/archzfs.gpg
        creates: "/tmp/{{ zfs_key_id }}"

    - name: install and lsign zfs key
      ansible.builtin.shell:
        cmd: "{{ item }}"
      with_items:
        - pacman-key --add /tmp/{{ zfs_key_id }}
        - pacman-key --lsign-key {{ zfs_key_id }}

    - name: update system
      community.general.pacman:
        upgrade: true
        update_cache: true

    - name: install zfs dkms module
      community.general.pacman:
        name: "{{ zfs_packages }}"
        state: latest

    - name: set zfs module parameters
      ansible.builtin.template:
        src: zfs.conf.j2
        dest: /etc/modprobe.d/zfs.conf
        owner: root
        group: root
        mode: '0664'

    - name: load zfs module
      community.general.modprobe:
        name: zfs
        state: present

    - name: enable zfs services
      become: true
      ansible.builtin.service:
        name: "{{ item }}"
        state: started
        enabled: true
      loop:
        - zfs-import-cache.service
        - zfs-mount.service
        - zfs.target

    - name: reboot system
      ansible.builtin.reboot:
        post_reboot_delay: 90
- name: gather existing zpool facts
  community.general.zpool_facts:

# - name: create zpool list
#   ansible.builtin.set_fact:
#     zpool_list: []

# - name: add zpools to list
#   ansible.builtin.set_fact:
#     zpool_list: "{{ zpool_list + [item.name] }}"
#   with_items: "{{ ansible_zfs_pools }}"

- name: create zpools
  ansible.builtin.shell:
    cmd: |
      zpool create \
        -o ashift={{ item.ashift }} \
        -o autotrim=on \
        -o cachefile=/etc/zfs/zpool.cache \
        -O acltype=posixacl \
        -O atime=off \
        -O xattr=sa \
        -O mountpoint=none \
        -O canmount=off \
        -O devices=off \
        -O compression={{ item.compression }} \
        {{ item.name }} {{ item.type }} {{ item.disks }}
  # map(attribute="name") yields the existing pool names; selectattr("name")
  # returned the fact dicts themselves, so the membership test never matched
  when: item.name not in (ansible_zfs_pools | map(attribute="name") | list)
  with_items: "{{ zfs_pools }}"

- name: gather existing zfs dataset facts
  community.general.zfs_facts:
    name: "{{ item.name }}"
    recurse: true
    type: filesystem
  with_items: "{{ zfs_pools }}"

- name: create zfs datasets
  community.general.zfs:
    name: "{{ item.1.name }}"
    state: present
    extra_zfs_properties:
      canmount: "off"
      mountpoint: none
      primarycache: none
      secondarycache: none
      reservation: none
      refreservation: none
      dedup: "off"
      encryption: "off"
      volmode: dev
      devices: "off"
      atime: "off"
  loop: "{{ zfs_pools | subelements('datasets') }}"
  when: item.1.name not in (ansible_zfs_datasets | map(attribute="name") | list)

# Adjust offset from 1H to 1D in zfs-scrub-monthly@<pool>.timer
# TODO enable/start zfs-scrub-monthly@<pool>.timer
# TODO configure /etc/zfs/zed.d/zed.rc
# TODO enable/start zfs-zed.service
# TODO possibly configure /etc/conf.d/smartdargs
# TODO configure /etc/smartd.conf
# TODO enable/start smartd.service

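After a run, the result can be sanity-checked on the node; a minimal check, assuming the ssd pool from the host vars:

zpool status ssd
zfs list -r -o name,canmount,mountpoint,compression ssd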
View File

@ -0,0 +1,8 @@
Key-Type: 1
Key-Length: 3072
Subkey-Type: 1
Subkey-Length: 3072
Name-Real: Local Administrator
Name-Email: root@{{ ansible_host }}
Expire-Date: 0
%no-protection

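This is GnuPG unattended (batch) key-generation input: Key-Type 1 is RSA, and %no-protection skips the passphrase. Once rendered, it would typically be fed to gpg as below; the rendered path is an assumption:

gpg --batch --generate-key /root/gpg-keygen.conf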
View File

@ -0,0 +1 @@
DEVICESCAN -a -o on -S on -s (S/../.././03|L/../01/./04) -W 4,45,60 -m notificationrecipient@emailprovider.com -M exec /usr/local/bin/smartdnotify.sh

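The -s regex schedules a short self-test daily at 03:00 and a long self-test on the 1st of each month at 04:00. The directive can be validated without waiting for the schedule: smartd -q onecheck registers devices once and exits, and temporarily adding -M test after the -m option sends a test notification on startup:

smartd -q onecheck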
View File

@ -0,0 +1,27 @@
#!/bin/bash
# Create a file to store the message in. Give it a unique filename to avoid
# issues if this script runs twice simultaneously.
msgFile="/root/$(uuidgen).txt"
# Truncate the file, then set the subject line and HTML formatting
echo "Subject: $SMARTD_SUBJECT" > "$msgFile"
echo "Content-Type: text/html" >> "$msgFile"
echo "<html>" >> "$msgFile"
echo "<style type=\"text/css\">pre { font-family:monospace }</style>" >> "$msgFile"
echo "<body>" >> "$msgFile"
echo "<pre>" >> "$msgFile"
# Append the email message smartd passes on STDIN:
cat >> "$msgFile"
# Append the output of zpool status:
/usr/sbin/zpool status >> "$msgFile"
# Append the output of smartctl -a for the affected device:
/usr/sbin/smartctl -a "$SMARTD_DEVICE" >> "$msgFile"
# Close the HTML tags
echo "</pre></body></html>" >> "$msgFile"
# Email the message to the address smartd supplies in SMARTD_ADDRESS:
/bin/msmtp "$SMARTD_ADDRESS" < "$msgFile"
# Clean up the temporary file once sent
rm -f "$msgFile"

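The script can also be exercised by hand by faking the environment smartd sets; the device and recipient below are placeholders:

echo "test body" | SMARTD_SUBJECT="smartd self-test" SMARTD_DEVICE=/dev/sda \
  SMARTD_ADDRESS=root@localhost /usr/local/bin/smartdnotify.sh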
View File

@ -0,0 +1,2 @@
options zfs zfs_arc_min={{ zfs_arc_min }}
options zfs zfs_arc_max={{ zfs_arc_max }}

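These options take effect when the zfs module loads (hence the reboot in the tasks). Current values can be read, and zfs_arc_max tuned live, through sysfs:

cat /sys/module/zfs/parameters/zfs_arc_min
echo 4294967296 > /sys/module/zfs/parameters/zfs_arc_max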
View File

@ -0,0 +1,16 @@
created a zfs zvol on hv00 ssd:

sudo zfs create -V 100G \
  -o compression=off \
  -o volblocksize=16k \
  -o reservation=none \
  -o refreservation=none \
  -o primarycache=metadata \
  -o secondarycache=none \
  -o volmode=dev \
  nvme/libvirt/vhds/data/kube0

then:
- added zfs storage pool to libvirt
- added zvol as disk (vde); see the virsh sketch below
- added zfs repo to kube01
- created zpool from vde
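For the "added zvol as disk (vde)" step, one possible virsh invocation using the zvol's device node; the guest name kube01 and the flags are assumptions, not recorded in these notes:

virsh attach-disk kube01 /dev/zvol/nvme/libvirt/vhds/data/kube0 vde \
  --targetbus virtio --persistent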