mirror of
https://git.lapiole.org/dani/ansible-roles.git
synced 2025-07-30 11:15:42 +02:00
Update to 2021-12-01 19:13
This commit is contained in:
67
roles/zfs/defaults/main.yml
Normal file
67
roles/zfs/defaults/main.yml
Normal file
@@ -0,0 +1,67 @@
|
||||
---
|
||||
|
||||
# Those belong to zfs_common, they are just a reminder here
|
||||
# zfs_arc_min: 10%
|
||||
# zfs_arc_max: 50%
|
||||
|
||||
# You can define arbitrary parameters here
|
||||
zfs_mod_params: {}
|
||||
|
||||
# Below are tuning parameters for this role
|
||||
|
||||
# How often to scrub pools. Can be hourly, daily, weekly, monthly, yearly
|
||||
# or any other valid systemd time expression
|
||||
zfs_scrub_freq: monthly
|
||||
|
||||
# How often to trim pools
|
||||
zfs_trim_freq: weekly
|
||||
|
||||
# Auto snapshot parameters
|
||||
# How many "frequent" snapshots to keep. They are taken every 15 minutes
|
||||
zfs_snap_frequently: 8
|
||||
# How many hourly snapshots to keep
|
||||
zfs_snap_hourly: 6
|
||||
# How many daily snapshots to keep
|
||||
zfs_snap_daily: 7
|
||||
# How many weekly snapshots to keep
|
||||
zfs_snap_weekly: 2
|
||||
# How many monthly snapshots to keep
|
||||
zfs_snap_monthly: 2
|
||||
# How many yearly snapshots to keep
|
||||
zfs_snap_yearly: 0
|
||||
|
||||
# List of ZFS volumes to snapshot
|
||||
zfs_snap: []
|
||||
# You can override some freq settings here, eg
|
||||
# zfs_snap:
|
||||
# - name: tank/files
|
||||
# frequently: 30
|
||||
# monthly: 0
|
||||
# - name: tank/work
|
||||
# frequently: 5
|
||||
# hourly: 12
|
||||
# weekly: 4
|
||||
|
||||
# Send/Receive parameters
|
||||
# If zfs_repl_recv is True, a zfs-recv user will be created, to be used for receiving ZFS streams
|
||||
zfs_repl_recv: False
|
||||
# List of SSH keys allowed for ZFS receive
|
||||
zfs_repl_authorized_keys: []
|
||||
# List of IP addresses allowed for ZFS receive
|
||||
zfs_repl_src_ip: []
|
||||
|
||||
# For ZFS sender, configure which vol to send
|
||||
zfs_repl: []
|
||||
# zfs_repl:
|
||||
# - dataset: zpool
|
||||
# dest: zfs-recv@10.33.255.253:zpool
|
||||
#     id: stor-bkp # a unique string id, used to name the service. It must be unique, and one will be created if not defined
|
||||
# force: True # Force deletion of dest dataset if no snapshot matches
|
||||
# compress: lz4
|
||||
# recursive: True
|
||||
# ssh_cipher: aes128-ctr
|
||||
# ssh_port: 22
|
||||
# bw_limit: 0 # eg 10m for 10 MB/sec
|
||||
# skip_parent: True
|
||||
# syncoid_opts: ""
|
||||
# freq: hourly|daily|weekly|monthly (or any valid systemd-timer expression for OnCalendar)
|
9
roles/zfs/files/z_resume_scrubs
Normal file
9
roles/zfs/files/z_resume_scrubs
Normal file
@@ -0,0 +1,9 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
# Resume all suspended pool scrubs
|
||||
for ZPOOL in $(zpool list -H -o name); do
|
||||
if [ $(zpool status $ZPOOL | grep -c 'scrub paused') -ge 1 ]; then
|
||||
echo scrub paused for pool $ZPOOL, resuming it
|
||||
zpool scrub $ZPOOL
|
||||
fi
|
||||
done
|
9
roles/zfs/files/z_suspend_scrubs
Normal file
9
roles/zfs/files/z_suspend_scrubs
Normal file
@@ -0,0 +1,9 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
# Suspend any running pool scrubs
|
||||
for ZPOOL in $(zpool list -H -o name); do
|
||||
if [ $(zpool status $ZPOOL | grep -c 'scrub in progress') -ge 1 ]; then
|
||||
echo scrub running for pool $ZPOOL, suspending it
|
||||
zpool scrub -p $ZPOOL
|
||||
fi
|
||||
done
|
8
roles/zfs/handlers/main.yml
Normal file
8
roles/zfs/handlers/main.yml
Normal file
@@ -0,0 +1,8 @@
|
||||
---
|
||||
|
||||
- name: restart syncoid
|
||||
systemd: name=syncoid-{{ item.id }}.timer state=restarted
|
||||
loop: "{{ zfs_repl }}"
|
||||
|
||||
- name: restart sanoid
|
||||
systemd: name=sanoid.timer state=restarted
|
5
roles/zfs/meta/main.yml
Normal file
5
roles/zfs/meta/main.yml
Normal file
@@ -0,0 +1,5 @@
|
||||
---
|
||||
dependencies:
|
||||
- role: repo_zfs
|
||||
when: ansible_os_family == 'RedHat'
|
||||
- role: zfs_common
|
46
roles/zfs/tasks/install_Debian.yml
Normal file
46
roles/zfs/tasks/install_Debian.yml
Normal file
@@ -0,0 +1,46 @@
|
||||
---
|
||||
|
||||
- when: ansible_distribution_major_version is version('11', '<')
|
||||
block:
|
||||
- name: Install dependencies
|
||||
apt:
|
||||
name:
|
||||
- libcapture-tiny-perl
|
||||
- libconfig-inifiles-perl
|
||||
- pv
|
||||
- lzop
|
||||
- mbuffer
|
||||
|
||||
- name: Install sanoid
|
||||
apt: deb=http://ftp.fr.debian.org/debian/pool/main/s/sanoid/sanoid_2.0.3-4_all.deb
|
||||
|
||||
tags: zfs
|
||||
|
||||
- when: ansible_distribution_major_version is version('11', '>=')
|
||||
apt: name=sanoid
|
||||
tags: zfs
|
||||
|
||||
|
||||
- name: Setup sanoid service and timer
|
||||
template: src=sanoid.{{ item }}.j2 dest=/etc/systemd/system/sanoid.{{ item }}
|
||||
loop:
|
||||
- service
|
||||
- timer
|
||||
register: zfs_sanoid_units
|
||||
notify: restart sanoid
|
||||
tags: zfs
|
||||
|
||||
- name: Remove sanoid-prune dependency
|
||||
file: path=/etc/systemd/system/sanoid.service.wants/sanoid-prune.service state=absent
|
||||
register: zfs_sanoid_prune
|
||||
tags: zfs
|
||||
|
||||
- name: Disable cronjob
|
||||
copy: content='# Cron job disabled, service is managed by a systemd timer' dest=/etc/cron.d/sanoid
|
||||
tags: zfs
|
||||
|
||||
- name: Reload systemd
|
||||
systemd: daemon_reload=True
|
||||
when: zfs_sanoid_units.results | selectattr('changed','equalto',True) | list | length > 0 or zfs_sanoid_prune.changed
|
||||
tags: zfs
|
||||
|
10
roles/zfs/tasks/install_RedHat.yml
Normal file
10
roles/zfs/tasks/install_RedHat.yml
Normal file
@@ -0,0 +1,10 @@
|
||||
---
|
||||
|
||||
- name: Install packages
|
||||
yum:
|
||||
name:
|
||||
- zfs
|
||||
- sanoid
|
||||
when: ansible_os_family == 'RedHat'
|
||||
tags: zfs
|
||||
|
151
roles/zfs/tasks/main.yml
Normal file
151
roles/zfs/tasks/main.yml
Normal file
@@ -0,0 +1,151 @@
|
||||
---
|
||||
|
||||
- include: install_{{ ansible_os_family }}.yml
|
||||
|
||||
- name: load ZFS
|
||||
modprobe: name=zfs
|
||||
tags: zfs
|
||||
|
||||
- name: Create sanoid conf dir
|
||||
file: path=/etc/sanoid/ state=directory
|
||||
tags: zfs
|
||||
|
||||
- name: Deploy sanoid configuration
|
||||
template: src=sanoid.conf.j2 dest=/etc/sanoid/sanoid.conf
|
||||
tags: zfs
|
||||
|
||||
- name: Enable sanoid timer
|
||||
systemd: name=sanoid.timer state=started enabled=True
|
||||
tags: zfs
|
||||
|
||||
- import_tasks: ../includes/create_system_user.yml
|
||||
vars:
|
||||
- user: zfs-recv
|
||||
- comment: ZFS Receiver account
|
||||
- shell: /bin/bash
|
||||
tags: zfs
|
||||
|
||||
- name: Deploy sudo fragment for zfs-recv
|
||||
template: src=recv-sudo.j2 dest=/etc/sudoers.d/zfs_recv owner=root group=root mode=440 validate='visudo -cf %s'
|
||||
tags: zfs
|
||||
|
||||
- name: Deploy pool scrub suspend and resume scripts
|
||||
copy: src={{ item }} dest=/usr/local/bin/{{ item }} mode=755
|
||||
loop:
|
||||
- z_suspend_scrubs
|
||||
- z_resume_scrubs
|
||||
tags: zfs
|
||||
|
||||
- name: Deploy ssh keys for zfs-recv
|
||||
authorized_key:
|
||||
user: zfs-recv
|
||||
key: "{{ zfs_repl_recv | ternary(zfs_repl_authorized_keys | join(\"\n\"), '') }}"
|
||||
key_options: "no-port-forwarding,no-pty,no-X11-forwarding,no-agent-forwarding,from=\"{{ zfs_repl_src_ip | join(',') }}\""
|
||||
exclusive: True
|
||||
tags: zfs
|
||||
|
||||
- name: Add an id to replication jobs (if not defined)
|
||||
set_fact:
|
||||
zfs_repl_with_id: "{{ zfs_repl_with_id | default([]) + [ item | combine({ 'id': item.id | default(item.dataset | regex_replace('[^a-zA-Z0-9]', '-') + '-' + item.dest | regex_replace('[^a-zA-Z0-9]', '-')) }, recursive=True) ] }}"
|
||||
loop: "{{ zfs_repl }}"
|
||||
tags: zfs
|
||||
- set_fact: zfs_repl={{ zfs_repl_with_id | default([]) }}
|
||||
tags: zfs
|
||||
|
||||
- name: List existing syncoid units
|
||||
shell: find /etc/systemd/system/ -name syncoid-*.service -o -name syncoid-*.timer | xargs -n1 basename | perl -pe 's/syncoid\-(.*)\.(service|timer)$/$1/'
|
||||
register: zfs_existing_syncoid_units
|
||||
changed_when: False
|
||||
tags: zfs
|
||||
|
||||
- name: Disable unmanaged syncoid timer
|
||||
systemd: name=syncoid-{{ item }}.timer state=stopped enabled=False
|
||||
loop: "{{ zfs_existing_syncoid_units.stdout_lines }}"
|
||||
when: item not in (zfs_repl | map(attribute='id') | list)
|
||||
failed_when: False # unmanaged units might not have been picked up by a daemon-reload
|
||||
tags: zfs
|
||||
|
||||
- name: Remove unmanaged syncoid units
|
||||
file: path=/etc/systemd/system/syncoid-{{ item.0 }}.{{ item.1 }} state=absent
|
||||
with_nested:
|
||||
- "{{ zfs_existing_syncoid_units.stdout_lines }}"
|
||||
- [ 'service', 'timer' ]
|
||||
when: item.0 not in (zfs_repl | map(attribute='id') | list)
|
||||
register: zfs_syncoid_removed_units
|
||||
tags: zfs
|
||||
|
||||
- name: Deploy syncoid units
|
||||
template:
|
||||
src: syncoid.{{ item.0 }}.j2
|
||||
dest: /etc/systemd/system/syncoid-{{ item.1.id }}.{{ item.0 }}
|
||||
with_nested:
|
||||
- [ 'service', 'timer' ]
|
||||
- "{{ zfs_repl }}"
|
||||
register: zfs_syncoid_units
|
||||
notify: restart syncoid
|
||||
tags: zfs
|
||||
|
||||
- name: Reload systemd
|
||||
systemd: daemon_reload=True
|
||||
when: zfs_syncoid_units.results | selectattr('changed', 'equalto', True) | list | length > 0 or zfs_syncoid_removed_units.results | selectattr('changed', 'equalto', True) | list | length > 0
|
||||
tags: zfs
|
||||
|
||||
- name: Handle syncoid timer units
|
||||
systemd: name=syncoid-{{ item.id }}.timer state=started enabled=True
|
||||
loop: "{{ zfs_repl }}"
|
||||
tags: zfs
|
||||
|
||||
- name: Deploy ZFS scrub and trim template units
|
||||
template: src={{ item }}.j2 dest=/etc/systemd/system/{{ item }}
|
||||
loop:
|
||||
- zfs-scrub@.service
|
||||
- zfs-scrub@.timer
|
||||
- zfs-trim@.service
|
||||
- zfs-trim@.timer
|
||||
register: zfs_units
|
||||
tags: zfs
|
||||
|
||||
- name: Reload systemd
|
||||
systemd: daemon_reload=True
|
||||
when: zfs_units.changed
|
||||
tags: zfs
|
||||
|
||||
- name: List ZFS pools
|
||||
command: zpool list -H -o name
|
||||
register: zfs_existing_pools
|
||||
changed_when: False
|
||||
tags: zfs
|
||||
|
||||
- name: Enable ZFS scrub and trim timers
|
||||
systemd: name=zfs-{{ item.1 }}@{{ item.0 }}.timer state=started enabled=True
|
||||
with_nested:
|
||||
- "{{ zfs_existing_pools.stdout_lines }}"
|
||||
- ['scrub', 'trim']
|
||||
tags: zfs
|
||||
|
||||
- name: List ZFS scrub and trim timers
|
||||
shell: find /etc/systemd/system/timers.target.wants/ -maxdepth 1 -mindepth 1 -type l \( -name zfs-scrub@\*.timer -o -name zfs-trim@\*.timer \) -exec basename "{}" \; | sed 's/zfs-.*@\(.*\)\.timer/\1/'
|
||||
register: zfs_pool_timers
|
||||
changed_when: False
|
||||
tags: zfs
|
||||
|
||||
- name: Disable ZFS scrub and trim timers for non existing pools
|
||||
systemd: name=zfs-{{ item.1 }}@{{ item.0 }}.timer state=stopped enabled=False
|
||||
with_nested:
|
||||
- "{{ zfs_pool_timers.stdout_lines | difference(zfs_existing_pools.stdout_lines) }}"
|
||||
- ['scrub', 'trim']
|
||||
tags: zfs
|
||||
|
||||
- name: Fetch bash_completion support
|
||||
get_url:
|
||||
url: https://raw.githubusercontent.com/openzfs/zfs/master/contrib/bash_completion.d/zfs.in
|
||||
dest: /tmp/zfs_bash_completion
|
||||
tags: zfs
|
||||
|
||||
- name: Install bash_completion
|
||||
shell: sed -e 's/@sbindir@/\/sbin/' /tmp/zfs_bash_completion > /etc/bash_completion.d/zfs
|
||||
args:
|
||||
warn: False
|
||||
changed_when: False
|
||||
tags: zfs
|
||||
|
6
roles/zfs/templates/recv-sudo.j2
Normal file
6
roles/zfs/templates/recv-sudo.j2
Normal file
@@ -0,0 +1,6 @@
|
||||
# {{ ansible_managed }}
|
||||
{% if zfs_repl_recv %}
|
||||
Defaults:zfs-recv !requiretty
|
||||
Cmnd_Alias ZFS_RECV = /sbin/zfs
|
||||
zfs-recv ALL=(root) NOPASSWD: ZFS_RECV
|
||||
{% endif %}
|
19
roles/zfs/templates/sanoid.conf.j2
Normal file
19
roles/zfs/templates/sanoid.conf.j2
Normal file
@@ -0,0 +1,19 @@
|
||||
{% for zfs in zfs_snap %}
|
||||
[{{ zfs.name }}]
|
||||
use_template = default
|
||||
recursive = {{ zfs.recursive | default('yes') }}
|
||||
{% for key in zfs.keys() | list | difference(['name','recursive']) %}
|
||||
{{ key }} = {{ zfs[key] }}
|
||||
{% endfor %}
|
||||
|
||||
{% endfor %}
|
||||
|
||||
[template_default]
|
||||
frequently = {{ zfs_snap_frequently }}
|
||||
hourly = {{ zfs_snap_hourly }}
|
||||
daily = {{ zfs_snap_daily }}
|
||||
weekly = {{ zfs_snap_weekly }}
|
||||
monthly = {{ zfs_snap_monthly }}
|
||||
yearly = {{ zfs_snap_yearly }}
|
||||
autosnap = {{ zfs_repl_recv | ternary('no', 'yes') }}
|
||||
autoprune = yes
|
9
roles/zfs/templates/sanoid.service.j2
Normal file
9
roles/zfs/templates/sanoid.service.j2
Normal file
@@ -0,0 +1,9 @@
|
||||
[Unit]
|
||||
Description=Snapshot ZFS Pool
|
||||
Requires=zfs.target
|
||||
After=zfs.target
|
||||
|
||||
[Service]
|
||||
Environment=TZ=UTC
|
||||
Type=oneshot
|
||||
ExecStart=/usr/sbin/sanoid --cron
|
10
roles/zfs/templates/sanoid.timer.j2
Normal file
10
roles/zfs/templates/sanoid.timer.j2
Normal file
@@ -0,0 +1,10 @@
|
||||
[Unit]
|
||||
Description=Run Sanoid
|
||||
|
||||
[Timer]
|
||||
OnCalendar=*:0/1
|
||||
Persistent=true
|
||||
|
||||
[Install]
|
||||
WantedBy=timers.target
|
||||
|
10
roles/zfs/templates/syncoid.service.j2
Normal file
10
roles/zfs/templates/syncoid.service.j2
Normal file
@@ -0,0 +1,10 @@
|
||||
[Unit]
|
||||
Description=Sync ZFS datasets
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
PrivateTmp=yes
|
||||
ExecStart=/sbin/syncoid --identifier={{ item.1.id }}{% if item.1.force | default(True) %} --force-delete {% endif %}{% if item.1.compress | default('lz4') is string %}--compress={{ item.1.compress | default('lz4') }} {%elif item.1.compress | default('lz4') == True %}--compress {% endif %}{% if item.1.recursive | default(True) == True %}--recursive{% endif %} {% if item.1.skip_parent | default(True) == True %}--skip-parent {% endif %} --sshcipher={{ item.1.ssh_cipher | default('aes128-ctr') }} {% if item.1.ssh_port is defined %}--sshport={{ item.1.ssh_port }} {% endif %}{% if item.1.bw_limit is defined %}--source-bwlimit={{ item.1.bw_limit }} {% endif %}{{ item.1.syncoid_opts | default('') }} {{ item.1.dataset }} {{ item.1.dest }}
|
||||
{% if item.1.max_duration is defined %}
|
||||
TimeoutSec={{ item.1.max_duration }}
|
||||
{% endif %}
|
8
roles/zfs/templates/syncoid.timer.j2
Normal file
8
roles/zfs/templates/syncoid.timer.j2
Normal file
@@ -0,0 +1,8 @@
|
||||
[Unit]
|
||||
Description=Sync ZFS datasets
|
||||
|
||||
[Timer]
|
||||
OnCalendar={{ item.1.freq | default('daily') }}
|
||||
|
||||
[Install]
|
||||
WantedBy=timers.target
|
9
roles/zfs/templates/zfs-scrub@.service.j2
Normal file
9
roles/zfs/templates/zfs-scrub@.service.j2
Normal file
@@ -0,0 +1,9 @@
|
||||
[Unit]
|
||||
Description=ZFS pool scrub
|
||||
Requires=zfs.target
|
||||
After=zfs.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStartPre=-/sbin/zpool scrub -s %I
|
||||
ExecStart=/sbin/zpool scrub %I
|
9
roles/zfs/templates/zfs-scrub@.timer.j2
Normal file
9
roles/zfs/templates/zfs-scrub@.timer.j2
Normal file
@@ -0,0 +1,9 @@
|
||||
[Unit]
|
||||
Description=Regular ZFS pool scrub
|
||||
|
||||
[Timer]
|
||||
OnCalendar={{ zfs_scrub_freq }}
|
||||
Persistent=true
|
||||
|
||||
[Install]
|
||||
WantedBy=timers.target
|
9
roles/zfs/templates/zfs-trim@.service.j2
Normal file
9
roles/zfs/templates/zfs-trim@.service.j2
Normal file
@@ -0,0 +1,9 @@
|
||||
[Unit]
|
||||
Description=ZFS pool trim
|
||||
Requires=zfs.target
|
||||
After=zfs.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
PrivateTmp=yes
|
||||
ExecStart=/sbin/zpool trim %I
|
9
roles/zfs/templates/zfs-trim@.timer.j2
Normal file
9
roles/zfs/templates/zfs-trim@.timer.j2
Normal file
@@ -0,0 +1,9 @@
|
||||
[Unit]
|
||||
Description=Regular ZFS pool trim
|
||||
|
||||
[Timer]
|
||||
OnCalendar={{ zfs_trim_freq }}
|
||||
Persistent=true
|
||||
|
||||
[Install]
|
||||
WantedBy=timers.target
|
Reference in New Issue
Block a user