# Add the archzfs repo to pacman.conf (quoted 'EOF' keeps $repo/$arch literal
# so pacman expands them itself).
tee -a /etc/pacman.conf << 'EOF'
[archzfs]
SigLevel = Optional TrustAll
Server = https://archzfs.com/$repo/$arch
Server = https://mirror.sum7.eu/archlinux/archzfs/$repo/$arch
Server = https://mirror.biocrafting.net/archlinux/archzfs/$repo/$arch
Server = https://mirror.in.themindsmaze.com/archzfs/$repo/$arch
EOF
# Installation parameters — adjust these before running anything below.
INST_TZ=/usr/share/zoneinfo/CET     # target time zone
INST_HOST='put-your-hostname-here'  # target hostname (change this!)
INST_LINVAR='linux'                 # kernel package variant
# List stable disk ids so you can pick the target disk:
ls -d /dev/disk/by-id/* | grep -v part
DISK=/dev/disk/by-id/nvme-foo_NVMe_bar_512GB  # <-- set to YOUR disk
# Mount point for the new system. The original assigned a mktemp dir and then
# immediately clobbered it with /mnt, leaking the temp dir — pick exactly one:
#INST_MNT=$(mktemp -d)
INST_MNT=/mnt
# Random 6-char lowercase-alnum suffix so the pool name is unique per install.
INST_UUID=$(dd if=/dev/urandom of=/dev/stdout bs=1 count=100 2>/dev/null \
| tr -dc 'a-z0-9' \
| cut -c-6)
At this point you can overwrite the disk with random data for best privacy.
Partition the disk into a boot partition and a root partition. Boot partition should be 1-2GiB in size, and you can use the rest for the root partition.
Mark the boot partition as type ef (EFI) and the root partition as type bf (Solaris/ZFS).
From now on, I'll assume that your boot partition is ${DISK}1 or
${DISK}-part1, and the root partition is ${DISK}2 or
${DISK}-part2, depending on how you chose your disk variable.
Set the partition variables:
# Partition device paths derived from $DISK.
# Use ${DISK}-part1 / ${DISK}-part2 instead when $DISK is a /dev/disk/by-id name.
DISK_BOOT="${DISK}1"  # boot partition (LUKS+LVM, type ef)
DISK_ROOT="${DISK}2"  # root partition (ZFS pool, type bf)
You need to mark the boot partition as boot and lvm (assuming the boot
partition is the first partition on the disk):
# Flag partition 1 as bootable and as an LVM member (-s = non-interactive).
# Assumes the boot partition is the first partition on the disk.
parted -s $DISK set 1 boot on
parted -s $DISK set 1 lvm on
To make sure that the right kernel modules are loaded, you can run:
# Benchmarking has the side effect of loading the needed crypto kernel modules.
cryptsetup benchmark
When that completes, run the following command to create and format the
LUKS partition (feel free to change the encryption parameters):
# Create the LUKS container on the boot partition. DESTRUCTIVE.
# luks1 is used because GRUB's cryptodisk support targets the LUKS1 format.
# --iter-time 10000: ~10 s of PBKDF per unlock (slow on purpose);
# --use-random: block on the kernel entropy pool when generating the master key.
cryptsetup --verbose \
--type luks1 \
--cipher aes-xts-plain64 \
--key-size 512 \
--hash sha512 \
--iter-time 10000 \
--use-random \
--verify-passphrase \
luksFormat $DISK_BOOT
After that, open and mount the partition using the device mapper
# Unlock the container; the plaintext device appears at /dev/mapper/lvm-boot.
cryptsetup luksOpen $DISK_BOOT lvm-boot
Now create a physical volume using the Logical Volume Manager (LVM) and the
previously used id lvm-boot as follows:
# Initialize the unlocked LUKS mapping as an LVM physical volume.
pvcreate /dev/mapper/lvm-boot
Having the physical volume, it is possible to create a logical volume group
named lvmBoot as follows:
# Create the volume group lvmBoot on top of that physical volume.
vgcreate lvmBoot /dev/mapper/lvm-boot
And having the logical volume group, the logical volumes can be created as
follows:
# Allocate a single contiguous LV spanning all free extents for /boot.
# NOTE: lvcreate takes a plain percentage; the signed form '+100%FREE' is only
# valid for lvextend/lvresize and makes lvcreate reject the command.
lvcreate --contiguous y --extents 100%FREE lvmBoot --name volBoot
Now format the boot partition:
# Format the boot logical volume as FAT with label BOOT.
mkfs.fat -n BOOT /dev/lvmBoot/volBoot
Create the encrypted root pool:
# Create the natively-encrypted root pool. DESTRUCTIVE.
# -R $INST_MNT sets a temporary altroot so datasets mount under the install
# mount point; encryption=aes-256-gcm with keyformat=passphrase prompts for a
# passphrase here and on every import. ashift=12 assumes 4 KiB physical
# sectors; xattr=sa + acltype=posixacl is the usual Linux root-fs pairing.
zpool create \
-o ashift=12 \
-O acltype=posixacl \
-O canmount=off \
-O compression=zstd \
-O dnodesize=auto \
-O normalization=formD \
-O relatime=on \
-O xattr=sa \
-O mountpoint=/ \
-R $INST_MNT \
-O encryption=aes-256-gcm \
-O keylocation=prompt \
-O keyformat=passphrase \
rpool_$INST_UUID \
$DISK_ROOT
Create container datasets:
# Non-mountable container datasets: ROOT holds boot environments, DATA holds
# user data that should survive root-filesystem rollbacks.
zfs create -o canmount=off -o mountpoint=none rpool_$INST_UUID/ROOT
zfs create -o canmount=off -o mountpoint=none rpool_$INST_UUID/DATA
Create root filesystem dataset:
# The actual root filesystem; canmount=noauto so only the chosen boot
# environment gets mounted at /.
zfs create -o mountpoint=/ -o canmount=noauto rpool_$INST_UUID/ROOT/default
Mount root filesystem dataset and boot partition:
# Mount the root dataset first, then the boot volume inside it.
zfs mount rpool_$INST_UUID/ROOT/default
mkdir $INST_MNT/boot
mount /dev/lvmBoot/volBoot $INST_MNT/boot
Create datasets to separate user data from root filesystem:
# Split user data out of the root filesystem so it survives root rollbacks.
zfs create -o mountpoint=/ -o canmount=off rpool_$INST_UUID/DATA/default
# Parent datasets that exist only to anchor their children's mountpoints.
for ds in usr var var/lib; do
  zfs create -o canmount=off rpool_$INST_UUID/DATA/default/$ds
done
# Datasets that actually hold data and are mounted.
for ds in home root srv usr/local var/log var/spool var/tmp; do
  zfs create -o canmount=on rpool_$INST_UUID/DATA/default/$ds
done
# Conventional permissions: /root private, /var/tmp sticky world-writable.
chmod 750 $INST_MNT/root
chmod 1777 $INST_MNT/var/tmp
Install base packages:
# Install the base system into the target (Artix's pacstrap equivalent).
# NOTE(review): the trailing 'rsm' looks like a truncated package name
# (rsync?) — verify the intended package list before running.
basestrap $INST_MNT base vim grub connman connman-runit runit elogind-runit rsm
# Add the archzfs repo to the *target* system's pacman.conf as well, so the
# installed system can update ZFS packages. Quoted 'EOF' keeps $repo/$arch
# literal for pacman.
tee -a $INST_MNT/etc/pacman.conf << 'EOF'
[archzfs]
SigLevel = Optional TrustAll
Server = https://archzfs.com/$repo/$arch
Server = https://mirror.sum7.eu/archlinux/archzfs/$repo/$arch
Server = https://mirror.biocrafting.net/archlinux/archzfs/$repo/$arch
Server = https://mirror.in.themindsmaze.com/archzfs/$repo/$arch
EOF
# Kernel + headers (headers are required for the zfs dkms build), then the
# ZFS/LVM/cryptsetup stack and their runit service scripts.
basestrap $INST_MNT $INST_LINVAR ${INST_LINVAR}-headers
basestrap $INST_MNT archzfs-dkms lvm2 cryptsetup
basestrap $INST_MNT device-mapper-runit lvm2-runit cryptsetup-runit
If your computer has hardware that requires firmware to run, also install the firmware package:
# Optional: firmware blobs for hardware that needs them.
basestrap $INST_MNT linux-firmware
Add boot partition to the fstab:
# Only the /boot line goes into fstab — ZFS datasets mount themselves.
fstabgen -U $INST_MNT | grep "/boot" >> $INST_MNT/etc/fstab
Configure mkinitcpio:
# Replace the stock mkinitcpio hook list, keeping a backup of the original.
mv $INST_MNT/etc/mkinitcpio.conf $INST_MNT/etc/mkinitcpio.conf.original
# 'keyboard' must come BEFORE 'encrypt' so external/USB keyboards work at the
# LUKS passphrase prompt; 'encrypt' precedes 'lvm2' (LVM sits on LUKS) and
# 'zfs' precedes 'filesystems' for the ZFS root.
tee $INST_MNT/etc/mkinitcpio.conf <<EOF
HOOKS=(base udev autodetect modconf block keyboard encrypt lvm2 zfs filesystems \
fsck)
EOF
Set the hostname:
# Write the hostname chosen in INST_HOST into the target system.
echo $INST_HOST > $INST_MNT/etc/hostname
Set the time zone:
# Point the target's /etc/localtime at the zoneinfo file chosen in INST_TZ.
ln -sf $INST_TZ $INST_MNT/etc/localtime
Locale:
# Enable en_US.UTF-8 for generation and make it the default locale.
echo "en_US.UTF-8 UTF-8" >> $INST_MNT/etc/locale.gen
echo "LANG=en_US.UTF-8" >> $INST_MNT/etc/locale.conf
Chroot:
# Enter the target; pass the disk/uuid variables through so the commands
# below can keep using them inside the chroot.
artix-chroot $INST_MNT /usr/bin/env DISK=$DISK DISK_ROOT=$DISK_ROOT \
DISK_BOOT=$DISK_BOOT INST_UUID=$INST_UUID bash --login
Mount datasets on boot (sleep loop is there to prevent runit from
constantly rerunning the service):
# Runit service that mounts all ZFS datasets at boot. The infinite sleep loop
# keeps the 'run' script alive so runit does not restart it repeatedly
# (runit expects long-running services, not oneshots).
mkdir -p /etc/runit/sv/zfs-mount-all
tee /etc/runit/sv/zfs-mount-all/run <<EOF
#!/bin/sh
zfs mount -a
while true
do
sleep 10
done
EOF
chmod +x /etc/runit/sv/zfs-mount-all/run
# Enable the service in the default runlevel.
ln -s /etc/runit/sv/zfs-mount-all /etc/runit/runsvdir/default
Generate locales:
# Build the locales enabled in /etc/locale.gen.
locale-gen
Generate zpool.cache:
# Record the pool in zpool.cache so it is imported automatically at boot.
zpool set cachefile=/etc/zfs/zpool.cache rpool_$INST_UUID
Set the root password:
# Set the root password interactively.
passwd
Generate initramfs:
# Build initramfs images for all installed kernel presets.
mkinitcpio -P
Change /etc/default/grub:
# Point the kernel at the LUKS boot container by UUID (keeps a .bak of the
# original file) and tell GRUB it must unlock encrypted disks itself.
sed -i.bak "s|GRUB_CMDLINE_LINUX=.*|GRUB_CMDLINE_LINUX=\"cryptdevice=UUID=$(blkid -s UUID -o value $DISK_BOOT):lvm-boot loglevel=3 quiet net.ifnames=0\"|" /etc/default/grub
echo 'GRUB_ENABLE_CRYPTODISK="y"' >> /etc/default/grub
If GRUB doesn't detect the root pool, you might need to do the following change:
# Workaround: make 10_linux derive the pool name from 'zdb -l' of the GRUB
# device instead of its default detection. Heavily escaped because the
# replacement itself embeds backticks, pipes and quotes — verify against your
# /etc/grub.d/10_linux version before applying.
sed -i "s|rpool=.*|rpool=\`zdb -l \${GRUB_DEVICE} \| grep -E '\\\\bname:' \| cut -d\\\' -f 2\`|" /etc/grub.d/10_linux
When persistent device names /dev/disk/by-id/* are used with
ZFS, GRUB will fail to resolve the path of the boot pool device. Error:
# /usr/bin/grub-probe: error: failed to get canonical path of `/dev/virtio-pci-0000:06:00.0-part3'.
The solution is:
# Make zpool report full /dev paths so grub-probe can resolve the boot device.
echo 'export ZPOOL_VDEV_NAME_PATH=YES' >> /etc/profile
source /etc/profile
Install GRUB to the disk:
# Install GRUB to the whole disk (BIOS/MBR style install).
grub-install $DISK
Generate GRUB config:
# Generate the GRUB configuration from the installed kernels.
grub-mkconfig -o /boot/grub/grub.cfg
Start the network and device-mapper daemons on boot:
# Enable the device-mapper event daemon and network manager in the default
# runit runlevel.
ln -s /etc/runit/sv/dmeventd /etc/runit/runsvdir/default
ln -s /etc/runit/sv/connmand /etc/runit/runsvdir/default
Exit the chroot, unmount the partitions and reboot:
# Leave the chroot, snapshot the pristine install for easy rollback,
# unmount /boot, export the pool cleanly, and reboot into the new system.
exit
zfs snapshot -r rpool_$INST_UUID/ROOT/default@install
umount $INST_MNT/boot
zpool export -a
reboot