diff --git a/example/zfs-with-vdevs.nix b/example/zfs-with-vdevs.nix new file mode 100644 index 00000000..a61680a6 --- /dev/null +++ b/example/zfs-with-vdevs.nix @@ -0,0 +1,113 @@ +{ + disko.devices = { + disk = { + x = { + type = "disk"; + device = "/dev/sdx"; + content = { + type = "gpt"; + partitions = { + ESP = { + size = "64M"; + type = "EF00"; + content = { + type = "filesystem"; + format = "vfat"; + mountpoint = "/boot"; + }; + }; + zfs = { + size = "100%"; + content = { + type = "zfs"; + pool = "zroot"; + }; + }; + }; + }; + }; + y = { + type = "disk"; + device = "/dev/sdy"; + content = { + type = "gpt"; + partitions = { + zfs = { + size = "100%"; + content = { + type = "zfs"; + pool = "zroot"; + }; + }; + }; + }; + }; + z = { + type = "disk"; + device = "/dev/sdz"; + content = { + type = "gpt"; + partitions = { + zfs = { + size = "100%"; + content = { + type = "zfs"; + pool = "zroot"; + }; + }; + }; + }; + }; + cache = { + type = "disk"; + device = "/dev/vdc"; + content = { + type = "gpt"; + partitions = { + zfs = { + size = "100%"; + content = { + type = "zfs"; + pool = "zroot"; + }; + }; + }; + }; + }; + }; + zpool = { + zroot = { + type = "zpool"; + mode = { + topology = { + type = "topology"; + vdev = [ + { + mode = "mirror"; + members = [ "x" "y" ]; + } + ]; + special = { + members = [ "z" ]; + }; + cache = [ "cache" ]; + }; + }; + + rootFsOptions = { + compression = "zstd"; + "com.sun:auto-snapshot" = "false"; + }; + mountpoint = "/"; + datasets = { + # See examples/zfs.nix for more comprehensive usage. + zfs_fs = { + type = "zfs_fs"; + mountpoint = "/zfs_fs"; + options."com.sun:auto-snapshot" = "true"; + }; + }; + }; + }; + }; +} diff --git a/lib/default.nix b/lib/default.nix index 58467f60..08a5e56e 100644 --- a/lib/default.nix +++ b/lib/default.nix @@ -197,6 +197,7 @@ let || isAttrsOfSubmodule o # TODO don't hardcode diskoLib.subType options. 
|| n == "content" || n == "partitions" || n == "datasets" || n == "swap" + || n == "mode" ); in lib.toShellVars diff --git a/lib/types/zpool.nix b/lib/types/zpool.nix index f690c984..0510b940 100644 --- a/lib/types/zpool.nix +++ b/lib/types/zpool.nix @@ -1,4 +1,15 @@ { config, options, lib, diskoLib, rootMountPoint, ... }: +let + # TODO: Consider expanding to handle `disk` `file` and `draid` mode options. + modeOptions = [ + "" + "mirror" + "raidz" + "raidz1" + "raidz2" + "raidz3" + ]; +in { options = { name = lib.mkOption { @@ -13,14 +24,76 @@ description = "Type"; }; mode = lib.mkOption { - type = lib.types.enum [ - "" - "mirror" - "raidz" - "raidz2" - "raidz3" - ]; default = ""; + type = (lib.types.oneOf [ + (lib.types.enum modeOptions) + (lib.types.attrsOf (diskoLib.subType { + types = { + topology = + let + vdev = lib.types.submodule ({ name, ... }: { + options = { + mode = lib.mkOption { + type = lib.types.enum modeOptions; + default = ""; + description = "Mode of the zfs vdev"; + }; + members = lib.mkOption { + type = lib.types.listOf lib.types.str; + description = "Members of the vdev"; + }; + }; + }); + parent = config; + in + lib.types.submodule + ({ config, name, ... }: { + options = { + type = lib.mkOption { + type = lib.types.enum [ "topology" ]; + default = "topology"; + internal = true; + description = "Type"; + }; + # zfs device types + vdev = lib.mkOption { + type = lib.types.listOf vdev; + default = [ ]; + description = '' + A list of storage vdevs. See + https://openzfs.github.io/openzfs-docs/man/master/7/zpoolconcepts.7.html#Virtual_Devices_(vdevs) + for details. + ''; + example = [{ + mode = "mirror"; + members = [ "x" "y" "/dev/sda1" ]; + }]; + }; + special = lib.mkOption { + type = lib.types.nullOr vdev; + default = null; + description = '' + A vdev definition for a special device. See + https://openzfs.github.io/openzfs-docs/man/master/7/zpoolconcepts.7.html#special + for details. 
+ ''; + }; + cache = lib.mkOption { + type = lib.types.listOf lib.types.str; + default = [ ]; + description = '' + A dedicated zfs cache device (L2ARC). See + https://openzfs.github.io/openzfs-docs/man/master/7/zpoolconcepts.7.html#Cache_Devices + for details. + ''; + }; + # TODO: Consider supporting log, spare, and dedup options. + }; + }); + }; + extraArgs.parent = config; + })) + ]); description = "Mode of the ZFS pool"; }; options = lib.mkOption { @@ -60,42 +133,88 @@ }; _create = diskoLib.mkCreateOption { inherit config options; - default = '' - readarray -t zfs_devices < <(cat "$disko_devices_dir"/zfs_${config.name}) - # Try importing the pool without mounting anything if it exists. - # This allows us to set mounpoints. - if zpool import -N -f '${config.name}' || zpool list '${config.name}'; then - echo "not creating zpool ${config.name} as a pool with that name already exists" >&2 - else - continue=1 - for dev in "''${zfs_devices[@]}"; do - if ! blkid "$dev" >/dev/null; then - # blkid fails, so device seems empty - : - elif (blkid "$dev" -o export | grep '^PTUUID='); then - echo "device $dev already has a partuuid, skipping creating zpool ${config.name}" >&2 - continue=0 - elif (blkid "$dev" -o export | grep '^TYPE=zfs_member'); then - # zfs_member is a zfs partition, so we try to add the device to the pool - : - elif (blkid "$dev" -o export | grep '^TYPE='); then - echo "device $dev already has a partition, skipping creating zpool ${config.name}" >&2 - continue=0 + default = + let + formatOutput = mode: members: '' + entries+=("${mode}=${ + lib.concatMapStringsSep " " + (d: if lib.strings.hasPrefix "/" d then d else "/dev/disk/by-partlabel/disk-${d}-zfs") members + }") + ''; + formatVdev = vdev: formatOutput vdev.mode vdev.members; + hasTopology = !(builtins.isString config.mode); + mode = if hasTopology then "prescribed" else config.mode; + topology = lib.optionalAttrs hasTopology config.mode.topology; + in + '' + readarray -t zfs_devices < <(cat 
"$disko_devices_dir"/zfs_${config.name}) + # Try importing the pool without mounting anything if it exists. + # This allows us to set mountpoints. + if zpool import -N -f '${config.name}' || zpool list '${config.name}'; then + echo "not creating zpool ${config.name} as a pool with that name already exists" >&2 + else + continue=1 + for dev in "''${zfs_devices[@]}"; do + if ! blkid "$dev" >/dev/null; then + # blkid fails, so device seems empty + : + elif (blkid "$dev" -o export | grep '^PTUUID='); then + echo "device $dev already has a partuuid, skipping creating zpool ${config.name}" >&2 + continue=0 + elif (blkid "$dev" -o export | grep '^TYPE=zfs_member'); then + # zfs_member is a zfs partition, so we try to add the device to the pool + : + elif (blkid "$dev" -o export | grep '^TYPE='); then + echo "device $dev already has a partition, skipping creating zpool ${config.name}" >&2 + continue=0 + fi + done + if [ $continue -eq 1 ]; then + topology="" + # For shell check + mode="${mode}" + if [ "$mode" != "prescribed" ]; then + topology="${mode} ''${zfs_devices[*]}" + else + entries=() + ${lib.optionalString (hasTopology && topology.vdev != null) + (lib.concatMapStrings formatVdev topology.vdev)} + ${lib.optionalString (hasTopology && topology.special != null) + (formatOutput "special ${topology.special.mode}" topology.special.members)} + ${lib.optionalString (hasTopology && topology.cache != []) + (formatOutput "cache" topology.cache)} + all_devices=() + for line in "''${entries[@]}"; do + # lineformat is mode=device1 device2 device3 + mode=''${line%%=*} + devs=''${line#*=} + IFS=' ' read -r -a devices <<< "$devs" + all_devices+=("''${devices[@]}") + topology+=" ''${mode} ''${devices[*]}" + done + # all_devices sorted should equal zfs_devices sorted + all_devices_list=$(echo "''${all_devices[*]}" | tr ' ' '\n' | sort) + zfs_devices_list=$(echo "''${zfs_devices[*]}" | tr ' ' '\n' | sort) + if [[ "$all_devices_list" != "$zfs_devices_list" ]]; then + echo "not all disks 
accounted for, skipping creating zpool ${config.name}" >&2 + diff <(echo "$all_devices_list" ) <(echo "$zfs_devices_list") >&2 + continue=0 + fi + fi fi - done - if [ $continue -eq 1 ]; then - zpool create -f ${config.name} \ - -R ${rootMountPoint} ${config.mode} \ - ${lib.concatStringsSep " " (lib.mapAttrsToList (n: v: "-o ${n}=${v}") config.options)} \ - ${lib.concatStringsSep " " (lib.mapAttrsToList (n: v: "-O ${n}=${v}") config.rootFsOptions)} \ - "''${zfs_devices[@]}" - if [[ $(zfs get -H mounted ${config.name} | cut -f3) == "yes" ]]; then - zfs unmount ${config.name} + if [ $continue -eq 1 ]; then + zpool create -f ${config.name} \ + -R ${rootMountPoint} \ + ${lib.concatStringsSep " " (lib.mapAttrsToList (n: v: "-o ${n}=${v}") config.options)} \ + ${lib.concatStringsSep " " (lib.mapAttrsToList (n: v: "-O ${n}=${v}") config.rootFsOptions)} \ + ''${topology:+ $topology} + if [[ $(zfs get -H mounted ${config.name} | cut -f3) == "yes" ]]; then + zfs unmount ${config.name} + fi fi fi - fi - ${lib.concatMapStrings (dataset: dataset._create) (lib.attrValues config.datasets)} - ''; + ${lib.concatMapStrings (dataset: dataset._create) (lib.attrValues config.datasets)} + ''; }; _mount = diskoLib.mkMountOption { inherit config options; diff --git a/tests/zfs-with-vdevs.nix b/tests/zfs-with-vdevs.nix new file mode 100644 index 00000000..dbca4fc9 --- /dev/null +++ b/tests/zfs-with-vdevs.nix @@ -0,0 +1,33 @@ +{ pkgs ? import { } +, diskoLib ? 
pkgs.callPackage ../lib { } +}: +diskoLib.testLib.makeDiskoTest { + inherit pkgs; + name = "zfs-with-vdevs"; + disko-config = ../example/zfs-with-vdevs.nix; + extraInstallerConfig.networking.hostId = "8425e349"; + extraSystemConfig = { + networking.hostId = "8425e349"; + }; + extraTestScript = '' + def assert_property(ds, property, expected_value): + out = machine.succeed(f"zfs get -H {property} {ds} -o value").rstrip() + assert ( + out == expected_value + ), f"Expected {property}={expected_value} on {ds}, got: {out}" + + # These fields are 0 if l2arc is disabled + assert ( + machine.succeed( + "cat /proc/spl/kstat/zfs/arcstats" + " | grep '^l2_' | tr -s ' '" + " | cut -s -d ' ' -f3 | uniq" + ).strip() != "0" + ), "Expected cache to be utilized." + + assert_property("zroot", "compression", "zstd") + assert_property("zroot/zfs_fs", "com.sun:auto-snapshot", "true") + assert_property("zroot/zfs_fs", "compression", "zstd") + machine.succeed("mountpoint /zfs_fs"); + ''; +}