git.proxmox.com Git - mirror_zfs.git/commitdiff
initramfs: Eliminate substitutions
author Richard Laager <rlaager@wiktel.com>
Sun, 23 Feb 2020 00:09:55 +0000 (18:09 -0600)
committer Brian Behlendorf <behlendorf1@llnl.gov>
Tue, 10 Mar 2020 16:53:20 +0000 (09:53 -0700)
These are now handled in zfs-functions, so this is all duplicative and
unnecessary.

Signed-off-by: Richard Laager <rlaager@wiktel.com>
contrib/initramfs/scripts/Makefile.am
contrib/initramfs/scripts/zfs [new file with mode: 0644]
contrib/initramfs/scripts/zfs.in [deleted file]

index 3ab18ba2cbce485d9decfe699defedda4d413b5f..2a142096e44907d44c0366ba66bd976a675e42c4 100644 (file)
@@ -1,22 +1,6 @@
 scriptsdir = /usr/share/initramfs-tools/scripts
 
-scripts_DATA = \
+dist_scripts_DATA = \
        zfs
 
 SUBDIRS = local-top
-
-EXTRA_DIST = \
-       $(top_srcdir)/contrib/initramfs/scripts/zfs.in
-
-$(scripts_DATA):%:%.in
-       -$(SED) -e 's,@sbindir\@,$(sbindir),g' \
-               -e 's,@sysconfdir\@,$(sysconfdir),g' \
-               $< >'$@'
-
-# Double-colon rules are allowed; there are multiple independent definitions.
-clean-local::
-       -$(RM) $(scripts_SCRIPTS)
-
-# Double-colon rules are allowed; there are multiple independent definitions.
-distclean-local::
-       -$(RM) $(scripts_SCRIPTS)
diff --git a/contrib/initramfs/scripts/zfs b/contrib/initramfs/scripts/zfs
new file mode 100644 (file)
index 0000000..dbc4e25
--- /dev/null
@@ -0,0 +1,1001 @@
+# ZFS boot stub for initramfs-tools.
+#
+# In the initramfs environment, the /init script sources this stub to
+# override the default functions in the /scripts/local script.
+#
+# Enable this by passing boot=zfs on the kernel command line.
+#
+
+# Source the common functions
+. /etc/zfs/zfs-functions
+
+# Start interactive shell.
+# Use Debian's panic() if defined, because it makes it possible to prevent
+# shell access by setting panic on the kernel command line (e.g. panic=0
+# or panic=15).
+# See "4.5 Disable root prompt on the initramfs" of the Securing Debian Manual:
+# https://www.debian.org/doc/manuals/securing-debian-howto/ch4.en.html
+shell() {
+       if type panic > /dev/null 2>&1; then
+               panic "$@"
+       else
+               /bin/sh
+       fi
+}
+
+# This runs any scripts that should run before we start importing
+# pools and mounting any filesystems.
+pre_mountroot()
+{
+       if type run_scripts > /dev/null 2>&1 && \
+           [ -f "/scripts/local-top" -o -d "/scripts/local-top" ]
+       then
+               [ "$quiet" != "y" ] && \
+                   zfs_log_begin_msg "Running /scripts/local-top"
+               run_scripts /scripts/local-top
+               [ "$quiet" != "y" ] && zfs_log_end_msg
+       fi
+
+       if type run_scripts > /dev/null 2>&1 && \
+           [ -f "/scripts/local-premount" -o -d "/scripts/local-premount" ]
+       then
+               [ "$quiet" != "y" ] && \
+                   zfs_log_begin_msg "Running /scripts/local-premount"
+               run_scripts /scripts/local-premount
+               [ "$quiet" != "y" ] && zfs_log_end_msg
+       fi
+}
+
+# If plymouth is available, hide the splash image.
+disable_plymouth()
+{
+       if [ -x /bin/plymouth ] && /bin/plymouth --ping
+       then
+               /bin/plymouth hide-splash >/dev/null 2>&1
+       fi
+}
+
+# Get a ZFS filesystem property value.
+get_fs_value()
+{
+       local fs="$1"
+       local value=$2
+
+       "${ZFS}" get -H -ovalue $value "$fs" 2> /dev/null
+}
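+
+# Usage sketch (hypothetical dataset name); the value arrives on stdout
+# exactly as 'zfs get -H -ovalue' prints it:
+#
+#     mp=$(get_fs_value rpool/ROOT/debian mountpoint)   # e.g. "/"
+#     cm=$(get_fs_value rpool/ROOT/debian canmount)     # e.g. "on"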
+
+# Find the 'bootfs' property on pool $1.
+# If the property does not contain '/', then ignore this
+# pool by exporting it again.
+find_rootfs()
+{
+       local pool="$1"
+
+       # If 'POOL_IMPORTED' isn't set, no pool has been imported and therefore
+       # we won't be able to find a root fs.
+       [ -z "${POOL_IMPORTED}" ] && return 1
+
+       # If it's already specified, just keep it mounted and exit;
+       # the user (kernel command line) must be correct.
+       [ -n "${ZFS_BOOTFS}" ] && return 0
+
+       # Not set, try to find it in the 'bootfs' property of the pool.
+       # NOTE: zpool does not support 'get -H -ovalue bootfs'...
+       ZFS_BOOTFS=$("${ZPOOL}" list -H -obootfs "$pool")
+
+       # Make sure it's not '-' and that its mountpoint is exactly '/'.
+       if [ "${ZFS_BOOTFS}" != "-" ] && \
+               get_fs_value "${ZFS_BOOTFS}" mountpoint | grep -q '^/$'
+       then
+               # Keep it mounted
+               POOL_IMPORTED=1
+               return 0
+       fi
+
+       # Not a boot fs; export the pool and try again later.
+       "${ZPOOL}" export "$pool"
+       POOL_IMPORTED=""
+
+       return 1
+}
+
+# Support function to get a list of all pools, separated with ';'
+find_pools()
+{
+       local CMD="$*"
+       local pools pool
+
+       pools=$($CMD 2> /dev/null | \
+               grep -E "pool:|^[a-zA-Z0-9]" | \
+               sed 's@.*: @@' | \
+               while read pool; do \
+                   echo -n "$pool;"
+               done)
+
+       echo "${pools%%;}" # Return without the last ';'.
+}
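+
+# Illustrative example (hypothetical pool names): if "zpool import" reports
+# "pool: rpool" and "pool: tank", the call below returns "rpool;tank".
+#
+#     find_pools "$ZPOOL" import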
+
+# Get a list of all available pools
+get_pools()
+{
+       local available_pools npools
+
+       if [ -n "${ZFS_POOL_IMPORT}" ]; then
+               echo "$ZFS_POOL_IMPORT"
+               return 0
+       fi
+
+       # Get the base list of available pools.
+       available_pools=$(find_pools "$ZPOOL" import)
+
+       # Just in case - it has been seen that a pool isn't visible/found
+       # with a simple "zpool import", but only when using the "-d"
+       # option or setting ZPOOL_IMPORT_PATH.
+       if [ -d "/dev/disk/by-id" ]
+       then
+               npools=$(find_pools "$ZPOOL" import -d /dev/disk/by-id)
+               if [ -n "$npools" ]
+               then
+                       # Because we have found extra pool(s) here which weren't
+                       # found 'normally', we need to force USE_DISK_BY_ID to
+                       # make sure we're able to actually import them later.
+                       USE_DISK_BY_ID='yes'
+
+                       if [ -n "$available_pools" ]
+                       then
+                               # Filter out duplicates (pools found with the
+                               # simple "zpool import" which are also found
+                               # with "zpool import -d ...").
+                               npools=$(echo "$npools" | sed "s,$available_pools,,")
+
+                               # Add the list to the existing list of
+                               # available pools
+                               available_pools="$available_pools;$npools"
+                       else
+                               available_pools="$npools"
+                       fi
+               fi
+       fi
+
+       # Filter out any exceptions...
+       if [ -n "$ZFS_POOL_EXCEPTIONS" ]
+       then
+               local found=""
+               local apools=""
+               local pool exception
+               OLD_IFS="$IFS" ; IFS=";"
+
+               for pool in $available_pools
+               do
+                       for exception in $ZFS_POOL_EXCEPTIONS
+                       do
+                               [ "$pool" = "$exception" ] && continue 2
+                               found="$pool"
+                       done
+
+                       if [ -n "$found" ]
+                       then
+                               if [ -n "$apools" ]
+                               then
+                                       apools="$apools;$pool"
+                               else
+                                       apools="$pool"
+                               fi
+                       fi
+               done
+
+               IFS="$OLD_IFS"
+               available_pools="$apools"
+       fi
+
+       # Return list of available pools.
+       echo "$available_pools"
+}
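+
+# Illustrative example (hypothetical names): with ZFS_POOL_EXCEPTIONS="tank"
+# set in /etc/default/zfs, a detected list of "rpool;tank" is returned as
+# just "rpool"; setting ZFS_POOL_IMPORT="rpool" skips detection entirely.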
+
+# Import given pool $1
+import_pool()
+{
+       local pool="$1"
+       local dirs dir
+
+       # Verify that the pool isn't already imported, doing our best to
+       # avoid needing '-f' to import.
+       "${ZPOOL}" get name,guid -o value -H 2>/dev/null | grep -Fxq "$pool" && return 0
+
+       # For backwards compatibility, make sure that ZPOOL_IMPORT_PATH is set
+       # to something we can use later with the real import(s). We want to
+       # make sure we find all by* dirs, BUT by-vdev should be first (if it
+       # exists).
+       if [ -n "$USE_DISK_BY_ID" -a -z "$ZPOOL_IMPORT_PATH" ]
+       then
+               dirs="$(for dir in $(echo /dev/disk/by-*)
+               do
+                       # Ignore by-vdev here - we want it first!
+                       echo "$dir" | grep -q /by-vdev && continue
+                       [ ! -d "$dir" ] && continue
+
+                       echo -n "$dir:"
+               done | sed 's,:$,,g')"
+
+               if [ -d "/dev/disk/by-vdev" ]
+               then
+                       # Add by-vdev at the beginning.
+                       ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:"
+               fi
+
+               # ... and /dev at the very end, just for good measure.
+               ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH$dirs:/dev"
+       fi
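+       # Example result (assuming by-id and by-path exist but by-vdev does
+       # not): ZPOOL_IMPORT_PATH="/dev/disk/by-id:/dev/disk/by-path:/dev"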
+
+       # Needs to be exported for "zpool" to catch it.
+       [ -n "$ZPOOL_IMPORT_PATH" ] && export ZPOOL_IMPORT_PATH
+
+
+       [ "$quiet" != "y" ] && zfs_log_begin_msg \
+               "Importing pool '${pool}' using defaults"
+
+       ZFS_CMD="${ZPOOL} import -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
+       ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
+       ZFS_ERROR="$?"
+       if [ "${ZFS_ERROR}" != 0 ]
+       then
+               [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
+
+               if [ -f "${ZPOOL_CACHE}" ]
+               then
+                       [ "$quiet" != "y" ] && zfs_log_begin_msg \
+                               "Importing pool '${pool}' using cachefile."
+
+                       ZFS_CMD="${ZPOOL} import -c ${ZPOOL_CACHE} -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
+                       ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
+                       ZFS_ERROR="$?"
+               fi
+
+               if [ "${ZFS_ERROR}" != 0 ]
+               then
+                       [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
+
+                       disable_plymouth
+                       echo ""
+                       echo "Command: ${ZFS_CMD} '$pool'"
+                       echo "Message: $ZFS_STDERR"
+                       echo "Error: $ZFS_ERROR"
+                       echo ""
+                       echo "Failed to import pool '$pool'."
+                       echo "Manually import the pool and exit."
+                       shell
+               fi
+       fi
+
+       [ "$quiet" != "y" ] && zfs_log_end_msg
+
+       POOL_IMPORTED=1
+       return 0
+}
+
+# Load ZFS modules
+# Loading a module in an initrd requires a slightly different approach,
+# with more logging etc.
+load_module_initrd()
+{
+       if [ "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP" > 0 ]
+       then
+               if [ "$quiet" != "y" ]; then
+                       zfs_log_begin_msg "Sleeping for" \
+                               "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP seconds..."
+               fi
+               sleep "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP"
+               [ "$quiet" != "y" ] && zfs_log_end_msg
+       fi
+
+       # Wait for all of the /dev/{hd,sd}[a-z] device nodes to appear.
+       if type wait_for_udev > /dev/null 2>&1 ; then
+               wait_for_udev 10
+       elif type wait_for_dev > /dev/null 2>&1 ; then
+               wait_for_dev
+       fi
+
+       # zpool import refuses to import without a valid /proc/self/mounts
+       [ ! -f /proc/self/mounts ] && mount proc /proc
+
+       # Load the module
+       load_module "zfs" || return 1
+
+       if [ "$ZFS_INITRD_POST_MODPROBE_SLEEP" > 0 ]
+       then
+               if [ "$quiet" != "y" ]; then
+                       zfs_log_begin_msg "Sleeping for" \
+                               "$ZFS_INITRD_POST_MODPROBE_SLEEP seconds..."
+               fi
+               sleep "$ZFS_INITRD_POST_MODPROBE_SLEEP"
+               [ "$quiet" != "y" ] && zfs_log_end_msg
+       fi
+
+       return 0
+}
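+
+# Example: setting ZFS_INITRD_PRE_MOUNTROOT_SLEEP=5 in /etc/default/zfs
+# (sourced by mountroot() before this function runs) adds a five-second
+# pause at the top of this function, which can help when disks are slow
+# to appear.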
+
+# Mount a given filesystem
+mount_fs()
+{
+       local fs="$1"
+       local mountpoint
+
+       # Check that the filesystem exists
+       "${ZFS}" list -oname -tfilesystem -H "${fs}" > /dev/null 2>&1
+       [ "$?" -ne 0 ] && return 1
+
+       # Skip filesystems with canmount=off.  The root fs should not have
+       # canmount=off, but ignore it for backwards compatibility just in case.
+       if [ "$fs" != "${ZFS_BOOTFS}" ]
+       then
+               canmount=$(get_fs_value "$fs" canmount)
+               [ "$canmount" = "off" ] && return 0
+       fi
+
+       # Need the _original_ dataset's mountpoint!
+       mountpoint=$(get_fs_value "$fs" mountpoint)
+       if [ "$mountpoint" = "legacy" -o "$mountpoint" = "none" ]; then
+               # Can't use the mountpoint property. Might be one of our
+               # clones. Check the 'org.zol:mountpoint' property set in
+               # clone_snap() to see if that's usable.
+               mountpoint=$(get_fs_value "$fs" org.zol:mountpoint)
+               if [ "$mountpoint" = "legacy" -o \
+                   "$mountpoint" = "none" -o \
+                   "$mountpoint" = "-" ]
+               then
+                       if [ "$fs" != "${ZFS_BOOTFS}" ]; then
+                               # We don't have a proper mountpoint and this
+                               # isn't the root fs.
+                               return 0
+                       else
+                               # Last hail-mary: Hope 'rootmnt' is set!
+                               mountpoint=""
+                       fi
+               fi
+
+               if [ "$mountpoint" = "legacy" ]; then
+                       ZFS_CMD="mount -t zfs"
+               else
+                       # If it's not a legacy filesystem, it can only be a
+                       # native one...
+                       ZFS_CMD="mount -o zfsutil -t zfs"
+               fi
+       else
+               ZFS_CMD="mount -o zfsutil -t zfs"
+       fi
+
+       # Possibly decrypt a filesystem using native encryption.
+       decrypt_fs "$fs"
+
+       [ "$quiet" != "y" ] && \
+           zfs_log_begin_msg "Mounting '${fs}' on '${rootmnt}/${mountpoint}'"
+       [ -n "${ZFS_DEBUG}" ] && \
+           zfs_log_begin_msg "CMD: '$ZFS_CMD ${fs} ${rootmnt}/${mountpoint}'"
+
+       ZFS_STDERR=$(${ZFS_CMD} "${fs}" "${rootmnt}/${mountpoint}" 2>&1)
+       ZFS_ERROR=$?
+       if [ "${ZFS_ERROR}" != 0 ]
+       then
+               [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
+
+               disable_plymouth
+               echo ""
+               echo "Command: ${ZFS_CMD} ${fs} ${rootmnt}/${mountpoint}"
+               echo "Message: $ZFS_STDERR"
+               echo "Error: $ZFS_ERROR"
+               echo ""
+               echo "Failed to mount ${fs} on ${rootmnt}/${mountpoint}."
+               echo "Manually mount the filesystem and exit."
+               shell
+       else
+               [ "$quiet" != "y" ] && zfs_log_end_msg
+       fi
+
+       return 0
+}
+
+# Unlock a ZFS native encrypted filesystem.
+decrypt_fs()
+{
+       local fs="$1"
+       
+       # If pool encryption is active and the zfs command understands '-o encryption'
+       if [ "$(zpool list -H -o feature@encryption $(echo "${fs}" | awk -F\/ '{print $1}'))" = 'active' ]; then
+
+               # Determine dataset that holds key for root dataset
+               ENCRYPTIONROOT="$(get_fs_value "${fs}" encryptionroot)"
+               KEYLOCATION="$(get_fs_value "${ENCRYPTIONROOT}" keylocation)"
+
+               # If root dataset is encrypted...
+               if ! [ "${ENCRYPTIONROOT}" = "-" ]; then
+                       KEYSTATUS="$(get_fs_value "${ENCRYPTIONROOT}" keystatus)"
+                       # Continue only if the key needs to be loaded
+                       [ "$KEYSTATUS" = "unavailable" ] || return 0
+                       TRY_COUNT=3
+
+                       # If key is stored in a file, do not prompt
+                       if ! [ "${KEYLOCATION}" = "prompt" ]; then
+                               $ZFS load-key "${ENCRYPTIONROOT}"
+
+                       # Prompt with plymouth, if active
+                       elif [ -e /bin/plymouth ] && /bin/plymouth --ping 2>/dev/null; then
+                               while [ $TRY_COUNT -gt 0 ]; do
+                                       plymouth ask-for-password --prompt "Encrypted ZFS password for ${ENCRYPTIONROOT}" | \
+                                               $ZFS load-key "${ENCRYPTIONROOT}" && break
+                                       TRY_COUNT=$((TRY_COUNT - 1))
+                               done
+
+                       # Prompt with systemd, if active 
+                       elif [ -e /run/systemd/system ]; then
+                               while [ $TRY_COUNT -gt 0 ]; do
+                                       systemd-ask-password "Encrypted ZFS password for ${ENCRYPTIONROOT}" --no-tty | \
+                                               $ZFS load-key "${ENCRYPTIONROOT}" && break
+                                       TRY_COUNT=$((TRY_COUNT - 1))
+                               done
+
+                       # Prompt with ZFS tty, otherwise
+                       else
+                               # Setting "printk" temporarily to "7" will allow prompt even if kernel option "quiet"
+                               storeprintk="$(awk '{print $1}' /proc/sys/kernel/printk)"
+                               echo 7 > /proc/sys/kernel/printk
+                               $ZFS load-key "${ENCRYPTIONROOT}"
+                               echo "$storeprintk" > /proc/sys/kernel/printk
+                       fi
+               fi
+       fi
+
+       return 0
+}
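+
+# Example (hypothetical dataset): a root dataset with
+# keylocation=file:///etc/zfs/rpool.key is unlocked non-interactively by the
+# plain 'zfs load-key' branch above; with keylocation=prompt, the passphrase
+# is requested via plymouth, systemd-ask-password, or the console, in that
+# order of preference.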
+
+# Destroy a given filesystem.
+destroy_fs()
+{
+       local fs="$1"
+
+       [ "$quiet" != "y" ] && \
+           zfs_log_begin_msg "Destroying '$fs'"
+
+       ZFS_CMD="${ZFS} destroy $fs"
+       ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
+       ZFS_ERROR="$?"
+       if [ "${ZFS_ERROR}" != 0 ]
+       then
+               [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
+
+               disable_plymouth
+               echo ""
+               echo "Command: $ZFS_CMD"
+               echo "Message: $ZFS_STDERR"
+               echo "Error: $ZFS_ERROR"
+               echo ""
+               echo "Failed to destroy '$fs'. Please make sure that '$fs' is not available."
+               echo "Hint: Try:  zfs destroy -Rfn $fs"
+               echo "If this dryrun looks good, then remove the 'n' from '-Rfn' and try again."
+               shell
+       else
+               [ "$quiet" != "y" ] && zfs_log_end_msg
+       fi
+
+       return 0
+}
+
+# Clone snapshot $1 to destination filesystem $2
+# Set 'canmount=noauto' and 'mountpoint=none' so that we get to keep
+# manual control over its mounting (i.e., make sure it's not automatically
+# mounted with a 'zfs mount -a' in the init/systemd scripts).
+clone_snap()
+{
+       local snap="$1"
+       local destfs="$2"
+       local mountpoint="$3"
+
+       [ "$quiet" != "y" ] && zfs_log_begin_msg "Cloning '$snap' to '$destfs'"
+
+       # Clone the snapshot into a dataset we can boot from
+       # + We don't want this filesystem to be automatically mounted, we
+       #   want control over this here and nowhere else.
+       # + We don't need any mountpoint set for the same reason.
+       # We use the 'org.zol:mountpoint' property to remember the mountpoint.
+       ZFS_CMD="${ZFS} clone -o canmount=noauto -o mountpoint=none"
+       ZFS_CMD="${ZFS_CMD} -o org.zol:mountpoint=${mountpoint}"
+       ZFS_CMD="${ZFS_CMD} $snap $destfs"
+       ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
+       ZFS_ERROR="$?"
+       if [ "${ZFS_ERROR}" != 0 ]
+       then
+               [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
+
+               disable_plymouth
+               echo ""
+               echo "Command: $ZFS_CMD"
+               echo "Message: $ZFS_STDERR"
+               echo "Error: $ZFS_ERROR"
+               echo ""
+               echo "Failed to clone snapshot."
+               echo "Make sure that the any problems are corrected and then make sure"
+               echo "that the dataset '$destfs' exists and is bootable."
+               shell
+       else
+               [ "$quiet" != "y" ] && zfs_log_end_msg
+       fi
+
+       return 0
+}
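+
+# Usage sketch (names borrowed from the examples in setup_snapshot_booting):
+#
+#     clone_snap rpool/ROOT/debian@snap2 rpool/ROOT/debian_snap2 /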
+
+# Rollback a given snapshot.
+rollback_snap()
+{
+       local snap="$1"
+
+       [ "$quiet" != "y" ] && zfs_log_begin_msg "Rollback $snap"
+
+       ZFS_CMD="${ZFS} rollback -Rf $snap"
+       ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
+       ZFS_ERROR="$?"
+       if [ "${ZFS_ERROR}" != 0 ]
+       then
+               [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
+
+               disable_plymouth
+               echo ""
+               echo "Command: $ZFS_CMD"
+               echo "Message: $ZFS_STDERR"
+               echo "Error: $ZFS_ERROR"
+               echo ""
+               echo "Failed to rollback snapshot."
+               shell
+       else
+               [ "$quiet" != "y" ] && zfs_log_end_msg
+       fi
+
+       return 0
+}
+
+# Get a list of snapshots, give them as a numbered list
+# to the user to choose from.
+ask_user_snap()
+{
+       local fs="$1"
+       local i=1
+       local SNAP snapnr snap debug
+
+       # We need to temporarily disable debugging. Set 'debug' so we
+       # remember to enable it again.
+       if [ -n "${ZFS_DEBUG}" ]; then
+               unset ZFS_DEBUG
+               set +x
+               debug=1
+       fi
+
+       # Because we need the resulting snapshot, which is sent on
+       # stdout to the caller, we use stderr for our questions.
+       echo "What snapshot do you want to boot from?" > /dev/stderr
+       while read snap; do
+           echo "  $i: ${snap}" > /dev/stderr
+           eval `echo SNAP_$i=$snap`
+           i=$((i + 1))
+       done <<EOT
+$("${ZFS}" list -H -oname -tsnapshot -r "${fs}")
+EOT
+
+       echo -n "  Snap nr [1-$((i-1))]? " > /dev/stderr
+       read snapnr
+
+       # Re-enable debugging.
+       if [ -n "${debug}" ]; then
+               ZFS_DEBUG=1
+               set -x
+       fi
+
+       echo "$(eval echo "$"SNAP_$snapnr)"
+}
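+
+# Example interaction (hypothetical snapshots): the menu is written to
+# stderr; only the chosen snapshot name goes to stdout.
+#
+#     What snapshot do you want to boot from?
+#       1: rpool/ROOT/debian@snap1
+#       2: rpool/ROOT/debian@snap2
+#       Snap nr [1-2]? 2        -> prints "rpool/ROOT/debian@snap2"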
+
+setup_snapshot_booting()
+{
+       local snap="$1"
+       local s destfs subfs mountpoint retval=0 filesystems fs
+
+       # Make sure that the specified snapshot actually exists.
+       if [ ! $(get_fs_value "${snap}" type) ]
+       then
+               # Snapshot does not exist (...@<null> ?)
+               # ask the user for a snapshot to use.
+               snap="$(ask_user_snap "${snap%%@*}")"
+       fi
+
+       # Separate the full snapshot ('$snap') into its filesystem and
+       # snapshot names. A split() function would have been nice.
+       rootfs="${snap%%@*}"
+       snapname="${snap##*@}"
+       ZFS_BOOTFS="${rootfs}_${snapname}"
+
+       if ! grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
+       then
+               # If the destination dataset for the clone
+               # already exists, destroy it recursively.
+               if [ $(get_fs_value "${rootfs}_${snapname}" type) ]; then
+                       filesystems=$("${ZFS}" list -oname -tfilesystem -H \
+                           -r -Sname "${ZFS_BOOTFS}")
+                       for fs in $filesystems; do
+                               destroy_fs "${fs}"
+                       done
+               fi
+       fi
+
+       # Get all snapshots, recursively (might need to clone /usr, /var etc
+       # as well).
+       for s in $("${ZFS}" list -H -oname -tsnapshot -r "${rootfs}" | \
+           grep "${snapname}")
+       do
+               if grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
+               then
+                       # Rollback snapshot
+                       rollback_snap "$s" || retval=$((retval + 1))
+               else
+                       # Setup a destination filesystem name.
+                       # Ex: Called with 'rpool/ROOT/debian@snap2'
+                       #       rpool/ROOT/debian@snap2         => rpool/ROOT/debian_snap2
+                       #       rpool/ROOT/debian/boot@snap2    => rpool/ROOT/debian_snap2/boot
+                       #       rpool/ROOT/debian/usr@snap2     => rpool/ROOT/debian_snap2/usr
+                       #       rpool/ROOT/debian/var@snap2     => rpool/ROOT/debian_snap2/var
+                       subfs="${s##$rootfs}"
+                       subfs="${subfs%%@$snapname}"
+
+                       destfs="${rootfs}_${snapname}" # base fs.
+                       [ -n "$subfs" ] && destfs="${destfs}$subfs" # + sub fs.
+
+                       # Get the mountpoint of the filesystem, to be used
+                       # with clone_snap(). If legacy or none, then use
+                       # the sub fs value.
+                       mountpoint=$(get_fs_value "${s%%@*}" mountpoint)
+                       if [ "$mountpoint" = "legacy" -o \
+                           "$mountpoint" = "none" ]
+                       then
+                               if [ -n "${subfs}" ]; then
+                                       mountpoint="${subfs}"
+                               else
+                                       mountpoint="/"
+                               fi
+                       fi
+
+                       # Clone the snapshot into its own
+                       # filesystem
+                       clone_snap "$s" "${destfs}" "${mountpoint}" || \
+                           retval=$((retval + 1))
+               fi
+       done
+
+       # Return the number of failures encountered above (0 means success).
+       return "${retval}"
+}
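+
+# Example: booting with "root=ZFS=rpool/ROOT/debian@snap2 rollback=1" rolls
+# the affected datasets back to @snap2 in place instead of cloning them.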
+
+# ================================================================
+
+# This is the main function.
+mountroot()
+{
+       local snaporig snapsub destfs pool POOLS
+
+       # ----------------------------------------------------------------
+       # I N I T I A L   S E T U P
+
+       # ------------
+       # Run the pre-mount scripts from /scripts/local-top.
+       pre_mountroot
+
+       # ------------
+       # Source the default setup variables.
+       [ -r '/etc/default/zfs' ] && . /etc/default/zfs
+
+       # ------------
+       # Support debug option
+       if grep -qiE '(^|[^\\](\\\\)* )(zfs_debug|zfs\.debug|zfsdebug)=(on|yes|1)( |$)' /proc/cmdline
+       then
+               ZFS_DEBUG=1
+               mkdir /var/log
+               #exec 2> /var/log/boot.debug
+               set -x
+       fi
+
+       # ------------
+       # Load ZFS module etc.
+       if ! load_module_initrd; then
+               disable_plymouth
+               echo ""
+               echo "Failed to load ZFS modules."
+               echo "Manually load the modules and exit."
+               shell
+       fi
+
+       # ------------
+       # Look for the cache file (if any).
+       [ ! -f ${ZPOOL_CACHE} ] && unset ZPOOL_CACHE
+
+       # ------------
+       # Compatibility: 'ROOT' is for Debian GNU/Linux (etc),
+       #                'root' is for Red Hat/Fedora (etc),
+       #                'REAL_ROOT' is for Gentoo
+       if [ -z "$ROOT" ]
+       then
+               [ -n "$root" ] && ROOT=${root}
+
+               [ -n "$REAL_ROOT" ] && ROOT=${REAL_ROOT}
+       fi
+
+       # ------------
+       # Where to mount the root fs in the initrd - set outside this script
+       # Compatibility: 'rootmnt' is for Debian GNU/Linux (etc),
+       #                'NEWROOT' is for Red Hat/Fedora (etc),
+       #                'NEW_ROOT' is for Gentoo
+       if [ -z "$rootmnt" ]
+       then
+               [ -n "$NEWROOT" ] && rootmnt=${NEWROOT}
+
+               [ -n "$NEW_ROOT" ] && rootmnt=${NEW_ROOT}
+       fi
+
+       # ------------
+       # No longer set in the defaults file, but it could have been set in
+       # get_pools() in some circumstances. If it's something, but not 'yes',
+       # it's no good to us.
+       [ -n "$USE_DISK_BY_ID" -a "$USE_DISK_BY_ID" != 'yes' ] && \
+           unset USE_DISK_BY_ID
+
+       # ----------------------------------------------------------------
+       # P A R S E   C O M M A N D   L I N E   O P T I O N S
+
+       # This part is the really ugly part - there are so many options and
+       # permutations 'out there', and if this is to be the 'primary' source
+       # for ZFS initrd scripting, it needs to support them all.
+       #
+       # Supports the following kernel command line argument combinations
+       # (in this order - first match wins):
+       #
+       #       rpool=<pool>                    (tries to find bootfs automatically)
+       #       bootfs=<pool>/<dataset>         (uses this for rpool - first part)
+       #       rpool=<pool> bootfs=<pool>/<dataset>
+       #       -B zfs-bootfs=<pool>/<fs>       (uses this for rpool - first part)
+       #       rpool=rpool                     (default if none of the above is used)
+       #       root=<pool>/<dataset>           (uses this for rpool - first part)
+       #       root=ZFS=<pool>/<dataset>       (uses this for rpool - first part, without 'ZFS=')
+       #       root=zfs:AUTO                   (tries to detect both pool and rootfs)
+       #       root=zfs:<pool>/<dataset>       (uses this for rpool - first part, without 'zfs:')
+       #
+       # Option <dataset> could also be <snapshot>
+       # Option <pool> could also be <guid>
+
+       # ------------
+       # Support force option
+       # In addition, setting one of zfs_force, zfs.force or zfsforce to
+       # 'yes', 'on' or '1' will make sure we force import the pool.
+       # This should (almost) never be needed, but it's here for
+       # completeness.
+       ZPOOL_FORCE=""
+       if grep -qiE '(^|[^\\](\\\\)* )(zfs_force|zfs\.force|zfsforce)=(on|yes|1)( |$)' /proc/cmdline
+       then
+               ZPOOL_FORCE="-f"
+       fi
+
+       # ------------
+       # Look for 'rpool' and 'bootfs' parameter
+       [ -n "$rpool" ] && ZFS_RPOOL="${rpool#rpool=}"
+       [ -n "$bootfs" ] && ZFS_BOOTFS="${bootfs#bootfs=}"
+
+       # ------------
+       # If we have 'ROOT' (see above), but not 'ZFS_BOOTFS', then use
+       # 'ROOT'
+       [ -n "$ROOT" -a -z "${ZFS_BOOTFS}" ] && ZFS_BOOTFS="$ROOT"
+
+       # ------------
+       # Check for the `-B zfs-bootfs=%s/%u,...` kind of parameter.
+       # NOTE: Only use the pool name and dataset. The rest is not
+       #       supported by ZoL (whatever it's for).
+       if [ -z "$ZFS_RPOOL" ]
+       then
+               # The ${zfs-bootfs} variable is set at the kernel command
+               # line, usually by GRUB, but it cannot be referenced here
+               # directly because Bourne variable names cannot contain a
+               # hyphen.
+               #
+               # Reassign the variable by dumping the environment and
+               # stripping the zfs-bootfs= prefix.  Let the shell handle
+               # quoting through the eval command.
+               eval ZFS_RPOOL=$(set | sed -n -e 's,^zfs-bootfs=,,p')
+       fi
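+       # Example (hypothetical): if GRUB passed "zfs-bootfs=rpool/ROOT/debian"
+       # on the kernel command line, the eval above leaves
+       # ZFS_RPOOL="rpool/ROOT/debian".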
+
+       # ------------
+       # No root fs or pool specified - do auto detect.
+       if [ -z "$ZFS_RPOOL" -a -z "${ZFS_BOOTFS}" ]
+       then
+               # Do auto detect. Do this by 'cheating' - set 'root=zfs:AUTO'
+               # which will be caught later
+               ROOT=zfs:AUTO
+       fi
+
+       # ----------------------------------------------------------------
+       # F I N D   A N D   I M P O R T   C O R R E C T   P O O L
+
+       # ------------
+       if [ "$ROOT" = "zfs:AUTO" ]
+       then
+               # Try to detect both pool and root fs.
+
+               [ "$quiet" != "y" ] && \
+                   zfs_log_begin_msg "Attempting to import additional pools."
+
+               # Get a list of pools available for import
+               if [ -n "$ZFS_RPOOL" ]
+               then
+                       # We've specified a pool - check only that
+                       POOLS=$ZFS_RPOOL
+               else
+                       POOLS=$(get_pools)
+               fi
+
+               OLD_IFS="$IFS" ; IFS=";"
+               for pool in $POOLS
+               do
+                       [ -z "$pool" ] && continue
+
+                       import_pool "$pool"
+                       find_rootfs "$pool"
+               done
+               IFS="$OLD_IFS"
+
+               [ "$quiet" != "y" ] && zfs_log_end_msg $ZFS_ERROR
+       else
+               # No auto - use value from the command line option.
+
+               # Strip 'zfs:' and 'ZFS='.
+               ZFS_BOOTFS="${ROOT#*[:=]}"
+
+               # Strip everything after the first slash.
+               ZFS_RPOOL="${ZFS_BOOTFS%%/*}"
+       fi
+
+       # Import the pool (if not already done so in the AUTO check above).
+       if [ -n "$ZFS_RPOOL" -a -z "${POOL_IMPORTED}" ]
+       then
+               [ "$quiet" != "y" ] && \
+                   zfs_log_begin_msg "Importing ZFS root pool '$ZFS_RPOOL'"
+
+               import_pool "${ZFS_RPOOL}"
+               find_rootfs "${ZFS_RPOOL}"
+
+               [ "$quiet" != "y" ] && zfs_log_end_msg
+       fi
+
+       if [ -z "${POOL_IMPORTED}" ]
+       then
+               # No pool imported, this is serious!
+               disable_plymouth
+               echo ""
+               echo "Command: $ZFS_CMD"
+               echo "Message: $ZFS_STDERR"
+               echo "Error: $ZFS_ERROR"
+               echo ""
+               echo "No pool imported. Manually import the root pool"
+               echo "at the command prompt and then exit."
+               echo "Hint: Try:  zpool import -R ${rootmnt} -N ${ZFS_RPOOL}"
+               shell
+       fi
+
+       # In case the pool was specified as a guid, resolve the guid to a name
+       pool="$("${ZPOOL}" get name,guid -o name,value -H | \
+           awk -v pool="${ZFS_RPOOL}" '$2 == pool { print $1 }')"
+       if [ -n "$pool" ]; then
+               # If $ZFS_BOOTFS contains guid, replace the guid portion with $pool
+               ZFS_BOOTFS=$(echo "$ZFS_BOOTFS" | \
+                       sed -e "s/$("${ZPOOL}" get guid -o value "$pool" -H)/$pool/g")
+               ZFS_RPOOL="${pool}"
+       fi
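+       # Example (hypothetical guid): booted with rpool=9816191299983807516,
+       # the awk above maps the guid back to its pool name (say "rpool") and
+       # the guid portion of ZFS_BOOTFS is rewritten to match.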
+
+       # Set the no-op scheduler on the disks containing the vdevs of
+       # the root pool. For single-queue devices, this scheduler is
+       # "noop", for multi-queue devices, it is "none".
+       # ZFS already does this for wholedisk vdevs (for all pools), so this
+       # is only important for partitions.
+       "${ZPOOL}" status -L "${ZFS_RPOOL}" 2> /dev/null |
+           awk '/^\t / && !/(mirror|raidz)/ {
+               dev=$1;
+               sub(/[0-9]+$/, "", dev);
+               print dev
+           }' |
+       while read -r i
+       do
+               SCHEDULER=/sys/block/$i/queue/scheduler
+               if [ -e "${SCHEDULER}" ]
+               then
+                       # Query to see what schedulers are available
+                       case "$(cat "${SCHEDULER}")" in
+                               *noop*) echo noop > "${SCHEDULER}" ;;
+                               *none*) echo none > "${SCHEDULER}" ;;
+                       esac
+               fi
+       done
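+       # Example (hypothetical device): for a root vdev on /dev/sda2, the awk
+       # above yields "sda" and the loop writes "noop" or "none" to
+       # /sys/block/sda/queue/scheduler, whichever that device offers.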
+
+
+       # ----------------------------------------------------------------
+       # P R E P A R E   R O O T   F I L E S Y S T E M
+
+       if [ -n "${ZFS_BOOTFS}" ]
+       then
+               # Booting from a snapshot?
+               # Will overwrite the ZFS_BOOTFS variable like so:
+               #   rpool/ROOT/debian@snap2 => rpool/ROOT/debian_snap2
+               echo "${ZFS_BOOTFS}" | grep -q '@' && \
+                   setup_snapshot_booting "${ZFS_BOOTFS}"
+       fi
+
+       if [ -z "${ZFS_BOOTFS}" ]
+       then
+               # Still nothing! Let the user sort this out.
+               disable_plymouth
+               echo ""
+               echo "Error: Unknown root filesystem - no 'bootfs' pool property and"
+               echo "       not specified on the kernel command line."
+               echo ""
+               echo "Manually mount the root filesystem on $rootmnt and then exit."
+               echo "Hint: Try:  mount -o zfsutil -t zfs ${ZFS_RPOOL-rpool}/ROOT/system $rootmnt"
+               shell
+       fi
+
+       # ----------------------------------------------------------------
+       # M O U N T   F I L E S Y S T E M S
+
+       # * Ideally, the root filesystem would be mounted like this:
+       #
+       #     zpool import -R "$rootmnt" -N "$ZFS_RPOOL"
+       #     zfs mount -o mountpoint=/ "${ZFS_BOOTFS}"
+       #
+       #   but the MOUNTPOINT prefix is preserved on descendant filesystems
+       #   after the pivot into the regular root, which later breaks things
+       #   like `zfs mount -a` and the /proc/self/mounts refresh.
+       #
+       # * Mount additional filesystems required
+       #   Such as /usr, /var, /usr/local etc.
+       #   NOTE: Mounted in the order specified in the
+       #         ZFS_INITRD_ADDITIONAL_DATASETS variable so take care!
+
+       # Go through the complete list (recursively) of all filesystems below
+       # the real root dataset
+       filesystems=$("${ZFS}" list -oname -tfilesystem -H -r "${ZFS_BOOTFS}")
+       for fs in $filesystems $ZFS_INITRD_ADDITIONAL_DATASETS
+       do
+               mount_fs "$fs"
+       done
+
+       # ------------
+       # Debugging information
+       if [ -n "${ZFS_DEBUG}" ]
+       then
+               #exec 2>&1-
+
+               echo "DEBUG: imported pools:"
+               "${ZPOOL}" list -H
+               echo
+
+               echo "DEBUG: mounted ZFS filesystems:"
+               mount | grep zfs
+               echo
+
+               echo "=> waiting for ENTER before continuing because of 'zfsdebug=1'. "
+               echo -n "   'c' for shell, 'r' for reboot, 'ENTER' to continue. "
+               read b
+
+               [ "$b" = "c" ] && /bin/sh
+               [ "$b" = "r" ] && reboot -f
+
+               set +x
+       fi
+
+       # ------------
+       # Run local bottom script
+       if type run_scripts > /dev/null 2>&1 && \
+           [ -f "/scripts/local-bottom" -o -d "/scripts/local-bottom" ]
+       then
+               [ "$quiet" != "y" ] && \
+                   zfs_log_begin_msg "Running /scripts/local-bottom"
+               run_scripts /scripts/local-bottom
+               [ "$quiet" != "y" ] && zfs_log_end_msg
+       fi
+}
diff --git a/contrib/initramfs/scripts/zfs.in b/contrib/initramfs/scripts/zfs.in
deleted file mode 100644 (file)
index 4bbdf53..0000000
+++ /dev/null
@@ -1,1009 +0,0 @@
-# ZFS boot stub for initramfs-tools.
-#
-# In the initramfs environment, the /init script sources this stub to
-# override the default functions in the /scripts/local script.
-#
-# Enable this by passing boot=zfs on the kernel command line.
-#
-
-# Source the common init script
-. /etc/zfs/zfs-functions
-
-# Paths to what we need - in the initrd, these paths are hardcoded,
-# so override the defines in zfs-functions.
-ZFS="@sbindir@/zfs"
-ZPOOL="@sbindir@/zpool"
-ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache"
-export ZFS ZPOOL ZPOOL_CACHE
-
-
-# Start interactive shell.
-# Use debian's panic() if defined, because it allows to prevent shell access
-# by setting panic in cmdline (e.g. panic=0 or panic=15).
-# See "4.5 Disable root prompt on the initramfs" of Securing Debian Manual:
-# https://www.debian.org/doc/manuals/securing-debian-howto/ch4.en.html
-shell() {
-       if type panic > /dev/null 2>&1; then
-               panic $@
-       else
-               /bin/sh
-       fi
-}
-
-# This runs any scripts that should run before we start importing
-# pools and mounting any filesystems.
-pre_mountroot()
-{
-       if type run_scripts > /dev/null 2>&1 && \
-           [ -f "/scripts/local-top" -o -d "/scripts/local-top" ]
-       then
-               [ "$quiet" != "y" ] && \
-                   zfs_log_begin_msg "Running /scripts/local-top"
-               run_scripts /scripts/local-top
-               [ "$quiet" != "y" ] && zfs_log_end_msg
-       fi
-
-       if type run_scripts > /dev/null 2>&1 && \
-           [ -f "/scripts/local-premount" -o -d "/scripts/local-premount" ]
-       then
-               [ "$quiet" != "y" ] && \
-                   zfs_log_begin_msg "Running /scripts/local-premount"
-               run_scripts /scripts/local-premount
-               [ "$quiet" != "y" ] && zfs_log_end_msg
-       fi
-}
-
-# If plymouth is available, hide the splash image.
-disable_plymouth()
-{
-       if [ -x /bin/plymouth ] && /bin/plymouth --ping
-       then
-               /bin/plymouth hide-splash >/dev/null 2>&1
-       fi
-}
-
-# Get a ZFS filesystem property value.
-get_fs_value()
-{
-       local fs="$1"
-       local value=$2
-
-       "${ZFS}" get -H -ovalue $value "$fs" 2> /dev/null
-}
-
-# Find the 'bootfs' property on pool $1.
-# If the property does not contain '/', then ignore this
-# pool by exporting it again.
-find_rootfs()
-{
-       local pool="$1"
-
-       # If 'POOL_IMPORTED' isn't set, no pool imported and therefore
-       # we won't be able to find a root fs.
-       [ -z "${POOL_IMPORTED}" ] && return 1
-
-       # If it's already specified, just keep it mounted and exit
-       # User (kernel command line) must be correct.
-       [ -n "${ZFS_BOOTFS}" ] && return 0
-
-       # Not set, try to find it in the 'bootfs' property of the pool.
-       # NOTE: zpool does not support 'get -H -ovalue bootfs'...
-       ZFS_BOOTFS=$("${ZPOOL}" list -H -obootfs "$pool")
-
-       # Make sure it's not '-' and that it starts with /.
-       if [ "${ZFS_BOOTFS}" != "-" ] && \
-               $(get_fs_value "${ZFS_BOOTFS}" mountpoint | grep -q '^/$')
-       then
-               # Keep it mounted
-               POOL_IMPORTED=1
-               return 0
-       fi
-
-       # Not boot fs here, export it and later try again..
-       "${ZPOOL}" export "$pool"
-       POOL_IMPORTED=""
-
-       return 1
-}
-
-# Support function to get a list of all pools, separated with ';'
-find_pools()
-{
-       local CMD="$*"
-       local pools pool
-
-       pools=$($CMD 2> /dev/null | \
-               grep -E "pool:|^[a-zA-Z0-9]" | \
-               sed 's@.*: @@' | \
-               while read pool; do \
-                   echo -n "$pool;"
-               done)
-
-       echo "${pools%%;}" # Return without the last ';'.
-}
-
-# Get a list of all available pools
-get_pools()
-{
-       local available_pools npools
-
-       if [ -n "${ZFS_POOL_IMPORT}" ]; then
-               echo "$ZFS_POOL_IMPORT"
-               return 0
-       fi
-
-       # Get the base list of available pools.
-       available_pools=$(find_pools "$ZPOOL" import)
-
-       # Just in case - seen it happen (that a pool isn't visible/found
-       # with a simple "zpool import" but only when using the "-d"
-       # option or setting ZPOOL_IMPORT_PATH).
-       if [ -d "/dev/disk/by-id" ]
-       then
-               npools=$(find_pools "$ZPOOL" import -d /dev/disk/by-id)
-               if [ -n "$npools" ]
-               then
-                       # Because we have found extra pool(s) here, which wasn't
-                       # found 'normally', we need to force USE_DISK_BY_ID to
-                       # make sure we're able to actually import it/them later.
-                       USE_DISK_BY_ID='yes'
-
-                       if [ -n "$available_pools" ]
-                       then
-                               # Filter out duplicates (pools found with the simple
-                               # "zpool import" but which is also found with the
-                               # "zpool import -d ...").
-                               npools=$(echo "$npools" | sed "s,$available_pools,,")
-
-                               # Add the list to the existing list of
-                               # available pools
-                               available_pools="$available_pools;$npools"
-                       else
-                               available_pools="$npools"
-                       fi
-               fi
-       fi
-
-       # Filter out any exceptions...
-       if [ -n "$ZFS_POOL_EXCEPTIONS" ]
-       then
-               local found=""
-               local apools=""
-               local pool exception
-               OLD_IFS="$IFS" ; IFS=";"
-
-               for pool in $available_pools
-               do
-                       for exception in $ZFS_POOL_EXCEPTIONS
-                       do
-                               [ "$pool" = "$exception" ] && continue 2
-                               found="$pool"
-                       done
-
-                       if [ -n "$found" ]
-                       then
-                               if [ -n "$apools" ]
-                               then
-                                       apools="$apools;$pool"
-                               else
-                                       apools="$pool"
-                               fi
-                       fi
-               done
-
-               IFS="$OLD_IFS"
-               available_pools="$apools"
-       fi
-
-       # Return list of available pools.
-       echo "$available_pools"
-}
-
-# Import given pool $1
-import_pool()
-{
-       local pool="$1"
-       local dirs dir
-
-       # Verify that the pool isn't already imported
-       # Make as sure as we can to not require '-f' to import.
-       "${ZPOOL}" get name,guid -o value -H 2>/dev/null | grep -Fxq "$pool" && return 0
-
-       # For backwards compatibility, make sure that ZPOOL_IMPORT_PATH is set
-       # to something we can use later with the real import(s). We want to
-       # make sure we find all by* dirs, BUT by-vdev should be first (if it
-       # exists).
-       if [ -n "$USE_DISK_BY_ID" -a -z "$ZPOOL_IMPORT_PATH" ]
-       then
-               dirs="$(for dir in $(echo /dev/disk/by-*)
-               do
-                       # Ignore by-vdev here - we want it first!
-                       echo "$dir" | grep -q /by-vdev && continue
-                       [ ! -d "$dir" ] && continue
-
-                       echo -n "$dir:"
-               done | sed 's,:$,,g')"
-
-               if [ -d "/dev/disk/by-vdev" ]
-               then
-                       # Add by-vdev at the beginning.
-                       ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:"
-               fi
-
-               # ... and /dev at the very end, just for good measure.
-               ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH$dirs:/dev"
-       fi
-
-       # Needs to be exported for "zpool" to catch it.
-       [ -n "$ZPOOL_IMPORT_PATH" ] && export ZPOOL_IMPORT_PATH
-
-
-       [ "$quiet" != "y" ] && zfs_log_begin_msg \
-               "Importing pool '${pool}' using defaults"
-
-       ZFS_CMD="${ZPOOL} import -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
-       ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
-       ZFS_ERROR="$?"
-       if [ "${ZFS_ERROR}" != 0 ]
-       then
-               [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
-
-               if [ -f "${ZPOOL_CACHE}" ]
-               then
-                       [ "$quiet" != "y" ] && zfs_log_begin_msg \
-                               "Importing pool '${pool}' using cachefile."
-
-                       ZFS_CMD="${ZPOOL} import -c ${ZPOOL_CACHE} -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
-                       ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
-                       ZFS_ERROR="$?"
-               fi
-
-               if [ "${ZFS_ERROR}" != 0 ]
-               then
-                       [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
-
-                       disable_plymouth
-                       echo ""
-                       echo "Command: ${ZFS_CMD} '$pool'"
-                       echo "Message: $ZFS_STDERR"
-                       echo "Error: $ZFS_ERROR"
-                       echo ""
-                       echo "Failed to import pool '$pool'."
-                       echo "Manually import the pool and exit."
-                       shell
-               fi
-       fi
-
-       [ "$quiet" != "y" ] && zfs_log_end_msg
-
-       POOL_IMPORTED=1
-       return 0
-}
-
-# Load ZFS modules
-# Loading a module in a initrd require a slightly different approach,
-# with more logging etc.
-load_module_initrd()
-{
-       if [ "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP" > 0 ]
-       then
-               if [ "$quiet" != "y" ]; then
-                       zfs_log_begin_msg "Sleeping for" \
-                               "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP seconds..."
-               fi
-               sleep "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP"
-               [ "$quiet" != "y" ] && zfs_log_end_msg
-       fi
-
-       # Wait for all of the /dev/{hd,sd}[a-z] device nodes to appear.
-       if type wait_for_udev > /dev/null 2>&1 ; then
-               wait_for_udev 10
-       elif type wait_for_dev > /dev/null 2>&1 ; then
-               wait_for_dev
-       fi
-
-       # zpool import refuse to import without a valid /proc/self/mounts
-       [ ! -f /proc/self/mounts ] && mount proc /proc
-
-       # Load the module
-       load_module "zfs" || return 1
-
-       if [ "$ZFS_INITRD_POST_MODPROBE_SLEEP" > 0 ]
-       then
-               if [ "$quiet" != "y" ]; then
-                       zfs_log_begin_msg "Sleeping for" \
-                               "$ZFS_INITRD_POST_MODPROBE_SLEEP seconds..."
-               fi
-               sleep "$ZFS_INITRD_POST_MODPROBE_SLEEP"
-               [ "$quiet" != "y" ] && zfs_log_end_msg
-       fi
-
-       return 0
-}
-
-# Mount a given filesystem
-mount_fs()
-{
-       local fs="$1"
-       local mountpoint
-
-       # Check that the filesystem exists
-       "${ZFS}" list -oname -tfilesystem -H "${fs}" > /dev/null 2>&1
-       [ "$?" -ne 0 ] && return 1
-
-       # Skip filesystems with canmount=off.  The root fs should not have
-       # canmount=off, but ignore it for backwards compatibility just in case.
-       if [ "$fs" != "${ZFS_BOOTFS}" ]
-       then
-               canmount=$(get_fs_value "$fs" canmount)
-               [ "$canmount" = "off" ] && return 0
-       fi
-
-       # Need the _original_ datasets mountpoint!
-       mountpoint=$(get_fs_value "$fs" mountpoint)
-       if [ "$mountpoint" = "legacy" -o "$mountpoint" = "none" ]; then
-               # Can't use the mountpoint property. Might be one of our
-               # clones. Check the 'org.zol:mountpoint' property set in
-               # clone_snap() if that's usable.
-               mountpoint=$(get_fs_value "$fs" org.zol:mountpoint)
-               if [ "$mountpoint" = "legacy" -o \
-                   "$mountpoint" = "none" -o \
-                   "$mountpoint" = "-" ]
-               then
-                       if [ "$fs" != "${ZFS_BOOTFS}" ]; then
-                               # We don't have a proper mountpoint and this
-                               # isn't the root fs.
-                               return 0
-                       else
-                               # Last hail-mary: Hope 'rootmnt' is set!
-                               mountpoint=""
-                       fi
-               fi
-
-               if [ "$mountpoint" = "legacy" ]; then
-                       ZFS_CMD="mount -t zfs"
-               else
-                       # If it's not a legacy filesystem, it can only be a
-                       # native one...
-                       ZFS_CMD="mount -o zfsutil -t zfs"
-               fi
-       else
-               ZFS_CMD="mount -o zfsutil -t zfs"
-       fi
-
-       # Possibly decrypt a filesystem using native encryption.
-       decrypt_fs "$fs"
-
-       [ "$quiet" != "y" ] && \
-           zfs_log_begin_msg "Mounting '${fs}' on '${rootmnt}/${mountpoint}'"
-       [ -n "${ZFS_DEBUG}" ] && \
-           zfs_log_begin_msg "CMD: '$ZFS_CMD ${fs} ${rootmnt}/${mountpoint}'"
-
-       ZFS_STDERR=$(${ZFS_CMD} "${fs}" "${rootmnt}/${mountpoint}" 2>&1)
-       ZFS_ERROR=$?
-       if [ "${ZFS_ERROR}" != 0 ]
-       then
-               [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
-
-               disable_plymouth
-               echo ""
-               echo "Command: ${ZFS_CMD} ${fs} ${rootmnt}/${mountpoint}"
-               echo "Message: $ZFS_STDERR"
-               echo "Error: $ZFS_ERROR"
-               echo ""
-               echo "Failed to mount ${fs} on ${rootmnt}/${mountpoint}."
-               echo "Manually mount the filesystem and exit."
-               shell
-       else
-               [ "$quiet" != "y" ] && zfs_log_end_msg
-       fi
-
-       return 0
-}
-
-# Unlock a ZFS native encrypted filesystem.
-decrypt_fs()
-{
-       local fs="$1"
-       
-       # If pool encryption is active and the zfs command understands '-o encryption'
-       if [ "$(zpool list -H -o feature@encryption $(echo "${fs}" | awk -F\/ '{print $1}'))" = 'active' ]; then
-
-               # Determine dataset that holds key for root dataset
-               ENCRYPTIONROOT="$(get_fs_value "${fs}" encryptionroot)"
-               KEYLOCATION="$(get_fs_value "${ENCRYPTIONROOT}" keylocation)"
-
-               # If root dataset is encrypted...
-               if ! [ "${ENCRYPTIONROOT}" = "-" ]; then
-                       KEYSTATUS="$(get_fs_value "${ENCRYPTIONROOT}" keystatus)"
-                       # Continue only if the key needs to be loaded
-                       [ "$KEYSTATUS" = "unavailable" ] || return 0
-                       TRY_COUNT=3
-
-                       # If key is stored in a file, do not prompt
-                       if ! [ "${KEYLOCATION}" = "prompt" ]; then
-                               $ZFS load-key "${ENCRYPTIONROOT}"
-
-                       # Prompt with plymouth, if active
-                       elif [ -e /bin/plymouth ] && /bin/plymouth --ping 2>/dev/null; then
-                               while [ $TRY_COUNT -gt 0 ]; do
-                                       plymouth ask-for-password --prompt "Encrypted ZFS password for ${ENCRYPTIONROOT}" | \
-                                               $ZFS load-key "${ENCRYPTIONROOT}" && break
-                                       TRY_COUNT=$((TRY_COUNT - 1))
-                               done
-
-                       # Prompt with systemd, if active 
-                       elif [ -e /run/systemd/system ]; then
-                               while [ $TRY_COUNT -gt 0 ]; do
-                                       systemd-ask-password "Encrypted ZFS password for ${ENCRYPTIONROOT}" --no-tty | \
-                                               $ZFS load-key "${ENCRYPTIONROOT}" && break
-                                       TRY_COUNT=$((TRY_COUNT - 1))
-                               done
-
-                       # Prompt with ZFS tty, otherwise
-                       else
-                               # Setting "printk" temporarily to "7" will allow prompt even if kernel option "quiet"
-                               storeprintk="$(awk '{print $1}' /proc/sys/kernel/printk)"
-                               echo 7 > /proc/sys/kernel/printk
-                               $ZFS load-key "${ENCRYPTIONROOT}"
-                               echo "$storeprintk" > /proc/sys/kernel/printk
-                       fi
-               fi
-       fi
-
-       return 0
-}
-
-# Destroy a given filesystem.
-destroy_fs()
-{
-       local fs="$1"
-
-       [ "$quiet" != "y" ] && \
-           zfs_log_begin_msg "Destroying '$fs'"
-
-       ZFS_CMD="${ZFS} destroy $fs"
-       ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
-       ZFS_ERROR="$?"
-       if [ "${ZFS_ERROR}" != 0 ]
-       then
-               [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
-
-               disable_plymouth
-               echo ""
-               echo "Command: $ZFS_CMD"
-               echo "Message: $ZFS_STDERR"
-               echo "Error: $ZFS_ERROR"
-               echo ""
-               echo "Failed to destroy '$fs'. Please make sure that '$fs' is not available."
-               echo "Hint: Try:  zfs destroy -Rfn $fs"
-               echo "If this dryrun looks good, then remove the 'n' from '-Rfn' and try again."
-               shell
-       else
-               [ "$quiet" != "y" ] && zfs_log_end_msg
-       fi
-
-       return 0
-}
-
-# Clone snapshot $1 to destination filesystem $2
-# Set 'canmount=noauto' and 'mountpoint=none' so that we get to keep
-# manual control over it's mounting (i.e., make sure it's not automatically
-# mounted with a 'zfs mount -a' in the init/systemd scripts).
-clone_snap()
-{
-       local snap="$1"
-       local destfs="$2"
-       local mountpoint="$3"
-
-       [ "$quiet" != "y" ] && zfs_log_begin_msg "Cloning '$snap' to '$destfs'"
-
-       # Clone the snapshot into a dataset we can boot from
-       # + We don't want this filesystem to be automatically mounted, we
-       #   want control over this here and nowhere else.
-       # + We don't need any mountpoint set for the same reason.
-       # We use the 'org.zol:mountpoint' property to remember the mountpoint.
-       ZFS_CMD="${ZFS} clone -o canmount=noauto -o mountpoint=none"
-       ZFS_CMD="${ZFS_CMD} -o org.zol:mountpoint=${mountpoint}"
-       ZFS_CMD="${ZFS_CMD} $snap $destfs"
-       ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
-       ZFS_ERROR="$?"
-       if [ "${ZFS_ERROR}" != 0 ]
-       then
-               [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
-
-               disable_plymouth
-               echo ""
-               echo "Command: $ZFS_CMD"
-               echo "Message: $ZFS_STDERR"
-               echo "Error: $ZFS_ERROR"
-               echo ""
-               echo "Failed to clone snapshot."
-               echo "Make sure that the any problems are corrected and then make sure"
-               echo "that the dataset '$destfs' exists and is bootable."
-               shell
-       else
-               [ "$quiet" != "y" ] && zfs_log_end_msg
-       fi
-
-       return 0
-}
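-
-# A hypothetical usage sketch (names invented; matches the argument
-# order above: snapshot, destination, mountpoint):
-#
-#     clone_snap "rpool/ROOT/debian@snap2" "rpool/ROOT/debian_snap2" "/"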
-
-# Roll back a given snapshot.
-rollback_snap()
-{
-       local snap="$1"
-
-       [ "$quiet" != "y" ] && zfs_log_begin_msg "Rollback $snap"
-
-       ZFS_CMD="${ZFS} rollback -Rf $snap"
-       ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
-       ZFS_ERROR="$?"
-       if [ "${ZFS_ERROR}" != 0 ]
-       then
-               [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
-
-               disable_plymouth
-               echo ""
-               echo "Command: $ZFS_CMD"
-               echo "Message: $ZFS_STDERR"
-               echo "Error: $ZFS_ERROR"
-               echo ""
-               echo "Failed to rollback snapshot."
-               shell
-       else
-               [ "$quiet" != "y" ] && zfs_log_end_msg
-       fi
-
-       return 0
-}
-
-# Present the available snapshots as a numbered list
-# and ask the user to choose one.
-ask_user_snap()
-{
-       local fs="$1"
-       local i=1
-       local SNAP snapnr snap debug
-
-       # We need to temporarily disable debugging. Set 'debug' so we
-       # remember to enable it again.
-       if [ -n "${ZFS_DEBUG}" ]; then
-               unset ZFS_DEBUG
-               set +x
-               debug=1
-       fi
-
-       # Because we need the resulting snapshot, which is sent on
-       # stdout to the caller, we use stderr for our questions.
-       echo "What snapshot do you want to boot from?" > /dev/stderr
-       while read snap; do
-           echo "  $i: ${snap}" > /dev/stderr
-           eval `echo SNAP_$i=$snap`
-           i=$((i + 1))
-       done <<EOT
-$("${ZFS}" list -H -oname -tsnapshot -r "${fs}")
-EOT
-
-       echo -n "  Snap nr [1-$((i-1))]? " > /dev/stderr
-       read snapnr
-
-       # Re-enable debugging.
-       if [ -n "${debug}" ]; then
-               ZFS_DEBUG=1
-               set -x
-       fi
-
-       echo "$(eval echo "$"SNAP_$snapnr)"
-}
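-
-# Sketch of the indirection above (names invented): if the user answers
-# '2', the eval expands '$SNAP_2' and the function prints e.g.
-# 'rpool/ROOT/debian@snap2' on stdout.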
-
-setup_snapshot_booting()
-{
-       local snap="$1"
-       local s destfs subfs mountpoint retval=0 filesystems fs
-
-       # Make sure that the specified snapshot actually exists.
-       if [ -z "$(get_fs_value "${snap}" type)" ]
-       then
-               # Snapshot does not exist (...@<null> ?)
-               # ask the user for a snapshot to use.
-               snap="$(ask_user_snap "${snap%%@*}")"
-       fi
-
-       # Separate the full snapshot ('$snap') into its filesystem and
-       # snapshot names. A split() function would have been nice here.
-       rootfs="${snap%%@*}"
-       snapname="${snap##*@}"
-       ZFS_BOOTFS="${rootfs}_${snapname}"
-
-       if ! grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
-       then
-               # If the destination dataset for the clone
-               # already exists, destroy it recursively.
-               if [ -n "$(get_fs_value "${rootfs}_${snapname}" type)" ]; then
-                       filesystems=$("${ZFS}" list -oname -tfilesystem -H \
-                           -r -Sname "${ZFS_BOOTFS}")
-                       for fs in $filesystems; do
-                               destroy_fs "${fs}"
-                       done
-               fi
-       fi
-
-       # Get all snapshots, recursively (might need to clone /usr, /var etc
-       # as well).
-       for s in $("${ZFS}" list -H -oname -tsnapshot -r "${rootfs}" | \
-           grep "${snapname}")
-       do
-               if grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
-               then
-                       # Rollback snapshot
-                       rollback_snap "$s" || retval=$((retval + 1))
-               else
-                       # Setup a destination filesystem name.
-                       # Ex: Called with 'rpool/ROOT/debian@snap2'
-                       #       rpool/ROOT/debian@snap2         => rpool/ROOT/debian_snap2
-                       #       rpool/ROOT/debian/boot@snap2    => rpool/ROOT/debian_snap2/boot
-                       #       rpool/ROOT/debian/usr@snap2     => rpool/ROOT/debian_snap2/usr
-                       #       rpool/ROOT/debian/var@snap2     => rpool/ROOT/debian_snap2/var
-                       subfs="${s##$rootfs}"
-                       subfs="${subfs%%@$snapname}"
-
-                       destfs="${rootfs}_${snapname}" # base fs.
-                       [ -n "$subfs" ] && destfs="${destfs}$subfs" # + sub fs.
-
-                       # Get the mountpoint of the filesystem, to be used
-                       # with clone_snap(). If legacy or none, then use
-                       # the sub fs value.
-                       mountpoint=$(get_fs_value "${s%%@*}" mountpoint)
-                       if [ "$mountpoint" = "legacy" -o \
-                           "$mountpoint" = "none" ]
-                       then
-                               if [ -n "${subfs}" ]; then
-                                       mountpoint="${subfs}"
-                               else
-                                       mountpoint="/"
-                               fi
-                       fi
-
-                       # Clone the snapshot into its own
-                       # filesystem
-                       clone_snap "$s" "${destfs}" "${mountpoint}" || \
-                           retval=$((retval + 1))
-               fi
-       done
-
-       # Return the number of failed operations (zero means success).
-       return "${retval}"
-}
-
-# ================================================================
-
-# This is the main function.
-mountroot()
-{
-       local snaporig snapsub destfs pool POOLS
-
-       # ----------------------------------------------------------------
-       # I N I T I A L   S E T U P
-
-       # ------------
-       # Run the pre-mount scripts from /scripts/local-top.
-       pre_mountroot
-
-       # ------------
-       # Source the default setup variables.
-       [ -r '/etc/default/zfs' ] && . /etc/default/zfs
-
-       # ------------
-       # Support debug option
-       if grep -qiE '(^|[^\\](\\\\)* )(zfs_debug|zfs\.debug|zfsdebug)=(on|yes|1)( |$)' /proc/cmdline
-       then
-               ZFS_DEBUG=1
-               mkdir -p /var/log
-               #exec 2> /var/log/boot.debug
-               set -x
-       fi
-
-       # ------------
-       # Load ZFS module etc.
-       if ! load_module_initrd; then
-               disable_plymouth
-               echo ""
-               echo "Failed to load ZFS modules."
-               echo "Manually load the modules and exit."
-               shell
-       fi
-
-       # ------------
-       # Look for the cache file (if any).
-       [ ! -f "${ZPOOL_CACHE}" ] && unset ZPOOL_CACHE
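-       # (ZPOOL_CACHE comes from zfs-functions; it typically points at
-       # /etc/zfs/zpool.cache, though that exact path is an assumption
-       # here.)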
-
-       # ------------
-       # Compatibility: 'ROOT' is for Debian GNU/Linux (etc),
-       #                'root' is for RedHat/Fedora (etc),
-       #                'REAL_ROOT' is for Gentoo
-       if [ -z "$ROOT" ]
-       then
-               [ -n "$root" ] && ROOT=${root}
-
-               [ -n "$REAL_ROOT" ] && ROOT=${REAL_ROOT}
-       fi
-
-       # ------------
-       # Where to mount the root fs in the initrd - set outside this script
-       # Compatibility: 'rootmnt' is for Debian GNU/Linux (etc),
-       #                'NEWROOT' is for RedHat/Fedora (etc),
-       #                'NEW_ROOT' is for Gentoo
-       if [ -z "$rootmnt" ]
-       then
-               [ -n "$NEWROOT" ] && rootmnt=${NEWROOT}
-
-               [ -n "$NEW_ROOT" ] && rootmnt=${NEW_ROOT}
-       fi
-
-       # ------------
-       # No longer set in the defaults file, but it could have been set in
-       # get_pools() in some circumstances. If it's something, but not 'yes',
-       # it's no good to us.
-       [ -n "$USE_DISK_BY_ID" -a "$USE_DISK_BY_ID" != 'yes' ] && \
-           unset USE_DISK_BY_ID
-
-       # ----------------------------------------------------------------
-       # P A R S E   C O M M A N D   L I N E   O P T I O N S
-
-       # This is the really ugly part: there are so many options and
-       # permutations 'out there' that, if this is to be the 'primary'
-       # source for ZFS initrd scripting, it needs to support them all.
-       #
-       # Supports the following kernel command line argument combinations
-       # (in this order - first match wins):
-       #
-       #       rpool=<pool>                    (tries to find bootfs automatically)
-       #       bootfs=<pool>/<dataset>         (uses this for rpool - first part)
-       #       rpool=<pool> bootfs=<pool>/<dataset>
-       #       -B zfs-bootfs=<pool>/<fs>       (uses this for rpool - first part)
-       #       rpool=rpool                     (default if none of the above is used)
-       #       root=<pool>/<dataset>           (uses this for rpool - first part)
-       #       root=ZFS=<pool>/<dataset>       (uses this for rpool - first part, without 'ZFS=')
-       #       root=zfs:AUTO                   (tries to detect both pool and rootfs)
-       #       root=zfs:<pool>/<dataset>       (uses this for rpool - first part, without 'zfs:')
-       #
-       # Option <dataset> could also be <snapshot>
-       # Option <pool> could also be <guid>
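-       #
-       # For illustration, a hypothetical command line using the common
-       # 'root=ZFS=' form together with the 'boot=zfs' trigger:
-       #
-       #       boot=zfs root=ZFS=rpool/ROOT/debian ro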
-
-       # ------------
-       # Support force option
-       # In addition, setting one of zfs_force, zfs.force or zfsforce to
-       # 'yes', 'on' or '1' will make sure we force import the pool.
-       # This should (almost) never be needed, but it's here for
-       # completeness.
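-       # For example, the pattern below matches 'zfs_force=1' or
-       # 'zfs.force=yes' appearing as its own word on the command line.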
-       ZPOOL_FORCE=""
-       if grep -qiE '(^|[^\\](\\\\)* )(zfs_force|zfs\.force|zfsforce)=(on|yes|1)( |$)' /proc/cmdline
-       then
-               ZPOOL_FORCE="-f"
-       fi
-
-       # ------------
-       # Look for 'rpool' and 'bootfs' parameter
-       [ -n "$rpool" ] && ZFS_RPOOL="${rpool#rpool=}"
-       [ -n "$bootfs" ] && ZFS_BOOTFS="${bootfs#bootfs=}"
-
-       # ------------
-       # If we have 'ROOT' (see above), but not 'ZFS_BOOTFS', then use
-       # 'ROOT'
-       [ -n "$ROOT" -a -z "${ZFS_BOOTFS}" ] && ZFS_BOOTFS="$ROOT"
-
-       # ------------
-       # Check for the `-B zfs-bootfs=%s/%u,...` kind of parameter.
-       # NOTE: Only use the pool name and dataset. The rest is not
-       #       supported by ZoL (whatever it's for).
-       if [ -z "$ZFS_RPOOL" ]
-       then
-               # The ${zfs-bootfs} variable is set at the kernel command
-               # line, usually by GRUB, but it cannot be referenced here
-               # directly because Bourne shell variable names cannot
-               # contain a hyphen.
-               #
-               # Reassign the variable by dumping the environment and
-               # stripping the zfs-bootfs= prefix.  Let the shell handle
-               # quoting through the eval command.
-               eval ZFS_RPOOL=$(set | sed -n -e 's,^zfs-bootfs=,,p')
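-               #
-               # For illustration: booting with '-B zfs-bootfs=rpool/ROOT/debian'
-               # leaves a 'zfs-bootfs=rpool/ROOT/debian' line in the 'set'
-               # output, which the sed above reduces to 'rpool/ROOT/debian'.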
-       fi
-
-       # ------------
-       # No root fs or pool specified - do auto detect.
-       if [ -z "$ZFS_RPOOL" -a -z "${ZFS_BOOTFS}" ]
-       then
-               # Do auto detect. Do this by 'cheating' - set 'root=zfs:AUTO'
-               # which will be caught later
-               ROOT=zfs:AUTO
-       fi
-
-       # ----------------------------------------------------------------
-       # F I N D   A N D   I M P O R T   C O R R E C T   P O O L
-
-       # ------------
-       if [ "$ROOT" = "zfs:AUTO" ]
-       then
-               # Try to detect both pool and root fs.
-
-               [ "$quiet" != "y" ] && \
-                   zfs_log_begin_msg "Attempting to import additional pools."
-
-               # Get a list of pools available for import
-               if [ -n "$ZFS_RPOOL" ]
-               then
-                       # We've specified a pool - check only that
-                       POOLS=$ZFS_RPOOL
-               else
-                       POOLS=$(get_pools)
-               fi
-
-               OLD_IFS="$IFS" ; IFS=";"
-               for pool in $POOLS
-               do
-                       [ -z "$pool" ] && continue
-
-                       import_pool "$pool"
-                       find_rootfs "$pool"
-               done
-               IFS="$OLD_IFS"
-
-               [ "$quiet" != "y" ] && zfs_log_end_msg $ZFS_ERROR
-       else
-               # No auto - use value from the command line option.
-
-               # Strip 'zfs:' and 'ZFS='.
-               ZFS_BOOTFS="${ROOT#*[:=]}"
-
-               # Strip everything after the first slash.
-               ZFS_RPOOL="${ZFS_BOOTFS%%/*}"
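-
-               # For illustration: 'root=ZFS=rpool/ROOT/debian' yields
-               # ZFS_BOOTFS='rpool/ROOT/debian' and ZFS_RPOOL='rpool'.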
-       fi
-
-       # Import the pool (if not already done so in the AUTO check above).
-       if [ -n "$ZFS_RPOOL" -a -z "${POOL_IMPORTED}" ]
-       then
-               [ "$quiet" != "y" ] && \
-                   zfs_log_begin_msg "Importing ZFS root pool '$ZFS_RPOOL'"
-
-               import_pool "${ZFS_RPOOL}"
-               find_rootfs "${ZFS_RPOOL}"
-
-               [ "$quiet" != "y" ] && zfs_log_end_msg
-       fi
-
-       if [ -z "${POOL_IMPORTED}" ]
-       then
-               # No pool imported, this is serious!
-               disable_plymouth
-               echo ""
-               echo "Command: $ZFS_CMD"
-               echo "Message: $ZFS_STDERR"
-               echo "Error: $ZFS_ERROR"
-               echo ""
-               echo "No pool imported. Manually import the root pool"
-               echo "at the command prompt and then exit."
-               echo "Hint: Try:  zpool import -R ${rootmnt} -N ${ZFS_RPOOL}"
-               shell
-       fi
-
-       # In case the pool was specified as guid, resolve guid to name
-       pool="$("${ZPOOL}" get name,guid -o name,value -H | \
-           awk -v pool="${ZFS_RPOOL}" '$2 == pool { print $1 }')"
-       if [ -n "$pool" ]; then
-               # If $ZFS_BOOTFS contains guid, replace the guid portion with $pool
-               ZFS_BOOTFS=$(echo "$ZFS_BOOTFS" | \
-                       sed -e "s/$("${ZPOOL}" get guid -o value "$pool" -H)/$pool/g")
-               ZFS_RPOOL="${pool}"
-       fi
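-       # For illustration (guid invented): a pool given as
-       # '9271169074625762480' resolves to e.g. 'rpool', and a ZFS_BOOTFS
-       # of '9271169074625762480/ROOT/debian' becomes 'rpool/ROOT/debian'.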
-
-       # Set the no-op scheduler on the disks containing the vdevs of
-       # the root pool. For single-queue devices, this scheduler is
-       # "noop", for multi-queue devices, it is "none".
-       # ZFS already does this for wholedisk vdevs (for all pools), so this
-       # is only important for partitions.
-       "${ZPOOL}" status -L "${ZFS_RPOOL}" 2> /dev/null |
-           awk '/^\t / && !/(mirror|raidz)/ {
-               dev=$1;
-               sub(/[0-9]+$/, "", dev);
-               print dev
-           }' |
-       while read -r i
-       do
-               SCHEDULER=/sys/block/$i/queue/scheduler
-               if [ -e "${SCHEDULER}" ]
-               then
-                       # Query to see what schedulers are available
-                       case "$(cat "${SCHEDULER}")" in
-                               *noop*) echo noop > "${SCHEDULER}" ;;
-                               *none*) echo none > "${SCHEDULER}" ;;
-                       esac
-               fi
-       done
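-       # For illustration (device names invented): a root pool on
-       # /dev/sda2 yields dev 'sda'; if /sys/block/sda/queue/scheduler
-       # reads '[mq-deadline] none', the case above writes 'none'.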
-
-       # ----------------------------------------------------------------
-       # P R E P A R E   R O O T   F I L E S Y S T E M
-
-       if [ -n "${ZFS_BOOTFS}" ]
-       then
-               # Booting from a snapshot?
-               # Will overwrite the ZFS_BOOTFS variable like so:
-               #   rpool/ROOT/debian@snap2 => rpool/ROOT/debian_snap2
-               echo "${ZFS_BOOTFS}" | grep -q '@' && \
-                   setup_snapshot_booting "${ZFS_BOOTFS}"
-       fi
-
-       if [ -z "${ZFS_BOOTFS}" ]
-       then
-               # Still nothing! Let the user sort this out.
-               disable_plymouth
-               echo ""
-               echo "Error: Unknown root filesystem - no 'bootfs' pool property and"
-               echo "       not specified on the kernel command line."
-               echo ""
-               echo "Manually mount the root filesystem on $rootmnt and then exit."
-               echo "Hint: Try:  mount -o zfsutil -t zfs ${ZFS_RPOOL-rpool}/ROOT/system $rootmnt"
-               shell
-       fi
-
-       # ----------------------------------------------------------------
-       # M O U N T   F I L E S Y S T E M S
-
-       # * Ideally, the root filesystem would be mounted like this:
-       #
-       #     zpool import -R "$rootmnt" -N "$ZFS_RPOOL"
-       #     zfs mount -o mountpoint=/ "${ZFS_BOOTFS}"
-       #
-       #   but the MOUNTPOINT prefix is preserved on descendant filesystems
-       #   after the pivot into the regular root, which later breaks things
-       #   like `zfs mount -a` and the /proc/self/mounts refresh.
-       #
-       # * Mount additional filesystems required
-       #   Such as /usr, /var, /usr/local etc.
-       #   NOTE: Mounted in the order specified in the
-       #         ZFS_INITRD_ADDITIONAL_DATASETS variable so take care!
-
-       # Go through the complete list (recursively) of all filesystems below
-       # the real root dataset
-       filesystems=$("${ZFS}" list -oname -tfilesystem -H -r "${ZFS_BOOTFS}")
-       for fs in $filesystems $ZFS_INITRD_ADDITIONAL_DATASETS
-       do
-               mount_fs "$fs"
-       done
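-       # For illustration (names invented): with ZFS_BOOTFS=rpool/ROOT/debian
-       # the list might be 'rpool/ROOT/debian' then 'rpool/ROOT/debian/var',
-       # mounted in that order, followed by anything listed in
-       # ZFS_INITRD_ADDITIONAL_DATASETS.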
-
-       # ------------
-       # Debugging information
-       if [ -n "${ZFS_DEBUG}" ]
-       then
-               #exec 2>&1-
-
-               echo "DEBUG: imported pools:"
-               "${ZPOOL}" list -H
-               echo
-
-               echo "DEBUG: mounted ZFS filesystems:"
-               mount | grep zfs
-               echo
-
-               echo "=> waiting for ENTER before continuing because of 'zfsdebug=1'. "
-               echo -n "   'c' for shell, 'r' for reboot, 'ENTER' to continue. "
-               read b
-
-               [ "$b" = "c" ] && /bin/sh
-               [ "$b" = "r" ] && reboot -f
-
-               set +x
-       fi
-
-       # ------------
-       # Run local bottom script
-       if type run_scripts > /dev/null 2>&1 && \
-           [ -f "/scripts/local-bottom" -o -d "/scripts/local-bottom" ]
-       then
-               [ "$quiet" != "y" ] && \
-                   zfs_log_begin_msg "Running /scripts/local-bottom"
-               run_scripts /scripts/local-bottom
-               [ "$quiet" != "y" ] && zfs_log_end_msg
-       fi
-}