# ZFS boot stub for initramfs-tools.
#
# In the initramfs environment, the /init script sources this stub to
# override the default functions in the /scripts/local script.
#
# Enable this by passing boot=zfs on the kernel command line.
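#
# An illustrative kernel command line (pool/dataset names are examples only):
#
#   boot=zfs rpool=rpool bootfs=rpool/ROOT/debian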
#
# $quiet, $root, $rpool, $bootfs come from the cmdline:
# shellcheck disable=SC2154

# Source the common functions
. /etc/zfs/zfs-functions

# Start an interactive shell.
# Use Debian's panic() if defined, because it allows preventing shell access
# by setting panic on the cmdline (e.g. panic=0 or panic=15).
# See "4.5 Disable root prompt on the initramfs" in the Securing Debian Manual:
# https://www.debian.org/doc/manuals/securing-debian-howto/ch4.en.html
shell() {
	if command -v panic > /dev/null 2>&1; then
		panic
	else
		/bin/sh
	fi
}

# This runs any scripts that should run before we start importing
# pools and mounting any filesystems.
pre_mountroot()
{
	if command -v run_scripts > /dev/null 2>&1
	then
		if [ -f "/scripts/local-top" ] || [ -d "/scripts/local-top" ]
		then
			[ "$quiet" != "y" ] && \
			    zfs_log_begin_msg "Running /scripts/local-top"
			run_scripts /scripts/local-top
			[ "$quiet" != "y" ] && zfs_log_end_msg
		fi

		if [ -f "/scripts/local-premount" ] || [ -d "/scripts/local-premount" ]
		then
			[ "$quiet" != "y" ] && \
			    zfs_log_begin_msg "Running /scripts/local-premount"
			run_scripts /scripts/local-premount
			[ "$quiet" != "y" ] && zfs_log_end_msg
		fi
	fi
}

# If plymouth is available, hide the splash image.
disable_plymouth()
{
	if [ -x /bin/plymouth ] && /bin/plymouth --ping
	then
		/bin/plymouth hide-splash >/dev/null 2>&1
	fi
}

# Get a ZFS filesystem property value.
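# For example (illustrative dataset name), 'get_fs_value rpool/ROOT/debian mountpoint'
# runs: zfs get -H -ovalue mountpoint rpool/ROOT/debian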
get_fs_value()
{
	fs="$1"
	value=$2

	"${ZFS}" get -H -ovalue "$value" "$fs" 2> /dev/null
}

# Find the 'bootfs' property on pool $1.
# If the pool has no usable bootfs (or its mountpoint is not '/'),
# ignore this pool by exporting it again.
find_rootfs()
{
	pool="$1"

	# If 'POOL_IMPORTED' isn't set, no pool has been imported and
	# therefore we won't be able to find a root fs.
	[ -z "${POOL_IMPORTED}" ] && return 1

	# If it's already specified, just keep it mounted and exit.
	# The user-supplied value (kernel command line) must be correct.
	if [ -n "${ZFS_BOOTFS}" ] && [ "${ZFS_BOOTFS}" != "zfs:AUTO" ]; then
		return 0
	fi

	# Not set; try to find it in the 'bootfs' property of the pool.
	# NOTE: zpool does not support 'get -H -ovalue bootfs'...
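	# Example output (illustrative):
	#
	#   $ zpool list -H -obootfs rpool
	#   rpool/ROOT/debian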
	ZFS_BOOTFS=$("${ZPOOL}" list -H -obootfs "$pool")

	# Make sure it's not '-' and that its mountpoint is exactly '/'.
	if [ "${ZFS_BOOTFS}" != "-" ] && \
	    get_fs_value "${ZFS_BOOTFS}" mountpoint | grep -q '^/$'
	then
		# Keep it mounted
		POOL_IMPORTED=1
		return 0
	fi

	# No usable boot fs here; export the pool and try again later..
	"${ZPOOL}" export "$pool"
	POOL_IMPORTED=
	ZFS_BOOTFS=
	return 1
}

# Support function to get a list of all pools, separated with ';'.
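# For example (illustrative), 'zpool import' output such as
#
#      pool: rpool
#        id: 1234567890123456789
#     state: ONLINE
#
# is reduced to 'rpool' (multiple pools become 'rpool;tank').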
find_pools()
{
	pools=$("$@" 2> /dev/null | \
	    sed -Ee '/pool:|^[a-zA-Z0-9]/!d' -e 's@.*: @@' | \
	    tr '\n' ';')

	echo "${pools%%;}" # Return without the last ';'.
}

# Get a list of all available pools.
get_pools()
{
	if [ -n "${ZFS_POOL_IMPORT}" ]; then
		echo "$ZFS_POOL_IMPORT"
		return 0
	fi

	# Get the base list of available pools.
	available_pools=$(find_pools "$ZPOOL" import)

	# Just in case - it has been seen to happen that a pool isn't
	# visible/found with a simple "zpool import", but only when using
	# the "-d" option or setting ZPOOL_IMPORT_PATH.
	if [ -d "/dev/disk/by-id" ]
	then
		npools=$(find_pools "$ZPOOL" import -d /dev/disk/by-id)
		if [ -n "$npools" ]
		then
			# Because we have found extra pool(s) here, which weren't
			# found 'normally', we need to force USE_DISK_BY_ID to
			# make sure we're able to actually import it/them later.
			USE_DISK_BY_ID='yes'

			if [ -n "$available_pools" ]
			then
				# Filter out duplicates (pools found with the simple
				# "zpool import" which are also found with the
				# "zpool import -d ...").
				npools=$(echo "$npools" | sed "s,$available_pools,,")

				# Add the list to the existing list of
				# available pools.
				available_pools="$available_pools;$npools"
			else
				available_pools="$npools"
			fi
		fi
	fi

	# Filter out any exceptions...
	if [ -n "$ZFS_POOL_EXCEPTIONS" ]
	then
		found=""
		apools=""
		OLD_IFS="$IFS" ; IFS=";"

		for pool in $available_pools
		do
			for exception in $ZFS_POOL_EXCEPTIONS
			do
				[ "$pool" = "$exception" ] && continue 2
				found="$pool"
			done

			if [ -n "$found" ]
			then
				if [ -n "$apools" ]
				then
					apools="$apools;$pool"
				else
					apools="$pool"
				fi
			fi
		done

		IFS="$OLD_IFS"
		available_pools="$apools"
	fi

	# Return the list of available pools.
	echo "$available_pools"
}

# Import the given pool $1.
import_pool()
{
	pool="$1"

	# Verify that the pool isn't already imported.
	# Try as hard as we can to avoid needing '-f' to import.
	"${ZPOOL}" get name,guid -o value -H 2>/dev/null | grep -Fxq "$pool" && return 0

	# For backwards compatibility, make sure that ZPOOL_IMPORT_PATH is set
	# to something we can use later with the real import(s). We want to
	# make sure we find all by-* dirs, BUT by-vdev should be first (if it
	# exists).
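	# The resulting search path ends up looking like this (illustrative;
	# the actual by-* directories depend on the system's udev rules):
	#
	#   /dev/disk/by-vdev:/dev/disk/by-id:/dev/disk/by-label:/dev/disk/by-uuid:/dev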
	if [ -n "$USE_DISK_BY_ID" ] && [ -z "$ZPOOL_IMPORT_PATH" ]
	then
		dirs="$(for dir in /dev/disk/by-*
		do
			# Ignore by-vdev here - we want it first!
			echo "$dir" | grep -q /by-vdev && continue
			[ ! -d "$dir" ] && continue

			printf "%s" "$dir:"
		done | sed 's,:$,,g')"

		if [ -d "/dev/disk/by-vdev" ]
		then
			# Add by-vdev at the beginning.
			ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:"
		fi

		# ... and /dev at the very end, just for good measure.
		ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH$dirs:/dev"
	fi

	# Needs to be exported for "zpool" to catch it.
	[ -n "$ZPOOL_IMPORT_PATH" ] && export ZPOOL_IMPORT_PATH


	[ "$quiet" != "y" ] && zfs_log_begin_msg \
	    "Importing pool '${pool}' using defaults"

	ZFS_CMD="${ZPOOL} import -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
	ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		if [ -f "${ZPOOL_CACHE}" ]
		then
			[ "$quiet" != "y" ] && zfs_log_begin_msg \
			    "Importing pool '${pool}' using cachefile."

			ZFS_CMD="${ZPOOL} import -c ${ZPOOL_CACHE} -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
			ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
			ZFS_ERROR="$?"
		fi

		if [ "${ZFS_ERROR}" != 0 ]
		then
			[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

			disable_plymouth
			echo ""
			echo "Command: ${ZFS_CMD} '$pool'"
			echo "Message: $ZFS_STDERR"
			echo "Error: $ZFS_ERROR"
			echo ""
			echo "Failed to import pool '$pool'."
			echo "Manually import the pool and exit."
			shell
		fi
	fi

	[ "$quiet" != "y" ] && zfs_log_end_msg

	POOL_IMPORTED=1
	return 0
}

# Load ZFS modules.
# Loading a module in an initrd requires a slightly different approach,
# with more logging etc.
load_module_initrd()
{
	[ -n "$ROOTDELAY" ] && ZFS_INITRD_PRE_MOUNTROOT_SLEEP="$ROOTDELAY"

	if [ "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP" -gt 0 ] 2>/dev/null
	then
		if [ "$quiet" != "y" ]; then
			zfs_log_begin_msg "Sleeping for" \
			    "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP seconds..."
		fi
		sleep "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP"
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	# Wait for all of the /dev/{hd,sd}[a-z] device nodes to appear.
	if command -v wait_for_udev > /dev/null 2>&1 ; then
		wait_for_udev 10
	elif command -v wait_for_dev > /dev/null 2>&1 ; then
		wait_for_dev
	fi

	# 'zpool import' refuses to import without a valid /proc/self/mounts.
	[ ! -f /proc/self/mounts ] && mount proc /proc

	# Load the module.
	load_module "zfs" || return 1

	if [ "$ZFS_INITRD_POST_MODPROBE_SLEEP" -gt 0 ] 2>/dev/null
	then
		if [ "$quiet" != "y" ]; then
			zfs_log_begin_msg "Sleeping for" \
			    "$ZFS_INITRD_POST_MODPROBE_SLEEP seconds..."
		fi
		sleep "$ZFS_INITRD_POST_MODPROBE_SLEEP"
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Mount a given filesystem.
mount_fs()
{
	fs="$1"

	# Check that the filesystem exists.
	"${ZFS}" list -oname -tfilesystem -H "${fs}" > /dev/null 2>&1 || return 1

	# Skip filesystems with canmount=off. The root fs should not have
	# canmount=off, but ignore it for backwards compatibility just in case.
	if [ "$fs" != "${ZFS_BOOTFS}" ]
	then
		canmount=$(get_fs_value "$fs" canmount)
		[ "$canmount" = "off" ] && return 0
	fi

	# Need the _original_ dataset's mountpoint!
	mountpoint=$(get_fs_value "$fs" mountpoint)
	ZFS_CMD="mount.zfs -o zfsutil"
	if [ "$mountpoint" = "legacy" ] || [ "$mountpoint" = "none" ]; then
		# Can't use the mountpoint property. Might be one of our
		# clones. Check whether the 'org.zol:mountpoint' property
		# set in clone_snap() is usable.
		mountpoint1=$(get_fs_value "$fs" org.zol:mountpoint)
		if [ "$mountpoint1" = "legacy" ] ||
		    [ "$mountpoint1" = "none" ] ||
		    [ "$mountpoint1" = "-" ]
		then
			if [ "$fs" != "${ZFS_BOOTFS}" ]; then
				# We don't have a proper mountpoint and this
				# isn't the root fs.
				return 0
			fi
			ZFS_CMD="mount.zfs"
			# Last hail-mary: Hope 'rootmnt' is set!
			mountpoint=""
		else
			mountpoint="$mountpoint1"
		fi
	fi

	# Possibly decrypt a filesystem using native encryption.
	decrypt_fs "$fs"

	[ "$quiet" != "y" ] && \
	    zfs_log_begin_msg "Mounting '${fs}' on '${rootmnt}/${mountpoint}'"
	[ -n "${ZFS_DEBUG}" ] && \
	    zfs_log_begin_msg "CMD: '$ZFS_CMD ${fs} ${rootmnt}/${mountpoint}'"

	ZFS_STDERR=$(${ZFS_CMD} "${fs}" "${rootmnt}/${mountpoint}" 2>&1)
	ZFS_ERROR=$?
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: ${ZFS_CMD} ${fs} ${rootmnt}/${mountpoint}"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to mount ${fs} on ${rootmnt}/${mountpoint}."
		echo "Manually mount the filesystem and exit."
		shell
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Unlock a ZFS native-encrypted filesystem.
decrypt_fs()
{
	fs="$1"

	# If pool encryption is active (and the zpool command understands
	# the 'feature@encryption' property)...
	if [ "$(zpool list -H -o feature@encryption "${fs%%/*}")" = 'active' ]; then

		# Determine the dataset that holds the key for the root dataset.
		ENCRYPTIONROOT="$(get_fs_value "${fs}" encryptionroot)"
		KEYLOCATION="$(get_fs_value "${ENCRYPTIONROOT}" keylocation)"

		echo "${ENCRYPTIONROOT}" > /run/zfs_fs_name

		# If the root dataset is encrypted...
		if ! [ "${ENCRYPTIONROOT}" = "-" ]; then
			KEYSTATUS="$(get_fs_value "${ENCRYPTIONROOT}" keystatus)"
			# Continue only if the key needs to be loaded.
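			# ('keystatus' on an encrypted dataset is normally
			# either 'available' or 'unavailable'.)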
402 [ "$KEYSTATUS" = "unavailable" ] || return 0
403
404 # Do not prompt if key is stored noninteractively,
405 if ! [ "${KEYLOCATION}" = "prompt" ]; then
406 $ZFS load-key "${ENCRYPTIONROOT}"
407
408 # Prompt with plymouth, if active
409 elif /bin/plymouth --ping 2>/dev/null; then
410 echo "plymouth" > /run/zfs_console_askpwd_cmd
411 for _ in 1 2 3; do
412 plymouth ask-for-password --prompt "Encrypted ZFS password for ${ENCRYPTIONROOT}" | \
413 $ZFS load-key "${ENCRYPTIONROOT}" && break
414 done
415
416 # Prompt with systemd, if active
417 elif [ -e /run/systemd/system ]; then
418 echo "systemd-ask-password" > /run/zfs_console_askpwd_cmd
419 for _ in 1 2 3; do
420 systemd-ask-password --no-tty "Encrypted ZFS password for ${ENCRYPTIONROOT}" | \
421 $ZFS load-key "${ENCRYPTIONROOT}" && break
422 done
423
424 # Prompt with ZFS tty, otherwise
425 else
426 # Temporarily setting "printk" to "7" allows the prompt to appear even when the "quiet" kernel option has been used
427 echo "load-key" > /run/zfs_console_askpwd_cmd
428 read -r storeprintk _ < /proc/sys/kernel/printk
429 echo 7 > /proc/sys/kernel/printk
430 $ZFS load-key "${ENCRYPTIONROOT}"
431 echo "$storeprintk" > /proc/sys/kernel/printk
432 fi
433 fi
434 fi
435
436 return 0
437 }

# Destroy a given filesystem.
destroy_fs()
{
	fs="$1"

	[ "$quiet" != "y" ] && \
	    zfs_log_begin_msg "Destroying '$fs'"

	ZFS_CMD="${ZFS} destroy $fs"
	ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to destroy '$fs'. Please make sure that '$fs' is not available."
		echo "Hint: Try: zfs destroy -Rfn $fs"
		echo "If this dryrun looks good, then remove the 'n' from '-Rfn' and try again."
		shell
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Clone snapshot $1 to destination filesystem $2.
# Set 'canmount=noauto' and 'mountpoint=none' so that we get to keep
# manual control over its mounting (i.e., make sure it's not automatically
# mounted with a 'zfs mount -a' in the init/systemd scripts).
clone_snap()
{
	snap="$1"
	destfs="$2"
	mountpoint="$3"

	[ "$quiet" != "y" ] && zfs_log_begin_msg "Cloning '$snap' to '$destfs'"

	# Clone the snapshot into a dataset we can boot from:
	# + We don't want this filesystem to be automatically mounted; we
	#   want control over this here and nowhere else.
	# + We don't need any mountpoint set for the same reason.
	# We use the 'org.zol:mountpoint' property to remember the mountpoint.
	ZFS_CMD="${ZFS} clone -o canmount=noauto -o mountpoint=none"
	ZFS_CMD="${ZFS_CMD} -o org.zol:mountpoint=${mountpoint}"
	ZFS_CMD="${ZFS_CMD} $snap $destfs"
	ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to clone snapshot."
		echo "Make sure that any problems are corrected and then make sure"
		echo "that the dataset '$destfs' exists and is bootable."
		shell
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Roll back a given snapshot.
rollback_snap()
{
	snap="$1"

	[ "$quiet" != "y" ] && zfs_log_begin_msg "Rollback $snap"

	ZFS_CMD="${ZFS} rollback -Rf $snap"
	ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
	ZFS_ERROR="$?"
	if [ "${ZFS_ERROR}" != 0 ]
	then
		[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "Failed to rollback snapshot."
		shell
	else
		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	return 0
}

# Get a list of snapshots and present them as a numbered list
# for the user to choose from.
ask_user_snap()
{
	fs="$1"

	# We need to temporarily disable debugging. Set 'debug' so we
	# remember to enable it again.
	if [ -n "${ZFS_DEBUG}" ]; then
		unset ZFS_DEBUG
		set +x
		debug=1
	fi

	# Because the resulting snapshot is sent to the caller on
	# stdout, we use stderr for our questions.
	echo "What snapshot do you want to boot from?" > /dev/stderr
	# shellcheck disable=SC2046
	IFS="
" set -- $("${ZFS}" list -H -oname -tsnapshot -r "${fs}")

	i=1
	for snap in "$@"; do
		echo " $i: $snap"
		i=$((i + 1))
	done > /dev/stderr

	# Use expr instead of test here, because [ a -lt 0 ] errors out
	# while expr falls back to lexicographical comparison, which works
	# out right.
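	# E.g. (illustrative): a non-numeric answer like "abc" compares
	# lexicographically, keeps the loop condition true, and so the
	# prompt simply repeats.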
	snapnr=0
	while expr "$snapnr" "<" 1 > /dev/null ||
	    expr "$snapnr" ">" "$#" > /dev/null
	do
		printf "%s" "Snap nr [1-$#]? " > /dev/stderr
		read -r snapnr
	done

	# Re-enable debugging.
	if [ -n "${debug}" ]; then
		ZFS_DEBUG=1
		set -x
	fi

	eval echo '$'"$snapnr"
}

setup_snapshot_booting()
{
	snap="$1"
	retval=0

	# Make sure that the snapshot specified actually exists.
	if [ -z "$(get_fs_value "${snap}" type)" ]
	then
		# Snapshot does not exist (...@<null> ?);
		# ask the user for a snapshot to use.
		snap="$(ask_user_snap "${snap%%@*}")"
	fi

	# Separate the full snapshot ('$snap') into its filesystem and
	# snapshot names. Would have been nice with a split() function..
	rootfs="${snap%%@*}"
	snapname="${snap##*@}"
	ZFS_BOOTFS="${rootfs}_${snapname}"

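	# The regex below matches a whole-word 'rollback=on|yes|1' on the
	# kernel command line (case-insensitively), while allowing for
	# backslash-escaped spaces in earlier arguments.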
	if ! grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
	then
		# If the destination dataset for the clone
		# already exists, destroy it, recursively.
		if [ -n "$(get_fs_value "${rootfs}_${snapname}" type)" ]
		then
			filesystems=$("${ZFS}" list -oname -tfilesystem -H \
			    -r -Sname "${ZFS_BOOTFS}")
			for fs in $filesystems; do
				destroy_fs "${fs}"
			done
		fi
	fi

	# Get all snapshots, recursively (we might need to clone /usr, /var
	# etc. as well).
	for s in $("${ZFS}" list -H -oname -tsnapshot -r "${rootfs}" | \
	    grep "${snapname}")
	do
		if grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
		then
			# Rollback snapshot.
			rollback_snap "$s" || retval=$((retval + 1))
			ZFS_BOOTFS="${rootfs}"
		else
			# Set up a destination filesystem name.
			# Ex: Called with 'rpool/ROOT/debian@snap2'
			#   rpool/ROOT/debian@snap2      => rpool/ROOT/debian_snap2
			#   rpool/ROOT/debian/boot@snap2 => rpool/ROOT/debian_snap2/boot
			#   rpool/ROOT/debian/usr@snap2  => rpool/ROOT/debian_snap2/usr
			#   rpool/ROOT/debian/var@snap2  => rpool/ROOT/debian_snap2/var
			subfs="${s##"$rootfs"}"
			subfs="${subfs%%@"$snapname"}"

			destfs="${rootfs}_${snapname}" # base fs.
			[ -n "$subfs" ] && destfs="${destfs}$subfs" # + sub fs.

			# Get the mountpoint of the filesystem, to be used
			# with clone_snap(). If legacy or none, then use
			# the sub fs value.
			mountpoint=$(get_fs_value "${s%%@*}" mountpoint)
			if [ "$mountpoint" = "legacy" ] || \
			    [ "$mountpoint" = "none" ]
			then
				if [ -n "${subfs}" ]; then
					mountpoint="${subfs}"
				else
					mountpoint="/"
				fi
			fi

			# Clone the snapshot into its own
			# filesystem.
			clone_snap "$s" "${destfs}" "${mountpoint}" || \
			    retval=$((retval + 1))
		fi
	done

	# If we haven't returned yet, we have a problem...
	return "${retval}"
}

# ================================================================

# This is the main function.
mountroot()
{
	# ----------------------------------------------------------------
	# I N I T I A L   S E T U P

	# ------------
	# Run the pre-mount scripts from /scripts/local-top.
	pre_mountroot

	# ------------
	# Source the default setup variables.
	[ -r '/etc/default/zfs' ] && . /etc/default/zfs

	# ------------
	# Support the debug option.
	if grep -qiE '(^|[^\\](\\\\)* )(zfs_debug|zfs\.debug|zfsdebug)=(on|yes|1)( |$)' /proc/cmdline
	then
		ZFS_DEBUG=1
		mkdir /var/log
		#exec 2> /var/log/boot.debug
		set -x
	fi

	# ------------
	# Load the ZFS module etc.
	if ! load_module_initrd; then
		disable_plymouth
		echo ""
		echo "Failed to load ZFS modules."
		echo "Manually load the modules and exit."
		shell
	fi

	# ------------
	# Look for the cache file (if any).
	[ -f "${ZPOOL_CACHE}" ] || unset ZPOOL_CACHE
	[ -s "${ZPOOL_CACHE}" ] || unset ZPOOL_CACHE

	# ------------
	# Compatibility: 'ROOT' is for Debian GNU/Linux (etc),
	#                'root' is for Redhat/Fedora (etc),
	#                'REAL_ROOT' is for Gentoo
	if [ -z "$ROOT" ]
	then
		[ -n "$root" ] && ROOT=${root}

		[ -n "$REAL_ROOT" ] && ROOT=${REAL_ROOT}
	fi

	# ------------
	# Where to mount the root fs in the initrd - set outside this script.
	# Compatibility: 'rootmnt' is for Debian GNU/Linux (etc),
	#                'NEWROOT' is for RedHat/Fedora (etc),
	#                'NEW_ROOT' is for Gentoo
	if [ -z "$rootmnt" ]
	then
		[ -n "$NEWROOT" ] && rootmnt=${NEWROOT}

		[ -n "$NEW_ROOT" ] && rootmnt=${NEW_ROOT}
	fi

	# ------------
	# No longer set in the defaults file, but it could have been set in
	# get_pools() in some circumstances. If it's set to anything other
	# than 'yes', it's no good to us.
	[ -n "$USE_DISK_BY_ID" ] && [ "$USE_DISK_BY_ID" != 'yes' ] && \
	    unset USE_DISK_BY_ID

	# ----------------------------------------------------------------
	# P A R S E   C O M M A N D   L I N E   O P T I O N S

	# This is the really ugly part - there are so many options and
	# permutations 'out there', and if we should make this the 'primary'
	# source for ZFS initrd scripting, we need/should support them all.
	#
	# Supports the following kernel command line argument combinations
	# (in this order - first match wins):
	#
	#   rpool=<pool>                 (tries to find bootfs automatically)
	#   bootfs=<pool>/<dataset>      (uses this for rpool - first part)
	#   rpool=<pool> bootfs=<pool>/<dataset>
	#   -B zfs-bootfs=<pool>/<fs>    (uses this for rpool - first part)
	#   rpool=rpool                  (default if none of the above is used)
	#   root=<pool>/<dataset>        (uses this for rpool - first part)
	#   root=ZFS=<pool>/<dataset>    (uses this for rpool - first part, without 'ZFS=')
	#   root=zfs:AUTO                (tries to detect both pool and rootfs)
	#   root=zfs:<pool>/<dataset>    (uses this for rpool - first part, without 'zfs:')
	#
	# Option <dataset> could also be <snapshot>
	# Option <pool> could also be <guid>

	# ------------
	# Support the force option.
	# Setting one of zfs_force, zfs.force or zfsforce to
	# 'yes', 'on' or '1' will make sure we force-import the pool.
	# This should (almost) never be needed, but it's here for
	# completeness.
	ZPOOL_FORCE=""
	if grep -qiE '(^|[^\\](\\\\)* )(zfs_force|zfs\.force|zfsforce)=(on|yes|1)( |$)' /proc/cmdline
	then
		ZPOOL_FORCE="-f"
	fi

	# ------------
	# Look for the 'rpool' and 'bootfs' parameters.
	[ -n "$rpool" ] && ZFS_RPOOL="${rpool#rpool=}"
	[ -n "$bootfs" ] && ZFS_BOOTFS="${bootfs#bootfs=}"

	# ------------
	# If we have 'ROOT' (see above), but not 'ZFS_BOOTFS', then use
	# 'ROOT'.
	[ -n "$ROOT" ] && [ -z "${ZFS_BOOTFS}" ] && ZFS_BOOTFS="$ROOT"

	# ------------
	# Check for the `-B zfs-bootfs=%s/%u,...` kind of parameter.
	# NOTE: Only use the pool name and dataset. The rest is not
	# supported by OpenZFS (whatever it's for).
	if [ -z "$ZFS_RPOOL" ]
	then
		# The ${zfs-bootfs} variable is set on the kernel command
		# line, usually by GRUB, but it cannot be referenced here
		# directly because Bourne variable names cannot contain a
		# hyphen.
		#
		# Reassign the variable by dumping the environment and
		# stripping the zfs-bootfs= prefix. Let the shell handle
		# quoting through the eval command:
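		# For example (illustrative), if the environment dump
		# contains 'zfs-bootfs=rpool/ROOT/debian', the eval below
		# leaves ZFS_RPOOL='rpool/ROOT/debian'.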
		# shellcheck disable=SC2046
		eval ZFS_RPOOL=$(set | sed -n -e 's,^zfs-bootfs=,,p')
	fi

	# ------------
	# No root fs or pool specified - do auto detect.
	if [ -z "$ZFS_RPOOL" ] && [ -z "${ZFS_BOOTFS}" ]
	then
		# Do auto detect. Do this by 'cheating' - set 'root=zfs:AUTO',
		# which will be caught later.
		ROOT='zfs:AUTO'
	fi

	# ----------------------------------------------------------------
	# F I N D   A N D   I M P O R T   C O R R E C T   P O O L

	# ------------
	if [ "$ROOT" = "zfs:AUTO" ]
	then
		# Try to detect both the pool and the root fs.

		# If we got here, we don't have a hint as to the root
		# dataset, but with root=zfs:AUTO on the cmdline, ZFS_BOOTFS
		# contains "zfs:AUTO" here and would interfere with later
		# checks.
		ZFS_BOOTFS=

		[ "$quiet" != "y" ] && \
		    zfs_log_begin_msg "Attempting to import additional pools."

		# Get a list of pools available for import.
		if [ -n "$ZFS_RPOOL" ]
		then
			# We've specified a pool - check only that.
			POOLS=$ZFS_RPOOL
		else
			POOLS=$(get_pools)
		fi

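		# Note: the 'IFS="$OLD_IFS" cmd' prefix form below restores
		# the default IFS only for the duration of each call, so word
		# splitting inside import_pool() and find_rootfs() is
		# unaffected by the ';' separator used for this loop.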
		OLD_IFS="$IFS" ; IFS=";"
		for pool in $POOLS
		do
			[ -z "$pool" ] && continue

			IFS="$OLD_IFS" import_pool "$pool"
			IFS="$OLD_IFS" find_rootfs "$pool" && break
		done
		IFS="$OLD_IFS"

		[ "$quiet" != "y" ] && zfs_log_end_msg "$ZFS_ERROR"
	else
		# No auto - use the value from the command line option.

		# Strip 'zfs:' and 'ZFS='.
		ZFS_BOOTFS="${ROOT#*[:=]}"

		# Strip everything after the first slash.
		ZFS_RPOOL="${ZFS_BOOTFS%%/*}"
	fi

	# Import the pool (if not already done so in the AUTO check above).
	if [ -n "$ZFS_RPOOL" ] && [ -z "${POOL_IMPORTED}" ]
	then
		[ "$quiet" != "y" ] && \
		    zfs_log_begin_msg "Importing ZFS root pool '$ZFS_RPOOL'"

		import_pool "${ZFS_RPOOL}"
		find_rootfs "${ZFS_RPOOL}"

		[ "$quiet" != "y" ] && zfs_log_end_msg
	fi

	if [ -z "${POOL_IMPORTED}" ]
	then
		# No pool imported - this is serious!
		disable_plymouth
		echo ""
		echo "Command: $ZFS_CMD"
		echo "Message: $ZFS_STDERR"
		echo "Error: $ZFS_ERROR"
		echo ""
		echo "No pool imported. Manually import the root pool"
		echo "at the command prompt and then exit."
		echo "Hint: Try: zpool import -N ${ZFS_RPOOL}"
		shell
	fi

	# In case the pool was specified as a guid, resolve the guid to a name.
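	# "zpool get name,guid -o name,value -H" prints one line per pool
	# and property, e.g. (illustrative):
	#
	#   rpool	rpool
	#   rpool	1234567890123456789
	#
	# The awk below prints the pool name ($1) whose name or guid ($2)
	# matches ZFS_RPOOL.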
887 pool="$("${ZPOOL}" get name,guid -o name,value -H | \
888 awk -v pool="${ZFS_RPOOL}" '$2 == pool { print $1 }')"
889 if [ -n "$pool" ]; then
890 # If $ZFS_BOOTFS contains guid, replace the guid portion with $pool
891 ZFS_BOOTFS=$(echo "$ZFS_BOOTFS" | \
892 sed -e "s/$("${ZPOOL}" get guid -o value "$pool" -H)/$pool/g")
893 ZFS_RPOOL="${pool}"
894 fi


	# ----------------------------------------------------------------
	# P R E P A R E   R O O T   F I L E S Y S T E M

	if [ -n "${ZFS_BOOTFS}" ]
	then
		# Booting from a snapshot?
		# Will overwrite the ZFS_BOOTFS variable like so:
		#   rpool/ROOT/debian@snap2 => rpool/ROOT/debian_snap2
		echo "${ZFS_BOOTFS}" | grep -q '@' && \
		    setup_snapshot_booting "${ZFS_BOOTFS}"
	fi

	if [ -z "${ZFS_BOOTFS}" ]
	then
		# Still nothing! Let the user sort this out.
		disable_plymouth
		echo ""
		echo "Error: Unknown root filesystem - no 'bootfs' pool property and"
		echo "       not specified on the kernel command line."
		echo ""
		echo "Manually mount the root filesystem on $rootmnt and then exit."
		echo "Hint: Try: mount.zfs -o zfsutil ${ZFS_RPOOL-rpool}/ROOT/system $rootmnt"
		shell
	fi

	# ----------------------------------------------------------------
	# M O U N T   F I L E S Y S T E M S

	# * Ideally, the root filesystem would be mounted like this:
	#
	#     zpool import -R "$rootmnt" -N "$ZFS_RPOOL"
	#     zfs mount -o mountpoint=/ "${ZFS_BOOTFS}"
	#
	#   but the MOUNTPOINT prefix is preserved on descendant filesystems
	#   after the pivot into the regular root, which later breaks things
	#   like 'zfs mount -a' and the /proc/self/mounts refresh.
	#
	# * Mount the additional filesystems required,
	#   such as /usr, /var, /usr/local etc.
	#   NOTE: They are mounted in the order specified in the
	#   ZFS_INITRD_ADDITIONAL_DATASETS variable, so take care!

	# Go through the complete list (recursively) of all filesystems below
	# the real root dataset.
	filesystems="$("${ZFS}" list -oname -tfilesystem -H -r "${ZFS_BOOTFS}")"
	OLD_IFS="$IFS" ; IFS="
"
	for fs in $filesystems; do
		IFS="$OLD_IFS" mount_fs "$fs"
	done
	IFS="$OLD_IFS"
	for fs in $ZFS_INITRD_ADDITIONAL_DATASETS; do
		mount_fs "$fs"
	done

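	# Signal that unlocking/mounting is complete (consumed, e.g., by
	# remote-unlock helpers); if a notify FIFO exists, the read blocks
	# until the listener acknowledges.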
	touch /run/zfs_unlock_complete
	if [ -e /run/zfs_unlock_complete_notify ]; then
		read -r < /run/zfs_unlock_complete_notify
	fi

	# ------------
	# Debugging information.
	if [ -n "${ZFS_DEBUG}" ]
	then
		#exec 2>&1-

		echo "DEBUG: imported pools:"
		"${ZPOOL}" list -H
		echo

		echo "DEBUG: mounted ZFS filesystems:"
		mount | grep zfs
		echo

		echo "=> waiting for ENTER before continuing because of 'zfsdebug=1'. "
		printf "%s" "   'c' for shell, 'r' for reboot, 'ENTER' to continue. "
		read -r b

		[ "$b" = "c" ] && /bin/sh
		[ "$b" = "r" ] && reboot -f

		set +x
	fi

	# ------------
	# Run the local-bottom scripts.
	if command -v run_scripts > /dev/null 2>&1
	then
		if [ -f "/scripts/local-bottom" ] || [ -d "/scripts/local-bottom" ]
		then
			[ "$quiet" != "y" ] && \
			    zfs_log_begin_msg "Running /scripts/local-bottom"
			run_scripts /scripts/local-bottom
			[ "$quiet" != "y" ] && zfs_log_end_msg
		fi
	fi
}