# ZFS boot stub for initramfs-tools.
#
# In the initramfs environment, the /init script sources this stub to
# override the default functions in the /scripts/local script.
#
# Enable this by passing boot=zfs on the kernel command line.
#
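# A minimal sketch of a matching kernel command line (pool and dataset
# names are illustrative, taken from the examples further down):
#
#     boot=zfs rpool=rpool bootfs=rpool/ROOT/debian
#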

# Source the common init script.
. /etc/zfs/zfs-functions

# Paths to what we need - in the initrd these paths are hardcoded,
# so override the defines in zfs-functions.
ZFS="/sbin/zfs"
ZPOOL="/sbin/zpool"
ZPOOL_CACHE="/etc/zfs/zpool.cache"
export ZFS ZPOOL ZPOOL_CACHE

# This runs any scripts that should run before we start importing
# pools and mounting any filesystems.
pre_mountroot()
{
    if type run_scripts > /dev/null 2>&1 && \
        [ -f "/scripts/local-top" -o -d "/scripts/local-top" ]
    then
        [ "$quiet" != "y" ] && \
            zfs_log_begin_msg "Running /scripts/local-top"
        run_scripts /scripts/local-top
        [ "$quiet" != "y" ] && zfs_log_end_msg
    fi

    if type run_scripts > /dev/null 2>&1 && \
        [ -f "/scripts/local-premount" -o -d "/scripts/local-premount" ]
    then
        [ "$quiet" != "y" ] && \
            zfs_log_begin_msg "Running /scripts/local-premount"
        run_scripts /scripts/local-premount
        [ "$quiet" != "y" ] && zfs_log_end_msg
    fi
}

# If plymouth is available, hide the splash image.
disable_plymouth()
{
    if [ -x /bin/plymouth ] && /bin/plymouth --ping
    then
        /bin/plymouth hide-splash >/dev/null 2>&1
    fi
}

# Get a ZFS filesystem property value.
get_fs_value()
{
    local fs="$1"
    local value="$2"

    "${ZFS}" get -H -ovalue "$value" "$fs" 2> /dev/null
}
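
# Example (hypothetical dataset name):
#
#     get_fs_value rpool/ROOT/debian mountpoint    # prints e.g. '/'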

# Find the 'bootfs' property on pool $1.
# If the property does not contain '/', then ignore this
# pool by exporting it again.
find_rootfs()
{
    local pool="$1"

    # If 'POOL_IMPORTED' isn't set, no pool has been imported and
    # therefore we won't be able to find a root fs.
    [ -z "${POOL_IMPORTED}" ] && return 1

    # If it's already specified, just keep it mounted and exit.
    # The user (kernel command line) must be correct.
    [ -n "${ZFS_BOOTFS}" ] && return 0

    # Not set, try to find it in the 'bootfs' property of the pool.
    # NOTE: zpool does not support 'get -H -ovalue bootfs'...
    ZFS_BOOTFS=$("${ZPOOL}" list -H -obootfs "$pool")

    # Make sure it's not '-' and that its mountpoint is '/'.
    if [ "${ZFS_BOOTFS}" != "-" ] && \
        get_fs_value "${ZFS_BOOTFS}" mountpoint | grep -q '^/$'
    then
        # Keep it mounted.
        POOL_IMPORTED=1
        return 0
    fi

    # Not the boot fs - export it and try again later.
    "${ZPOOL}" export "$pool"
    POOL_IMPORTED=""

    return 1
}
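
# The 'bootfs' pool property consulted above is normally set by the
# installer; a sketch of setting it by hand (names hypothetical):
#
#     zpool set bootfs=rpool/ROOT/debian rpool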

# Support function to get a list of all pools, separated with ';'.
find_pools()
{
    local CMD="$*"
    local pools pool

    pools=$($CMD 2> /dev/null | \
        grep -E "pool:|^[a-zA-Z0-9]" | \
        sed 's@.*: @@' | \
        while read pool; do \
            echo -n "$pool;"
        done)

    echo "${pools%%;}" # Return without the last ';'.
}
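
# Sketch of the expected transformation (pool names hypothetical):
#
#     find_pools "$ZPOOL" import    # "zpool import" lists importable pools
#     => "rpool;tank"               # ';'-separated, without a trailing ';'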

# Get a list of all available pools.
get_pools()
{
    local available_pools npools

    if [ -n "${ZFS_POOL_IMPORT}" ]; then
        echo "$ZFS_POOL_IMPORT"
        return 0
    fi

    # Get the base list of available pools.
    available_pools=$(find_pools "$ZPOOL" import)

    # Just in case - seen it happen (that a pool isn't visible/found
    # with a simple "zpool import" but only when using the "-d"
    # option or setting ZPOOL_IMPORT_PATH).
    if [ -d "/dev/disk/by-id" ]
    then
        npools=$(find_pools "$ZPOOL" import -d /dev/disk/by-id)
        if [ -n "$npools" ]
        then
            # Because we have found extra pool(s) here, which weren't
            # found 'normally', we need to force USE_DISK_BY_ID to
            # make sure we're able to actually import it/them later.
            USE_DISK_BY_ID='yes'

            if [ -n "$available_pools" ]
            then
                # Filter out duplicates (pools found with the simple
                # "zpool import" which are also found with the
                # "zpool import -d ...").
                npools=$(echo "$npools" | sed "s,$available_pools,,")

                # Add the list to the existing list of
                # available pools.
                available_pools="$available_pools;$npools"
            else
                available_pools="$npools"
            fi
        fi
    fi

    # Filter out any exceptions...
    if [ -n "$ZFS_POOL_EXCEPTIONS" ]
    then
        local found=""
        local apools=""
        local pool exception
        OLD_IFS="$IFS" ; IFS=";"

        for pool in $available_pools
        do
            for exception in $ZFS_POOL_EXCEPTIONS
            do
                [ "$pool" = "$exception" ] && continue 2
                found="$pool"
            done

            if [ -n "$found" ]
            then
                if [ -n "$apools" ]
                then
                    apools="$apools;$pool"
                else
                    apools="$pool"
                fi
            fi
        done

        IFS="$OLD_IFS"
        available_pools="$apools"
    fi

    # Return the list of available pools.
    echo "$available_pools"
}

# Import the given pool $1.
import_pool()
{
    local pool="$1"
    local dirs dir

    # Verify that the pool isn't already imported.
    # Make as sure as we can to not require '-f' to import.
    "${ZPOOL}" status "$pool" > /dev/null 2>&1 && return 0

    # For backwards compatibility, make sure that ZPOOL_IMPORT_PATH is set
    # to something we can use later with the real import(s). We want to
    # make sure we find all by-* dirs, BUT by-vdev should be first (if it
    # exists).
    if [ -n "$USE_DISK_BY_ID" -a -z "$ZPOOL_IMPORT_PATH" ]
    then
        dirs="$(for dir in /dev/disk/by-*
            do
                # Ignore by-vdev here - we want it first!
                echo "$dir" | grep -q /by-vdev && continue
                [ ! -d "$dir" ] && continue

                echo -n "$dir:"
            done | sed 's,:$,,g')"

        if [ -d "/dev/disk/by-vdev" ]
        then
            # Add by-vdev at the beginning.
            ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:"
        fi

        # ... and /dev at the very end, just for good measure.
        ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH$dirs:/dev"
    fi
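
    # A sketch of a resulting search path on a typical system (the
    # actual directory set varies per machine; illustrative only):
    #
    #     /dev/disk/by-vdev:/dev/disk/by-id:/dev/disk/by-path:/dev/disk/by-uuid:/dev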

    # Needs to be exported for "zpool" to catch it.
    [ -n "$ZPOOL_IMPORT_PATH" ] && export ZPOOL_IMPORT_PATH

    [ "$quiet" != "y" ] && zfs_log_begin_msg \
        "Importing pool '${pool}' using defaults"

    ZFS_CMD="${ZPOOL} import -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
    ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
    ZFS_ERROR="$?"
    if [ "${ZFS_ERROR}" != 0 ]
    then
        [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

        if [ -f "${ZPOOL_CACHE}" ]
        then
            [ "$quiet" != "y" ] && zfs_log_begin_msg \
                "Importing pool '${pool}' using cachefile."

            ZFS_CMD="${ZPOOL} import -c ${ZPOOL_CACHE} -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
            ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
            ZFS_ERROR="$?"
        fi

        if [ "${ZFS_ERROR}" != 0 ]
        then
            [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

            disable_plymouth
            echo ""
            echo "Command: ${ZFS_CMD} '$pool'"
            echo "Message: $ZFS_STDERR"
            echo "Error: $ZFS_ERROR"
            echo ""
            echo "Failed to import pool '$pool'."
            echo "Manually import the pool and exit."
            /bin/sh
        fi
    fi

    [ "$quiet" != "y" ] && zfs_log_end_msg

    POOL_IMPORTED=1
    return 0
}

# Load ZFS modules.
# Loading a module in an initrd requires a slightly different approach,
# with more logging etc.
load_module_initrd()
{
    if [ "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP" -gt 0 ] 2> /dev/null
    then
        if [ "$quiet" != "y" ]; then
            zfs_log_begin_msg "Sleeping for" \
                "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP seconds..."
        fi
        sleep "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP"
        [ "$quiet" != "y" ] && zfs_log_end_msg
    fi

    # Wait for all of the /dev/{hd,sd}[a-z] device nodes to appear.
    if type wait_for_udev > /dev/null 2>&1 ; then
        wait_for_udev 10
    elif type wait_for_dev > /dev/null 2>&1 ; then
        wait_for_dev
    fi

    # zpool import refuses to import without a valid /proc/self/mounts.
    [ ! -f /proc/self/mounts ] && mount proc /proc

    # Load the module.
    load_module "zfs" || return 1

    if [ "$ZFS_INITRD_POST_MODPROBE_SLEEP" -gt 0 ] 2> /dev/null
    then
        if [ "$quiet" != "y" ]; then
            zfs_log_begin_msg "Sleeping for" \
                "$ZFS_INITRD_POST_MODPROBE_SLEEP seconds..."
        fi
        sleep "$ZFS_INITRD_POST_MODPROBE_SLEEP"
        [ "$quiet" != "y" ] && zfs_log_end_msg
    fi

    return 0
}
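
# The sleep tunables tested above are plain shell variables; on Debian
# they would normally be set in /etc/default/zfs (sourced by mountroot()
# below). A hedged example - the values are hypothetical:
#
#     ZFS_INITRD_PRE_MOUNTROOT_SLEEP='5'
#     ZFS_INITRD_POST_MODPROBE_SLEEP='5'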

# Mount a given filesystem.
mount_fs()
{
    local fs="$1"
    local mountpoint

    # Check that the filesystem exists.
    "${ZFS}" list -oname -tfilesystem -H "${fs}" > /dev/null 2>&1
    [ "$?" -ne 0 ] && return 1

    # Skip filesystems with canmount=off. The root fs should not have
    # canmount=off, but ignore it for backwards compatibility just in case.
    if [ "$fs" != "${ZFS_BOOTFS}" ]
    then
        canmount=$(get_fs_value "$fs" canmount)
        [ "$canmount" = "off" ] && return 0
    fi

    # Need the _original_ dataset's mountpoint!
    mountpoint=$(get_fs_value "$fs" mountpoint)
    if [ "$mountpoint" = "legacy" -o "$mountpoint" = "none" ]; then
        # Can't use the mountpoint property. Might be one of our
        # clones. Check the 'org.zol:mountpoint' property set in
        # clone_snap() to see if that's usable.
        mountpoint=$(get_fs_value "$fs" org.zol:mountpoint)
        if [ "$mountpoint" = "legacy" -o \
            "$mountpoint" = "none" -o \
            "$mountpoint" = "-" ]
        then
            if [ "$fs" != "${ZFS_BOOTFS}" ]; then
                # We don't have a proper mountpoint and this
                # isn't the root fs.
                return 0
            else
                # Last hail-mary: Hope 'rootmnt' is set!
                mountpoint=""
            fi
        fi

        if [ "$mountpoint" = "legacy" ]; then
            ZFS_CMD="mount -t zfs"
        else
            # If it's not a legacy filesystem, it can only be a
            # native one...
            ZFS_CMD="mount -o zfsutil -t zfs"
        fi
    else
        ZFS_CMD="mount -o zfsutil -t zfs"
    fi
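
    # Illustrative expansions of the command assembled above (dataset
    # names and mountpoints are hypothetical):
    #
    #     mount -o zfsutil -t zfs rpool/ROOT/debian "${rootmnt}/"
    #     mount -t zfs tank/home "${rootmnt}/home"    # legacy mountpoint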

    # Possibly decrypt a filesystem using native encryption.
    decrypt_fs "$fs"

    [ "$quiet" != "y" ] && \
        zfs_log_begin_msg "Mounting '${fs}' on '${rootmnt}/${mountpoint}'"
    [ -n "${ZFS_DEBUG}" ] && \
        zfs_log_begin_msg "CMD: '$ZFS_CMD ${fs} ${rootmnt}/${mountpoint}'"

    ZFS_STDERR=$(${ZFS_CMD} "${fs}" "${rootmnt}/${mountpoint}" 2>&1)
    ZFS_ERROR=$?
    if [ "${ZFS_ERROR}" != 0 ]
    then
        [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

        disable_plymouth
        echo ""
        echo "Command: ${ZFS_CMD} ${fs} ${rootmnt}/${mountpoint}"
        echo "Message: $ZFS_STDERR"
        echo "Error: $ZFS_ERROR"
        echo ""
        echo "Failed to mount ${fs} on ${rootmnt}/${mountpoint}."
        echo "Manually mount the filesystem and exit."
        /bin/sh
    else
        [ "$quiet" != "y" ] && zfs_log_end_msg
    fi

    return 0
}

# Unlock a ZFS natively-encrypted filesystem.
decrypt_fs()
{
    local fs="$1"

    # If the 'zfs key' command isn't available, exit right here.
    "${ZFS}" 2>&1 | grep -q 'key -l ' || return 0

    # Check if the filesystem is encrypted. If not, exit right here.
    [ "$(get_fs_value "$fs" encryption)" != "off" ] || return 0

    [ "$quiet" != "y" ] && \
        zfs_log_begin_msg "Loading crypto wrapper key for $fs"

    # Just make sure that ALL crypto modules are loaded.
    # Simplest just to load all...
    for mod in sun-ccm sun-gcm sun-ctr
    do
        [ "$quiet" != "y" ] && zfs_log_progress_msg "${mod} "

        ZFS_CMD="load_module $mod"
        ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
        ZFS_ERROR="$?"

        if [ "${ZFS_ERROR}" != 0 ]
        then
            [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

            disable_plymouth
            echo ""
            echo "Command: $ZFS_CMD"
            echo "Message: $ZFS_STDERR"
            echo "Error: $ZFS_ERROR"
            echo ""
            echo "Failed to load the $mod module."
            echo "Please verify that it is available on the initrd image"
            echo "(without it, it won't be possible to unlock the filesystem)"
            echo "and rerun: $ZFS_CMD"
            /bin/sh
        else
            [ "$quiet" != "y" ] && zfs_log_end_msg
        fi
    done

    # If the key isn't available, then this will fail!
    ZFS_CMD="${ZFS} key -l -r $fs"
    ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
    ZFS_ERROR="$?"

    if [ "${ZFS_ERROR}" != 0 ]
    then
        [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

        disable_plymouth
        echo ""
        echo "Command: $ZFS_CMD"
        echo "Message: $ZFS_STDERR"
        echo "Error: $ZFS_ERROR"
        echo ""
        echo "Failed to load the ZFS encryption wrapper key(s)."
        echo "Please verify the 'keysource' property of the datasets"
        echo "and rerun: $ZFS_CMD"
        /bin/sh
    else
        [ "$quiet" != "y" ] && zfs_log_end_msg
    fi

    return 0
}

# Destroy a given filesystem.
destroy_fs()
{
    local fs="$1"

    [ "$quiet" != "y" ] && \
        zfs_log_begin_msg "Destroying '$fs'"

    ZFS_CMD="${ZFS} destroy $fs"
    ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
    ZFS_ERROR="$?"
    if [ "${ZFS_ERROR}" != 0 ]
    then
        [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

        disable_plymouth
        echo ""
        echo "Command: $ZFS_CMD"
        echo "Message: $ZFS_STDERR"
        echo "Error: $ZFS_ERROR"
        echo ""
        echo "Failed to destroy '$fs'. Please make sure that '$fs' is no longer available."
        echo "Hint: Try: zfs destroy -Rfn $fs"
        echo "If this dry-run looks good, then remove the 'n' from '-Rfn' and try again."
        /bin/sh
    else
        [ "$quiet" != "y" ] && zfs_log_end_msg
    fi

    return 0
}

# Clone snapshot $1 to destination filesystem $2.
# Set 'canmount=noauto' and 'mountpoint=none' so that we get to keep
# manual control over its mounting (i.e., make sure it's not automatically
# mounted with a 'zfs mount -a' in the init/systemd scripts).
clone_snap()
{
    local snap="$1"
    local destfs="$2"
    local mountpoint="$3"

    [ "$quiet" != "y" ] && zfs_log_begin_msg "Cloning '$snap' to '$destfs'"

    # Clone the snapshot into a dataset we can boot from:
    # + We don't want this filesystem to be automatically mounted; we
    #   want control over this here and nowhere else.
    # + We don't need any mountpoint set for the same reason.
    # We use the 'org.zol:mountpoint' property to remember the mountpoint.
    ZFS_CMD="${ZFS} clone -o canmount=noauto -o mountpoint=none"
    ZFS_CMD="${ZFS_CMD} -o org.zol:mountpoint=${mountpoint}"
    ZFS_CMD="${ZFS_CMD} $snap $destfs"
    ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
    ZFS_ERROR="$?"
    if [ "${ZFS_ERROR}" != 0 ]
    then
        [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

        disable_plymouth
        echo ""
        echo "Command: $ZFS_CMD"
        echo "Message: $ZFS_STDERR"
        echo "Error: $ZFS_ERROR"
        echo ""
        echo "Failed to clone snapshot."
        echo "Make sure that any problems are corrected and then make sure"
        echo "that the dataset '$destfs' exists and is bootable."
        /bin/sh
    else
        [ "$quiet" != "y" ] && zfs_log_end_msg
    fi

    return 0
}
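
# Illustrative call and the command it builds (names hypothetical; the
# naming scheme matches setup_snapshot_booting() below):
#
#     clone_snap rpool/ROOT/debian@snap2 rpool/ROOT/debian_snap2 /
#     => zfs clone -o canmount=noauto -o mountpoint=none \
#            -o org.zol:mountpoint=/ rpool/ROOT/debian@snap2 rpool/ROOT/debian_snap2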

# Roll back a given snapshot.
rollback_snap()
{
    local snap="$1"

    [ "$quiet" != "y" ] && zfs_log_begin_msg "Rollback $snap"

    ZFS_CMD="${ZFS} rollback -Rf $snap"
    ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
    ZFS_ERROR="$?"
    if [ "${ZFS_ERROR}" != 0 ]
    then
        [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"

        disable_plymouth
        echo ""
        echo "Command: $ZFS_CMD"
        echo "Message: $ZFS_STDERR"
        echo "Error: $ZFS_ERROR"
        echo ""
        echo "Failed to roll back snapshot."
        /bin/sh
    else
        [ "$quiet" != "y" ] && zfs_log_end_msg
    fi

    return 0
}

# Get a list of snapshots and present them as a numbered list
# for the user to choose from.
ask_user_snap()
{
    local fs="$1"
    local i=1
    local SNAP snapnr snap debug

    # We need to temporarily disable debugging. Set 'debug' so we
    # remember to enable it again.
    if [ -n "${ZFS_DEBUG}" ]; then
        unset ZFS_DEBUG
        set +x
        debug=1
    fi

    # Because we need the resulting snapshot, which is sent on
    # stdout to the caller, we use stderr for our questions.
    echo "What snapshot do you want to boot from?" > /dev/stderr
    while read snap; do
        echo "    $i: ${snap}" > /dev/stderr
        eval "SNAP_${i}=\$snap"
        i=$((i + 1))
    done <<EOT
$("${ZFS}" list -H -oname -tsnapshot -r "${fs}")
EOT

    echo -n "  Snap nr [1-$((i-1))]? " > /dev/stderr
    read snapnr

    # Re-enable debugging.
    if [ -n "${debug}" ]; then
        ZFS_DEBUG=1
        set -x
    fi

    eval echo "\$SNAP_${snapnr}"
}
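
# A sketch of the resulting dialogue (snapshot names hypothetical):
#
#     What snapshot do you want to boot from?
#         1: rpool/ROOT/debian@snap1
#         2: rpool/ROOT/debian@snap2
#       Snap nr [1-2]? 2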

setup_snapshot_booting()
{
    local snap="$1"
    local s destfs subfs mountpoint retval=0 filesystems fs

    # Make sure that the specified snapshot actually exists.
    if [ -z "$(get_fs_value "${snap}" type)" ]
    then
        # Snapshot does not exist (...@<null> ?).
        # Ask the user for a snapshot to use.
        snap="$(ask_user_snap "${snap%%@*}")"
    fi

    # Separate the full snapshot ('$snap') into its filesystem and
    # snapshot names - a split() function would have been nice here..
    rootfs="${snap%%@*}"
    snapname="${snap##*@}"
    ZFS_BOOTFS="${rootfs}_${snapname}"

    if ! grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
    then
        # If the destination dataset for the clone
        # already exists, destroy it. Recursively.
        if [ -n "$(get_fs_value "${rootfs}_${snapname}" type)" ]; then
            filesystems=$("${ZFS}" list -oname -tfilesystem -H \
                -r -Sname "${ZFS_BOOTFS}")
            for fs in $filesystems; do
                destroy_fs "${fs}"
            done
        fi
    fi

    # Get all snapshots, recursively (might need to clone /usr, /var etc
    # as well).
    for s in $("${ZFS}" list -H -oname -tsnapshot -r "${rootfs}" | \
        grep "${snapname}")
    do
        if grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
        then
            # Roll back the snapshot.
            rollback_snap "$s" || retval=$((retval + 1))
        else
            # Set up a destination filesystem name.
            # Ex: Called with 'rpool/ROOT/debian@snap2'
            #   rpool/ROOT/debian@snap2       => rpool/ROOT/debian_snap2
            #   rpool/ROOT/debian/boot@snap2  => rpool/ROOT/debian_snap2/boot
            #   rpool/ROOT/debian/usr@snap2   => rpool/ROOT/debian_snap2/usr
            #   rpool/ROOT/debian/var@snap2   => rpool/ROOT/debian_snap2/var
            subfs="${s##$rootfs}"
            subfs="${subfs%%@$snapname}"

            destfs="${rootfs}_${snapname}"              # Base fs.
            [ -n "$subfs" ] && destfs="${destfs}$subfs" # + sub fs.

            # Get the mountpoint of the filesystem, to be used
            # with clone_snap(). If legacy or none, then use
            # the sub fs value.
            mountpoint=$(get_fs_value "${s%%@*}" mountpoint)
            if [ "$mountpoint" = "legacy" -o \
                "$mountpoint" = "none" ]
            then
                if [ -n "${subfs}" ]; then
                    mountpoint="${subfs}"
                else
                    mountpoint="/"
                fi
            fi

            # Clone the snapshot into its own
            # filesystem.
            clone_snap "$s" "${destfs}" "${mountpoint}" || \
                retval=$((retval + 1))
        fi
    done

    # If we haven't returned yet, we have a problem...
    return "${retval}"
}

# ================================================================

# This is the main function.
mountroot()
{
    local snaporig snapsub destfs pool POOLS

    # ----------------------------------------------------------------
    # I N I T I A L   S E T U P

    # ------------
    # Run the pre-mount scripts from /scripts/local-top.
    pre_mountroot

    # ------------
    # Source the default setup variables.
    [ -r '/etc/default/zfs' ] && . /etc/default/zfs

    # ------------
    # Support the debug option.
    if grep -qiE '(^|[^\\](\\\\)* )(zfs_debug|zfs\.debug|zfsdebug)=(on|yes|1)( |$)' /proc/cmdline
    then
        ZFS_DEBUG=1
        mkdir -p /var/log
        #exec 2> /var/log/boot.debug
        set -x
    fi

    # ------------
    # Load the ZFS module etc.
    if ! load_module_initrd; then
        disable_plymouth
        echo ""
        echo "Failed to load ZFS modules."
        echo "Manually load the modules and exit."
        /bin/sh
    fi

    # ------------
    # Look for the cache file (if any).
    [ ! -f "${ZPOOL_CACHE}" ] && unset ZPOOL_CACHE

    # ------------
    # Compatibility: 'ROOT' is for Debian GNU/Linux (etc),
    #                'root' is for RedHat/Fedora (etc),
    #                'REAL_ROOT' is for Gentoo
    if [ -z "$ROOT" ]
    then
        [ -n "$root" ] && ROOT=${root}

        [ -n "$REAL_ROOT" ] && ROOT=${REAL_ROOT}
    fi

    # ------------
    # Where to mount the root fs in the initrd - set outside this script.
    # Compatibility: 'rootmnt' is for Debian GNU/Linux (etc),
    #                'NEWROOT' is for RedHat/Fedora (etc),
    #                'NEW_ROOT' is for Gentoo
    if [ -z "$rootmnt" ]
    then
        [ -n "$NEWROOT" ] && rootmnt=${NEWROOT}

        [ -n "$NEW_ROOT" ] && rootmnt=${NEW_ROOT}
    fi

    # ------------
    # No longer set in the defaults file, but it could have been set in
    # get_pools() in some circumstances. If it's something, but not 'yes',
    # it's no good to us.
    [ -n "$USE_DISK_BY_ID" -a "$USE_DISK_BY_ID" != 'yes' ] && \
        unset USE_DISK_BY_ID

    # ----------------------------------------------------------------
    # P A R S E   C O M M A N D   L I N E   O P T I O N S

    # This part is the really ugly part - there are so many options and
    # permutations 'out there', and if we should make this the 'primary'
    # source for ZFS initrd scripting, we need/should support them all.
    #
    # Supports the following kernel command line argument combinations
    # (in this order - first match wins):
    #
    #   rpool=<pool>              (tries to find bootfs automatically)
    #   bootfs=<pool>/<dataset>   (uses this for rpool - first part)
    #   rpool=<pool> bootfs=<pool>/<dataset>
    #   -B zfs-bootfs=<pool>/<fs> (uses this for rpool - first part)
    #   rpool=rpool               (default if none of the above is used)
    #   root=<pool>/<dataset>     (uses this for rpool - first part)
    #   root=ZFS=<pool>/<dataset> (uses this for rpool - first part, without 'ZFS=')
    #   root=zfs:AUTO             (tries to detect both pool and rootfs)
    #   root=zfs:<pool>/<dataset> (uses this for rpool - first part, without 'zfs:')
    #
    # The <dataset> option could also be a <snapshot>.
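    #
    # As an illustration, a bootloader entry using the 'root=ZFS=' form
    # might pass (dataset name hypothetical):
    #
    #     boot=zfs root=ZFS=rpool/ROOT/debian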

    # ------------
    # Support the force option.
    # In addition, setting one of zfs_force, zfs.force or zfsforce to
    # 'yes', 'on' or '1' will make sure we force-import the pool.
    # This should (almost) never be needed, but it's here for
    # completeness.
    ZPOOL_FORCE=""
    if grep -qiE '(^|[^\\](\\\\)* )(zfs_force|zfs\.force|zfsforce)=(on|yes|1)( |$)' /proc/cmdline
    then
        ZPOOL_FORCE="-f"
    fi

    # ------------
    # Look for the 'rpool' and 'bootfs' parameters.
    [ -n "$rpool" ] && ZFS_RPOOL="${rpool#rpool=}"
    [ -n "$bootfs" ] && ZFS_BOOTFS="${bootfs#bootfs=}"

    # ------------
    # If we have 'ROOT' (see above), but not 'ZFS_BOOTFS', then use
    # 'ROOT'.
    [ -n "$ROOT" -a -z "${ZFS_BOOTFS}" ] && ZFS_BOOTFS="$ROOT"

    # ------------
    # Check for the `-B zfs-bootfs=%s/%u,...` kind of parameter.
    # NOTE: Only use the pool name and dataset. The rest is not
    # supported by ZoL (whatever it's for).
    if [ -z "$ZFS_RPOOL" ]
    then
        # The ${zfs-bootfs} variable is set on the kernel command
        # line, usually by GRUB, but it cannot be referenced here
        # directly because Bourne variable names cannot contain a
        # hyphen.
        #
        # Reassign the variable by dumping the environment and
        # stripping the zfs-bootfs= prefix. Let the shell handle
        # quoting through the eval command.
        eval ZFS_RPOOL=$(set | sed -n -e 's,^zfs-bootfs=,,p')
    fi

    # ------------
    # No root fs or pool specified - do auto detection.
    if [ -z "$ZFS_RPOOL" -a -z "${ZFS_BOOTFS}" ]
    then
        # Do auto detection. Do this by 'cheating' - set 'root=zfs:AUTO'
        # which will be caught later.
        ROOT=zfs:AUTO
    fi

    # ----------------------------------------------------------------
    # F I N D   A N D   I M P O R T   C O R R E C T   P O O L

    # ------------
    if [ "$ROOT" = "zfs:AUTO" ]
    then
        # Try to detect both the pool and the root fs.

        [ "$quiet" != "y" ] && \
            zfs_log_begin_msg "Attempting to import additional pools."

        # Get a list of pools available for import.
        if [ -n "$ZFS_RPOOL" ]
        then
            # We've specified a pool - check only that.
            POOLS=$ZFS_RPOOL
        else
            POOLS=$(get_pools)
        fi

        OLD_IFS="$IFS" ; IFS=";"
        for pool in $POOLS
        do
            [ -z "$pool" ] && continue

            import_pool "$pool"
            find_rootfs "$pool"
        done
        IFS="$OLD_IFS"

        [ "$quiet" != "y" ] && zfs_log_end_msg $ZFS_ERROR
    else
        # No auto - use the value from the command line option.

        # Strip 'zfs:' and 'ZFS='.
        ZFS_BOOTFS="${ROOT#*[:=]}"

        # Strip everything after the first slash.
        ZFS_RPOOL="${ZFS_BOOTFS%%/*}"
    fi

    # Import the pool (if not already done so in the AUTO check above).
    if [ -n "$ZFS_RPOOL" -a -z "${POOL_IMPORTED}" ]
    then
        [ "$quiet" != "y" ] && \
            zfs_log_begin_msg "Importing ZFS root pool '$ZFS_RPOOL'"

        import_pool "${ZFS_RPOOL}"
        find_rootfs "${ZFS_RPOOL}"

        [ "$quiet" != "y" ] && zfs_log_end_msg
    fi

    if [ -z "${POOL_IMPORTED}" ]
    then
        # No pool imported, this is serious!
        disable_plymouth
        echo ""
        echo "Command: $ZFS_CMD"
        echo "Message: $ZFS_STDERR"
        echo "Error: $ZFS_ERROR"
        echo ""
        echo "No pool imported. Manually import the root pool"
        echo "at the command prompt and then exit."
        echo "Hint: Try: zpool import -R ${rootmnt} -N ${ZFS_RPOOL}"
        /bin/sh
    fi

    # Set elevator=noop on the root pool's vdevs' disks. ZFS already
    # does this for wholedisk vdevs (for all pools), so this is only
    # important for partitions.
    "${ZPOOL}" status -L "${ZFS_RPOOL}" 2> /dev/null |
        awk '/^\t / && !/(mirror|raidz)/ {
            dev=$1;
            sub(/[0-9]+$/, "", dev);
            print dev
        }' |
    while read i
    do
        if [ -e "/sys/block/$i/queue/scheduler" ]
        then
            echo noop > "/sys/block/$i/queue/scheduler"
        fi
    done
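
    # For example, a vdev on the (hypothetical) partition /dev/sda2 is
    # reduced to 'sda' above, and 'noop' is then written to
    # /sys/block/sda/queue/scheduler.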

    # ----------------------------------------------------------------
    # P R E P A R E   R O O T   F I L E S Y S T E M

    if [ -n "${ZFS_BOOTFS}" ]
    then
        # Booting from a snapshot?
        # Will overwrite the ZFS_BOOTFS variable like so:
        #   rpool/ROOT/debian@snap2 => rpool/ROOT/debian_snap2
        echo "${ZFS_BOOTFS}" | grep -q '@' && \
            setup_snapshot_booting "${ZFS_BOOTFS}"
    fi

    if [ -z "${ZFS_BOOTFS}" ]
    then
        # Still nothing! Let the user sort this out.
        disable_plymouth
        echo ""
        echo "Error: Unknown root filesystem - no 'bootfs' pool property and"
        echo "       not specified on the kernel command line."
        echo ""
        echo "Manually mount the root filesystem on $rootmnt and then exit."
        echo "Hint: Try: mount -o zfsutil -t zfs ${ZFS_RPOOL-rpool}/ROOT/system $rootmnt"
        /bin/sh
    fi

    # ----------------------------------------------------------------
    # M O U N T   F I L E S Y S T E M S

    # * Ideally, the root filesystem would be mounted like this:
    #
    #     zpool import -R "$rootmnt" -N "$ZFS_RPOOL"
    #     zfs mount -o mountpoint=/ "${ZFS_BOOTFS}"
    #
    #   but the MOUNTPOINT prefix is preserved on descendant filesystems
    #   after the pivot into the regular root, which later breaks things
    #   like `zfs mount -a` and the /proc/self/mounts refresh.
    #
    # * Mount additional filesystems required, such as /usr, /var,
    #   /usr/local etc.
    #   NOTE: They are mounted in the order specified in the
    #   ZFS_INITRD_ADDITIONAL_DATASETS variable, so take care!

    # Go through the complete list (recursively) of all filesystems below
    # the real root dataset.
    filesystems=$("${ZFS}" list -oname -tfilesystem -H -r "${ZFS_BOOTFS}")
    for fs in $filesystems $ZFS_INITRD_ADDITIONAL_DATASETS
    do
        mount_fs "$fs"
    done

    # ------------
    # Debugging information.
    if [ -n "${ZFS_DEBUG}" ]
    then
        #exec 2>&1-

        echo "DEBUG: imported pools:"
        "${ZPOOL}" list -H
        echo

        echo "DEBUG: mounted ZFS filesystems:"
        mount | grep zfs
        echo

        echo "=> waiting for ENTER before continuing because of 'zfsdebug=1'. "
        echo -n "   'c' for shell, 'r' for reboot, 'ENTER' to continue. "
        read b

        [ "$b" = "c" ] && /bin/sh
        [ "$b" = "r" ] && reboot -f

        set +x
    fi

    # ------------
    # Run the local-bottom scripts.
    if type run_scripts > /dev/null 2>&1 && \
        [ -f "/scripts/local-bottom" -o -d "/scripts/local-bottom" ]
    then
        [ "$quiet" != "y" ] && \
            zfs_log_begin_msg "Running /scripts/local-bottom"
        run_scripts /scripts/local-bottom
        [ "$quiet" != "y" ] && zfs_log_end_msg
    fi
}