]> git.proxmox.com Git - systemd.git/blame - test/units/testsuite-64.sh
New upstream version 250.4
[systemd.git] / test / units / testsuite-64.sh
CommitLineData
ea0999c9
MB
1#!/usr/bin/env bash
2# SPDX-License-Identifier: LGPL-2.1-or-later
3# vi: ts=4 sw=4 tw=0 et:
4
5set -eux
6set -o pipefail
7
# Check if all symlinks under /dev/disk/ are valid
# Arguments:
#   $@ - directories to scan (default: /dev/disk and /dev/mapper)
# Returns: 0 if every symlink resolves to the device udev expects, 1 otherwise
# shellcheck disable=SC2120
helper_check_device_symlinks() {(
    # Run in a subshell so disabling xtrace doesn't leak to the caller
    set +x

    local symlink resolved expected_dev search_path
    local -a search_paths

    if [[ $# -gt 0 ]]; then
        search_paths=("$@")
    else
        search_paths=("/dev/disk" "/dev/mapper")
    fi

    # Check if all given paths are valid
    for search_path in "${search_paths[@]}"; do
        if [[ ! -e "$search_path" ]]; then
            echo >&2 "Path '$search_path' doesn't exist"
            return 1
        fi
    done

    while read -r symlink; do
        resolved="$(readlink -f "$symlink")"
        echo "$symlink -> $resolved"
        # Both checks should do virtually the same thing, but check both to be
        # on the safe side
        if [[ ! -e "$symlink" || ! -e "$resolved" ]]; then
            echo >&2 "ERROR: symlink '$symlink' points to '$resolved' which doesn't exist"
            return 1
        fi

        # Check if the symlink points to the correct device in /dev
        expected_dev="/dev/$(udevadm info -q name "$symlink")"
        if [[ "$resolved" != "$expected_dev" ]]; then
            echo >&2 "ERROR: symlink '$symlink' points to '$resolved' but '$expected_dev' was expected"
            return 1
        fi
    done < <(find "${search_paths[@]}" -type l)
)}
43
# Wait for a specific device link to appear
# Arguments:
#   $1 - device path
#   $2 - number of retries (default: 10)
# Returns: 0 once the path exists, 1 if it never appeared
helper_wait_for_dev() {
    local dev="${1:?}"
    local ntries="${2:-10}"
    local try

    for ((try = 0; try < ntries; try++)); do
        if test -e "$dev"; then
            return 0
        fi
        # Poll in 200ms steps, i.e. wait up to ntries * 0.2 seconds in total
        sleep .2
    done

    return 1
}
60
# Wrapper around `helper_wait_for_lvm_activate()` and `helper_wait_for_pvscan()`
# functions to cover differences between pre and post lvm 2.03.14, which introduced
# a new way of vgroup autoactivation
# See: https://sourceware.org/git/?p=lvm2.git;a=commit;h=67722b312390cdab29c076c912e14bd739c5c0f6
# Arguments:
#   $1 - device path (for helper_wait_for_pvscan())
#   $2 - volume group name (for helper_wait_for_lvm_activate())
#   $3 - number of retries (default: 10)
helper_wait_for_vgroup() {
    local dev="${1:?}"
    local vgroup="${2:?}"
    local ntries="${3:-10}"

    # The presence of the lvm2-pvscan@.service template tells us which
    # autoactivation mechanism this lvm version uses
    if systemctl -q list-unit-files lvm2-pvscan@.service >/dev/null; then
        helper_wait_for_pvscan "$dev" "$ntries"
    else
        helper_wait_for_lvm_activate "$vgroup" "$ntries"
    fi
}
80
# Wait for the lvm-activate-$vgroup.service of a specific $vgroup to finish
# Arguments:
#   $1 - volume group name
#   $2 - number of retries (default: 10)
# Returns: 0 when the transient activation unit finished (or already vanished),
#          1 when it didn't do so within ntries * 0.5 seconds
helper_wait_for_lvm_activate() {
    local vgroup="${1:?}"
    local ntries="${2:-10}"
    local try svc

    svc="lvm-activate-$vgroup.service"
    for ((try = 0; try < ntries; try++)); do
        if systemctl -q is-active "$svc"; then
            # Since the service is started via `systemd-run --no-block`, we need
            # to wait until it finishes, otherwise we might continue while
            # `vgchange` is still running
            if [[ "$(systemctl show -P SubState "$svc")" == exited ]]; then
                return 0
            fi
        else
            # Since lvm 2.03.15 the lvm-activate transient unit no longer remains
            # after finishing, so we have to treat non-existent units as a success
            # as well
            # See: https://sourceware.org/git/?p=lvm2.git;a=commit;h=fbd8b0cf43dc67f51f86f060dce748f446985855
            if [[ "$(systemctl show -P LoadState "$svc")" == not-found ]]; then
                return 0
            fi
        fi

        sleep .5
    done

    return 1
}
114
# Wait for the lvm2-pvscan@.service of a specific device to finish
# Arguments:
#   $1 - device path
#   $2 - number of retries (default: 10)
# Returns: 0 once the pvscan unit is active, 1 on timeout or invalid device
helper_wait_for_pvscan() {
    local dev="${1:?}"
    local ntries="${2:-10}"
    local MAJOR MINOR try pvscan_svc real_dev

    # Sanity check we got a valid block device (or a symlink to it)
    real_dev="$(readlink -f "$dev")"
    if [[ ! -b "$real_dev" ]]; then
        echo >&2 "ERROR: '$dev ($real_dev) is not a valid block device'"
        return 1
    fi

    # Get major and minor numbers from the udev database
    # (udevadm returns MAJOR= and MINOR= expressions, so let's pull them into
    # the current environment via `source` for easier parsing)
    #
    # shellcheck source=/dev/null
    source <(udevadm info -q property "$real_dev" | grep -E "(MAJOR|MINOR)=")
    # Sanity check if we got correct major and minor numbers
    test -e "/sys/dev/block/$MAJOR:$MINOR/"

    # Wait n_tries*0.5 seconds until the respective lvm2-pvscan service becomes
    # active (i.e. it got executed and finished)
    pvscan_svc="lvm2-pvscan@$MAJOR:$MINOR.service"
    for ((try = 0; try < ntries; try++)); do
        if systemctl -q is-active "$pvscan_svc"; then
            return 0
        fi
        sleep .5
    done

    return 1
}
150
testcase_megasas2_basic() {
    local disk_count

    # Dump the SCSI topology for debugging, then verify all emulated
    # megasas2 disks showed up
    lsblk -S
    disk_count="$(lsblk --scsi --noheadings | wc -l)"
    [[ "$disk_count" -ge 128 ]]
}
155
testcase_nvme_basic() {
    local nvme_count

    # List the NVMe namespaces for debugging, then verify all emulated
    # NVMe devices showed up
    lsblk --noheadings | grep "^nvme"
    nvme_count="$(lsblk --noheadings | grep -c "^nvme")"
    [[ "$nvme_count" -ge 28 ]]
}
160
testcase_virtio_scsi_identically_named_partitions() {
    local match_count

    # 16 disks, each carrying 8 partitions with the same "Hello world" label;
    # every one of them must be resolvable despite the identical names
    lsblk --noheadings -a -o NAME,PARTLABEL
    match_count="$(lsblk --noheadings -a -o NAME,PARTLABEL | grep -c "Hello world")"
    [[ "$match_count" -eq $((16 * 8)) ]]
}
165
# Exercise dm-multipath: verify 64 multipath devices with 4 paths each, then
# take paths of one device offline one by one while checking that a mounted
# filesystem on it stays readable/writable and its symlinks stay valid.
testcase_multipath_basic_failover() {
    local dmpath i path wwid

    # Configure multipath
    cat >/etc/multipath.conf <<\EOF
defaults {
    # Use /dev/mapper/$WWN paths instead of /dev/mapper/mpathX
    user_friendly_names no
    find_multipaths yes
    enable_foreign "^$"
}

blacklist_exceptions {
    property "(SCSI_IDENT_|ID_WWN)"
}

blacklist {
}
EOF
    modprobe -v dm_multipath
    systemctl start multipathd.service
    systemctl status multipathd.service
    multipath -ll
    udevadm settle
    ls -l /dev/disk/by-id/

    # Sanity-check every multipath device set up by the test harness
    for i in {0..63}; do
        wwid="deaddeadbeef$(printf "%.4d" "$i")"
        path="/dev/disk/by-id/wwn-0x$wwid"
        dmpath="$(readlink -f "$path")"

        lsblk "$path"
        multipath -C "$dmpath"
        # We should have 4 active paths for each multipath device
        [[ "$(multipath -l "$path" | grep -c running)" -eq 4 ]]
    done

    # Test failover (with the first multipath device that has a partitioned disk)
    echo "${FUNCNAME[0]}: test failover"
    local device expected link mpoint part
    local -a devices
    mpoint="$(mktemp -d /mnt/mpathXXX)"
    wwid="deaddeadbeef0000"
    path="/dev/disk/by-id/wwn-0x$wwid"

    # All of the following symlinks should exist and be valid
    local -a part_links=(
        "/dev/disk/by-id/wwn-0x$wwid-part2"
        "/dev/disk/by-partlabel/failover_part"
        "/dev/disk/by-partuuid/deadbeef-dead-dead-beef-000000000000"
        "/dev/disk/by-label/failover_vol"
        "/dev/disk/by-uuid/deadbeef-dead-dead-beef-111111111111"
    )
    for link in "${part_links[@]}"; do
        test -e "$link"
    done

    # Choose a random symlink to the failover data partition each time, for
    # a better coverage
    part="${part_links[$RANDOM % ${#part_links[@]}]}"

    # Get all devices attached to a specific multipath device (in H:C:T:L format)
    # and sort them in a random order, so we cut off different paths each time
    mapfile -t devices < <(multipath -l "$path" | grep -Eo '[0-9]+:[0-9]+:[0-9]+:[0-9]+' | sort -R)
    if [[ "${#devices[@]}" -ne 4 ]]; then
        echo "Expected 4 devices attached to WWID=$wwid, got ${#devices[@]} instead"
        return 1
    fi
    # Drop the last path from the array, since we want to leave at least one path active
    unset "devices[3]"
    # Mount the first multipath partition, write some data we can check later,
    # and then disconnect the remaining paths one by one while checking if we
    # can still read/write from the mount
    mount -t ext4 "$part" "$mpoint"
    expected=0
    echo -n "$expected" >"$mpoint/test"
    # Sanity check we actually wrote what we wanted
    [[ "$(<"$mpoint/test")" == "$expected" ]]

    for device in "${devices[@]}"; do
        # Take this path offline via sysfs; multipath should fail over to
        # one of the remaining paths transparently
        echo offline >"/sys/class/scsi_device/$device/device/state"
        [[ "$(<"$mpoint/test")" == "$expected" ]]
        expected="$((expected + 1))"
        echo -n "$expected" >"$mpoint/test"

        # Make sure all symlinks are still valid
        for link in "${part_links[@]}"; do
            test -e "$link"
        done
    done

    multipath -l "$path"
    # Three paths should be now marked as 'offline' and one as 'running'
    [[ "$(multipath -l "$path" | grep -c offline)" -eq 3 ]]
    [[ "$(multipath -l "$path" | grep -c running)" -eq 1 ]]

    umount "$mpoint"
    rm -fr "$mpoint"
}
265
# Stress-test udev event handling: repeatedly delete and recreate 50 GPT
# partitions on the test SCSI disk and verify no dangling symlinks are left
# behind in /dev/disk/.
testcase_simultaneous_events() {
    # Fix: declare the loop counter `i` local (it previously leaked into the
    # global scope) and drop the unused `part` local
    local blockdev i partscript

    blockdev="$(readlink -f /dev/disk/by-id/scsi-*_deadbeeftest)"
    partscript="$(mktemp)"

    if [[ ! -b "$blockdev" ]]; then
        echo "ERROR: failed to find the test SCSI block device"
        return 1
    fi

    # Generate an sfdisk script creating 50 identical 2M partitions
    cat >"$partscript" <<EOF
$(printf 'name="test%d", size=2M\n' {1..50})
EOF

    # Initial partition table
    sfdisk -q -X gpt "$blockdev" <"$partscript"

    # Delete the partitions, immediately recreate them, wait for udev to settle
    # down, and then check if we have any dangling symlinks in /dev/disk/. Rinse
    # and repeat.
    #
    # On unpatched udev versions the delete-recreate cycle may trigger a race
    # leading to dead symlinks in /dev/disk/
    for i in {1..100}; do
        sfdisk -q --delete "$blockdev"
        sfdisk -q -X gpt "$blockdev" <"$partscript"

        if ((i % 10 == 0)); then
            udevadm settle
            helper_check_device_symlinks
        fi
    done

    rm -f "$partscript"
}
302
# Exercise LVM on four dedicated disks: create a VG with two LVs, repeatedly
# deactivate/reactivate the VG and create/remove LVs, checking after each
# settle that /dev symlinks appear and disappear as expected.
testcase_lvm_basic() {
    local i part
    local vgroup="MyTestGroup$RANDOM"
    local devices=(
        /dev/disk/by-id/ata-foobar_deadbeeflvm{0..3}
    )

    # Make sure all the necessary soon-to-be-LVM devices exist
    ls -l "${devices[@]}"

    # Add all test devices into a volume group, create two logical volumes,
    # and check if necessary symlinks exist (and are valid)
    lvm pvcreate -y "${devices[@]}"
    lvm pvs
    lvm vgcreate "$vgroup" -y "${devices[@]}"
    lvm vgs
    lvm vgchange -ay "$vgroup"
    lvm lvcreate -y -L 4M "$vgroup" -n mypart1
    lvm lvcreate -y -L 8M "$vgroup" -n mypart2
    lvm lvs
    udevadm settle
    test -e "/dev/$vgroup/mypart1"
    test -e "/dev/$vgroup/mypart2"
    mkfs.ext4 -L mylvpart1 "/dev/$vgroup/mypart1"
    udevadm settle
    test -e "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"

    # Disable the VG and check symlinks...
    lvm vgchange -an "$vgroup"
    udevadm settle
    test ! -e "/dev/$vgroup"
    test ! -e "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk"

    # reenable the VG and check the symlinks again if all LVs are properly activated
    lvm vgchange -ay "$vgroup"
    udevadm settle
    test -e "/dev/$vgroup/mypart1"
    test -e "/dev/$vgroup/mypart2"
    test -e "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"

    # Same as above, but now with more "stress"
    for i in {1..50}; do
        lvm vgchange -an "$vgroup"
        lvm vgchange -ay "$vgroup"

        # Settle and verify only on every 5th iteration to keep udev busy
        if ((i % 5 == 0)); then
            udevadm settle
            test -e "/dev/$vgroup/mypart1"
            test -e "/dev/$vgroup/mypart2"
            test -e "/dev/disk/by-label/mylvpart1"
            helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
        fi
    done

    # Remove the first LV
    lvm lvremove -y "$vgroup/mypart1"
    udevadm settle
    test ! -e "/dev/$vgroup/mypart1"
    test -e "/dev/$vgroup/mypart2"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"

    # Create & remove LVs in a loop, i.e. with more "stress"
    for i in {1..16}; do
        # 1) Create 16 logical volumes
        for part in {0..15}; do
            lvm lvcreate -y -L 4M "$vgroup" -n "looppart$part"
        done

        # 2) Immediately remove them
        lvm lvremove -y "$vgroup"/looppart{0..15}

        # 3) On every 4th iteration settle udev and check if all partitions are
        #    indeed gone, and if all symlinks are still valid
        if ((i % 4 == 0)); then
            udevadm settle
            for part in {0..15}; do
                test ! -e "/dev/$vgroup/looppart$part"
            done
            helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
        fi
    done
}
388
# Exercise btrfs in several configurations: single device, multi-partition,
# multi-disk raid10, and a multi-device filesystem on top of LUKS devices
# assembled automatically via /etc/crypttab + /etc/fstab.
testcase_btrfs_basic() {
    local dev_stub i label mpoint uuid
    local devices=(
        /dev/disk/by-id/ata-foobar_deadbeefbtrfs{0..3}
    )

    ls -l "${devices[@]}"

    echo "Single device: default settings"
    uuid="deadbeef-dead-dead-beef-000000000000"
    label="btrfs_root"
    mkfs.btrfs -L "$label" -U "$uuid" "${devices[0]}"
    udevadm settle
    btrfs filesystem show
    test -e "/dev/disk/by-uuid/$uuid"
    test -e "/dev/disk/by-label/$label"
    helper_check_device_symlinks

    echo "Multiple devices: using partitions, data: single, metadata: raid1"
    uuid="deadbeef-dead-dead-beef-000000000001"
    label="btrfs_mpart"
    sfdisk --wipe=always "${devices[0]}" <<EOF
label: gpt

name="diskpart1", size=85M
name="diskpart2", size=85M
name="diskpart3", size=85M
name="diskpart4", size=85M
EOF
    udevadm settle
    mkfs.btrfs -d single -m raid1 -L "$label" -U "$uuid" /dev/disk/by-partlabel/diskpart{1..4}
    udevadm settle
    btrfs filesystem show
    test -e "/dev/disk/by-uuid/$uuid"
    test -e "/dev/disk/by-label/$label"
    helper_check_device_symlinks
    wipefs -a -f "${devices[0]}"

    echo "Multiple devices: using disks, data: raid10, metadata: raid10, mixed mode"
    uuid="deadbeef-dead-dead-beef-000000000002"
    label="btrfs_mdisk"
    mkfs.btrfs -M -d raid10 -m raid10 -L "$label" -U "$uuid" "${devices[@]}"
    udevadm settle
    btrfs filesystem show
    test -e "/dev/disk/by-uuid/$uuid"
    test -e "/dev/disk/by-label/$label"
    helper_check_device_symlinks

    echo "Multiple devices: using LUKS encrypted disks, data: raid1, metadata: raid1, mixed mode"
    uuid="deadbeef-dead-dead-beef-000000000003"
    label="btrfs_mencdisk"
    mpoint="/btrfs_enc$RANDOM"
    mkdir "$mpoint"
    # Create a key-file
    dd if=/dev/urandom of=/etc/btrfs_keyfile bs=64 count=1 iflag=fullblock
    chmod 0600 /etc/btrfs_keyfile
    # Encrypt each device and add it to /etc/crypttab, so it can be mounted
    # automagically later
    : >/etc/crypttab
    for ((i = 0; i < ${#devices[@]}; i++)); do
        # Intentionally use weaker cipher-related settings, since we don't care
        # about security here as it's a throwaway LUKS partition
        cryptsetup luksFormat -q \
            --use-urandom --pbkdf pbkdf2 --pbkdf-force-iterations 1000 \
            --uuid "deadbeef-dead-dead-beef-11111111111$i" --label "encdisk$i" "${devices[$i]}" /etc/btrfs_keyfile
        udevadm settle
        test -e "/dev/disk/by-uuid/deadbeef-dead-dead-beef-11111111111$i"
        test -e "/dev/disk/by-label/encdisk$i"
        # Add the device into /etc/crypttab, reload systemd, and then activate
        # the device so we can create a filesystem on it later
        echo "encbtrfs$i UUID=deadbeef-dead-dead-beef-11111111111$i /etc/btrfs_keyfile luks,noearly" >>/etc/crypttab
        systemctl daemon-reload
        systemctl start "systemd-cryptsetup@encbtrfs$i"
    done
    helper_check_device_symlinks
    # Check if we have all necessary DM devices
    ls -l /dev/mapper/encbtrfs{0..3}
    # Create a multi-device btrfs filesystem on the LUKS devices
    mkfs.btrfs -M -d raid1 -m raid1 -L "$label" -U "$uuid" /dev/mapper/encbtrfs{0..3}
    udevadm settle
    btrfs filesystem show
    test -e "/dev/disk/by-uuid/$uuid"
    test -e "/dev/disk/by-label/$label"
    helper_check_device_symlinks
    # Mount it and write some data to it we can compare later
    mount -t btrfs /dev/mapper/encbtrfs0 "$mpoint"
    echo "hello there" >"$mpoint/test"
    # "Deconstruct" the btrfs device and check if we're in a sane state (symlink-wise)
    umount "$mpoint"
    systemctl stop systemd-cryptsetup@encbtrfs{0..3}
    test ! -e "/dev/disk/by-uuid/$uuid"
    helper_check_device_symlinks
    # Add the mount point to /etc/fstab and check if the device can be put together
    # automagically. The source device is the DM name of the first LUKS device
    # (from /etc/crypttab). We have to specify all LUKS devices manually, as
    # registering the necessary devices is usually initrd's job (via btrfs device scan)
    dev_stub="/dev/mapper/encbtrfs"
    echo "/dev/mapper/encbtrfs0 $mpoint btrfs device=${dev_stub}0,device=${dev_stub}1,device=${dev_stub}2,device=${dev_stub}3 0 2" >>/etc/fstab
    # Tell systemd about the new mount
    systemctl daemon-reload
    # Restart cryptsetup.target to trigger autounlock of partitions in /etc/crypttab
    systemctl restart cryptsetup.target
    # Start the corresponding mount unit and check if the btrfs device was reconstructed
    # correctly
    systemctl start "${mpoint##*/}.mount"
    btrfs filesystem show
    test -e "/dev/disk/by-uuid/$uuid"
    test -e "/dev/disk/by-label/$label"
    helper_check_device_symlinks
    grep "hello there" "$mpoint/test"
    # Cleanup
    systemctl stop "${mpoint##*/}.mount"
    systemctl stop systemd-cryptsetup@encbtrfs{0..3}
    sed -i "/${mpoint##*/}/d" /etc/fstab
    : >/etc/crypttab
    rm -fr "$mpoint"
    systemctl daemon-reload
    udevadm settle
}
508
# Exercise iSCSI (tgtd target + open-iscsi initiator) with device-backed and
# file-backed LUNs, then layer LVM on top of the file-backed LUNs and verify
# symlinks survive a disconnect/reconnect cycle.
testcase_iscsi_lvm() {
    local dev i label link lun_id mpoint target_name uuid
    local target_ip="127.0.0.1"
    local target_port="3260"
    local vgroup="iscsi_lvm$RANDOM"
    local expected_symlinks=()
    local devices=(
        /dev/disk/by-id/ata-foobar_deadbeefiscsi{0..3}
    )

    ls -l "${devices[@]}"

    # Start the target daemon
    systemctl start tgtd
    systemctl status tgtd

    echo "iSCSI LUNs backed by devices"
    # See RFC3721 and RFC7143
    target_name="iqn.2021-09.com.example:iscsi.test"
    # Initialize a new iSCSI target <$target_name> consisting of 4 LUNs, each
    # backed by a device
    tgtadm --lld iscsi --op new --mode target --tid=1 --targetname "$target_name"
    for ((i = 0; i < ${#devices[@]}; i++)); do
        # lun-0 is reserved by iSCSI
        lun_id="$((i + 1))"
        tgtadm --lld iscsi --op new --mode logicalunit --tid 1 --lun "$lun_id" -b "${devices[$i]}"
        tgtadm --lld iscsi --op update --mode logicalunit --tid 1 --lun "$lun_id"
        expected_symlinks+=(
            "/dev/disk/by-path/ip-$target_ip:$target_port-iscsi-$target_name-lun-$lun_id"
        )
    done
    tgtadm --lld iscsi --op bind --mode target --tid 1 -I ALL
    # Configure the iSCSI initiator
    iscsiadm --mode discoverydb --type sendtargets --portal "$target_ip" --discover
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --login
    udevadm settle
    # Check if all device symlinks are valid and if all expected device symlinks exist
    for link in "${expected_symlinks[@]}"; do
        # We need to do some active waiting anyway, as it may take kernel a bit
        # to attach the newly connected SCSI devices
        helper_wait_for_dev "$link"
        test -e "$link"
    done
    udevadm settle
    helper_check_device_symlinks
    # Cleanup
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --logout
    tgtadm --lld iscsi --op delete --mode target --tid=1

    echo "iSCSI LUNs backed by files + LVM"
    # Note: we use files here to "trick" LVM the disks are indeed on a different
    # host, so it doesn't automagically detect another path to the backing
    # device once we disconnect the iSCSI devices
    target_name="iqn.2021-09.com.example:iscsi.lvm.test"
    mpoint="$(mktemp -d /iscsi_storeXXX)"
    expected_symlinks=()
    # Use the first device as it's configured with larger capacity
    mkfs.ext4 -L iscsi_store "${devices[0]}"
    udevadm settle
    mount "${devices[0]}" "$mpoint"
    for i in {1..4}; do
        dd if=/dev/zero of="$mpoint/lun$i.img" bs=1M count=32
    done
    # Initialize a new iSCSI target <$target_name> consisting of 4 LUNs, each
    # backed by a file
    tgtadm --lld iscsi --op new --mode target --tid=2 --targetname "$target_name"
    # lun-0 is reserved by iSCSI
    for i in {1..4}; do
        tgtadm --lld iscsi --op new --mode logicalunit --tid 2 --lun "$i" -b "$mpoint/lun$i.img"
        tgtadm --lld iscsi --op update --mode logicalunit --tid 2 --lun "$i"
        expected_symlinks+=(
            "/dev/disk/by-path/ip-$target_ip:$target_port-iscsi-$target_name-lun-$i"
        )
    done
    tgtadm --lld iscsi --op bind --mode target --tid 2 -I ALL
    # Configure the iSCSI initiator
    iscsiadm --mode discoverydb --type sendtargets --portal "$target_ip" --discover
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --login
    udevadm settle
    # Check if all device symlinks are valid and if all expected device symlinks exist
    for link in "${expected_symlinks[@]}"; do
        # We need to do some active waiting anyway, as it may take kernel a bit
        # to attach the newly connected SCSI devices
        helper_wait_for_dev "$link"
        test -e "$link"
    done
    udevadm settle
    helper_check_device_symlinks
    # Add all iSCSI devices into a LVM volume group, create two logical volumes,
    # and check if necessary symlinks exist (and are valid)
    lvm pvcreate -y "${expected_symlinks[@]}"
    lvm pvs
    lvm vgcreate "$vgroup" -y "${expected_symlinks[@]}"
    lvm vgs
    lvm vgchange -ay "$vgroup"
    lvm lvcreate -y -L 4M "$vgroup" -n mypart1
    lvm lvcreate -y -L 8M "$vgroup" -n mypart2
    lvm lvs
    udevadm settle
    test -e "/dev/$vgroup/mypart1"
    test -e "/dev/$vgroup/mypart2"
    mkfs.ext4 -L mylvpart1 "/dev/$vgroup/mypart1"
    udevadm settle
    test -e "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
    # Disconnect the iSCSI devices and check all the symlinks
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --logout
    # "Reset" the DM state, since we yanked the backing storage from under the LVM,
    # so the currently active VGs/LVs are invalid
    dmsetup remove_all --deferred
    udevadm settle
    # The LVM and iSCSI related symlinks should be gone
    test ! -e "/dev/$vgroup"
    test ! -e "/dev/disk/by-label/mylvpart1"
    for link in "${expected_symlinks[@]}"; do
        test ! -e "$link"
    done
    helper_check_device_symlinks "/dev/disk"
    # Reconnect the iSCSI devices and check if everything get detected correctly
    iscsiadm --mode discoverydb --type sendtargets --portal "$target_ip" --discover
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --login
    udevadm settle
    for link in "${expected_symlinks[@]}"; do
        helper_wait_for_dev "$link"
        helper_wait_for_vgroup "$link" "$vgroup"
        test -e "$link"
    done
    udevadm settle
    test -e "/dev/$vgroup/mypart1"
    test -e "/dev/$vgroup/mypart2"
    test -e "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
    # Cleanup
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --logout
    tgtadm --lld iscsi --op delete --mode target --tid=2
    umount "$mpoint"
    rm -rf "$mpoint"
}
647
# Verify udev copes with a device whose sysfs path is too long to fit into
# a unit name: symlinks must exist, mounting/swapping must work, and the log
# must not be spammed with path-related messages.
testcase_long_sysfs_path() {
    local link logfile mpoint
    local expected_symlinks=(
        "/dev/disk/by-label/data_vol"
        "/dev/disk/by-label/swap_vol"
        "/dev/disk/by-partlabel/test_swap"
        "/dev/disk/by-partlabel/test_part"
        "/dev/disk/by-partuuid/deadbeef-dead-dead-beef-000000000000"
        "/dev/disk/by-uuid/deadbeef-dead-dead-beef-111111111111"
        "/dev/disk/by-uuid/deadbeef-dead-dead-beef-222222222222"
    )

    # Make sure the test device is connected and show its "wonderful" path
    stat /sys/block/vda
    readlink -f /sys/block/vda/dev

    for link in "${expected_symlinks[@]}"; do
        test -e "$link"
    done

    # Try to mount the data partition manually (using its label)
    mpoint="$(mktemp -d /logsysfsXXX)"
    mount LABEL=data_vol "$mpoint"
    touch "$mpoint/test"
    umount "$mpoint"
    # Do the same, but with UUID and using fstab
    echo "UUID=deadbeef-dead-dead-beef-222222222222 $mpoint ext4 defaults 0 0" >>/etc/fstab
    systemctl daemon-reload
    mount "$mpoint"
    test -e "$mpoint/test"
    umount "$mpoint"

    # Test out the swap partition
    swapon -v -L swap_vol
    swapoff -v -L swap_vol

    udevadm settle

    logfile="$(mktemp)"
    # The expected diagnostic about the overly long device path must be present
    journalctl -b -q --no-pager -o short-monotonic -p info --grep "Device path.*vda.?' too long to fit into unit name"
    # Make sure we don't unnecessarily spam the log
    journalctl -b -q --no-pager -o short-monotonic -p info --grep "/sys/devices/.+/vda[0-9]?" _PID=1 + UNIT=systemd-udevd.service | tee "$logfile"
    [[ "$(wc -l <"$logfile")" -lt 10 ]]

    : >/etc/fstab
    rm -fr "${logfile:?}" "${mpoint:?}"
}
695
# Test driver: the test harness checks for /failed (created now, removed on
# success) and /testok (created on success) to determine the overall result.
: >/failed

udevadm settle
udevadm control --log-level debug
lsblk -a

echo "Check if all symlinks under /dev/disk/ are valid (pre-test)"
helper_check_device_symlinks

# TEST_FUNCTION_NAME is passed on the kernel command line via systemd.setenv=
# in the respective test.sh file
if ! command -v "${TEST_FUNCTION_NAME:?}"; then
    echo >&2 "Missing verification handler for test case '$TEST_FUNCTION_NAME'"
    exit 1
fi

echo "TEST_FUNCTION_NAME=$TEST_FUNCTION_NAME"
# Run the selected test case; `set -e` aborts the script on its failure
"$TEST_FUNCTION_NAME"
udevadm settle

echo "Check if all symlinks under /dev/disk/ are valid (post-test)"
helper_check_device_symlinks

udevadm control --log-level info

systemctl status systemd-udevd

# Signal success to the harness
touch /testok
rm /failed