]> git.proxmox.com Git - mirror_zfs.git/blame - tests/zfs-tests/tests/functional/kstat/state.ksh
Fix flake 8 style warnings
[mirror_zfs.git] / tests / zfs-tests / tests / functional / kstat / state.ksh
CommitLineData
f0ed6c74
TH
1#!/bin/ksh -p
2#
3# CDDL HEADER START
4#
5# The contents of this file are subject to the terms of the
6# Common Development and Distribution License (the "License").
7# You may not use this file except in compliance with the License.
8#
9# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10# or http://www.opensolaris.org/os/licensing.
11# See the License for the specific language governing permissions
12# and limitations under the License.
13#
14# When distributing Covered Code, include this CDDL HEADER in each
15# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16# If applicable, add the following below this CDDL HEADER, with the
17# fields enclosed by brackets "[]" replaced with your own identifying
18# information: Portions Copyright [yyyy] [name of copyright owner]
19#
20# CDDL HEADER END
21
22#
23# Copyright (c) 2018 by Lawrence Livermore National Security, LLC.
24#
25
26#
27# DESCRIPTION:
28# Test /proc/spl/kstat/zfs/<pool>/state kstat
29#
30# STRATEGY:
31# 1. Create a mirrored pool
32# 2. Check that pool is ONLINE
33# 3. Fault one disk
34# 4. Check that pool is DEGRADED
35# 5. Create a new pool with a single scsi_debug disk
36# 6. Remove the disk
37# 7. Check that pool is SUSPENDED
38# 8. Add the disk back in
39# 9. Clear errors and destroy the pools
40
41. $STF_SUITE/include/libtest.shlib
42
43verify_runnable "both"
44
function cleanup
{
	# Tear down the scsi_debug-backed pool. The disk must be restored
	# before the pool can be cleared and destroyed cleanly.
	if [[ -n $TESTPOOL2 ]] ; then
		if [[ -n $host ]] ; then
			# Rescan the SCSI host to re-insert the removed disk
			scan_scsi_hosts $host

			# The device may have been renamed after re-insertion
			SDISK=$(get_debug_device)
			log_must ln $DEV_RDSKDIR/$SDISK $REALDISK
		fi

		# Roll the disk image back to the pre-removal backup
		if [[ -n $BACKUP ]] ; then
			gunzip -c $BACKUP > $REALDISK
			log_must rm -f $BACKUP
		fi

		# With the disk back, the pool's errors can be cleared
		log_must zpool clear $TESTPOOL2

		# Clearing the errors un-suspends the pool, letting the
		# backgrounded 'zpool scrub' finish; reap it here.
		wait

		destroy_pool $TESTPOOL2
		log_must rm $REALDISK
		unload_scsi_debug
	fi
}
77
78# Check that our pool state values match what's expected
79#
80# $1: pool name
81# $2: expected state ("ONLINE", "DEGRADED", "SUSPENDED", etc)
# Verify that every source of pool state agrees with the expected value:
# 'zpool status', 'zpool list', and the /proc state kstat.
#
# $1: pool name
# $2: expected state ("ONLINE", "DEGRADED", "SUSPENDED", etc)
#
# Returns 0 if all three match $2, non-zero otherwise.
function check_all
{
	typeset pool=$1
	typeset expected=$2
	typeset status_state list_state kstat_state

	status_state=$(zpool status $pool | awk '/state: /{print $2}')
	list_state=$(zpool list -H -o health $pool)
	kstat_state=$(cat /proc/spl/kstat/zfs/$pool/state)
	log_note "Checking $expected = $status_state = $list_state = $kstat_state"

	[[ "$expected" == "$status_state" && \
	    "$expected" == "$list_state" && \
	    "$expected" == "$kstat_state" ]]
}
98
log_onexit cleanup

log_assert "Testing /proc/spl/kstat/zfs/<pool>/state kstat"

# Test that the initial pool is healthy.  Use log_must so a mismatch
# actually fails the test rather than being silently ignored.
log_must check_all $TESTPOOL "ONLINE"

# Fault one of the disks, and check that pool is degraded
DISK1=$(echo "$DISKS" | awk '{print $2}')
log_must zpool offline -tf $TESTPOOL $DISK1
log_must check_all $TESTPOOL "DEGRADED"

# Create a new pool out of a scsi_debug disk
TESTPOOL2=testpool2
MINVDEVSIZE_MB=$((MINVDEVSIZE / 1048576))
load_scsi_debug $MINVDEVSIZE_MB 1 1 1 '512b'

SDISK=$(get_debug_device)
host=$(get_scsi_host $SDISK)

# Use $REALDISK instead of $SDISK in our pool because $SDISK can change names
# as we remove/add the disk (i.e. /dev/sdf -> /dev/sdg).
REALDISK=/dev/kstat-state-realdisk
log_must [ ! -e $REALDISK ]
log_must ln $DEV_RDSKDIR/$SDISK $REALDISK

log_must zpool create $TESTPOOL2 $REALDISK

# Backup the contents of the disk image so cleanup() can restore it.
BACKUP=$TEST_BASE_DIR/kstat-state-realdisk.gz
log_must [ ! -e $BACKUP ]
log_must eval "gzip -c $REALDISK > $BACKUP"

# Yank out the disk from under the pool
log_must rm $REALDISK
remove_disk $SDISK

# Run a 'zpool scrub' in the background to suspend the pool. We run it in the
# background since the command will hang when the pool gets suspended. The
# command will resume and exit after we restore the missing disk later on.
zpool scrub $TESTPOOL2 &
sleep 1 # Give the scrub some time to run before we check if it fails

log_must check_all $TESTPOOL2 "SUSPENDED"

log_pass "/proc/spl/kstat/zfs/<pool>/state test successful"