# Source: git.proxmox.com Git - ceph.git - ceph/src/spdk/test/nvme/cuse/nvme_ns_manage_cuse.sh
# (imported with "update source to Ceph Pacific 16.2.2")
#!/usr/bin/env bash

testdir=$(readlink -f "$(dirname "$0")")
rootdir=$(readlink -f "$testdir/../../..")
source "$rootdir/scripts/common.sh"
source "$rootdir/test/common/autotest_common.sh"

# nvme-cli binary used to drive both the kernel and the SPDK cuse devices.
NVME_CMD="/usr/local/src/nvme-cli/nvme"

rpc_py=$rootdir/scripts/rpc.py

# Bind NVMe devices to the kernel driver long enough to enumerate their
# PCI addresses, then release them again.
$rootdir/scripts/setup.sh
sleep 1

bdfs=$(get_nvme_bdfs)

$rootdir/scripts/setup.sh reset
sleep 1
19
# Find a bdf whose controller supports Namespace Management.
for bdf in $bdfs; do
	nvme_name=$(get_nvme_ctrlr_from_bdf "$bdf")
	if [[ -z "$nvme_name" ]]; then
		continue
	fi

	# OACS (Optional Admin Command Support): bit 3 indicates support for
	# the Namespace Management and Namespace Attachment commands.
	oacs=$($NVME_CMD id-ctrl "/dev/$nvme_name" | grep oacs | cut -d: -f2)
	oacs_ns_manage=$((oacs & 0x8))

	if [[ "$oacs_ns_manage" -ne 0 ]]; then
		break
	fi
done

if [[ -z "$nvme_name" ]] || [[ "$oacs_ns_manage" -eq 0 ]]; then
	echo "No NVMe device supporting Namespace management found"
	$rootdir/scripts/setup.sh
	exit 1
fi
41
nvme_dev=/dev/${nvme_name}

# Detect supported features and configuration.
# OAES (Optional Asynchronous Events Supported): bit 8 is the Namespace
# Attribute Notices event. Controllers lacking it need a manual reset
# after namespace changes for the host to notice them.
oaes=$($NVME_CMD id-ctrl "$nvme_dev" | grep oaes | cut -d: -f2)
aer_ns_change=$((oaes & 0x100))
47
# Controllers without Namespace Attribute Notice AER support need an
# explicit controller reset before namespace changes become visible.
# $1 - controller device node (e.g. /dev/nvme0).
reset_nvme_if_aer_unsupported() {
	if [[ "$aer_ns_change" -ne 0 ]]; then
		return 0
	fi
	sleep 1
	$NVME_CMD reset "$1" || true
}
54
# Restore the controller to its expected post-test state: a single
# namespace spanning Total NVM Capacity, then rebind devices via setup.sh.
function clean_up() {
	$rootdir/scripts/setup.sh reset

	# This assumes every NVMe controller contains a single namespace,
	# encompassing Total NVM Capacity and formatted as 512 block size.
	# 512 block size is needed for test/vhost/vhost_boot.sh to
	# successfully run.

	tnvmcap=$($NVME_CMD id-ctrl "$nvme_dev" | grep tnvmcap | cut -d: -f2)
	blksize=512

	# Namespace size/capacity are expressed in logical blocks.
	size=$((tnvmcap / blksize))

	echo "Restoring $nvme_dev..."
	# detach/delete may fail if no namespace is currently present.
	$NVME_CMD detach-ns "$nvme_dev" -n 0xffffffff -c 0 || true
	$NVME_CMD delete-ns "$nvme_dev" -n 0xffffffff || true
	$NVME_CMD create-ns "$nvme_dev" -s "$size" -c "$size" -b "$blksize"
	$NVME_CMD attach-ns "$nvme_dev" -n 1 -c 0
	$NVME_CMD reset "$nvme_dev"

	$rootdir/scripts/setup.sh
}
77
# Print the given message framed by "---" separator lines.
info_print() {
	local banner="---"
	echo "$banner"
	echo "$@"
	echo "$banner"
}
83
# Prepare controller: wipe all existing namespaces.
info_print "delete all namespaces"
# detach/delete may fail if the controller has no namespaces attached.
$NVME_CMD detach-ns "$nvme_dev" -n 0xffffffff -c 0 || true
$NVME_CMD delete-ns "$nvme_dev" -n 0xffffffff || true

reset_nvme_if_aer_unsupported "$nvme_dev"
sleep 1

# Bind only the selected controller to SPDK.
PCI_WHITELIST="$bdf" $rootdir/scripts/setup.sh

$SPDK_BIN_DIR/spdk_tgt -m 0x3 &
spdk_tgt_pid=$!
# Make sure the target is killed and the controller restored on any exit path.
trap 'kill -9 ${spdk_tgt_pid}; clean_up; exit 1' SIGINT SIGTERM EXIT

waitforlisten "$spdk_tgt_pid"

$rpc_py bdev_nvme_attach_controller -b Nvme0 -t PCIe -a "$bdf"
$rpc_py bdev_nvme_cuse_register -n Nvme0

sleep 1
# The controller-level cuse node must exist after registration.
[[ -c /dev/spdk/nvme0 ]]
105
# No namespaces exist yet, so no per-namespace cuse nodes may be present.
# (With no match the glob stays literal and the -c test correctly fails.)
for dev in /dev/spdk/nvme0n*; do
	[[ ! -c "$dev" ]]
done
109
info_print "create ns: nsze=10000 ncap=10000 flbias=0"
$NVME_CMD create-ns /dev/spdk/nvme0 -s 10000 -c 10000 -f 0

info_print "attach ns: nsid=1 controller=0"
$NVME_CMD attach-ns /dev/spdk/nvme0 -n 1 -c 0

reset_nvme_if_aer_unsupported /dev/spdk/nvme0
sleep 1

# cuse must now expose a node for the newly attached namespace.
[[ -c /dev/spdk/nvme0n1 ]]
120
info_print "create ns: nsze=10000 ncap=10000 flbias=0"
$NVME_CMD create-ns /dev/spdk/nvme0 -s 10000 -c 10000 -f 0

info_print "attach ns: nsid=2 controller=0"
$NVME_CMD attach-ns /dev/spdk/nvme0 -n 2 -c 0

reset_nvme_if_aer_unsupported /dev/spdk/nvme0
sleep 1

# A second namespace node must appear.
[[ -c /dev/spdk/nvme0n2 ]]
131
info_print "detach ns: nsid=2 controller=0"
$NVME_CMD detach-ns /dev/spdk/nvme0 -n 2 -c 0 || true

info_print "delete ns: nsid=2"
$NVME_CMD delete-ns /dev/spdk/nvme0 -n 2 || true

reset_nvme_if_aer_unsupported /dev/spdk/nvme0
sleep 1

# The cuse node for the removed namespace must be gone.
[[ ! -c /dev/spdk/nvme0n2 ]]
142
info_print "detach ns: nsid=1 controller=0"
$NVME_CMD detach-ns /dev/spdk/nvme0 -n 1 -c 0 || true

info_print "delete ns: nsid=1"
$NVME_CMD delete-ns /dev/spdk/nvme0 -n 1 || true

reset_nvme_if_aer_unsupported /dev/spdk/nvme0
sleep 1

# Here we should not have any per-namespace cuse devices left.
for dev in /dev/spdk/nvme0n*; do
	[[ ! -c "$dev" ]]
done
156
$rpc_py bdev_nvme_detach_controller Nvme0

sleep 1
# The controller-level cuse node must be gone after detaching.
[[ ! -c /dev/spdk/nvme0 ]]

# Clear the failure trap before the normal-path teardown.
trap - SIGINT SIGTERM EXIT
killprocess "$spdk_tgt_pid"
clean_up