# Resolve the test directory and repository root, then pull in the shared
# helper libraries. Paths are quoted so the script survives spaces in $0.
testdir=$(readlink -f "$(dirname "$0")")
rootdir=$(readlink -f "$testdir/../../..")
source "$rootdir/scripts/common.sh"
source "$rootdir/test/common/autotest_common.sh"
# Path to the nvme-cli binary used for all raw admin commands below.
NVME_CMD="/usr/local/src/nvme-cli/nvme"

# SPDK JSON-RPC client used to drive the target.
rpc_py=$rootdir/scripts/rpc.py
# Bind devices for SPDK, then release them back to the kernel driver so
# nvme-cli can talk to the controller directly.
# NOTE(review): original lines ~13-16 (between these two calls, presumably the
# enumeration of candidate NVMe bdfs, e.g. via get_nvme_bdfs) are missing from
# this view — TODO confirm against upstream.
$rootdir/scripts/setup.sh
$rootdir/scripts/setup.sh reset
# Find bdf that supports Namespace Management.
# NOTE(review): the surrounding loop over candidate bdfs (original lines ~21,
# ~24-26, ~32-34) is missing from this view; the 'continue'/'break' control
# flow is reconstructed from the visible fragments — TODO confirm upstream.
for bdf in $bdfs; do
	nvme_name=$(get_nvme_ctrlr_from_bdf ${bdf})
	if [[ -z "$nvme_name" ]]; then
		continue
	fi

	# Check Optional Admin Command Support for Namespace Management
	# (mask 0x8 selects the OACS namespace-management capability bit).
	oacs=$($NVME_CMD id-ctrl /dev/${nvme_name} | grep oacs | cut -d: -f2)
	oacs_ns_manage=$((oacs & 0x8))

	if [[ "$oacs_ns_manage" -ne 0 ]]; then
		break
	fi
done
# Bail out when no controller with namespace-management support was found,
# restoring the device bindings first.
if [[ "${nvme_name}" == "" ]] || [[ "$oacs_ns_manage" -eq 0 ]]; then
	echo "No NVMe device supporting Namespace management found"
	$rootdir/scripts/setup.sh
	# NOTE(review): original lines ~39-40 are missing from this view; an
	# 'exit 1' presumably terminates the test here — TODO confirm.
	exit 1
fi
# Kernel device node of the selected controller.
nvme_dev=/dev/${nvme_name}

# Detect supported features and configuration: mask 0x100 selects the OAES
# namespace-attribute-change notification bit; controllers without it need an
# explicit reset after namespace changes (see reset_nvme_if_aer_unsupported).
oaes=$($NVME_CMD id-ctrl ${nvme_dev} | grep oaes | cut -d: -f2)
aer_ns_change=$((oaes & 0x100))
# Reset the controller given as $1 when it cannot signal namespace changes via
# asynchronous events (aer_ns_change == 0); otherwise do nothing. Failures of
# the reset are tolerated deliberately ('|| true') — the node may be mid-rescan.
function reset_nvme_if_aer_unsupported() {
	if [[ "$aer_ns_change" -eq "0" ]]; then
		# NOTE(review): original line ~50 (between the condition and the
		# reset, likely a settle delay) is missing from this view.
		$NVME_CMD reset "$1" || true
	fi
}
# Restore the controller to a usable single-namespace layout and hand the
# device back to SPDK.
# NOTE(review): the function header (original line ~55) is missing from this
# view; the trap handler installed later calls 'clean_up', so this body is
# presumed to be that function — TODO confirm against upstream.
function clean_up() {
	$rootdir/scripts/setup.sh reset

	# This assumes every NVMe controller contains single namespace,
	# encompassing Total NVM Capacity and formatted as 512 block size.
	# 512 block size is needed for test/vhost/vhost_boot.sh to
	# work (comment truncated in this view).
	tnvmcap=$($NVME_CMD id-ctrl ${nvme_dev} | grep tnvmcap | cut -d: -f2)
	# NOTE(review): the blksize assignment (original lines ~64-65) is
	# missing from this view; 512 is implied by the comment above — TODO
	# confirm.
	blksize=512

	# Namespace size/capacity in blocks = total NVM capacity / block size.
	size=$((tnvmcap / blksize))

	echo "Restoring $nvme_dev..."
	# Best-effort teardown of whatever namespaces the test left behind
	# (0xffffffff addresses all namespaces), then recreate namespace 1.
	$NVME_CMD detach-ns ${nvme_dev} -n 0xffffffff -c 0 || true
	$NVME_CMD delete-ns ${nvme_dev} -n 0xffffffff || true
	$NVME_CMD create-ns ${nvme_dev} -s ${size} -c ${size} -b ${blksize}
	$NVME_CMD attach-ns ${nvme_dev} -n 1 -c 0
	$NVME_CMD reset ${nvme_dev}

	$rootdir/scripts/setup.sh
}
# Print a banner line for the test step named in $1.
# NOTE(review): the original body (lines ~79-82) is missing from this view;
# reconstructed minimally as a separator banner — TODO confirm the exact
# upstream output format.
function info_print() {
	echo "=================================================="
	echo "$1"
	echo "=================================================="
}
# Prepare the controller: best-effort detach and delete of every existing
# namespace (0xffffffff addresses all namespaces).
info_print "delete all namespaces"
$NVME_CMD detach-ns ${nvme_dev} -n 0xffffffff -c 0 || true
$NVME_CMD delete-ns ${nvme_dev} -n 0xffffffff || true

reset_nvme_if_aer_unsupported ${nvme_dev}
# NOTE(review): original lines ~90-91 (likely a settle delay) are missing from
# this view.

# Rebind only the selected device to userspace drivers for the SPDK target.
PCI_WHITELIST="${bdf}" $rootdir/scripts/setup.sh
# Launch the SPDK target in the background on cores 0-1.
$SPDK_BIN_DIR/spdk_tgt -m 0x3 &
# NOTE(review): capture of the background PID (original line ~95,
# 'spdk_tgt_pid=$!') is missing from this view but is required by the trap and
# waitforlisten below — reconstructed; TODO confirm.
spdk_tgt_pid=$!

# On interrupt/termination/exit: kill the target and restore device state.
trap 'kill -9 ${spdk_tgt_pid}; clean_up; exit 1' SIGINT SIGTERM EXIT

waitforlisten $spdk_tgt_pid
# Attach the controller over PCIe and expose it to the kernel through CUSE.
$rpc_py bdev_nvme_attach_controller -b Nvme0 -t PCIe -a ${bdf}
$rpc_py bdev_nvme_cuse_register -n Nvme0

# NOTE(review): original lines ~102-103 (likely a settle delay) are missing
# from this view. The CUSE controller character device must now exist.
[[ -c /dev/spdk/nvme0 ]]
# NOTE(review): the loop body (original lines ~107-108) is missing from this
# view; upstream presumably verifies each matched namespace node is a
# character device — reconstructed; TODO confirm.
for dev in /dev/spdk/nvme0n*; do
	[[ -c $dev ]] || false
done
# Create namespace 1 (10000 blocks, LBA format 0) and attach it to controller 0.
info_print "create ns: nsze=10000 ncap=10000 flbias=0"
$NVME_CMD create-ns /dev/spdk/nvme0 -s 10000 -c 10000 -f 0

info_print "attach ns: nsid=1 controller=0"
$NVME_CMD attach-ns /dev/spdk/nvme0 -n 1 -c 0

reset_nvme_if_aer_unsupported /dev/spdk/nvme0
# NOTE(review): original lines ~117-118 (likely a settle delay) are missing
# from this view.

# The first namespace node must now exist.
[[ -c /dev/spdk/nvme0n1 ]]
# Create a second namespace and attach it as nsid=2.
info_print "create ns: nsze=10000 ncap=10000 flbias=0"
$NVME_CMD create-ns /dev/spdk/nvme0 -s 10000 -c 10000 -f 0

info_print "attach ns: nsid=2 controller=0"
$NVME_CMD attach-ns /dev/spdk/nvme0 -n 2 -c 0

reset_nvme_if_aer_unsupported /dev/spdk/nvme0
# NOTE(review): original lines ~128-129 (likely a settle delay) are missing
# from this view.

# The second namespace node must now exist.
[[ -c /dev/spdk/nvme0n2 ]]
# Detach and delete namespace 2; its device node must disappear.
info_print "detach ns: nsid=2 controller=0"
$NVME_CMD detach-ns /dev/spdk/nvme0 -n 2 -c 0 || true

info_print "delete ns: nsid=2"
$NVME_CMD delete-ns /dev/spdk/nvme0 -n 2 || true

reset_nvme_if_aer_unsupported /dev/spdk/nvme0
# NOTE(review): original lines ~139-140 (likely a settle delay) are missing
# from this view.

# The second namespace node must be gone.
[[ ! -c /dev/spdk/nvme0n2 ]]
# Detach and delete namespace 1 as well, leaving the controller empty.
info_print "detach ns: nsid=1 controller=0"
$NVME_CMD detach-ns /dev/spdk/nvme0 -n 1 -c 0 || true

info_print "delete ns: nsid=1"
$NVME_CMD delete-ns /dev/spdk/nvme0 -n 1 || true

reset_nvme_if_aer_unsupported /dev/spdk/nvme0
# Here we should not have any cuse devices
# NOTE(review): the loop body (original lines ~154-155) is missing from this
# view; presumably it asserts that no namespace nodes remain — reconstructed;
# TODO confirm.
for dev in /dev/spdk/nvme0n*; do
	[[ ! -c $dev ]] || false
done

$rpc_py bdev_nvme_detach_controller Nvme0

# The CUSE controller node must be gone after detach.
[[ ! -c /dev/spdk/nvme0 ]]

# Clear the failure trap and stop the target cleanly.
trap - SIGINT SIGTERM EXIT
killprocess $spdk_tgt_pid