#!/usr/bin/env bash

testdir=$(readlink -f "$(dirname "$0")")
rootdir=$(readlink -f "$testdir/../../..")
source "$rootdir/test/common/autotest_common.sh"
source "$rootdir/test/nvmf/common.sh"

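# The spdk/nvme-cli sources are expected under DEPENDENCY_DIR; abort early if it is not set.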
if [ -z "${DEPENDENCY_DIR}" ]; then
        echo "DEPENDENCY_DIR not defined!"
        exit 1
fi

spdk_nvme_cli="${DEPENDENCY_DIR}/nvme-cli"

MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512

rpc_py="$rootdir/scripts/rpc.py"

set -e

# Pass the parameter 'iso' to this script when running it in isolation to trigger RDMA device initialization,
# e.g. sudo ./nvme_cli.sh iso
nvmftestinit $1

RDMA_IP_LIST=$(get_available_rdma_ips)
NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
if [ -z "$NVMF_FIRST_TARGET_IP" ]; then
        echo "no NIC for nvmf test"
        exit 0
fi

timing_enter nvme_cli
timing_enter start_nvmf_tgt
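# Start the NVMe-oF target application; -m 0xF pins it to the first four cores.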
$NVMF_APP -m 0xF &
nvmfpid=$!

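# On interrupt or unexpected exit, dump shared memory, kill the target and run test cleanup.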
trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini $1; exit 1" SIGINT SIGTERM EXIT

waitforlisten $nvmfpid
$rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4
timing_exit start_nvmf_tgt

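# Create two malloc bdevs (64 MB each, 512-byte blocks) to serve as namespaces of the subsystem below.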
bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
bdevs+="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"

modprobe -v nvme-rdma

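# Create subsystem cnode1, attach both malloc bdevs as namespaces and listen on the RDMA transport.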
$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
for bdev in $bdevs; do
        $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
done
$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT

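# Connect with the kernel initiator and wait until both namespaces show up as block devices.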
nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"

waitforblk "nvme0n1"
waitforblk "nvme0n2"

nvme list

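# Exercise basic admin commands (identify, SMART log) against every controller and namespace.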
for ctrl in /dev/nvme?; do
        nvme id-ctrl $ctrl
        nvme smart-log $ctrl
done

for ns in /dev/nvme?n*; do
        nvme id-ns $ns
done

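# Disconnect the kernel initiator; errors are ignored (cnode2 may not exist).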
nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true
nvme disconnect -n "nqn.2016-06.io.spdk:cnode2" || true

if [ -d "$spdk_nvme_cli" ]; then
        # Test spdk/nvme-cli NVMe-oF commands: discover, connect and disconnect
        cd "$spdk_nvme_cli"
        ./nvme discover -t rdma -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
        nvme_num_before_connection=$(nvme list | grep "/dev/nvme" | wc -l)
        ./nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
        sleep 1
        nvme_num=$(nvme list | grep "/dev/nvme" | wc -l)
        ./nvme disconnect -n "nqn.2016-06.io.spdk:cnode1"
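        # The connect above must have added at least one device to the nvme list.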
        if [ "$nvme_num" -le "$nvme_num_before_connection" ]; then
                echo "spdk/nvme-cli connect target devices failed"
                exit 1
        fi
fi

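# Final teardown: remove the subsystem, clear the cleanup trap and shut down the target.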
$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
trap - SIGINT SIGTERM EXIT

nvmfcleanup
killprocess $nvmfpid
nvmftestfini $1
report_test_completion "nvmf_spdk_nvme_cli"
timing_exit nvme_cli