#!/usr/bin/env bash

testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/nvmf/common.sh

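# Bdev and lvol sizes below are in MiB; MALLOC_BLOCK_SIZE is in bytes.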
MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512
LVOL_BDEV_SIZE=10
SUBSYS_NR=2
LVOL_BDEVS_NR=6

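# rpc.py is the JSON-RPC client used to configure the running SPDK target.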
rpc_py="$rootdir/scripts/rpc.py"

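# Disconnect the host-side NVMe-oF connections for every subsystem created by this test.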
function disconnect_nvmf()
{
    for i in $(seq 1 $SUBSYS_NR); do
        nvme disconnect -n "nqn.2016-06.io.spdk:cnode${i}" || true
    done
}

set -e

# Pass the parameter 'iso' to this script when running it in isolation to trigger RDMA device initialization,
# e.g. sudo ./nvmf_lvol.sh iso
nvmftestinit $1

RDMA_IP_LIST=$(get_available_rdma_ips)
NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
if [ -z "$NVMF_FIRST_TARGET_IP" ]; then
    echo "no NIC for nvmf test"
    exit 0
fi

# Soft-RoCE does not have enough queues available for multi-connection tests.
# Detect whether we are using software RDMA and, if so, lower the number of subsystems for the test.
if check_ip_is_soft_roce "$NVMF_FIRST_TARGET_IP"; then
    echo "Using software RDMA, lowering number of NVMeOF subsystems."
    SUBSYS_NR=1
fi

timing_enter lvol_integrity
timing_enter start_nvmf_tgt
# Start up the NVMf target in another process
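# -m 0xF pins the target's reactors to cores 0-3.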
$NVMF_APP -m 0xF &
pid=$!

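# On error or interrupt: dump the target's shared-memory state, disconnect the initiators,
# kill the target and clean up the test environment before exiting.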
trap "process_shm --id $NVMF_APP_SHM_ID; disconnect_nvmf; killprocess $pid; nvmftestfini $1; exit 1" SIGINT SIGTERM EXIT

waitforlisten $pid
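# Create the RDMA transport; -u sets the I/O unit size in bytes and -p appears to be the
# per-controller queue pair limit in this SPDK version.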
$rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4
timing_exit start_nvmf_tgt

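# Load the kernel NVMe-oF RDMA initiator module used by the 'nvme connect -t rdma' calls below.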
modprobe -v nvme-rdma

lvol_stores=()
lvol_bdevs=()
# Create the first LVS on a RAID-0 bdev built from two malloc bdevs;
# create each remaining LVS on its own malloc bdev.
for i in $(seq 1 $SUBSYS_NR); do
    if [ $i -eq 1 ]; then
        # construct RAID bdev and put its name in $bdev
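        # Two malloc bdevs striped together; -s 64 is the strip size (presumably in KiB) and -r 0 selects RAID level 0.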
        malloc_bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
        malloc_bdevs+="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
        $rpc_py construct_raid_bdev -n raid0 -s 64 -r 0 -b "$malloc_bdevs"
        bdev="raid0"
    else
        # construct malloc bdev and put its name in $bdev
        bdev="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
    fi
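    # Create a logical volume store on $bdev; -c 524288 should set the cluster size to 512 KiB.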
    ls_guid="$($rpc_py construct_lvol_store $bdev lvs_$i -c 524288)"
    lvol_stores+=("$ls_guid")

    # One NVMe-oF subsystem per lvol store, each exposing $LVOL_BDEVS_NR lvol bdevs as namespaces
    ns_bdevs=""

    # Create lvol bdevs on each lvol store
    for j in $(seq 1 $LVOL_BDEVS_NR); do
        lb_name="$($rpc_py construct_lvol_bdev -u $ls_guid lbd_$j $LVOL_BDEV_SIZE)"
        lvol_bdevs+=("$lb_name")
        ns_bdevs+="$lb_name "
    done

    $rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode$i -a -s SPDK$i
    for bdev in $ns_bdevs; do
        $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i $bdev
    done
    $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$i -t rdma -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
done

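# Connect to each subsystem with the kernel initiator; lvol bdev j of subsystem i should show up
# as /dev/nvme$((i-1))n$j, assuming no other NVMe devices are present on the host.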
for i in $(seq 1 $SUBSYS_NR); do
    k=$((i - 1))
    nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode${i}" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"

    for j in $(seq 1 $LVOL_BDEVS_NR); do
        waitforblk "nvme${k}n${j}"
    done
done

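# Run fio over the connected namespaces; the positional arguments (block size, queue depth,
# workload, runtime in seconds, verify) follow what nvmf_fio.py expects.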
$testdir/../fio/nvmf_fio.py 262144 64 randwrite 10 verify

sync
disconnect_nvmf

for i in $(seq 1 $SUBSYS_NR); do
    $rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode$i
done

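# Remove any fio job files left behind in the current directory.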
rm -f ./local-job*

trap - SIGINT SIGTERM EXIT

nvmfcleanup
killprocess $pid
nvmftestfini $1
timing_exit lvol_integrity