git.proxmox.com Git - ceph.git/blobdiff - ceph/src/spdk/test/nvmf/discovery/discovery.sh
update sources to ceph Nautilus 14.2.1
[ceph.git] / ceph / src / spdk / test / nvmf / discovery / discovery.sh
index b966cb797c402301a8070e4394f3354ea2741404..f0db8ab36a2d4f356bbc10a87dc7b08ad758e9e9 100755 (executable)
@@ -2,43 +2,54 @@
 
 testdir=$(readlink -f $(dirname $0))
 rootdir=$(readlink -f $testdir/../../..)
-source $rootdir/scripts/autotest_common.sh
+source $rootdir/test/common/autotest_common.sh
 source $rootdir/test/nvmf/common.sh
 
-MALLOC_BDEV_SIZE=64
-MALLOC_BLOCK_SIZE=512
+NULL_BDEV_SIZE=102400
+NULL_BLOCK_SIZE=512
 
-rpc_py="python $rootdir/scripts/rpc.py"
+rpc_py="$rootdir/scripts/rpc.py"
 
 set -e
 
+# pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization.
+# e.g. sudo ./discovery.sh iso
+nvmftestinit $1
+
 if ! hash nvme; then
        echo "nvme command not found; skipping discovery test"
        exit 0
 fi
 
-if ! rdma_nic_available; then
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+if [ -z $NVMF_FIRST_TARGET_IP ]; then
        echo "no NIC for nvmf test"
        exit 0
 fi
 
 timing_enter discovery
-
+timing_enter start_nvmf_tgt
 # Start up the NVMf target in another process
-$rootdir/app/nvmf_tgt/nvmf_tgt -c $testdir/../nvmf.conf &
+$NVMF_APP -m 0xF &
 nvmfpid=$!
 
-trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
+trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini $1; exit 1" SIGINT SIGTERM EXIT
 
-waitforlisten $nvmfpid ${RPC_PORT}
+waitforlisten $nvmfpid
+$rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4
+timing_exit start_nvmf_tgt
 
-bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
-bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+null_bdevs="$($rpc_py construct_null_bdev Null0 $NULL_BDEV_SIZE $NULL_BLOCK_SIZE) "
+null_bdevs+="$($rpc_py construct_null_bdev Null1 $NULL_BDEV_SIZE $NULL_BLOCK_SIZE)"
 
 modprobe -v nvme-rdma
 
-$rpc_py construct_nvmf_subsystem Direct nqn.2016-06.io.spdk:cnode1 'transport:RDMA traddr:192.168.100.8 trsvcid:4420' '' -p "*"
-$rpc_py construct_nvmf_subsystem Virtual nqn.2016-06.io.spdk:cnode2 'transport:RDMA traddr:192.168.100.8 trsvcid:4420' '' -s SPDK00000000000001 -n "$bdevs"
+$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+for null_bdev in $null_bdevs; do
+       $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $null_bdev
+done
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $NVMF_FIRST_TARGET_IP -s 4420
 
 nvme discover -t rdma -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
 
@@ -46,10 +57,20 @@ echo "Perform nvmf subsystem discovery via RPC"
 $rpc_py get_nvmf_subsystems
 
 $rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
-$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode2
+
+for null_bdev in $null_bdevs; do
+       $rpc_py delete_null_bdev $null_bdev
+done
+
+check_bdevs=$($rpc_py get_bdevs | jq -r '.[].name')
+if [ -n "$check_bdevs" ]; then
+       echo $check_bdevs
+       exit 1
+fi
 
 trap - SIGINT SIGTERM EXIT
 
 nvmfcleanup
 killprocess $nvmfpid
+nvmftestfini $1
 timing_exit discovery