update sources to ceph Nautilus 14.2.1
ceph.git / ceph/src/spdk/test/nvmf/filesystem/filesystem.sh
index f1337b710ee12bc9930021f2de13db3a75a3c14d..057fc57936364d6e08b066720c4ff664bf6bddad 100755
@@ -2,83 +2,97 @@
 
 testdir=$(readlink -f $(dirname $0))
 rootdir=$(readlink -f $testdir/../../..)
-source $rootdir/scripts/autotest_common.sh
+source $rootdir/test/common/autotest_common.sh
 source $rootdir/test/nvmf/common.sh
 
 MALLOC_BDEV_SIZE=64
 MALLOC_BLOCK_SIZE=512
 
-rpc_py="python $rootdir/scripts/rpc.py"
+rpc_py="$rootdir/scripts/rpc.py"
 
 set -e
 
-if ! rdma_nic_available; then
+# pass the parameter 'iso' to this script when running it in isolation to trigger rdma device initialization.
+# e.g. sudo ./filesystem.sh iso
+nvmftestinit $1
+
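+# Use the first available RDMA-capable IP as the target address; skip the test if none is found.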
+RDMA_IP_LIST=$(get_available_rdma_ips)
+NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
+if [ -z $NVMF_FIRST_TARGET_IP ]; then
        echo "no NIC for nvmf test"
        exit 0
 fi
 
 timing_enter fs_test
 
-# Start up the NVMf target in another process
-$rootdir/app/nvmf_tgt/nvmf_tgt -c $testdir/../nvmf.conf &
-nvmfpid=$!
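+# Run the whole test twice, with in-capsule data sizes of 0 and 4096 bytes.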
+for incapsule in 0 4096; do
+       # Start up the NVMf target in another process
+       $NVMF_APP -m 0xF &
+       nvmfpid=$!
+
+       trap "process_shm --id $NVMF_APP_SHM_ID; killprocess $nvmfpid; nvmftestfini $1; exit 1" SIGINT SIGTERM EXIT
+
+       waitforlisten $nvmfpid
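+       # Create the RDMA transport for this pass, passing the current in-capsule data size via -c.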
+       $rpc_py nvmf_create_transport -t RDMA -u 8192 -p 4 -c $incapsule
 
-trap "killprocess $nvmfpid; exit 1" SIGINT SIGTERM EXIT
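+       # Create two malloc bdevs to export as namespaces.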
+       bdevs="$($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
+       bdevs+=" $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
 
-waitforlisten $nvmfpid ${RPC_PORT}
+       modprobe -v nvme-rdma
 
-bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
-bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
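+       # Create the subsystem, attach both bdevs as namespaces, and listen on RDMA port 4420.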
+       $rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+       for bdev in $bdevs; do
+               $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
+       done
+       $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $NVMF_FIRST_TARGET_IP -s 4420
 
-modprobe -v nvme-rdma
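+       # Connect the kernel NVMe-oF initiator to the subsystem.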
+       nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
 
-$rpc_py construct_nvmf_subsystem Direct nqn.2016-06.io.spdk:cnode1 'transport:RDMA traddr:192.168.100.8 trsvcid:4420' '' -p "*"
-$rpc_py construct_nvmf_subsystem Virtual nqn.2016-06.io.spdk:cnode2 'transport:RDMA traddr:192.168.100.8 trsvcid:4420' '' -s SPDK00000000000001 -n "$bdevs"
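+       # Wait for both namespaces to appear as local block devices.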
+       waitforblk "nvme0n1"
+       waitforblk "nvme0n2"
 
-nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
-nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode2" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+       mkdir -p /mnt/device
 
-mkdir -p /mnt/device
+       devs=`lsblk -l -o NAME | grep nvme`
 
-devs=`lsblk -l -o NAME | grep nvme`
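+       # Partition each NVMe block device and exercise ext4, btrfs, and xfs on it.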
+       for dev in $devs; do
+               timing_enter parted
+               parted -s /dev/$dev mklabel msdos  mkpart primary '0%' '100%'
+               timing_exit parted
+               sleep 1
 
-for dev in $devs; do
-       timing_enter parted
-       parted -s /dev/$dev mklabel msdos  mkpart primary '0%' '100%'
-       timing_exit parted
-       sleep 1
+               for fstype in "ext4" "btrfs" "xfs"; do
+                       timing_enter $fstype
+                       if [ $fstype = ext4 ]; then
+                               force=-F
+                       else
+                               force=-f
+                       fi
 
-       for fstype in "ext4" "btrfs" "xfs"; do
-               timing_enter $fstype
-               if [ $fstype = ext4 ]; then
-                       force=-F
-               else
-                       force=-f
-               fi
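+                       # Format the partition, mount it, create and remove a file, then unmount.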
+                       mkfs.${fstype} $force /dev/${dev}p1
 
-               mkfs.${fstype} $force /dev/${dev}p1
+                       mount /dev/${dev}p1 /mnt/device
+                       touch /mnt/device/aaa
+                       sync
+                       rm /mnt/device/aaa
+                       sync
+                       umount /mnt/device
+                       timing_exit $fstype
+               done
 
-               mount /dev/${dev}p1 /mnt/device
-               touch /mnt/device/aaa
-               sync
-               rm /mnt/device/aaa
-               sync
-               umount /mnt/device
-               timing_exit $fstype
+               parted -s /dev/$dev rm 1
        done
 
-       parted -s /dev/$dev rm 1
-done
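+       # Tear down this pass: disconnect the initiator, delete the subsystem, and stop the target.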
+       sync
+       nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true
 
-sync
-nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true
-nvme disconnect -n "nqn.2016-06.io.spdk:cnode2" || true
+       $rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
 
-$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1
-$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode2
+       trap - SIGINT SIGTERM EXIT
 
-trap - SIGINT SIGTERM EXIT
+       nvmfcleanup
+       killprocess $nvmfpid
+done
 
-nvmfcleanup
-killprocess $nvmfpid
+nvmftestfini $1
 timing_exit fs_test