]> git.proxmox.com Git - ceph.git/blame - ceph/src/spdk/test/vhost/migration/migration-tc2.sh
update source to Ceph Pacific 16.2.2
[ceph.git] / ceph / src / spdk / test / vhost / migration / migration-tc2.sh
CommitLineData
# Pull in shared NVMe-oF test helpers (waitforlisten, transport setup, ...).
# NOTE(review): $rootdir is expected to be exported by the test harness.
# Quoted so a $rootdir containing spaces cannot word-split the path.
source "$rootdir/test/nvmf/common.sh"
function migration_tc2_cleanup_nvmf_tgt() {
	# Stop the nvmf_tgt app tracked by $nvmf_dir/nvmf_tgt.pid.
	#
	# Arguments:
	#   $1 - optional signal name/number; when given, that signal is sent and
	#        the process is required to die within ~5s. When omitted, SIGTERM
	#        is sent and the process is polled for up to 10s.
	# Globals: nvmf_dir (read, unset on success), rpc_nvmf (unset on success)
	local i

	if [[ ! -r "$nvmf_dir/nvmf_tgt.pid" ]]; then
		warning "Pid file '$nvmf_dir/nvmf_tgt.pid' does not exist. "
		return
	fi

	if [[ -n "$1" ]]; then
		trap 'error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
		pkill --signal "$1" -F "$nvmf_dir/nvmf_tgt.pid" || true
		sleep 5
		# 'pkill --signal 0' only probes for existence: if the process is
		# still alive after the signal + grace period, the kill failed.
		# (The previous '! pkill -F' check was inverted - it reported
		# failure precisely when the process was already gone.)
		if pkill --signal 0 -F "$nvmf_dir/nvmf_tgt.pid"; then
			fail "failed to kill nvmf_tgt app"
		fi
	else
		pkill --signal SIGTERM -F "$nvmf_dir/nvmf_tgt.pid" || true
		# Poll for up to 20 * 0.5s = 10s for a graceful shutdown.
		for ((i = 0; i < 20; i++)); do
			if ! pkill --signal 0 -F "$nvmf_dir/nvmf_tgt.pid"; then
				break
			fi
			sleep 0.5
		done

		if pkill --signal 0 -F "$nvmf_dir/nvmf_tgt.pid"; then
			error "nvmf_tgt failed to shutdown"
		fi
	fi

	rm "$nvmf_dir/nvmf_tgt.pid"
	unset -v nvmf_dir rpc_nvmf
}
35
function migration_tc2_cleanup_vhost_config() {
	# Tear down everything created by migration_tc2_configure_vhost:
	# VMs, vhost SCSI controllers/bdevs, both vhost instances and the
	# nvmf target. Unsets the globals that configure set up.
	timing_enter migration_tc2_cleanup_vhost_config

	# Make sure nvmf_tgt is force-killed even if cleanup below errors out.
	trap 'migration_tc2_cleanup_nvmf_tgt SIGKILL; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT

	notice "Shutting down all VMs"
	vm_shutdown_all

	notice "Removing vhost devices & controllers via RPC ..."
	# Delete bdev first to remove all LUNs and SCSI targets
	$rpc_0 bdev_nvme_detach_controller Nvme0
	$rpc_0 vhost_delete_controller "$incoming_vm_ctrlr"

	# Use the current RPC name here too - 'delete_nvme_controller' is the
	# deprecated alias of bdev_nvme_detach_controller; keep both vhost
	# instances consistent.
	$rpc_1 bdev_nvme_detach_controller Nvme0
	$rpc_1 vhost_delete_controller "$target_vm_ctrlr"

	notice "killing vhost app"
	vhost_kill 0
	vhost_kill 1

	unset -v incoming_vm target_vm incoming_vm_ctrlr target_vm_ctrlr
	unset -v rpc_0 rpc_1

	trap 'error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
	migration_tc2_cleanup_nvmf_tgt

	timing_exit migration_tc2_cleanup_vhost_config
}
64
function migration_tc2_configure_vhost() {
	# Bring up the full TC2 topology: one nvmf_tgt exporting an NVMe bdev
	# over RDMA, two vhost instances each attaching to it and exposing a
	# vhost-scsi controller, and two VMs (incoming + migration target).
	timing_enter migration_tc2_configure_vhost

	# Those are global intentionally - they will be unset in cleanup handler
	nvmf_dir="$VHOST_DIR/nvmf_tgt"

	incoming_vm=1
	target_vm=2
	incoming_vm_ctrlr=naa.VhostScsi0.$incoming_vm
	target_vm_ctrlr=naa.VhostScsi0.$target_vm

	# RPC command prefixes. Deliberately left unquoted at call sites so the
	# embedded '-s <socket>' arguments word-split into separate argv words.
	rpc_nvmf="$rootdir/scripts/rpc.py -s $nvmf_dir/rpc.sock"
	rpc_0="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
	rpc_1="$rootdir/scripts/rpc.py -s $(get_vhost_dir 1)/rpc.sock"

	# Default cleanup/error handlers will not shutdown nvmf_tgt app so setup it
	# here to teardown in cleanup function
	trap 'migration_tc2_error_cleanup; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT

	# Run nvmf_tgt and two vhost instances:
	# nvmf_tgt uses core id 2 (-m 0x4)
	# First uses core id 0
	# Second uses core id 1
	# This force to use VM 1 and 2.
	timing_enter start_nvmf_tgt
	notice "Running nvmf_tgt..."
	mkdir -p "$nvmf_dir"
	rm -f "$nvmf_dir"/*
	"$SPDK_BIN_DIR/nvmf_tgt" -s 512 -m 0x4 -r "$nvmf_dir/rpc.sock" --wait-for-rpc &
	local nvmf_tgt_pid=$!
	echo $nvmf_tgt_pid > "$nvmf_dir/nvmf_tgt.pid"
	waitforlisten "$nvmf_tgt_pid" "$nvmf_dir/rpc.sock"
	$rpc_nvmf framework_start_init
	$rpc_nvmf nvmf_create_transport -t RDMA -u 8192
	"$rootdir/scripts/gen_nvme.sh" --json | $rpc_nvmf load_subsystem_config
	timing_exit start_nvmf_tgt

	vhost_run 0 "-m 0x1 -s 512 -u"
	vhost_run 1 "-m 0x2 -s 512 -u"

	local rdma_ip_list
	local nvmf_target_ip
	rdma_ip_list=$(get_available_rdma_ips)
	# Use the first available RDMA-capable IP as the nvmf listener address.
	nvmf_target_ip=$(echo "$rdma_ip_list" | head -n 1)

	if [[ -z "$nvmf_target_ip" ]]; then
		fail "no NIC for nvmf target"
	fi

	notice "Configuring nvmf_tgt, vhost devices & controllers via RPC ..."

	# Construct shared bdevs and controllers
	$rpc_nvmf nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
	$rpc_nvmf nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Nvme0n1
	$rpc_nvmf nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a "$nvmf_target_ip" -s 4420

	# Both vhost instances attach to the same shared subsystem, then expose
	# it through their own vhost-scsi controller (one per VM).
	$rpc_0 bdev_nvme_attach_controller -b Nvme0 -t rdma -f ipv4 -a "$nvmf_target_ip" -s 4420 -n "nqn.2016-06.io.spdk:cnode1"
	$rpc_0 vhost_create_scsi_controller "$incoming_vm_ctrlr"
	$rpc_0 vhost_scsi_controller_add_target "$incoming_vm_ctrlr" 0 Nvme0n1

	$rpc_1 bdev_nvme_attach_controller -b Nvme0 -t rdma -f ipv4 -a "$nvmf_target_ip" -s 4420 -n "nqn.2016-06.io.spdk:cnode1"
	$rpc_1 vhost_create_scsi_controller "$target_vm_ctrlr"
	$rpc_1 vhost_scsi_controller_add_target "$target_vm_ctrlr" 0 Nvme0n1

	notice "Setting up VMs"
	vm_setup --os="$os_image" --force=$incoming_vm --disk-type=spdk_vhost_scsi --disks=VhostScsi0 \
		--migrate-to=$target_vm --memory=1024 --vhost-name=0
	vm_setup --force=$target_vm --disk-type=spdk_vhost_scsi --disks=VhostScsi0 --incoming=$incoming_vm --memory=1024 \
		--vhost-name=1

	# Run everything
	vm_run $incoming_vm $target_vm

	# Wait only for incoming VM, as target is waiting for migration
	vm_wait_for_boot 300 $incoming_vm

	notice "Configuration done"

	timing_exit migration_tc2_configure_vhost
}
145
function migration_tc2_error_cleanup() {
	# Error-path teardown: disarm the traps first so that failures inside
	# the cleanup itself cannot re-enter this handler, enable tracing for
	# post-mortem, then kill all VMs and run the regular cleanup path.
	trap - SIGINT ERR EXIT
	set -x

	vm_kill_all
	migration_tc2_cleanup_vhost_config
	notice "Migration TC2 FAILED"
}
154
function migration_tc2() {
	# Use 2 VMs:
	# incoming VM - the one we want to migrate
	# target VM - the one which will accept migration
	local job_file="$testdir/migration-tc2.job"
	local log_file
	# Strip only the trailing extension with '%.*' (shortest match).
	# The previous '%%.*' removed everything from the FIRST dot in the whole
	# path, so a dot anywhere in $testdir corrupted the log-file name.
	log_file="/root/$(basename "${job_file%.*}").log"

	migration_tc2_configure_vhost

	# Run fio before migration
	notice "Starting FIO"
	vm_check_scsi_location $incoming_vm
	run_fio $fio_bin --job-file="$job_file" --no-wait-for-fio --local --vm="${incoming_vm}$(printf ':/dev/%s' $SCSI_DISK)"

	# Wait a while to let the FIO time to issue some IO
	sleep 5

	# Check if fio is still running before migration
	if ! is_fio_running $incoming_vm; then
		vm_exec $incoming_vm "cat $log_file"
		error "FIO is not running before migration: process crashed or finished too early"
	fi

	vm_migrate $incoming_vm
	sleep 3

	# Check if fio is still running after migration
	if ! is_fio_running $target_vm; then
		vm_exec $target_vm "cat $log_file"
		error "FIO is not running after migration: process crashed or finished too early"
	fi

	notice "Waiting for fio to finish"
	local timeout=40
	while is_fio_running $target_vm; do
		sleep 1
		echo -n "."
		# Give fio at most ~40s after migration before declaring a hang.
		if ((timeout-- == 0)); then
			error "timeout while waiting for FIO!"
		fi
	done

	notice "Fio result is:"
	vm_exec $target_vm "cat $log_file"

	migration_tc2_cleanup_vhost_config
	notice "Migration TC2 SUCCESS"
}