# ceph/src/spdk/test/vhost/migration/migration-tc2.sh

source $rootdir/test/nvmf/common.sh

function migration_tc2_cleanup_nvmf_tgt()
{
	local i

	if [[ ! -r "$nvmf_dir/nvmf_tgt.pid" ]]; then
		warning "Pid file '$nvmf_dir/nvmf_tgt.pid' does not exist."
		return
	fi

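	# 'pkill --signal 0 -F <pidfile>' sends no signal at all - its exit status
	# only says whether the process named in the pid file still exists, so it
	# serves as a liveness check below.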
	if [[ -n "$1" ]]; then
		trap 'error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
		pkill --signal $1 -F $nvmf_dir/nvmf_tgt.pid || true
		sleep 5
		if pkill --signal 0 -F $nvmf_dir/nvmf_tgt.pid; then
			fail "failed to kill nvmf_tgt app"
		fi
	else
		pkill --signal SIGTERM -F $nvmf_dir/nvmf_tgt.pid || true
		for (( i=0; i<20; i++ )); do
			if ! pkill --signal 0 -F $nvmf_dir/nvmf_tgt.pid; then
				break
			fi
			sleep 0.5
		done

		if pkill --signal 0 -F $nvmf_dir/nvmf_tgt.pid; then
			error "nvmf_tgt failed to shut down"
		fi
	fi

	rm $nvmf_dir/nvmf_tgt.pid
	unset -v nvmf_dir rpc_nvmf
}

function migration_tc2_cleanup_vhost_config()
{
	timing_enter migration_tc2_cleanup_vhost_config

	trap 'migration_tc2_cleanup_nvmf_tgt SIGKILL; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
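	# If anything fails during teardown, take nvmf_tgt down hard with SIGKILL
	# before handing control to the generic error handler.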

	notice "Shutting down all VMs"
	vm_shutdown_all

	notice "Removing vhost devices & controllers via RPC ..."
	# Delete bdev first to remove all LUNs and SCSI targets
	$rpc_0 delete_nvme_controller Nvme0
	$rpc_0 remove_vhost_controller $incoming_vm_ctrlr

	$rpc_1 delete_nvme_controller Nvme0
	$rpc_1 remove_vhost_controller $target_vm_ctrlr

	notice "killing vhost apps"
	vhost_kill 0
	vhost_kill 1

	unset -v incoming_vm target_vm incoming_vm_ctrlr target_vm_ctrlr
	unset -v rpc_0 rpc_1

	trap 'error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
	migration_tc2_cleanup_nvmf_tgt

	timing_exit migration_tc2_cleanup_vhost_config
}

function migration_tc2_configure_vhost()
{
	timing_enter migration_tc2_configure_vhost

	# These are intentionally global - they will be unset in the cleanup handler
	nvmf_dir="$TEST_DIR/nvmf_tgt"

	incoming_vm=1
	target_vm=2
	incoming_vm_ctrlr=naa.VhostScsi0.$incoming_vm
	target_vm_ctrlr=naa.VhostScsi0.$target_vm

	rpc_nvmf="$rootdir/scripts/rpc.py -s $nvmf_dir/rpc.sock"
	rpc_0="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
	rpc_1="$rootdir/scripts/rpc.py -s $(get_vhost_dir 1)/rpc.sock"

	# The default cleanup/error handlers do not shut down the nvmf_tgt app, so
	# install a handler here that tears it down via the cleanup function.
	trap 'migration_tc2_error_cleanup; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT

	# Run nvmf_tgt and two vhost instances:
	#   nvmf_tgt uses core 2 (-m 0x4)
	#   the first vhost instance uses core 0 (vhost_0_reactor_mask=0x1)
	#   the second vhost instance uses core 1 (vhost_1_reactor_mask=0x2)
	# This forces the use of VMs 1 and 2.
	timing_enter start_nvmf_tgt
	notice "Running nvmf_tgt..."
	mkdir -p $nvmf_dir
	rm -f $nvmf_dir/*
	$rootdir/app/nvmf_tgt/nvmf_tgt -s 512 -m 0x4 -r $nvmf_dir/rpc.sock --wait-for-rpc &
	local nvmf_tgt_pid=$!
	echo $nvmf_tgt_pid > $nvmf_dir/nvmf_tgt.pid
	waitforlisten "$nvmf_tgt_pid" "$nvmf_dir/rpc.sock"
	$rpc_nvmf start_subsystem_init
	$rpc_nvmf nvmf_create_transport -t RDMA -u 8192
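	# --wait-for-rpc keeps the target paused in its init phase until
	# start_subsystem_init arrives over the RPC socket; only then is the RDMA
	# transport created (-u sets the I/O unit size, here 8192 bytes).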
	$rootdir/scripts/gen_nvme.sh --json | $rpc_nvmf load_subsystem_config
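	# gen_nvme.sh generates a bdev config JSON for the locally attached NVMe
	# devices; loading it is what gives the target the Nvme0n1 bdev that is
	# exported via NVMe-oF below.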
	timing_exit start_nvmf_tgt

	vhost_run --memory=512 --vhost-num=0 --no-pci
	# These are intentionally global
	vhost_1_reactor_mask=0x2
	vhost_1_master_core=1
	vhost_run --memory=512 --vhost-num=1 --no-pci
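	# vhost_run is expected to pick up the per-instance reactor mask / master
	# core from the vhost_<num>_* globals set above (instance 0 uses the
	# defaults, 0x1 / core 0).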

	local rdma_ip_list=$(get_available_rdma_ips)
	local nvmf_target_ip=$(echo "$rdma_ip_list" | head -n 1)

	if [[ -z "$nvmf_target_ip" ]]; then
		fail "no NIC for nvmf target"
	fi

	notice "Configuring nvmf_tgt, vhost devices & controllers via RPC ..."

	# Construct shared bdevs and controllers
	$rpc_nvmf nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
	$rpc_nvmf nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Nvme0n1
	$rpc_nvmf nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t rdma -a $nvmf_target_ip -s 4420
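	# The subsystem (-a: allow any host, -s: serial number) now exposes
	# namespace Nvme0n1 over RDMA at $nvmf_target_ip, port 4420.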

	$rpc_0 construct_nvme_bdev -b Nvme0 -t rdma -f ipv4 -a $nvmf_target_ip -s 4420 -n "nqn.2016-06.io.spdk:cnode1"
	$rpc_0 construct_vhost_scsi_controller $incoming_vm_ctrlr
	$rpc_0 add_vhost_scsi_lun $incoming_vm_ctrlr 0 Nvme0n1

	$rpc_1 construct_nvme_bdev -b Nvme0 -t rdma -f ipv4 -a $nvmf_target_ip -s 4420 -n "nqn.2016-06.io.spdk:cnode1"
	$rpc_1 construct_vhost_scsi_controller $target_vm_ctrlr
	$rpc_1 add_vhost_scsi_lun $target_vm_ctrlr 0 Nvme0n1
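	# Both vhost instances attach to the same NVMe-oF namespace, so the
	# incoming and target VMs see identical backing storage - the shared-disk
	# setup that live migration requires.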

	notice "Setting up VMs"
	vm_setup --os="$os_image" --force=$incoming_vm --disk-type=spdk_vhost_scsi --disks=VhostScsi0 \
		--migrate-to=$target_vm --memory=1024 --vhost-num=0
	vm_setup --force=$target_vm --disk-type=spdk_vhost_scsi --disks=VhostScsi0 --incoming=$incoming_vm --memory=1024 \
		--vhost-num=1
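	# --migrate-to / --incoming pair the two QEMU instances: the target VM is
	# started in QEMU's incoming-migration mode, waiting for the incoming VM's
	# state instead of booting on its own.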

	# Run everything
	vm_run $incoming_vm $target_vm

	# Wait only for the incoming VM; the target VM is waiting for migration
	vm_wait_for_boot 300 $incoming_vm

	notice "Configuration done"

	timing_exit migration_tc2_configure_vhost
}

function migration_tc2_error_cleanup()
{
	trap - INT ERR EXIT
	set -x

	vm_kill_all
	migration_tc2_cleanup_vhost_config
	notice "Migration TC2 FAILED"
}

function migration_tc2()
{
	# Use 2 VMs:
	#   incoming VM - the one we want to migrate
	#   target VM - the one which will accept the migration
	local job_file="$testdir/migration-tc2.job"

	migration_tc2_configure_vhost

	# Run fio before migration
	notice "Starting FIO"
	vm_check_scsi_location $incoming_vm
	run_fio $fio_bin --job-file="$job_file" --local --vm="${incoming_vm}$(printf ':/dev/%s' $SCSI_DISK)"
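	# vm_check_scsi_location fills $SCSI_DISK with the guest's SCSI disk
	# name(s), so the --vm argument expands to "<vm num>:/dev/<disk>" (one
	# ":/dev/<disk>" per discovered disk).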

	# Wait a while to give fio time to issue some I/O
	sleep 5

	# Check if fio is still running before migration
	if ! is_fio_running $incoming_vm; then
		vm_ssh $incoming_vm "cat /root/$(basename ${job_file}).out"
		error "FIO is not running before migration: process crashed or finished too early"
	fi
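	# is_fio_running comes from the shared migration helpers. A minimal sketch
	# of the idea, assuming vm_ssh works as used above (hypothetical, not the
	# actual helper):
	#   is_fio_running() { vm_ssh $1 'pgrep -x fio' > /dev/null; }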

	vm_migrate $incoming_vm
	sleep 3

	# Check if fio is still running after migration
	if ! is_fio_running $target_vm; then
		vm_ssh $target_vm "cat /root/$(basename ${job_file}).out"
		error "FIO is not running after migration: process crashed or finished too early"
	fi

	notice "Waiting for fio to finish"
	local timeout=40
	while is_fio_running $target_vm; do
		sleep 1
		echo -n "."
		if (( timeout-- == 0 )); then
			error "timeout while waiting for FIO!"
		fi
	done

	notice "Fio result is:"
	vm_ssh $target_vm "cat /root/$(basename ${job_file}).out"

	migration_tc2_cleanup_vhost_config
	notice "Migration TC2 SUCCESS"
}

migration_tc2