# Migration test case 3 (host 1 / local side): live-migrate a VM between
# two physical hosts using vhost-scsi on top of an NVMe-oF (RDMA) bdev.
source $rootdir/test/nvmf/common.sh
source $testdir/autotest.config

# VM numbering: VM 1 runs locally (migration source), VM 2 on the remote
# host (migration target). Controller names are derived from the VM number.
incoming_vm=1
target_vm=2
incoming_vm_ctrlr=naa.VhostScsi0.$incoming_vm
target_vm_ctrlr=naa.VhostScsi0.$target_vm
# Directories shared with the remote host (mounted there over sshfs).
share_dir=$TEST_DIR/share
spdk_repo_share_dir=$TEST_DIR/share_spdk
job_file=$testdir/migration-tc3.job

# All four addresses must come from the caller's environment: management
# (ssh) and RDMA-capable NIC IPs for both the target and the initiator.
if [ -z "$MGMT_TARGET_IP" ]; then
	error "No IP address of target is given"
fi

if [ -z "$MGMT_INITIATOR_IP" ]; then
	error "No IP address of initiator is given"
fi

if [ -z "$RDMA_TARGET_IP" ]; then
	error "No IP address of targets RDMA capable NIC is given"
fi

if [ -z "$RDMA_INITIATOR_IP" ]; then
	error "No IP address of initiators RDMA capable NIC is given"
fi
# Run a command on a remote host as root over ssh.
# Arguments: $1 - host/IP to connect to; remaining args - command to run.
# Globals:   SPDK_VHOST_SSH_KEY_FILE (read) - ssh identity file.
function ssh_remote()
{
	local host=$1
	shift
	# Invoke ssh directly instead of expanding an unquoted command string;
	# this keeps the key file path intact even if it contains spaces.
	ssh -i "$SPDK_VHOST_SSH_KEY_FILE" \
		-o UserKnownHostsFile=/dev/null \
		-o StrictHostKeyChecking=no \
		-o ControlMaster=auto \
		"root@$host" "$@"
}
38 | ||
# Block until the remote side drops a DONE marker file into $share_dir,
# then consume (remove) the marker. Aborts via error() after ~40 seconds.
# Globals: share_dir (read)
function wait_for_remote()
{
	local attempts_left=40
	# Silence xtrace while polling so the log isn't flooded with dots.
	set +x
	until [[ -f $share_dir/DONE ]]; do
		echo -n "."
		if (( attempts_left-- == 0 )); then
			error "timeout while waiting for FIO!"
		fi
		sleep 1
	done
	set -x
	rm -f $share_dir/DONE
}
53 | ||
# Verify that $RDMA_TARGET_IP is configured on a local NIC and that the
# NIC is RDMA-capable (i.e. backed by an infiniband device in sysfs).
# Globals: RDMA_TARGET_IP (read)
function check_rdma_connection()
{
	local dev=$(ip -4 -o addr show to $RDMA_TARGET_IP up | cut -d' ' -f2)
	[[ -n $dev ]] || error "There is no NIC with IP address $RDMA_TARGET_IP configured"

	# An RDMA-capable NIC shows up under some infiniband device's net dir.
	if ! ls /sys/class/infiniband/*/device/net/$dev &> /dev/null; then
		error "$dev with IP $RDMA_TARGET_IP is not a RDMA capable NIC"
	fi
}
66 | ||
# Stop the local nvmf_tgt process tracked by its pid file and remove the
# pid file. Optional $1 selects the signal to send (default: SIGTERM).
# Globals: nvmf_dir (read)
function host1_cleanup_nvmf()
{
	notice "Shutting down nvmf_tgt on local server"
	local sig=$1
	if [[ -n $sig ]]; then
		pkill --signal $sig -F $nvmf_dir/nvmf_tgt.pid
	else
		pkill -F $nvmf_dir/nvmf_tgt.pid
	fi
	rm -f $nvmf_dir/nvmf_tgt.pid
}
77 | ||
# Tear down the local (source) side of the migration: kill the incoming
# VM, remove the NVMe bdev and vhost-scsi controller, stop the vhost app,
# and finally shut down the local nvmf target.
# Globals: incoming_vm, incoming_vm_ctrlr, rpc_0 (read)
function host1_cleanup_vhost()
{
	# If cleanup itself fails, fall back to force-killing nvmf_tgt.
	trap 'host1_cleanup_nvmf SIGKILL; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
	notice "Shutting down VM $incoming_vm"
	vm_kill $incoming_vm

	notice "Removing bdev & controller from vhost on local server"
	$rpc_0 delete_nvme_controller Nvme0
	$rpc_0 remove_vhost_controller $incoming_vm_ctrlr

	notice "Shutting down vhost app"
	vhost_kill 0

	host1_cleanup_nvmf
}
93 | ||
# Start a local nvmf target and export the first local NVMe device over
# RDMA as subsystem nqn.2018-02.io.spdk:cnode1, listening on
# $RDMA_TARGET_IP:4420.
# Globals: nvmf_dir, rpc_nvmf, nvmf_tgt_pid (written - reused by cleanup);
#          TEST_DIR, rootdir, RDMA_TARGET_IP (read)
function host1_start_nvmf()
{
	# Intentionally global: host1_cleanup_nvmf reads these later.
	nvmf_dir="$TEST_DIR/nvmf_tgt"
	rpc_nvmf="$rootdir/scripts/rpc.py -s $nvmf_dir/nvmf_rpc.sock"

	notice "Starting nvmf_tgt instance on local server"
	mkdir -p $nvmf_dir
	rm -rf $nvmf_dir/*

	# On any failure from here on, force-kill the target before exiting.
	trap 'host1_cleanup_nvmf SIGKILL; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
	$rootdir/app/nvmf_tgt/nvmf_tgt -s 512 -m 0xF -r $nvmf_dir/nvmf_rpc.sock --wait-for-rpc &
	nvmf_tgt_pid=$!
	echo $nvmf_tgt_pid > $nvmf_dir/nvmf_tgt.pid
	waitforlisten "$nvmf_tgt_pid" "$nvmf_dir/nvmf_rpc.sock"
	$rpc_nvmf start_subsystem_init
	$rpc_nvmf nvmf_create_transport -t RDMA -u 8192
	# Attach local NVMe controllers as bdevs (produces Nvme0n1 used below).
	$rootdir/scripts/gen_nvme.sh --json | $rpc_nvmf load_subsystem_config

	$rpc_nvmf nvmf_subsystem_create nqn.2018-02.io.spdk:cnode1 -a -s SPDK01
	$rpc_nvmf nvmf_subsystem_add_ns nqn.2018-02.io.spdk:cnode1 Nvme0n1
	$rpc_nvmf nvmf_subsystem_add_listener nqn.2018-02.io.spdk:cnode1 -t rdma -a $RDMA_TARGET_IP -s 4420
}
116 | ||
# Start vhost instance 0 locally, attach the NVMe-oF bdev exported by
# host1_start_nvmf as a vhost-scsi LUN, and boot the incoming VM on it.
# Globals: rpc_0 (written - reused by host1_cleanup_vhost);
#          RDMA_TARGET_IP, incoming_vm, incoming_vm_ctrlr, target_vm,
#          share_dir, VM_BASE_DIR, rootdir (read)
function host1_start_vhost()
{
	rpc_0="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"

	notice "Starting vhost0 instance on local server"
	trap 'host1_cleanup_vhost; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
	vhost_run --vhost-num=0 --no-pci
	# Connect back to the local nvmf target over RDMA and expose the
	# resulting bdev through a vhost-scsi controller for the VM.
	$rpc_0 construct_nvme_bdev -b Nvme0 -t rdma -f ipv4 -a $RDMA_TARGET_IP -s 4420 -n "nqn.2018-02.io.spdk:cnode1"
	$rpc_0 construct_vhost_scsi_controller $incoming_vm_ctrlr
	$rpc_0 add_vhost_scsi_lun $incoming_vm_ctrlr 0 Nvme0n1

	vm_setup --os="$share_dir/migration.qcow2" --force=$incoming_vm --disk-type=spdk_vhost_scsi --disks=VhostScsi0 \
		--migrate-to=$target_vm --memory=512 --queue_num=1

	# TODO: Fix loop calculating cpu_num in common.sh
	# We need -smp 1 and -queue_num 1 for this test to work, and this loop
	# in some cases calculates wrong cpu_num.
	sed -i "s#smp 2#smp 1#g" $VM_BASE_DIR/$incoming_vm/run.sh
	vm_run $incoming_vm
	vm_wait_for_boot 300 $incoming_vm
}
138 | ||
# Remove the shared directories and images from both hosts. Errors are
# tolerated (set +e) because cleanup may run when mounts or files are
# already gone.
# Globals: MGMT_INITIATOR_IP, VM_BASE_DIR, share_dir, spdk_repo_share_dir (read)
function cleanup_share()
{
	set +e
	notice "Cleaning up share directory on remote and local server"
	ssh_remote $MGMT_INITIATOR_IP "umount $VM_BASE_DIR"
	# Fix: the two rm commands were previously joined without a separator
	# ("rm -f $share_dir/* rm -rf $spdk_repo_share_dir"), so the second rm
	# never ran and $spdk_repo_share_dir was never removed on the remote.
	ssh_remote $MGMT_INITIATOR_IP "umount $share_dir; rm -f $share_dir/*; rm -rf $spdk_repo_share_dir"
	rm -f $share_dir/migration.qcow2
	rm -f $share_dir/spdk.tar.gz
	set -e
}
149 | ||
# Stage everything the remote host needs in $share_dir: a copy of the VM
# image and a tarball of the SPDK sources (build artifacts and git
# metadata excluded).
# Globals: share_dir, VM_BASE_DIR, os_image, rootdir (read)
function host_1_create_share()
{
	notice "Creating share directory on local server to re-use on remote"
	mkdir -p $share_dir
	mkdir -p $VM_BASE_DIR # This dir would've been created later but we need it now
	rm -rf $share_dir/spdk.tar.gz $share_dir/spdk || true
	cp $os_image $share_dir/migration.qcow2
	# Fix: a space was missing between the first two --exclude options
	# ('--exclude="*.o"--exclude="*.d"'), which made tar treat them as the
	# single bogus pattern '*.o--exclude=*.d' and pack all .o/.d files.
	tar --exclude="*.o" --exclude="*.d" --exclude="*.git" -C $rootdir -zcf $share_dir/spdk.tar.gz .
}
159 | ||
# Prepare the remote (initiator) host: mount this host's VM and share
# directories there over sshfs, unpack the SPDK tarball produced by
# host_1_create_share, and build it remotely.
# Globals: MGMT_INITIATOR_IP, MGMT_TARGET_IP, SPDK_VHOST_SSH_KEY_FILE,
#          share_dir, spdk_repo_share_dir, VM_BASE_DIR (read)
function host_2_create_share()
{
	# Copy & compile the sources for later use on remote server.
	ssh_remote $MGMT_INITIATOR_IP "uname -a"
	ssh_remote $MGMT_INITIATOR_IP "mkdir -p $share_dir"
	ssh_remote $MGMT_INITIATOR_IP "mkdir -p $spdk_repo_share_dir"
	ssh_remote $MGMT_INITIATOR_IP "mkdir -p $VM_BASE_DIR"
	# Mount this host's VM dir and share dir on the remote side so both
	# hosts observe the same files (e.g. the DONE marker, VM images).
	ssh_remote $MGMT_INITIATOR_IP "sshfs -o\
 ssh_command=\"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ControlMaster=auto\
 -i $SPDK_VHOST_SSH_KEY_FILE\" root@$MGMT_TARGET_IP:$VM_BASE_DIR $VM_BASE_DIR"
	ssh_remote $MGMT_INITIATOR_IP "sshfs -o\
 ssh_command=\"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ControlMaster=auto\
 -i $SPDK_VHOST_SSH_KEY_FILE\" root@$MGMT_TARGET_IP:$share_dir $share_dir"
	ssh_remote $MGMT_INITIATOR_IP "mkdir -p $spdk_repo_share_dir/spdk"
	ssh_remote $MGMT_INITIATOR_IP "tar -zxf $share_dir/spdk.tar.gz -C $spdk_repo_share_dir/spdk --strip-components=1"
	ssh_remote $MGMT_INITIATOR_IP "cd $spdk_repo_share_dir/spdk; make clean; ./configure --with-rdma --enable-debug; make -j40"
}
177 | ||
# Launch the migration script on the remote host in test-case-3b mode
# (the receiving side), then block until it signals readiness via the
# shared DONE marker.
# Globals: MGMT_INITIATOR_IP, spdk_repo_share_dir, share_dir,
#          RDMA_TARGET_IP (read)
function host_2_start_vhost()
{
	ssh_remote $MGMT_INITIATOR_IP "nohup $spdk_repo_share_dir/spdk/test/vhost/migration/migration.sh\
 --test-cases=3b --os=$share_dir/migration.qcow2\
 --rdma-tgt-ip=$RDMA_TARGET_IP &>$share_dir/output.log &"
	notice "Waiting for remote to be done with vhost & VM setup..."
	wait_for_remote
}
186 | ||
# Create the shared directories on both hosts; on failure, roll the
# share back before propagating the error.
function setup_share()
{
	trap 'cleanup_share; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR EXIT
	host_1_create_share
	host_2_create_share
}
193 | ||
# Test case 3 driver: set up NVMe-oF + vhost on both hosts, start fio in
# the local VM, live-migrate the VM to the remote host while fio runs,
# have the remote host verify fio survived, then clean everything up.
function migration_tc3()
{
	check_rdma_connection
	setup_share
	host1_start_nvmf
	host1_start_vhost
	host_2_start_vhost

	# Do migration
	notice "Starting fio on local VM"
	vm_check_scsi_location $incoming_vm

	run_fio $fio_bin --job-file="$job_file" --local --vm="${incoming_vm}$(printf ':/dev/%s' $SCSI_DISK)"
	# Give fio a moment to spin up before checking that it is alive.
	sleep 5

	if ! is_fio_running $incoming_vm; then
		# NOTE(review): 'vh_ssh' looks like a typo of 'vm_ssh' — confirm
		# against the helper names defined in the sourced common.sh.
		vh_ssh $incoming_vm "cat /root/$(basename ${job_file}).out"
		error "Fio not running on local VM before starting migration!"
	fi

	vm_migrate $incoming_vm $RDMA_INITIATOR_IP
	sleep 1

	# Verify migration on remote host and clean up vhost
	# SIGCONT resumes the remote 3b script, which stopped itself after setup.
	ssh_remote $MGMT_INITIATOR_IP "pkill -CONT -F $TEST_DIR/tc3b.pid"
	notice "Waiting for remote to finish FIO on VM and clean up..."
	wait_for_remote

	# Clean up local stuff
	host1_cleanup_vhost
	cleanup_share
}
# Entry point: run test case 3 as soon as this script is executed.
migration_tc3