# ceph/src/spdk/test/vhost/common/common.sh
set -e

: ${SPDK_VHOST_VERBOSE=false}
: ${QEMU_PREFIX="/usr/local/qemu/spdk-2.12"}

BASE_DIR=$(readlink -f $(dirname ${BASH_SOURCE[0]}))

# Default running dir -> spdk/..
[[ -z "$TEST_DIR" ]] && TEST_DIR=$BASE_DIR/../../../../

TEST_DIR="$(mkdir -p $TEST_DIR && cd $TEST_DIR && echo $PWD)"
SPDK_BUILD_DIR=$BASE_DIR/../../../

SPDK_VHOST_SCSI_TEST_DIR=$TEST_DIR/vhost

function message()
{
    if ! $SPDK_VHOST_VERBOSE; then
        local verbose_out=""
    elif [[ ${FUNCNAME[2]} == "source" ]]; then
        local verbose_out=" (file $(basename ${BASH_SOURCE[1]}):${BASH_LINENO[1]})"
    else
        local verbose_out=" (function ${FUNCNAME[2]}:${BASH_LINENO[1]})"
    fi

    local msg_type="$1"
    shift
    echo -e "${msg_type}${verbose_out}: $@"
}

function fail()
{
    echo "===========" >&2
    message "FAIL" "$@" >&2
    echo "===========" >&2
    exit 1
}

function error()
{
    echo "===========" >&2
    message "ERROR" "$@" >&2
    echo "===========" >&2
    # Don't 'return 1' here - that would leave the stack trace incomplete,
    # missing the topmost command. 'false' fails instead, so the ERR trap
    # fires with a full trace.
    false
}

function warning()
{
    message "WARN" "$@" >&2
}

function notice()
{
    message "INFO" "$@"
}
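
# Example (hypothetical, not part of the test flow): with SPDK_VHOST_VERBOSE=true
# the helpers above also print the calling site, e.g.
#
#   notice "vhost started"    # -> INFO (function spdk_vhost_run:NNN): vhost started
#   warning "no pid file"     # -> WARN ... (printed to stderr)
#   error "bad parameter"     # -> ERROR ... and fails via 'false' to trip the ERR trap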

# SSH key file
: ${SPDK_VHOST_SSH_KEY_FILE="$(readlink -e $HOME/.ssh/spdk_vhost_id_rsa)"}
if [[ ! -r "$SPDK_VHOST_SSH_KEY_FILE" ]]; then
    error "Could not find SSH key file $SPDK_VHOST_SSH_KEY_FILE"
    exit 1
fi
echo "Using SSH key file $SPDK_VHOST_SSH_KEY_FILE"

VM_BASE_DIR="$TEST_DIR/vms"


mkdir -p $TEST_DIR

#
# Source config describing QEMU and VHOST cores and NUMA
#
source $(readlink -f $(dirname ${BASH_SOURCE[0]}))/autotest.config

# The trace flag (set -x) is optional. If it wasn't enabled before, disable
# it again after sourcing autotest_common.sh (which turns it on).
if [[ $- =~ x ]]; then
    source $SPDK_BUILD_DIR/test/common/autotest_common.sh
else
    source $SPDK_BUILD_DIR/test/common/autotest_common.sh
    set +x
fi

function get_vhost_dir()
{
    if [[ ! -z "$1" ]]; then
        assert_number "$1"
        local vhost_num=$1
    else
        local vhost_num=0
    fi

    echo "$SPDK_VHOST_SCSI_TEST_DIR${vhost_num}"
}
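
# Example (hypothetical TEST_DIR value):
#   get_vhost_dir      -> $TEST_DIR/vhost0
#   get_vhost_dir 1    -> $TEST_DIR/vhost1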

function spdk_vhost_list_all()
{
    shopt -s nullglob
    local vhost_list="$(echo $SPDK_VHOST_SCSI_TEST_DIR[0-9]*)"
    shopt -u nullglob

    if [[ ! -z "$vhost_list" ]]; then
        vhost_list="$(basename --multiple $vhost_list)"
        echo "${vhost_list//vhost/}"
    fi
}

function spdk_vhost_run()
{
    local param
    local vhost_num=0
    local vhost_conf_path=""
    local memory=1024

    for param in "$@"; do
        case $param in
            --vhost-num=*)
                vhost_num="${param#*=}"
                assert_number "$vhost_num"
                ;;
            --conf-path=*) local vhost_conf_path="${param#*=}" ;;
            --json-path=*) local vhost_json_path="${param#*=}" ;;
            --memory=*) local memory=${param#*=} ;;
            --no-pci*) local no_pci="-u" ;;
            *)
                error "Invalid parameter '$param'"
                return 1
                ;;
        esac
    done

    local vhost_dir="$(get_vhost_dir $vhost_num)"
    local vhost_app="$SPDK_BUILD_DIR/app/vhost/vhost"
    local vhost_log_file="$vhost_dir/vhost.log"
    local vhost_pid_file="$vhost_dir/vhost.pid"
    local vhost_socket="$vhost_dir/usvhost"
    local vhost_conf_template="$vhost_conf_path/vhost.conf.in"
    local vhost_conf_file="$vhost_conf_path/vhost.conf"
    notice "starting vhost app in background"
    [[ -r "$vhost_pid_file" ]] && spdk_vhost_kill $vhost_num
    [[ -d $vhost_dir ]] && rm -f $vhost_dir/*
    mkdir -p $vhost_dir

    if [[ ! -x $vhost_app ]]; then
        error "application not found: $vhost_app"
        return 1
    fi

    local reactor_mask="vhost_${vhost_num}_reactor_mask"
    reactor_mask="${!reactor_mask}"

    local master_core="vhost_${vhost_num}_master_core"
    master_core="${!master_core}"

    if [[ -z "$reactor_mask" ]] || [[ -z "$master_core" ]]; then
        error "Parameters vhost_${vhost_num}_reactor_mask or vhost_${vhost_num}_master_core not found in autotest.config file"
        return 1
    fi

    local cmd="$vhost_app -m $reactor_mask -p $master_core -s $memory -r $vhost_dir/rpc.sock $no_pci"
    if [[ -n "$vhost_conf_path" ]]; then
        cp $vhost_conf_template $vhost_conf_file
        $SPDK_BUILD_DIR/scripts/gen_nvme.sh >> $vhost_conf_file
        cmd="$vhost_app -m $reactor_mask -p $master_core -c $vhost_conf_file -s $memory -r $vhost_dir/rpc.sock $no_pci"
    fi

    notice "Logging to: $vhost_log_file"
    notice "Socket: $vhost_socket"
    notice "Command: $cmd"

    timing_enter vhost_start
    cd $vhost_dir; $cmd &
    vhost_pid=$!
    echo $vhost_pid > $vhost_pid_file

    notice "waiting for app to run..."
    waitforlisten "$vhost_pid" "$vhost_dir/rpc.sock"
    # Do not generate NVMe config if PCI access is disabled
    if [[ -z "$vhost_conf_path" ]] && [[ -z "$no_pci" ]]; then
        $SPDK_BUILD_DIR/scripts/gen_nvme.sh "--json" | $SPDK_BUILD_DIR/scripts/rpc.py \
            -s $vhost_dir/rpc.sock load_subsystem_config
    fi

    if [[ -n "$vhost_json_path" ]]; then
        $SPDK_BUILD_DIR/scripts/rpc.py -s $vhost_dir/rpc.sock load_config < "$vhost_json_path/conf.json"
    fi

    notice "vhost started - pid=$vhost_pid"
    timing_exit vhost_start

    # Only remove the generated config if one was actually created
    if [[ -n "$vhost_conf_path" ]]; then
        rm -f $vhost_conf_file
    fi
}
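
# Example (hypothetical usage sketch - paths depend on the test environment):
#
#   spdk_vhost_run --vhost-num=0 --memory=2048          # RPC-configured instance
#   spdk_vhost_run --conf-path=$BASE_DIR --vhost-num=1  # expects $BASE_DIR/vhost.conf.in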

function spdk_vhost_kill()
{
    local rc=0
    local vhost_num=0
    if [[ ! -z "$1" ]]; then
        vhost_num=$1
        assert_number "$vhost_num"
    fi

    local vhost_pid_file="$(get_vhost_dir $vhost_num)/vhost.pid"

    if [[ ! -r $vhost_pid_file ]]; then
        warning "no vhost pid file found"
        return 0
    fi

    timing_enter vhost_kill
    local vhost_pid="$(cat $vhost_pid_file)"
    notice "killing vhost (PID $vhost_pid) app"

    if /bin/kill -INT $vhost_pid >/dev/null; then
        notice "sent SIGINT to vhost app - waiting 60 seconds to exit"
        for ((i=0; i<60; i++)); do
            if /bin/kill -0 $vhost_pid; then
                echo "."
                sleep 1
            else
                break
            fi
        done
        if /bin/kill -0 $vhost_pid; then
            error "vhost was NOT killed - sending SIGABRT"
            /bin/kill -ABRT $vhost_pid
            rm $vhost_pid_file
            rc=1
        else
            while kill -0 $vhost_pid; do
                echo "."
                sleep 1
            done
        fi
    elif /bin/kill -0 $vhost_pid; then
        error "vhost NOT killed - you need to kill it manually"
        rc=1
    else
        notice "vhost was not running"
    fi

    timing_exit vhost_kill
    if [[ $rc == 0 ]]; then
        rm $vhost_pid_file
    fi

    return $rc
}
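
# Example (hypothetical): stop the default instance, or a numbered one:
#
#   spdk_vhost_kill      # instance 0
#   spdk_vhost_kill 1    # instance started with --vhost-num=1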

###
# Mgmt functions
###

function assert_number()
{
    [[ "$1" =~ ^[0-9]+$ ]] && return 0

    error "Invalid or missing parameter: need number but got '$1'"
    return 1
}

# Helper to validate VM number
# param $1 VM number
#
function vm_num_is_valid()
{
    [[ "$1" =~ ^[0-9]+$ ]] && return 0

    error "Invalid or missing parameter: vm number '$1'"
    return 1
}


# Print network socket for given VM number
# param $1 virtual machine number
#
function vm_ssh_socket()
{
    vm_num_is_valid $1 || return 1
    local vm_dir="$VM_BASE_DIR/$1"

    cat $vm_dir/ssh_socket
}

function vm_fio_socket()
{
    vm_num_is_valid $1 || return 1
    local vm_dir="$VM_BASE_DIR/$1"

    cat $vm_dir/fio_socket
}

function vm_create_ssh_config()
{
    local ssh_config="$VM_BASE_DIR/ssh_config"
    if [[ ! -f $ssh_config ]]; then
        (
            echo "Host *"
            echo " ControlPersist=10m"
            echo " ConnectTimeout=1"
            echo " Compression=no"
            echo " ControlMaster=auto"
            echo " UserKnownHostsFile=/dev/null"
            echo " StrictHostKeyChecking=no"
            echo " User root"
            echo " ControlPath=/tmp/%r@%h:%p.ssh"
            echo ""
        ) > $ssh_config
        # Control path created at /tmp because of live migration test case 3.
        # In case of using an sshfs share for the test - the control path
        # cannot be on the share because the remote server would fail on
        # ssh commands.
    fi
}

# Execute ssh command on given VM
# param $1 virtual machine number
#
function vm_ssh()
{
    vm_num_is_valid $1 || return 1
    vm_create_ssh_config
    local ssh_config="$VM_BASE_DIR/ssh_config"

    local ssh_cmd="ssh -i $SPDK_VHOST_SSH_KEY_FILE -F $ssh_config \
        -p $(vm_ssh_socket $1) $VM_SSH_OPTIONS 127.0.0.1"

    shift
    $ssh_cmd "$@"
}

# Execute scp command on given VM
# param $1 virtual machine number
#
function vm_scp()
{
    vm_num_is_valid $1 || return 1
    vm_create_ssh_config
    local ssh_config="$VM_BASE_DIR/ssh_config"

    local scp_cmd="scp -i $SPDK_VHOST_SSH_KEY_FILE -F $ssh_config \
        -P $(vm_ssh_socket $1) "

    shift
    $scp_cmd "$@"
}
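
# Example (hypothetical): run a command on VM 0 and copy a file into it.
# Each VM's sshd is reached through its forwarded host port, so the target
# host is always 127.0.0.1:
#
#   vm_ssh 0 'uname -r'
#   vm_scp 0 /tmp/fio.job 127.0.0.1:/root/fio.job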


# check if specified VM is running
# param $1 VM num
function vm_is_running()
{
    vm_num_is_valid $1 || return 1
    local vm_dir="$VM_BASE_DIR/$1"

    if [[ ! -r $vm_dir/qemu.pid ]]; then
        return 1
    fi

    local vm_pid="$(cat $vm_dir/qemu.pid)"

    if /bin/kill -0 $vm_pid; then
        return 0
    else
        if [[ $EUID -ne 0 ]]; then
            warning "not root - assuming VM running since it can't be checked"
            return 0
        fi

        # not running - remove pid file
        rm $vm_dir/qemu.pid
        return 1
    fi
}

# check if specified VM has finished booting
# param $1 VM num
function vm_os_booted()
{
    vm_num_is_valid $1 || return 1
    local vm_dir="$VM_BASE_DIR/$1"

    if [[ ! -r $vm_dir/qemu.pid ]]; then
        error "VM $1 is not running"
        return 1
    fi

    if ! VM_SSH_OPTIONS="-o ControlMaster=no" vm_ssh $1 "true" 2>/dev/null; then
        # Shutdown existing master. Ignore errors as it might not exist.
        VM_SSH_OPTIONS="-O exit" vm_ssh $1 "true" 2>/dev/null
        return 1
    fi

    return 0
}


# Shutdown given VM
# param $1 virtual machine number
# return non-zero in case of error.
function vm_shutdown()
{
    vm_num_is_valid $1 || return 1
    local vm_dir="$VM_BASE_DIR/$1"
    if [[ ! -d "$vm_dir" ]]; then
        error "VM$1 ($vm_dir) does not exist - set it up first"
        return 1
    fi

    if ! vm_is_running $1; then
        notice "VM$1 ($vm_dir) is not running"
        return 0
    fi

    # Temporarily disabling exit flag for next ssh command, since it will
    # "fail" due to shutdown
    notice "Shutting down virtual machine $vm_dir"
    set +e
    vm_ssh $1 "nohup sh -c 'shutdown -h -P now'" || true
    notice "VM$1 is shutting down - wait a while to complete"
    set -e
}

# Kill given VM
# param $1 virtual machine number
#
function vm_kill()
{
    vm_num_is_valid $1 || return 1
    local vm_dir="$VM_BASE_DIR/$1"

    if [[ ! -r $vm_dir/qemu.pid ]]; then
        return 0
    fi

    local vm_pid="$(cat $vm_dir/qemu.pid)"

    notice "Killing virtual machine $vm_dir (pid=$vm_pid)"
    # The first kill should succeed; report an error only if the process
    # survives it
    if /bin/kill $vm_pid; then
        notice "process $vm_pid killed"
        rm $vm_dir/qemu.pid
    elif vm_is_running $1; then
        error "Process $vm_pid NOT killed"
        return 1
    fi
}

# List all VM numbers in VM_BASE_DIR
#
function vm_list_all()
{
    local vms="$(shopt -s nullglob; echo $VM_BASE_DIR/[0-9]*)"
    if [[ ! -z "$vms" ]]; then
        basename --multiple $vms
    fi
}

# Kills all VM in $VM_BASE_DIR
#
function vm_kill_all()
{
    local vm
    for vm in $(vm_list_all); do
        vm_kill $vm
    done
}

# Shutdown all VM in $VM_BASE_DIR
#
function vm_shutdown_all()
{
    local shell_restore_x="$( [[ "$-" =~ x ]] && echo 'set -x' )"
    # XXX: temporarily disabled to debug a shutdown issue
    # set +x

    local vms=$(vm_list_all)
    local vm

    for vm in $vms; do
        vm_shutdown $vm
    done

    notice "Waiting for VMs to shutdown..."
    local timeo=30
    while [[ $timeo -gt 0 ]]; do
        local all_vms_down=1
        for vm in $vms; do
            if vm_is_running $vm; then
                all_vms_down=0
                break
            fi
        done

        if [[ $all_vms_down == 1 ]]; then
            notice "All VMs successfully shut down"
            $shell_restore_x
            return 0
        fi

        ((timeo-=1))
        sleep 1
    done

    $shell_restore_x
    error "Timeout waiting for some VMs to shutdown"
    return 1
}

function vm_setup()
{
    local shell_restore_x="$( [[ "$-" =~ x ]] && echo 'set -x' )"
    local OPTIND optchar vm_num

    local os=""
    local os_mode=""
    local qemu_args=""
    local disk_type_g=NOT_DEFINED
    local read_only="false"
    local disks=""
    local raw_cache=""
    local vm_incoming=""
    local vm_migrate_to=""
    local force_vm=""
    local guest_memory=1024
    local queue_number=""
    local vhost_dir="$(get_vhost_dir)"
    while getopts ':-:' optchar; do
        case "$optchar" in
            -)
                case "$OPTARG" in
                    os=*) local os="${OPTARG#*=}" ;;
                    os-mode=*) local os_mode="${OPTARG#*=}" ;;
                    qemu-args=*) local qemu_args="${qemu_args} ${OPTARG#*=}" ;;
                    disk-type=*) local disk_type_g="${OPTARG#*=}" ;;
                    read-only=*) local read_only="${OPTARG#*=}" ;;
                    disks=*) local disks="${OPTARG#*=}" ;;
                    raw-cache=*) local raw_cache=",cache${OPTARG#*=}" ;;
                    force=*) local force_vm=${OPTARG#*=} ;;
                    memory=*) local guest_memory=${OPTARG#*=} ;;
                    queue_num=*) local queue_number=${OPTARG#*=} ;;
                    incoming=*) local vm_incoming="${OPTARG#*=}" ;;
                    migrate-to=*) local vm_migrate_to="${OPTARG#*=}" ;;
                    vhost-num=*) local vhost_dir="$(get_vhost_dir ${OPTARG#*=})" ;;
                    spdk-boot=*) local boot_from="${OPTARG#*=}" ;;
                    *)
                        error "unknown argument $OPTARG"
                        return 1
                esac
                ;;
            *)
                error "vm_create Unknown param $OPTARG"
                return 1
                ;;
        esac
    done

    # Find next directory we can use
    if [[ ! -z $force_vm ]]; then
        vm_num=$force_vm

        vm_num_is_valid $vm_num || return 1
        local vm_dir="$VM_BASE_DIR/$vm_num"
        [[ -d $vm_dir ]] && warning "removing existing VM in '$vm_dir'"
    else
        local vm_dir=""

        set +x
        for (( i=0; i<256; i++)); do
            local vm_dir="$VM_BASE_DIR/$i"
            [[ ! -d $vm_dir ]] && break
        done
        $shell_restore_x

        vm_num=$i
    fi

    if [[ $i -eq 256 ]]; then
        error "no free VM found. do some cleanup (256 VMs created, are you insane?)"
        return 1
    fi

    if [[ ! -z "$vm_migrate_to" && ! -z "$vm_incoming" ]]; then
        error "'--incoming' and '--migrate-to' cannot be used together"
        return 1
    elif [[ ! -z "$vm_incoming" ]]; then
        if [[ ! -z "$os_mode" || ! -z "$os" ]]; then
            error "'--incoming' can't be used together with '--os' nor '--os-mode'"
            return 1
        fi

        os_mode="original"
        os="$VM_BASE_DIR/$vm_incoming/os.qcow2"
    elif [[ ! -z "$vm_migrate_to" ]]; then
        [[ "$os_mode" != "backing" ]] && warning "Using 'backing' mode for OS since '--migrate-to' is used"
        os_mode=backing
    fi

    notice "Creating new VM in $vm_dir"
    mkdir -p $vm_dir

    if [[ "$os_mode" == "backing" ]]; then
        notice "Creating backing file for OS image file: $os"
        if ! $QEMU_PREFIX/bin/qemu-img create -f qcow2 -b $os $vm_dir/os.qcow2; then
            error "Failed to create OS backing file in '$vm_dir/os.qcow2' using '$os'"
            return 1
        fi

        local os=$vm_dir/os.qcow2
    elif [[ "$os_mode" == "original" ]]; then
        warning "Using original OS image file: $os"
    elif [[ "$os_mode" != "snapshot" ]]; then
        if [[ -z "$os_mode" ]]; then
            notice "No '--os-mode' parameter provided - using 'snapshot'"
            os_mode="snapshot"
        else
            error "Invalid '--os-mode=$os_mode'"
            return 1
        fi
    fi

    # WARNING:
    # each cmd+= must contain ' ${eol}' at the end
    #
    local eol="\\\\\n "
    local qemu_mask_param="VM_${vm_num}_qemu_mask"
    local qemu_numa_node_param="VM_${vm_num}_qemu_numa_node"

    if [[ -z "${!qemu_mask_param}" ]] || [[ -z "${!qemu_numa_node_param}" ]]; then
        error "Parameters ${qemu_mask_param} or ${qemu_numa_node_param} not found in autotest.config file"
        return 1
    fi

    local task_mask=${!qemu_mask_param}

    notice "TASK MASK: $task_mask"
    local cmd="taskset -a -c $task_mask $QEMU_PREFIX/bin/qemu-system-x86_64 ${eol}"
    local vm_socket_offset=$(( 10000 + 100 * vm_num ))

    local ssh_socket=$(( vm_socket_offset + 0 ))
    local fio_socket=$(( vm_socket_offset + 1 ))
    local monitor_port=$(( vm_socket_offset + 2 ))
    local migration_port=$(( vm_socket_offset + 3 ))
    local gdbserver_socket=$(( vm_socket_offset + 4 ))
    local vnc_socket=$(( 100 + vm_num ))
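    # Worked example of the port scheme above (assumption: vm_num=1):
    # vm_socket_offset=10100 -> ssh=10100, fio=10101, monitor=10102,
    # migration=10103, gdbserver=10104, and VNC display :101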
    local qemu_pid_file="$vm_dir/qemu.pid"
    local cpu_num=0

    set +x
    # cpu list for taskset can be comma separated or a range,
    # or both at the same time, so first split on commas
    cpu_list=$(echo $task_mask | tr "," "\n")
    for c in $cpu_list; do
        # if a range is detected - count how many cpus it spans;
        # for a range "a-b" the expression a-b-1 yields -(b-a+1),
        # and stripping the sign gives the cpu count
        if [[ $c =~ [0-9]+-[0-9]+ ]]; then
            val=$(($c-1))
            val=${val#-}
        else
            val=1
        fi
        cpu_num=$((cpu_num+val))
    done
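
    # Worked example (assumption: task_mask="2,4-7"):
    # "2" contributes 1 cpu, "4-7" contributes |4-7-1| = 4 cpus -> cpu_num=5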

    # default the queue count to the cpu count unless --queue_num was given
    if [[ -z $queue_number ]]; then
        queue_number=$cpu_num
    fi

    $shell_restore_x

    local node_num=${!qemu_numa_node_param}
    local boot_disk_present=false
    notice "NUMA NODE: $node_num"
    cmd+="-m $guest_memory --enable-kvm -cpu host -smp $cpu_num -vga std -vnc :$vnc_socket -daemonize ${eol}"
    cmd+="-object memory-backend-file,id=mem,size=${guest_memory}M,mem-path=/dev/hugepages,share=on,prealloc=yes,host-nodes=$node_num,policy=bind ${eol}"
    [[ $os_mode == snapshot ]] && cmd+="-snapshot ${eol}"
    [[ ! -z "$vm_incoming" ]] && cmd+=" -incoming tcp:0:$migration_port ${eol}"
    cmd+="-monitor telnet:127.0.0.1:$monitor_port,server,nowait ${eol}"
    cmd+="-numa node,memdev=mem ${eol}"
    cmd+="-pidfile $qemu_pid_file ${eol}"
    cmd+="-serial file:$vm_dir/serial.log ${eol}"
    cmd+="-D $vm_dir/qemu.log ${eol}"
    cmd+="-net user,hostfwd=tcp::$ssh_socket-:22,hostfwd=tcp::$fio_socket-:8765 ${eol}"
    cmd+="-net nic ${eol}"
    if [[ -z "$boot_from" ]]; then
        cmd+="-drive file=$os,if=none,id=os_disk ${eol}"
        cmd+="-device ide-hd,drive=os_disk,bootindex=0 ${eol}"
    fi

    if [[ -z $disks ]] && [[ $disk_type_g == virtio* ]]; then
        disks=1
    fi

    for disk in ${disks//:/ }; do
        if [[ $disk = *","* ]]; then
            disk_type=${disk#*,}
            disk=${disk%,*}
        else
            disk_type=$disk_type_g
        fi

        case $disk_type in
            virtio)
                local raw_name="RAWSCSI"
                local raw_disk=$vm_dir/test.img

                if [[ ! -z $disk ]]; then
                    [[ ! -b $disk ]] && touch $disk
                    local raw_disk=$(readlink -f $disk)
                fi

                # Create disk file if it does not exist or is smaller than 1G
                if ( [[ -f $raw_disk ]] && [[ $(stat --printf="%s" $raw_disk) -lt $((1024 * 1024 * 1024)) ]] ) || \
                    [[ ! -e $raw_disk ]]; then
                    if [[ $raw_disk =~ /dev/.* ]]; then
                        error \
                            "ERROR: Virtio disk points to missing device ($raw_disk) -\n" \
                            " this is probably not what you want."
                        return 1
                    fi

                    notice "Creating Virtio disk $raw_disk"
                    dd if=/dev/zero of=$raw_disk bs=1024k count=1024
                else
                    notice "Using existing image $raw_disk"
                fi

                cmd+="-device virtio-scsi-pci,num_queues=$queue_number ${eol}"
                cmd+="-device scsi-hd,drive=hd$i,vendor=$raw_name ${eol}"
                cmd+="-drive if=none,id=hd$i,file=$raw_disk,format=raw$raw_cache ${eol}"
                ;;
            spdk_vhost_scsi)
                notice "using socket $vhost_dir/naa.$disk.$vm_num"
                cmd+="-chardev socket,id=char_$disk,path=$vhost_dir/naa.$disk.$vm_num ${eol}"
                cmd+="-device vhost-user-scsi-pci,id=scsi_$disk,num_queues=$queue_number,chardev=char_$disk"
                if [[ "$disk" == "$boot_from" ]]; then
                    cmd+=",bootindex=0"
                    boot_disk_present=true
                fi
                cmd+=" ${eol}"
                ;;
            spdk_vhost_blk)
                notice "using socket $vhost_dir/naa.$disk.$vm_num"
                cmd+="-chardev socket,id=char_$disk,path=$vhost_dir/naa.$disk.$vm_num ${eol}"
                cmd+="-device vhost-user-blk-pci,num-queues=$queue_number,chardev=char_$disk"
                if [[ "$disk" == "$boot_from" ]]; then
                    cmd+=",bootindex=0"
                    boot_disk_present=true
                fi
                cmd+=" ${eol}"
                ;;
            kernel_vhost)
                if [[ -z $disk ]]; then
                    error "need WWN for $disk_type"
                    return 1
                elif [[ ! $disk =~ ^[[:alpha:]]{3}[.][[:xdigit:]]+$ ]]; then
                    error "$disk_type - disk(wwn)=$disk does not look like a WWN"
                    return 1
                fi
                notice "Using kernel vhost disk wwn=$disk"
                cmd+=" -device vhost-scsi-pci,wwpn=$disk,num_queues=$queue_number ${eol}"
                ;;
            *)
                error "unknown mode '$disk_type', use: virtio, spdk_vhost_scsi, spdk_vhost_blk or kernel_vhost"
                return 1
        esac
    done

    if [[ -n $boot_from ]] && [[ $boot_disk_present == false ]]; then
        error "Boot from $boot_from is selected but device is not present"
        return 1
    fi

    [[ ! -z $qemu_args ]] && cmd+=" $qemu_args ${eol}"
    # remove last $eol
    cmd="${cmd%\\\\\\n }"

    notice "Saving to $vm_dir/run.sh"
    (
        echo '#!/bin/bash'
        echo 'if [[ $EUID -ne 0 ]]; then '
        echo ' echo "Go away user come back as root"'
        echo ' exit 1'
        echo 'fi'
        echo
        echo -e "qemu_cmd=\"$cmd\""
        echo
        echo "echo 'Running VM in $vm_dir'"
        echo "rm -f $qemu_pid_file"
        echo '$qemu_cmd'
        echo "echo 'Waiting for QEMU pid file'"
        echo "sleep 1"
        echo "[[ ! -f $qemu_pid_file ]] && sleep 1"
        echo "[[ ! -f $qemu_pid_file ]] && echo 'ERROR: no qemu pid file found' && exit 1"
        echo
        echo "chmod +r $vm_dir/*"
        echo
        echo "echo '=== qemu.log ==='"
        echo "cat $vm_dir/qemu.log"
        echo "echo '=== qemu.log ==='"
        echo '# EOF'
    ) > $vm_dir/run.sh
    chmod +x $vm_dir/run.sh

    # Save generated sockets redirection
    echo $ssh_socket > $vm_dir/ssh_socket
    echo $fio_socket > $vm_dir/fio_socket
    echo $monitor_port > $vm_dir/monitor_port

    rm -f $vm_dir/migration_port
    [[ -z $vm_incoming ]] || echo $migration_port > $vm_dir/migration_port

    echo $gdbserver_socket > $vm_dir/gdbserver_socket
    echo $vnc_socket > $vm_dir/vnc_socket

    [[ -z $vm_incoming ]] || ln -fs $VM_BASE_DIR/$vm_incoming $vm_dir/vm_incoming
    [[ -z $vm_migrate_to ]] || ln -fs $VM_BASE_DIR/$vm_migrate_to $vm_dir/vm_migrate_to
}
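
# Example (hypothetical usage sketch - the image path and disk name are
# placeholders for whatever the calling test provides):
#
#   vm_setup --os=/path/to/guest_image.qcow2 \
#            --disk-type=spdk_vhost_scsi --disks=Nvme0n1p0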

function vm_run()
{
    local OPTIND optchar vm
    local run_all=false
    local vms_to_run=""

    while getopts 'a-:' optchar; do
        case "$optchar" in
            a) run_all=true ;;
            *)
                error "Unknown param $OPTARG"
                return 1
                ;;
        esac
    done

    if $run_all; then
        vms_to_run="$(vm_list_all)"
    else
        shift $((OPTIND-1))
        for vm in $@; do
            vm_num_is_valid $vm || return 1
            if [[ ! -x $VM_BASE_DIR/$vm/run.sh ]]; then
                error "VM$vm not defined - set it up first"
                return 1
            fi
            vms_to_run+=" $vm"
        done
    fi

    for vm in $vms_to_run; do
        if vm_is_running $vm; then
            warning "VM$vm ($VM_BASE_DIR/$vm) already running"
            continue
        fi

        notice "running $VM_BASE_DIR/$vm/run.sh"
        if ! $VM_BASE_DIR/$vm/run.sh; then
            error "FAILED to run vm $vm"
            return 1
        fi
    done
}
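
# Example (hypothetical): boot specific VMs, or everything defined so far:
#
#   vm_run 0 1    # run VMs 0 and 1
#   vm_run -a     # run all VMs found in $VM_BASE_DIR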

# Wait for all created VMs to boot.
# param $1 max wait time
function vm_wait_for_boot()
{
    assert_number $1

    local shell_restore_x="$( [[ "$-" =~ x ]] && echo 'set -x' )"
    set +x

    local all_booted=false
    local timeout_time=$1
    [[ $timeout_time -lt 10 ]] && timeout_time=10
    local timeout_time=$(date -d "+$timeout_time seconds" +%s)

    notice "Waiting for VMs to boot"
    shift
    if [[ $# -eq 0 ]]; then
        local vms_to_check="$VM_BASE_DIR/[0-9]*"
    else
        local vms_to_check=""
        for vm in $@; do
            vms_to_check+=" $VM_BASE_DIR/$vm"
        done
    fi

    for vm in $vms_to_check; do
        local vm_num=$(basename $vm)
        local i=0
        notice "waiting for VM$vm_num ($vm)"
        while ! vm_os_booted $vm_num; do
            if ! vm_is_running $vm_num; then
                warning "VM $vm_num is not running"
                warning "================"
                warning "QEMU LOG:"
                if [[ -r $vm/qemu.log ]]; then
                    cat $vm/qemu.log
                else
                    warning "LOG not found"
                fi

                warning "VM LOG:"
                if [[ -r $vm/serial.log ]]; then
                    cat $vm/serial.log
                else
                    warning "LOG not found"
                fi
                warning "================"
                $shell_restore_x
                return 1
            fi

            if [[ $(date +%s) -gt $timeout_time ]]; then
                warning "timeout waiting for machines to boot"
                $shell_restore_x
                return 1
            fi
            # wrap the progress dots every 30 seconds
            if (( i > 30 )); then
                local i=0
                echo
            fi
            echo -n "."
            sleep 1
            i=$((i+1))
        done
        echo ""
        notice "VM$vm_num ready"
        # Change timeout for stopping services to prevent lengthy powerdowns
        vm_ssh $vm_num "echo 'DefaultTimeoutStopSec=10' >> /etc/systemd/system.conf; systemctl daemon-reexec"
    done

    notice "all VMs ready"
    $shell_restore_x
    return 0
}
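
# Example (hypothetical): after starting VMs, block until sshd is reachable,
# giving up after 10 minutes:
#
#   vm_wait_for_boot 600        # wait for every VM in $VM_BASE_DIR
#   vm_wait_for_boot 600 0 1    # wait only for VMs 0 and 1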

function vm_start_fio_server()
{
    local OPTIND optchar
    local readonly=''
    while getopts ':-:' optchar; do
        case "$optchar" in
            -)
                case "$OPTARG" in
                    fio-bin=*) local fio_bin="${OPTARG#*=}" ;;
                    readonly) local readonly="--readonly" ;;
                    *) error "Invalid argument '$OPTARG'"; return 1 ;;
                esac
                ;;
            *) error "Invalid argument '$OPTARG'"; return 1 ;;
        esac
    done

    shift $(( OPTIND - 1 ))
    for vm_num in $@; do
        notice "Starting fio server on VM$vm_num"
        if [[ $fio_bin != "" ]]; then
            cat $fio_bin | vm_ssh $vm_num 'cat > /root/fio; chmod +x /root/fio'
            vm_ssh $vm_num /root/fio $readonly --eta=never --server --daemonize=/root/fio.pid
        else
            vm_ssh $vm_num fio $readonly --eta=never --server --daemonize=/root/fio.pid
        fi
    done
}
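
# Example (hypothetical - the binary path is a placeholder): start fio in
# server mode on VM 0, pushing a locally built fio binary into the guest:
#
#   vm_start_fio_server --fio-bin=/usr/src/fio/fio 0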

function vm_check_scsi_location()
{
    # Script to find wanted disk
    local script='shopt -s nullglob; \
    for entry in /sys/block/sd*; do \
        disk_type="$(cat $entry/device/vendor)"; \
        if [[ $disk_type == INTEL* ]] || [[ $disk_type == RAWSCSI* ]] || [[ $disk_type == LIO-ORG* ]]; then \
            fname=$(basename $entry); \
            echo -n " $fname"; \
        fi; \
    done'

    SCSI_DISK="$(echo "$script" | vm_ssh $1 bash -s)"

    if [[ -z "$SCSI_DISK" ]]; then
        error "no test disk found!"
        return 1
    fi
}

# Script to perform scsi device reset on all disks in VM
# param $1 VM num
# param $2..$n Disks to perform reset on
function vm_reset_scsi_devices()
{
    for disk in "${@:2}"; do
        notice "VM$1 Performing device reset on disk $disk"
        vm_ssh $1 sg_reset /dev/$disk -vNd
    done
}

function vm_check_blk_location()
{
    local script='shopt -s nullglob; cd /sys/block; echo vd*'
    SCSI_DISK="$(echo "$script" | vm_ssh $1 bash -s)"

    if [[ -z "$SCSI_DISK" ]]; then
        error "no blk test disk found!"
        return 1
    fi
}

function run_fio()
{
    local arg
    local job_file=""
    local fio_bin=""
    local vms=()
    local out=""
    local json=""
    local fio_disks=""
    local vm
    local run_server_mode=true

    for arg in "$@"; do
        case "$arg" in
            --job-file=*) local job_file="${arg#*=}" ;;
            --fio-bin=*) local fio_bin="${arg#*=}" ;;
            --vm=*) vms+=( "${arg#*=}" ) ;;
            --out=*)
                local out="${arg#*=}"
                mkdir -p $out
                ;;
            --local) run_server_mode=false ;;
            --json) json="--json" ;;
            *)
                error "Invalid argument '$arg'"
                return 1
                ;;
        esac
    done

    if [[ ! -z "$fio_bin" && ! -r "$fio_bin" ]]; then
        error "FIO binary '$fio_bin' does not exist"
        return 1
    fi

    if [[ ! -r "$job_file" ]]; then
        error "Fio job '$job_file' does not exist"
        return 1
    fi

    local job_fname=$(basename "$job_file")
    # prepare job file for each VM
    for vm in ${vms[@]}; do
        local vm_num=${vm%%:*}
        local vmdisks=${vm#*:}

        sed "s@filename=@filename=$vmdisks@" $job_file | vm_ssh $vm_num "cat > /root/$job_fname"
        fio_disks+="127.0.0.1:$(vm_fio_socket $vm_num):$vmdisks,"

        vm_ssh $vm_num cat /root/$job_fname
        if ! $run_server_mode; then
            if [[ ! -z "$fio_bin" ]]; then
                cat $fio_bin | vm_ssh $vm_num 'cat > /root/fio; chmod +x /root/fio'
            fi

            notice "Running local fio on VM $vm_num"
            vm_ssh $vm_num "nohup /root/fio /root/$job_fname 1>/root/$job_fname.out 2>/root/$job_fname.out </dev/null & echo \$! > /root/fio.pid"
        fi
    done

    if ! $run_server_mode; then
        # Give FIO time to run
        sleep 0.5
        return 0
    fi

    $SPDK_BUILD_DIR/test/vhost/common/run_fio.py --job-file=/root/$job_fname \
        $([[ ! -z "$fio_bin" ]] && echo "--fio-bin=$fio_bin") \
        --out=$out $json ${fio_disks%,}
}
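
# Example (hypothetical - the job file and disk names are placeholders):
# run one fio job across two VMs; each --vm entry is "<vm_num>:<disks>":
#
#   run_fio --job-file=$BASE_DIR/fio_jobs/integrity.job \
#           --out=$TEST_DIR/fio_results --vm=0:/dev/sdb --vm=1:/dev/vda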

# Shutdown or kill any running VM and SPDK APP.
#
function at_app_exit()
{
    local vhost_num

    notice "APP EXITING"
    notice "killing all VMs"
    vm_kill_all
    # Kill vhost application
    notice "killing vhost app"

    for vhost_num in $(spdk_vhost_list_all); do
        spdk_vhost_kill $vhost_num
    done

    notice "EXIT DONE"
}

function error_exit()
{
    trap - ERR
    print_backtrace
    set +e
    error "Error on $1 $2"

    at_app_exit
    exit 1
}
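
# Example: test scripts sourcing this file typically install the handler so
# any failed command cleans up VMs and the vhost app before exiting:
#
#   trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR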