# Defaults that callers may pre-set in the environment before sourcing:
# verbosity of message() output and the QEMU installation prefix.
: ${SPDK_VHOST_VERBOSE=false}
: ${QEMU_PREFIX="/usr/local/qemu/spdk-2.12"}
# Directory layout used by the whole vhost test framework.
BASE_DIR=$(readlink -f $(dirname ${BASH_SOURCE[0]}))

# Default running dir -> spdk/..
[[ -z "$TEST_DIR" ]] && TEST_DIR=$BASE_DIR/../../../../

# Canonicalize TEST_DIR (create it if needed, then resolve to absolute path).
TEST_DIR="$(mkdir -p $TEST_DIR && cd $TEST_DIR && echo $PWD)"
SPDK_BUILD_DIR=$BASE_DIR/../../../

# Base path for per-instance vhost run dirs (instance number is appended).
SPDK_VHOST_SCSI_TEST_DIR=$TEST_DIR/vhost
# Print a typed message, optionally suffixed with the caller's location.
# $1 - message type (INFO/WARN/ERROR/FAIL), $2.. - message text.
# NOTE(review): function header and two branches reconstructed from a
# garbled source - verify against upstream SPDK test/vhost/common/common.sh.
function message()
{
	if ! $SPDK_VHOST_VERBOSE; then
		local verbose_out=""
	elif [[ ${FUNCNAME[2]} == "source" ]]; then
		local verbose_out=" (file $(basename ${BASH_SOURCE[1]}):${BASH_LINENO[1]})"
	else
		local verbose_out=" (function ${FUNCNAME[2]}:${BASH_LINENO[1]})"
	fi

	local msg_type="$1"
	shift
	echo -e "${msg_type}${verbose_out}: $@"
}
# Print a framed FAIL message and terminate the whole script.
# NOTE(review): name and 'exit 1' reconstructed from a garbled source.
function fail()
{
	echo "===========" >&2
	message "FAIL" "$@" >&2
	echo "===========" >&2
	exit 1
}
# Print a framed ERROR message; returns non-zero so callers can chain
# 'error ... && return 1' or rely on an ERR trap.
function error()
{
	echo "===========" >&2
	message "ERROR" "$@" >&2
	echo "===========" >&2
	# Don't 'return 1' since the stack trace will be incomplete (why?) missing upper command.
	false
}
# Print a WARN-level message to stderr.
function warning()
{
	message "WARN" "$@" >&2
}
# Default SSH key used to log in to test VMs; abort early when it is
# missing since every vm_ssh/vm_scp call depends on it.
# NOTE(review): 'exit 1'/'fi' restored from a garbled source.
: ${SPDK_VHOST_SSH_KEY_FILE="$(readlink -e $HOME/.ssh/spdk_vhost_id_rsa)"}
if [[ ! -r "$SPDK_VHOST_SSH_KEY_FILE" ]]; then
	error "Could not find SSH key file $SPDK_VHOST_SSH_KEY_FILE"
	exit 1
fi
echo "Using SSH key file $SPDK_VHOST_SSH_KEY_FILE"
# Root directory holding one numbered subdirectory per test VM.
VM_BASE_DIR="$TEST_DIR/vms"
# Source config describing QEMU and VHOST cores and NUMA
source $(readlink -f $(dirname ${BASH_SOURCE[0]}))/autotest.config

# Trace flag is optional, if it wasn't set earlier - disable it after sourcing
# NOTE(review): else/'set +x'/fi restored from a garbled source.
if [[ $- =~ x ]]; then
	source $SPDK_BUILD_DIR/test/common/autotest_common.sh
else
	source $SPDK_BUILD_DIR/test/common/autotest_common.sh
	set +x
fi
# Print the run directory for vhost instance $1 (default instance 0).
function get_vhost_dir()
{
	if [[ ! -z "$1" ]]; then
		assert_number "$1" || return 1
		local vhost_num=$1
	else
		local vhost_num=0
	fi

	echo "$SPDK_VHOST_SCSI_TEST_DIR${vhost_num}"
}
# List numbers of all existing vhost instance run directories.
# NOTE(review): nullglob toggling restored from a garbled source - without
# it an unmatched glob would echo the literal pattern and break the check.
function spdk_vhost_list_all()
{
	shopt -s nullglob
	local vhost_list="$(echo $SPDK_VHOST_SCSI_TEST_DIR[0-9]*)"
	shopt -u nullglob

	if [[ ! -z "$vhost_list" ]]; then
		vhost_list="$(basename --multiple $vhost_list)"
		echo "${vhost_list//vhost/}"
	fi
}
# Start a vhost app instance in the background.
# Optional params:
#   --vhost-num=N    instance number (default 0)
#   --conf-path=DIR  use DIR/vhost.conf.in as config template
#   --json-path=DIR  load DIR/conf.json via RPC after startup
#   --memory=MB      hugepage memory for the app (-s)
#   --no-pci         disable PCI access (-u)
# NOTE(review): case wrapper, returns and pid capture reconstructed from a
# garbled source - verify against upstream SPDK test/vhost/common/common.sh.
function spdk_vhost_run()
{
	local param
	local vhost_num=0
	local vhost_conf_path=""
	local memory=1024

	for param in "$@"; do
		case $param in
			--vhost-num=*)
				vhost_num="${param#*=}"
				assert_number "$vhost_num" || return 1
				;;
			--conf-path=*) local vhost_conf_path="${param#*=}" ;;
			--json-path=*) local vhost_json_path="${param#*=}" ;;
			--memory=*) local memory=${param#*=} ;;
			--no-pci*) local no_pci="-u" ;;
			*)
				error "Invalid parameter '$param'"
				return 1
				;;
		esac
	done

	local vhost_dir="$(get_vhost_dir $vhost_num)"
	local vhost_app="$SPDK_BUILD_DIR/app/vhost/vhost"
	local vhost_log_file="$vhost_dir/vhost.log"
	local vhost_pid_file="$vhost_dir/vhost.pid"
	local vhost_socket="$vhost_dir/usvhost"
	local vhost_conf_template="$vhost_conf_path/vhost.conf.in"
	local vhost_conf_file="$vhost_conf_path/vhost.conf"
	notice "starting vhost app in background"
	# Kill a stale instance and start from a clean run directory.
	[[ -r "$vhost_pid_file" ]] && spdk_vhost_kill $vhost_num
	[[ -d $vhost_dir ]] && rm -f $vhost_dir/*
	mkdir -p $vhost_dir

	if [[ ! -x $vhost_app ]]; then
		error "application not found: $vhost_app"
		return 1
	fi

	# Core mask and master core come from autotest.config via indirection.
	local reactor_mask="vhost_${vhost_num}_reactor_mask"
	reactor_mask="${!reactor_mask}"

	local master_core="vhost_${vhost_num}_master_core"
	master_core="${!master_core}"

	if [[ -z "$reactor_mask" ]] || [[ -z "$master_core" ]]; then
		error "Parameters vhost_${vhost_num}_reactor_mask or vhost_${vhost_num}_master_core not found in autotest.config file"
		return 1
	fi

	local cmd="$vhost_app -m $reactor_mask -p $master_core -s $memory -r $vhost_dir/rpc.sock $no_pci"
	if [[ -n "$vhost_conf_path" ]]; then
		# Legacy config-file mode: template + generated NVMe section.
		cp $vhost_conf_template $vhost_conf_file
		$SPDK_BUILD_DIR/scripts/gen_nvme.sh >> $vhost_conf_file
		cmd="$vhost_app -m $reactor_mask -p $master_core -c $vhost_conf_file -s $memory -r $vhost_dir/rpc.sock $no_pci"
	fi

	notice "Logging to:   $vhost_log_file"
	notice "Socket:      $vhost_socket"
	notice "Command:     $cmd"

	timing_enter vhost_start
	cd $vhost_dir; $cmd &
	vhost_pid=$!
	echo $vhost_pid > $vhost_pid_file

	notice "waiting for app to run..."
	waitforlisten "$vhost_pid" "$vhost_dir/rpc.sock"
	#do not generate nvmes if pci access is disabled
	if [[ -z "$vhost_conf_path" ]] && [[ -z "$no_pci" ]]; then
		$SPDK_BUILD_DIR/scripts/gen_nvme.sh "--json" | $SPDK_BUILD_DIR/scripts/rpc.py \
			-s $vhost_dir/rpc.sock load_subsystem_config
	fi

	if [[ -n "$vhost_json_path" ]]; then
		$SPDK_BUILD_DIR/scripts/rpc.py -s $vhost_dir/rpc.sock load_config < "$vhost_json_path/conf.json"
	fi

	notice "vhost started - pid=$vhost_pid"
	timing_exit vhost_start

	rm -f $vhost_conf_file
}
# Stop vhost instance $1 (default 0): SIGINT first, wait up to 60s,
# escalate to SIGABRT if it refuses to die. Returns non-zero on failure.
# NOTE(review): loop/branch closers reconstructed from a garbled source.
function spdk_vhost_kill()
{
	local rc=0
	local vhost_num=0
	if [[ ! -z "$1" ]]; then
		vhost_num=$1
		assert_number "$vhost_num" || return 1
	fi

	local vhost_pid_file="$(get_vhost_dir $vhost_num)/vhost.pid"

	if [[ ! -r $vhost_pid_file ]]; then
		warning "no vhost pid file found"
		return 0
	fi

	timing_enter vhost_kill
	local vhost_pid="$(cat $vhost_pid_file)"
	notice "killing vhost (PID $vhost_pid) app"

	if /bin/kill -INT $vhost_pid >/dev/null; then
		notice "sent SIGINT to vhost app - waiting 60 seconds to exit"
		for ((i=0; i<60; i++)); do
			if /bin/kill -0 $vhost_pid; then
				echo "."
				sleep 1
			else
				break
			fi
		done
		if /bin/kill -0 $vhost_pid; then
			error "ERROR: vhost was NOT killed - sending SIGABRT"
			/bin/kill -ABRT $vhost_pid
			rc=1
		else
			while kill -0 $vhost_pid; do
				echo "."
			done
		fi
	elif /bin/kill -0 $vhost_pid; then
		error "vhost NOT killed - you need to kill it manually"
		rc=1
	else
		notice "vhost was not running"
	fi

	timing_exit vhost_kill
	if [[ $rc == 0 ]]; then
		rm $vhost_pid_file
	fi

	return $rc
}
# Validate that $1 is a plain non-negative integer; print an error and
# return 1 otherwise. (Regex anchored - the garbled original used an
# unanchored '[0-9]+' which would accept strings like '1a'.)
function assert_number()
{
	[[ "$1" =~ ^[0-9]+$ ]] && return 0

	error "Invalid or missing parameter: need number but got '$1'"
	return 1
}
# Helper to validate VM number
# Returns 0 when $1 is a plain non-negative integer, 1 (with error) otherwise.
function vm_num_is_valid()
{
	[[ "$1" =~ ^[0-9]+$ ]] && return 0

	error "Invalid or missing parameter: vm number '$1'"
	return 1
}
# Print network socket for given VM number
# param $1 virtual machine number
function vm_ssh_socket()
{
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_BASE_DIR/$1"

	cat $vm_dir/ssh_socket
}
# Print the fio server port saved for VM number $1.
function vm_fio_socket()
{
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_BASE_DIR/$1"

	cat $vm_dir/fio_socket
}
# Create a shared ssh_config for all test VMs (idempotent).
# NOTE(review): 'Host *' line and the grouping redirect reconstructed from
# a garbled source - verify against upstream.
function vm_create_ssh_config()
{
	local ssh_config="$VM_BASE_DIR/ssh_config"
	if [[ ! -f $ssh_config ]]; then
		(
		echo "Host *"
		echo "  ControlPersist=10m"
		echo "  ConnectTimeout=1"
		echo "  Compression=no"
		echo "  ControlMaster=auto"
		echo "  UserKnownHostsFile=/dev/null"
		echo "  StrictHostKeyChecking=no"
		echo "  User root"
		echo "  ControlPath=/tmp/%r@%h:%p.ssh"
		echo ""
		) > $ssh_config
		# Control path created at /tmp because of live migration test case 3.
		# In case of using sshfs share for the test - control path cannot be
		# on share because remote server will fail on ssh commands.
	fi
}
# Execute ssh command on given VM
# param $1 virtual machine number
# param $2.. command to execute on the VM
# NOTE(review): shift/exec tail reconstructed from a garbled source.
function vm_ssh()
{
	vm_num_is_valid $1 || return 1
	vm_create_ssh_config
	local ssh_config="$VM_BASE_DIR/ssh_config"

	local ssh_cmd="ssh -i $SPDK_VHOST_SSH_KEY_FILE -F $ssh_config \
		-p $(vm_ssh_socket $1) $VM_SSH_OPTIONS 127.0.0.1"

	shift
	$ssh_cmd "$@"
}
# Execute scp command on given VM
# param $1 virtual machine number
# param $2.. scp arguments (sources/destination)
# NOTE(review): shift/exec tail reconstructed from a garbled source.
function vm_scp()
{
	vm_num_is_valid $1 || return 1
	vm_create_ssh_config
	local ssh_config="$VM_BASE_DIR/ssh_config"

	local scp_cmd="scp -i $SPDK_VHOST_SSH_KEY_FILE -F $ssh_config \
		-P $(vm_ssh_socket $1) "

	shift
	$scp_cmd "$@"
}
# check if specified VM is running
# param $1 VM num
# NOTE(review): return statements and else branch reconstructed from a
# garbled source.
function vm_is_running()
{
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_BASE_DIR/$1"

	if [[ ! -r $vm_dir/qemu.pid ]]; then
		return 1
	fi

	local vm_pid="$(cat $vm_dir/qemu.pid)"

	if /bin/kill -0 $vm_pid; then
		return 0
	else
		# kill -0 fails with EPERM for non-root on other users' processes,
		# so treat that case as "probably still running".
		if [[ $EUID -ne 0 ]]; then
			warning "not root - assuming VM running since can't be checked"
			return 0
		fi

		# not running - remove pid file
		rm $vm_dir/qemu.pid
		return 1
	fi
}
# check if specified VM is running
# param $1 VM num
# Succeeds only when the guest OS answers an ssh probe.
# NOTE(review): returns reconstructed from a garbled source.
function vm_os_booted()
{
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_BASE_DIR/$1"

	if [[ ! -r $vm_dir/qemu.pid ]]; then
		error "VM $1 is not running"
		return 1
	fi

	if ! VM_SSH_OPTIONS="-o ControlMaster=no" vm_ssh $1 "true" 2>/dev/null; then
		# Shutdown existing master. Ignore errors as it might not exist.
		VM_SSH_OPTIONS="-O exit" vm_ssh $1 "true" 2>/dev/null
		return 1
	fi

	return 0
}
# Gracefully shut down a VM from inside the guest.
# param $1 virtual machine number
# return non-zero in case of error.
# NOTE(review): returns and set +e/-e guards reconstructed from a garbled
# source.
function vm_shutdown()
{
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_BASE_DIR/$1"
	if [[ ! -d "$vm_dir" ]]; then
		error "VM$1 ($vm_dir) not exist - setup it first"
		return 1
	fi

	if ! vm_is_running $1; then
		notice "VM$1 ($vm_dir) is not running"
		return 0
	fi

	# Temporarily disabling exit flag for next ssh command, since it will
	# "fail" due to shutdown
	notice "Shutting down virtual machine $vm_dir"
	set +e
	vm_ssh $1 "nohup sh -c 'shutdown -h -P now'" || true
	notice "VM$1 is shutting down - wait a while to complete"
	set -e
}
# Forcibly kill the QEMU process of given VM.
# param $1 virtual machine number
# NOTE(review): function name and returns reconstructed from a garbled
# source (name implied by vm_kill_all usage).
function vm_kill()
{
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_BASE_DIR/$1"

	if [[ ! -r $vm_dir/qemu.pid ]]; then
		return 0
	fi

	local vm_pid="$(cat $vm_dir/qemu.pid)"

	notice "Killing virtual machine $vm_dir (pid=$vm_pid)"
	# First kill should fail, second one must fail
	if /bin/kill $vm_pid; then
		notice "process $vm_pid killed"
		rm $vm_dir/qemu.pid
	elif vm_is_running $1; then
		error "Process $vm_pid NOT killed"
		return 1
	fi
}
# List all VM numbers in VM_BASE_DIR
function vm_list_all()
{
	# nullglob inside the subshell so no matches yield an empty string.
	local vms="$(shopt -s nullglob; echo $VM_BASE_DIR/[0-9]*)"
	if [[ ! -z "$vms" ]]; then
		basename --multiple $vms
	fi
}
# Kills all VM in $VM_BASE_DIR
function vm_kill_all()
{
	local vm
	for vm in $(vm_list_all); do
		vm_kill $vm
	done
}
# Shutdown all VM in $VM_BASE_DIR
# Waits (bounded) for every VM to stop; returns non-zero on timeout.
# NOTE(review): wait-loop internals reconstructed from a garbled source.
function vm_shutdown_all()
{
	local shell_restore_x="$( [[ "$-" =~ x ]] && echo 'set -x' )"
	# XXX: temporally disable to debug shutdown issue
	# set +x

	local vms=$(vm_list_all)
	local vm

	for vm in $vms; do
		vm_shutdown $vm
	done

	notice "Waiting for VMs to shutdown..."
	local timeo=10
	while [[ $timeo -gt 0 ]]; do
		local all_vms_down=1
		for vm in $vms; do
			if vm_is_running $vm; then
				all_vms_down=0
				break
			fi
		done

		if [[ $all_vms_down == 1 ]]; then
			notice "All VMs successfully shut down"
			$shell_restore_x
			return 0
		fi

		((timeo-=1))
		sleep 1
	done

	error "Timeout waiting for some VMs to shutdown"
	$shell_restore_x
	return 1
}
512 local shell_restore_x
="$( [[ "$
-" =~ x ]] && echo 'set -x' )"
513 local OPTIND optchar vm_num
518 local disk_type_g
=NOT_DEFINED
519 local read_only
="false"
523 local vm_migrate_to
=""
525 local guest_memory
=1024
526 local queue_number
=""
527 local vhost_dir
="$(get_vhost_dir)"
528 while getopts ':-:' optchar
; do
532 os
=*) local os
="${OPTARG#*=}" ;;
533 os-mode
=*) local os_mode
="${OPTARG#*=}" ;;
534 qemu-args
=*) local qemu_args
="${qemu_args} ${OPTARG#*=}" ;;
535 disk-type
=*) local disk_type_g
="${OPTARG#*=}" ;;
536 read-only
=*) local read_only
="${OPTARG#*=}" ;;
537 disks
=*) local disks
="${OPTARG#*=}" ;;
538 raw-cache
=*) local raw_cache
=",cache${OPTARG#*=}" ;;
539 force
=*) local force_vm
=${OPTARG#*=} ;;
540 memory
=*) local guest_memory
=${OPTARG#*=} ;;
541 queue_num
=*) local queue_number
=${OPTARG#*=} ;;
542 incoming
=*) local vm_incoming
="${OPTARG#*=}" ;;
543 migrate-to
=*) local vm_migrate_to
="${OPTARG#*=}" ;;
544 vhost-num
=*) local vhost_dir
="$(get_vhost_dir ${OPTARG#*=})" ;;
545 spdk-boot
=*) local boot_from
="${OPTARG#*=}" ;;
547 error
"unknown argument $OPTARG"
552 error
"vm_create Unknown param $OPTARG"
558 # Find next directory we can use
559 if [[ ! -z $force_vm ]]; then
562 vm_num_is_valid
$vm_num ||
return 1
563 local vm_dir
="$VM_BASE_DIR/$vm_num"
564 [[ -d $vm_dir ]] && warning
"removing existing VM in '$vm_dir'"
569 for (( i
=0; i
<=256; i
++)); do
570 local vm_dir
="$VM_BASE_DIR/$i"
571 [[ ! -d $vm_dir ]] && break
578 if [[ $i -eq 256 ]]; then
579 error
"no free VM found. do some cleanup (256 VMs created, are you insane?)"
583 if [[ ! -z "$vm_migrate_to" && ! -z "$vm_incoming" ]]; then
584 error
"'--incoming' and '--migrate-to' cannot be used together"
586 elif [[ ! -z "$vm_incoming" ]]; then
587 if [[ ! -z "$os_mode" ||
! -z "$os_img" ]]; then
588 error
"'--incoming' can't be used together with '--os' nor '--os-mode'"
593 os
="$VM_BASE_DIR/$vm_incoming/os.qcow2"
594 elif [[ ! -z "$vm_migrate_to" ]]; then
595 [[ "$os_mode" != "backing" ]] && warning
"Using 'backing' mode for OS since '--migrate-to' is used"
599 notice
"Creating new VM in $vm_dir"
602 if [[ "$os_mode" == "backing" ]]; then
603 notice
"Creating backing file for OS image file: $os"
604 if ! $QEMU_PREFIX/bin
/qemu-img create
-f qcow2
-b $os $vm_dir/os.qcow2
; then
605 error
"Failed to create OS backing file in '$vm_dir/os.qcow2' using '$os'"
609 local os
=$vm_dir/os.qcow2
610 elif [[ "$os_mode" == "original" ]]; then
611 warning
"Using original OS image file: $os"
612 elif [[ "$os_mode" != "snapshot" ]]; then
613 if [[ -z "$os_mode" ]]; then
614 notice
"No '--os-mode' parameter provided - using 'snapshot'"
617 error
"Invalid '--os-mode=$os_mode'"
623 # each cmd+= must contain ' ${eol}' at the end
626 local qemu_mask_param
="VM_${vm_num}_qemu_mask"
627 local qemu_numa_node_param
="VM_${vm_num}_qemu_numa_node"
629 if [[ -z "${!qemu_mask_param}" ]] ||
[[ -z "${!qemu_numa_node_param}" ]]; then
630 error
"Parameters ${qemu_mask_param} or ${qemu_numa_node_param} not found in autotest.config file"
634 local task_mask
=${!qemu_mask_param}
636 notice
"TASK MASK: $task_mask"
637 local cmd
="taskset -a -c $task_mask $QEMU_PREFIX/bin/qemu-system-x86_64 ${eol}"
638 local vm_socket_offset
=$
(( 10000 + 100 * vm_num
))
640 local ssh_socket
=$
(( vm_socket_offset
+ 0 ))
641 local fio_socket
=$
(( vm_socket_offset
+ 1 ))
642 local monitor_port
=$
(( vm_socket_offset
+ 2 ))
643 local migration_port
=$
(( vm_socket_offset
+ 3 ))
644 local gdbserver_socket
=$
(( vm_socket_offset
+ 4 ))
645 local vnc_socket
=$
(( 100 + vm_num
))
646 local qemu_pid_file
="$vm_dir/qemu.pid"
650 # cpu list for taskset can be comma separated or range
651 # or both at the same time, so first split on commas
652 cpu_list
=$
(echo $task_mask |
tr "," "\n")
654 for c
in $cpu_list; do
655 # if range is detected - count how many cpus
656 if [[ $c =~
[0-9]+-[0-9]+ ]]; then
662 cpu_num
=$
((cpu_num
+val
))
663 queue_number
=$
((queue_number
+val
))
666 if [ -z $queue_number ]; then
667 queue_number
=$cpu_num
672 local node_num
=${!qemu_numa_node_param}
673 local boot_disk_present
=false
674 notice
"NUMA NODE: $node_num"
675 cmd
+="-m $guest_memory --enable-kvm -cpu host -smp $cpu_num -vga std -vnc :$vnc_socket -daemonize ${eol}"
676 cmd
+="-object memory-backend-file,id=mem,size=${guest_memory}M,mem-path=/dev/hugepages,share=on,prealloc=yes,host-nodes=$node_num,policy=bind ${eol}"
677 [[ $os_mode == snapshot
]] && cmd
+="-snapshot ${eol}"
678 [[ ! -z "$vm_incoming" ]] && cmd
+=" -incoming tcp:0:$migration_port ${eol}"
679 cmd
+="-monitor telnet:127.0.0.1:$monitor_port,server,nowait ${eol}"
680 cmd
+="-numa node,memdev=mem ${eol}"
681 cmd
+="-pidfile $qemu_pid_file ${eol}"
682 cmd
+="-serial file:$vm_dir/serial.log ${eol}"
683 cmd
+="-D $vm_dir/qemu.log ${eol}"
684 cmd
+="-net user,hostfwd=tcp::$ssh_socket-:22,hostfwd=tcp::$fio_socket-:8765 ${eol}"
685 cmd
+="-net nic ${eol}"
686 if [[ -z "$boot_from" ]]; then
687 cmd
+="-drive file=$os,if=none,id=os_disk ${eol}"
688 cmd
+="-device ide-hd,drive=os_disk,bootindex=0 ${eol}"
691 if ( [[ $disks == '' ]] && [[ $disk_type_g == virtio
* ]] ); then
695 for disk
in ${disks//:/ }; do
696 if [[ $disk = *","* ]]; then
700 disk_type
=$disk_type_g
705 local raw_name
="RAWSCSI"
706 local raw_disk
=$vm_dir/test.img
708 if [[ ! -z $disk ]]; then
709 [[ ! -b $disk ]] && touch $disk
710 local raw_disk
=$
(readlink
-f $disk)
713 # Create disk file if it not exist or it is smaller than 1G
714 if ( [[ -f $raw_disk ]] && [[ $
(stat
--printf="%s" $raw_disk) -lt $
((1024 * 1024 * 1024)) ]] ) || \
715 [[ ! -e $raw_disk ]]; then
716 if [[ $raw_disk =~
/dev
/.
* ]]; then
718 "ERROR: Virtio disk point to missing device ($raw_disk) -\n" \
719 " this is probably not what you want."
723 notice
"Creating Virtio disc $raw_disk"
724 dd if=/dev
/zero of
=$raw_disk bs
=1024k count
=1024
726 notice
"Using existing image $raw_disk"
729 cmd
+="-device virtio-scsi-pci,num_queues=$queue_number ${eol}"
730 cmd
+="-device scsi-hd,drive=hd$i,vendor=$raw_name ${eol}"
731 cmd
+="-drive if=none,id=hd$i,file=$raw_disk,format=raw$raw_cache ${eol}"
734 notice
"using socket $vhost_dir/naa.$disk.$vm_num"
735 cmd
+="-chardev socket,id=char_$disk,path=$vhost_dir/naa.$disk.$vm_num ${eol}"
736 cmd
+="-device vhost-user-scsi-pci,id=scsi_$disk,num_queues=$queue_number,chardev=char_$disk"
737 if [[ "$disk" == "$boot_from" ]]; then
739 boot_disk_present
=true
744 notice
"using socket $vhost_dir/naa.$disk.$vm_num"
745 cmd
+="-chardev socket,id=char_$disk,path=$vhost_dir/naa.$disk.$vm_num ${eol}"
746 cmd
+="-device vhost-user-blk-pci,num-queues=$queue_number,chardev=char_$disk"
747 if [[ "$disk" == "$boot_from" ]]; then
749 boot_disk_present
=true
754 if [[ -z $disk ]]; then
755 error
"need WWN for $disk_type"
757 elif [[ ! $disk =~ ^
[[:alpha
:]]{3}[.
][[:xdigit
:]]+$
]]; then
758 error
"$disk_type - disk(wnn)=$disk does not look like WNN number"
761 notice
"Using kernel vhost disk wwn=$disk"
762 cmd
+=" -device vhost-scsi-pci,wwpn=$disk,num_queues=$queue_number ${eol}"
765 error
"unknown mode '$disk_type', use: virtio, spdk_vhost_scsi, spdk_vhost_blk or kernel_vhost"
770 if [[ -n $boot_from ]] && [[ $boot_disk_present == false
]]; then
771 error
"Boot from $boot_from is selected but device is not present"
775 [[ ! -z $qemu_args ]] && cmd
+=" $qemu_args ${eol}"
777 cmd
="${cmd%\\\\\\n }"
779 notice
"Saving to $vm_dir/run.sh"
782 echo 'if [[ $EUID -ne 0 ]]; then '
783 echo ' echo "Go away user come back as root"'
787 echo -e "qemu_cmd=\"$cmd\"";
789 echo "echo 'Running VM in $vm_dir'"
790 echo "rm -f $qemu_pid_file"
792 echo "echo 'Waiting for QEMU pid file'"
794 echo "[[ ! -f $qemu_pid_file ]] && sleep 1"
795 echo "[[ ! -f $qemu_pid_file ]] && echo 'ERROR: no qemu pid file found' && exit 1"
797 echo "chmod +r $vm_dir/*"
799 echo "echo '=== qemu.log ==='"
800 echo "cat $vm_dir/qemu.log"
801 echo "echo '=== qemu.log ==='"
804 chmod +x
$vm_dir/run.sh
806 # Save generated sockets redirection
807 echo $ssh_socket > $vm_dir/ssh_socket
808 echo $fio_socket > $vm_dir/fio_socket
809 echo $monitor_port > $vm_dir/monitor_port
811 rm -f $vm_dir/migration_port
812 [[ -z $vm_incoming ]] ||
echo $migration_port > $vm_dir/migration_port
814 echo $gdbserver_socket > $vm_dir/gdbserver_socket
815 echo $vnc_socket >> $vm_dir/vnc_socket
817 [[ -z $vm_incoming ]] ||
ln -fs $VM_BASE_DIR/$vm_incoming $vm_dir/vm_incoming
818 [[ -z $vm_migrate_to ]] ||
ln -fs $VM_BASE_DIR/$vm_migrate_to $vm_dir/vm_migrate_to
823 local OPTIND optchar vm
827 while getopts 'a-:' optchar
; do
831 error
"Unknown param $OPTARG"
838 vms_to_run
="$(vm_list_all)"
842 vm_num_is_valid
$1 ||
return 1
843 if [[ ! -x $VM_BASE_DIR/$vm/run.sh
]]; then
844 error
"VM$vm not defined - setup it first"
851 for vm
in $vms_to_run; do
852 if vm_is_running
$vm; then
853 warning
"VM$vm ($VM_BASE_DIR/$vm) already running"
857 notice
"running $VM_BASE_DIR/$vm/run.sh"
858 if ! $VM_BASE_DIR/$vm/run.sh
; then
859 error
"FAILED to run vm $vm"
# Wait for all created VMs to boot.
# param $1 max wait time in seconds (clamped to a minimum of 10)
# param $2.. optional VM numbers; all VMs in VM_BASE_DIR when omitted
# NOTE(review): loop scaffolding (shift, else branches, progress counter)
# reconstructed from a garbled source - verify against upstream.
function vm_wait_for_boot()
{
	local shell_restore_x="$( [[ "$-" =~ x ]] && echo 'set -x' )"
	set +x

	local all_booted=false
	local timeout_time=$1
	[[ $timeout_time -lt 10 ]] && timeout_time=10
	# Convert relative timeout into an absolute epoch deadline.
	local timeout_time=$(date -d "+$timeout_time seconds" +%s)

	notice "Waiting for VMs to boot"
	shift
	if [[ "$@" == "" ]]; then
		local vms_to_check="$VM_BASE_DIR/[0-9]*"
	else
		local vms_to_check=""
		local vm
		for vm in "$@"; do
			vms_to_check+=" $VM_BASE_DIR/$vm"
		done
	fi

	local vm
	for vm in $vms_to_check; do
		local vm_num=$(basename $vm)
		local i=0
		notice "waiting for VM$vm_num ($vm)"
		while ! vm_os_booted $vm_num; do
			if ! vm_is_running $vm_num; then
				# VM died while waiting - dump its logs for diagnosis.
				warning "VM $vm_num is not running"
				warning "================"
				warning "QEMU LOG:"
				if [[ -r $vm/qemu.log ]]; then
					cat $vm/qemu.log
				else
					warning "LOG not found"
				fi

				warning "VM LOG:"
				if [[ -r $vm/serial.log ]]; then
					cat $vm/serial.log
				else
					warning "LOG not found"
				fi
				warning "================"
				$shell_restore_x
				return 1
			fi

			if [[ $(date +%s) -gt $timeout_time ]]; then
				warning "timeout waiting for machines to boot"
				$shell_restore_x
				return 1
			fi
			if (( i > 30 )); then
				local i=0
				echo
			fi
			echo -n "."
			sleep 1
			(( i++ ))
		done
		echo ""
		notice "VM$vm_num ready"
		#Change Timeout for stopping services to prevent lengthy powerdowns
		vm_ssh $vm_num "echo 'DefaultTimeoutStopSec=10' >> /etc/systemd/system.conf; systemctl daemon-reexec"
	done

	notice "all VMs ready"
	$shell_restore_x
	return 0
}
# Start an fio server daemon inside each listed VM.
# Optional params: --fio-bin=PATH (copy local fio binary into the VM),
#                  --readonly (pass --readonly to fio)
# Positional params: VM numbers.
# NOTE(review): getopts wrapper and per-VM loop reconstructed from a
# garbled source - verify against upstream.
function vm_start_fio_server()
{
	local OPTIND optchar
	local readonly=''
	local fio_bin=''
	while getopts ':-:' optchar; do
		case "$optchar" in
			-)
			case "$OPTARG" in
				fio-bin=*) local fio_bin="${OPTARG#*=}" ;;
				readonly) local readonly="--readonly" ;;
				*) error "Invalid argument '$OPTARG'" && return 1;;
			esac
			;;
			*) error "Invalid argument '$OPTARG'" && return 1;;
		esac
	done

	shift $(( OPTIND - 1 ))

	local vm_num
	for vm_num in $@; do
		notice "Starting fio server on VM$vm_num"
		if [[ $fio_bin != "" ]]; then
			# Ship the local fio binary into the guest first.
			cat $fio_bin | vm_ssh $vm_num 'cat > /root/fio; chmod +x /root/fio'
			vm_ssh $vm_num /root/fio $readonly --eta=never --server --daemonize=/root/fio.pid
		else
			vm_ssh $vm_num fio $readonly --eta=never --server --daemonize=/root/fio.pid
		fi
	done
}
# Find test SCSI disks inside VM $1 and export their names in SCSI_DISK.
# Fails when no matching disk is found.
# NOTE(review): tail of the embedded script and the return reconstructed
# from a garbled source.
function vm_check_scsi_location()
{
	# Script to find wanted disc
	local script='shopt -s nullglob; \
	for entry in /sys/block/sd*; do \
		disk_type="$(cat $entry/device/vendor)"; \
		if [[ $disk_type == INTEL* ]] || [[ $disk_type == RAWSCSI* ]] || [[ $disk_type == LIO-ORG* ]]; then \
			fname=$(basename $entry); \
			echo -n " $fname"; \
		fi; \
	done'

	SCSI_DISK="$(echo "$script" | vm_ssh $1 bash -s)"

	if [[ -z "$SCSI_DISK" ]]; then
		error "no test disk found!"
		return 1
	fi
}
# Script to perform scsi device reset on all disks in VM
# param $1 VM num
# param $2..$n Disks to perform reset on
function vm_reset_scsi_devices()
{
	local disk
	for disk in "${@:2}"; do
		notice "VM$1 Performing device reset on disk $disk"
		vm_ssh $1 sg_reset /dev/$disk -vNd
	done
}
# Find virtio-blk test disks (vd*) inside VM $1; export names in SCSI_DISK.
# Fails when none are found.
function vm_check_blk_location()
{
	local script='shopt -s nullglob; cd /sys/block; echo vd*'
	SCSI_DISK="$(echo "$script" | vm_ssh $1 bash -s)"

	if [[ -z "$SCSI_DISK" ]]; then
		error "no blk test disk found!"
		return 1
	fi
}
1020 local run_server_mode
=true
1024 --job-file=*) local job_file
="${arg#*=}" ;;
1025 --fio-bin=*) local fio_bin
="${arg#*=}" ;;
1026 --vm=*) vms
+=( "${arg#*=}" ) ;;
1028 local out
="${arg#*=}"
1031 --local) run_server_mode
=false
;;
1032 --json) json
="--json" ;;
1034 error
"Invalid argument '$arg'"
1040 if [[ ! -z "$fio_bin" && ! -r "$fio_bin" ]]; then
1041 error
"FIO binary '$fio_bin' does not exist"
1045 if [[ ! -r "$job_file" ]]; then
1046 error
"Fio job '$job_file' does not exist"
1050 local job_fname
=$
(basename "$job_file")
1051 # prepare job file for each VM
1052 for vm
in ${vms[@]}; do
1053 local vm_num
=${vm%%:*}
1054 local vmdisks
=${vm#*:}
1056 sed "s@filename=@filename=$vmdisks@" $job_file | vm_ssh
$vm_num "cat > /root/$job_fname"
1057 fio_disks
+="127.0.0.1:$(vm_fio_socket $vm_num):$vmdisks,"
1059 vm_ssh
$vm_num cat /root
/$job_fname
1060 if ! $run_server_mode; then
1061 if [[ ! -z "$fio_bin" ]]; then
1062 cat $fio_bin | vm_ssh
$vm_num 'cat > /root/fio; chmod +x /root/fio'
1065 notice
"Running local fio on VM $vm_num"
1066 vm_ssh
$vm_num "nohup /root/fio /root/$job_fname 1>/root/$job_fname.out 2>/root/$job_fname.out </dev/null & echo \$! > /root/fio.pid"
1070 if ! $run_server_mode; then
1071 # Give FIO time to run
1076 $SPDK_BUILD_DIR/test
/vhost
/common
/run_fio.py
--job-file=/root
/$job_fname \
1077 $
([[ ! -z "$fio_bin" ]] && echo "--fio-bin=$fio_bin") \
1078 --out=$out $json ${fio_disks%,}
# Shutdown or kill any running VM and SPDK APP.
# Intended as an exit hook for vhost test scripts.
# NOTE(review): vm_kill_all call and loop closer reconstructed from a
# garbled source.
function at_app_exit()
{
	local vhost_num

	notice "APP EXITING"
	notice "killing all VMs"
	vm_kill_all
	# Kill vhost application
	notice "killing vhost app"

	for vhost_num in $(spdk_vhost_list_all); do
		spdk_vhost_kill $vhost_num
	done

	notice "EXIT DONE"
}
1100 function error_exit
()
1105 error
"Error on $1 $2"