# Default configuration, overridable from the environment.
# ":" with ${VAR=default} assigns only when the variable is unset.
: ${SPDK_VHOST_VERBOSE=false}
: ${VHOST_DIR="$HOME/vhost_test"}
: ${QEMU_BIN="qemu-system-x86_64"}
: ${QEMU_IMG_BIN="qemu-img"}
# Repository root is one level above $rootdir (set by the caller that
# sources this file).
TEST_DIR=$(readlink -f "$rootdir/..")
TARGET_DIR=$VHOST_DIR/vhost

#TODO: Move vhost_vm_image.qcow2 into VHOST_DIR on test systems.
VM_IMAGE=$HOME/vhost_vm_image.qcow2
# Abort early if QEMU tooling is missing; every test below depends on it.
# NOTE(review): 'exit 1'/'fi' reconstructed from a mangled extraction.
if ! hash $QEMU_IMG_BIN $QEMU_BIN; then
	error 'QEMU is not installed on this system. Unable to run vhost tests.'
	exit 1
fi
# Source config describing QEMU and VHOST cores and NUMA
source $rootdir/test/vhost/common/autotest.config
# Prepare the host for vhost tests: in "iso" mode run hugepage/device setup
# and offer to download the VM image; always verify the VM image exists.
# NOTE(review): interior lines (read prompt, fi closers) reconstructed from
# a mangled extraction - verify against upstream common.sh.
function vhosttestinit() {
	if [ "$TEST_MODE" == "iso" ]; then
		$rootdir/scripts/setup.sh

		# Look for the VM image
		if [[ ! -f $VM_IMAGE ]]; then
			echo "VM image not found at $VM_IMAGE"
			echo "Download to $HOME? [yn]"
			read -r download
			if [ "$download" = "y" ]; then
				curl https://ci.spdk.io/download/test_resources/vhost_vm_image.tar.gz | tar xz -C "$HOME"
			fi
		fi
	fi

	# Look for the VM image
	if [[ ! -f $VM_IMAGE ]]; then
		error "VM image not found at $VM_IMAGE"
		return 1
	fi
}
# Undo vhosttestinit host preparation (only meaningful in "iso" mode).
function vhosttestfini() {
	if [ "$TEST_MODE" == "iso" ]; then
		$rootdir/scripts/setup.sh reset
	fi
}
# Print a log line: $1 is the message type (INFO/WARN/ERROR/FAIL), the rest
# is the message. In verbose mode a caller file/function suffix is appended.
# NOTE(review): function header, then/else branches and msg_type handling
# reconstructed from a mangled extraction - verify against upstream.
function message() {
	local verbose_out
	if ! $SPDK_VHOST_VERBOSE; then
		verbose_out=""
	elif [[ ${FUNCNAME[2]} == "source" ]]; then
		verbose_out=" (file $(basename ${BASH_SOURCE[1]}):${BASH_LINENO[1]})"
	else
		verbose_out=" (function ${FUNCNAME[2]}:${BASH_LINENO[1]})"
	fi

	local msg_type="$1"
	shift
	echo -e "${msg_type}${verbose_out}: $*"
}
# Print a framed FAIL message to stderr and terminate the test script.
function fail() {
	echo "===========" >&2
	message "FAIL" "$@" >&2
	echo "===========" >&2
	exit 1
}
# Print a framed ERROR message to stderr and return failure (via 'false').
function error() {
	echo "===========" >&2
	message "ERROR" "$@" >&2
	echo "===========" >&2
	# Don't 'return 1' since the stack trace will be incomplete (why?) missing upper command.
	false
}
# Print a WARN message to stderr; unlike error() this does not fail.
function warning() {
	message "WARN" "$@" >&2
}
# Fail unless $QEMU_BIN is new enough (>= 4.2.0) to support packed rings.
function check_qemu_packedring_support() {
	qemu_version=$($QEMU_BIN -version | grep -Po "(?<=version )\d+.\d+.\d+")
	# Lexicographic compare is good enough for single-digit version fields.
	if [[ "$qemu_version" < "4.2.0" ]]; then
		error "This qemu binary does not support packed ring"
	fi
}
# Print the working directory for the named vhost instance.
# param $1 vhost name (required)
function get_vhost_dir() {
	local vhost_name="$1"

	if [[ -z "$vhost_name" ]]; then
		error "vhost name must be provided to get_vhost_dir"
		return 1
	fi

	echo "$TARGET_DIR/${vhost_name}"
}
112 function vhost_run
() {
113 local vhost_name
="$1"
114 local run_gen_nvme
=true
116 if [[ -z "$vhost_name" ]]; then
117 error
"vhost name must be provided to vhost_run"
122 if [[ "$1" == "--no-gen-nvme" ]]; then
123 notice
"Skipping gen_nvmf.sh NVMe bdev configuration"
129 vhost_dir
="$(get_vhost_dir $vhost_name)"
130 local vhost_app
="$SPDK_BIN_DIR/vhost"
131 local vhost_log_file
="$vhost_dir/vhost.log"
132 local vhost_pid_file
="$vhost_dir/vhost.pid"
133 local vhost_socket
="$vhost_dir/usvhost"
134 notice
"starting vhost app in background"
135 [[ -r "$vhost_pid_file" ]] && vhost_kill
$vhost_name
136 [[ -d $vhost_dir ]] && rm -f $vhost_dir/*
139 if [[ ! -x $vhost_app ]]; then
140 error
"application not found: $vhost_app"
144 local cmd
="$vhost_app -r $vhost_dir/rpc.sock $*"
146 notice
"Loging to: $vhost_log_file"
147 notice
"Socket: $vhost_socket"
148 notice
"Command: $cmd"
150 timing_enter vhost_start
154 echo $vhost_pid > $vhost_pid_file
156 notice
"waiting for app to run..."
157 waitforlisten
"$vhost_pid" "$vhost_dir/rpc.sock"
158 #do not generate nvmes if pci access is disabled
159 if [[ "$cmd" != *"--no-pci"* ]] && [[ "$cmd" != *"-u"* ]] && $run_gen_nvme; then
160 $rootdir/scripts
/gen_nvme.sh
"--json" |
$rootdir/scripts
/rpc.py
-s $vhost_dir/rpc.sock load_subsystem_config
163 notice
"vhost started - pid=$vhost_pid"
164 timing_exit vhost_start
167 function vhost_kill
() {
169 local vhost_name
="$1"
171 if [[ -z "$vhost_name" ]]; then
172 error
"Must provide vhost name to vhost_kill"
177 vhost_dir
="$(get_vhost_dir $vhost_name)"
178 local vhost_pid_file
="$vhost_dir/vhost.pid"
180 if [[ ! -r $vhost_pid_file ]]; then
181 warning
"no vhost pid file found"
185 timing_enter vhost_kill
187 vhost_pid
="$(cat $vhost_pid_file)"
188 notice
"killing vhost (PID $vhost_pid) app"
190 if kill -INT $vhost_pid > /dev
/null
; then
191 notice
"sent SIGINT to vhost app - waiting 60 seconds to exit"
192 for ((i
= 0; i
< 60; i
++)); do
193 if kill -0 $vhost_pid; then
200 if kill -0 $vhost_pid; then
201 error
"ERROR: vhost was NOT killed - sending SIGABRT"
202 kill -ABRT $vhost_pid
206 while kill -0 $vhost_pid; do
210 elif kill -0 $vhost_pid; then
211 error
"vhost NOT killed - you need to kill it manually"
214 notice
"vhost was not running"
217 timing_exit vhost_kill
218 if [[ $rc == 0 ]]; then
# Run an SPDK RPC against the named vhost instance's rpc.sock.
# param $1 vhost name; remaining args are passed to rpc.py.
function vhost_rpc() {
	local vhost_name="$1"

	if [[ -z "$vhost_name" ]]; then
		error "vhost name must be provided to vhost_rpc"
		return 1
	fi
	shift

	$rootdir/scripts/rpc.py -s $(get_vhost_dir $vhost_name)/rpc.sock "$@"
}
# Succeed only when $1 is a plain decimal number.
# Fix: anchor the regex - the unanchored '[0-9]+' accepted values like
# '12abc' or 'abc1'.
function assert_number() {
	[[ "$1" =~ ^[0-9]+$ ]] && return 0

	error "Invalid or missing paramter: need number but got '$1'"
	return 1
}
# Run command on vm with given password
# First argument - vm number
# Second argument - ssh password for vm
# NOTE(review): the '-o User=root' option line is reconstructed from a
# mangled extraction - verify against upstream.
function vm_sshpass() {
	vm_num_is_valid $1 || return 1

	local ssh_cmd
	ssh_cmd="sshpass -p $2 ssh \
		-o UserKnownHostsFile=/dev/null \
		-o StrictHostKeyChecking=no \
		-o User=root \
		-p $(vm_ssh_socket $1) $VM_SSH_OPTIONS 127.0.0.1"

	shift 2
	$ssh_cmd "$@"
}
# Helper to validate VM number
# param $1 VM number
function vm_num_is_valid() {
	[[ "$1" =~ ^[0-9]+$ ]] && return 0

	error "Invalid or missing paramter: vm number '$1'"
	return 1
}
# Print network socket for given VM number
# param $1 virtual machine number
function vm_ssh_socket() {
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	cat "$vm_dir/ssh_socket"
}
# Print the fio-server socket for given VM number
# param $1 virtual machine number
function vm_fio_socket() {
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	cat "$vm_dir/fio_socket"
}
295 # Execute command on given VM
296 # param $1 virtual machine number
299 vm_num_is_valid
$1 ||
return 1
304 sshpass
-p "$VM_PASSWORD" ssh \
305 -o UserKnownHostsFile
=/dev
/null \
306 -o StrictHostKeyChecking
=no \
308 -p $
(vm_ssh_socket
$vm_num) $VM_SSH_OPTIONS 127.0.0.1 \
312 # Execute scp command on given VM
313 # param $1 virtual machine number
316 vm_num_is_valid
$1 ||
return 1
321 sshpass
-p "$VM_PASSWORD" scp \
322 -o UserKnownHostsFile
=/dev
/null \
323 -o StrictHostKeyChecking
=no \
325 -P $
(vm_ssh_socket
$vm_num) $VM_SSH_OPTIONS \
# check if specified VM is running
# param $1 virtual machine number
# NOTE(review): the else-branch structure and stale-pid-file cleanup are
# reconstructed from a mangled extraction - verify against upstream.
function vm_is_running() {
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	if [[ ! -r $vm_dir/qemu.pid ]]; then
		return 1
	fi

	vm_pid="$(cat $vm_dir/qemu.pid)"

	if /bin/kill -0 $vm_pid; then
		return 0
	else
		if [[ $EUID -ne 0 ]]; then
			warning "not root - assuming VM running since can't be checked"
			return 0
		fi

		# not running - remove pid file
		rm -f $vm_dir/qemu.pid
		return 1
	fi
}
# check if specified VM is running
# param $1 virtual machine number
# Probes the guest OS via ssh; on failure tears down any ssh control master.
function vm_os_booted() {
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	if [[ ! -r $vm_dir/qemu.pid ]]; then
		error "VM $1 is not running"
		return 1
	fi

	if ! VM_SSH_OPTIONS="-o ControlMaster=no" vm_exec $1 "true" 2> /dev/null; then
		# Shutdown existing master. Ignore errors as it might not exist.
		VM_SSH_OPTIONS="-O exit" vm_exec $1 "true" 2> /dev/null
		return 1
	fi

	return 0
}
# Request a clean in-guest shutdown of the given VM.
# param $1 virtual machine number
# return non-zero in case of error.
# NOTE(review): set +e/-e bracketing reconstructed (supported by the
# "Temporarily disabling exit flag" comment) - verify against upstream.
function vm_shutdown() {
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"
	if [[ ! -d "$vm_dir" ]]; then
		error "VM$1 ($vm_dir) not exist - setup it first"
		return 1
	fi

	if ! vm_is_running $1; then
		notice "VM$1 ($vm_dir) is not running"
		return 0
	fi

	# Temporarily disabling exit flag for next ssh command, since it will
	# "fail" due to shutdown
	notice "Shutting down virtual machine $vm_dir"
	set +e
	vm_exec $1 "nohup sh -c 'shutdown -h -P now'" || true
	notice "VM$1 is shutting down - wait a while to complete"
	set -e
}
# Kill the QEMU process of the given VM and remove its directory.
# param $1 virtual machine number
# NOTE(review): the no-pid-file early return and rm of the VM dir are
# reconstructed from a mangled extraction - verify against upstream.
function vm_kill() {
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	if [[ ! -r $vm_dir/qemu.pid ]]; then
		return 0
	fi

	vm_pid="$(cat $vm_dir/qemu.pid)"

	notice "Killing virtual machine $vm_dir (pid=$vm_pid)"

	# First kill should fail, second one must fail
	if /bin/kill $vm_pid; then
		notice "process $vm_pid killed"
		rm -rf $vm_dir
	elif vm_is_running $1; then
		error "Process $vm_pid NOT killed"
		return 1
	fi
}
# List all VM numbers in VM_DIR
# NOTE(review): the glob used to collect VM dirs is reconstructed from a
# mangled extraction - verify against upstream.
function vm_list_all() {
	local vms
	vms="$(
		shopt -s nullglob
		echo "$VM_DIR"/[0-9]*
	)"
	if [[ -n "$vms" ]]; then
		basename --multiple $vms
	fi
}
# Kills all VM in $VM_DIR
# NOTE(review): loop body and final cleanup reconstructed from a mangled
# extraction - verify against upstream.
function vm_kill_all() {
	local vm
	for vm in $(vm_list_all); do
		vm_kill $vm
	done

	rm -rf $VM_DIR
}
451 # Shutdown all VM in $VM_DIR
453 function vm_shutdown_all
() {
454 # XXX: temporarily disable to debug shutdown issue
465 notice
"Waiting for VMs to shutdown..."
467 while [[ $timeo -gt 0 ]]; do
470 if vm_is_running
$vm; then
476 if [[ $all_vms_down == 1 ]]; then
477 notice
"All VMs successfully shut down"
491 function vm_setup
() {
493 local OPTIND optchar vm_num
498 local disk_type_g
=NOT_DEFINED
499 local read_only
="false"
500 # List created of a strings separated with a ":"
504 local vm_migrate_to
=""
506 local guest_memory
=1024
507 local queue_number
=""
510 vhost_dir
="$(get_vhost_dir 0)"
511 while getopts ':-:' optchar
; do
515 os
=*) os
="${OPTARG#*=}" ;;
516 os-mode
=*) os_mode
="${OPTARG#*=}" ;;
517 qemu-args
=*) qemu_args
+=("${OPTARG#*=}") ;;
518 disk-type
=*) disk_type_g
="${OPTARG#*=}" ;;
519 read-only
=*) read_only
="${OPTARG#*=}" ;;
520 disks
=*) IFS
=":" read -ra disks
<<< "${OPTARG#*=}" ;;
521 raw-cache
=*) raw_cache
=",cache${OPTARG#*=}" ;;
522 force
=*) force_vm
=${OPTARG#*=} ;;
523 memory
=*) guest_memory
=${OPTARG#*=} ;;
524 queue_num
=*) queue_number
=${OPTARG#*=} ;;
525 incoming
=*) vm_incoming
="${OPTARG#*=}" ;;
526 migrate-to
=*) vm_migrate_to
="${OPTARG#*=}" ;;
527 vhost-name
=*) vhost_dir
="$(get_vhost_dir ${OPTARG#*=})" ;;
528 spdk-boot
=*) local boot_from
="${OPTARG#*=}" ;;
529 packed
) packed
=true
;;
531 error
"unknown argument $OPTARG"
537 error
"vm_create Unknown param $OPTARG"
543 # Find next directory we can use
544 if [[ -n $force_vm ]]; then
547 vm_num_is_valid
$vm_num ||
return 1
548 local vm_dir
="$VM_DIR/$vm_num"
549 [[ -d $vm_dir ]] && warning
"removing existing VM in '$vm_dir'"
554 for ((i
= 0; i
<= 256; i
++)); do
555 local vm_dir
="$VM_DIR/$i"
556 [[ ! -d $vm_dir ]] && break
563 if [[ $vm_num -eq 256 ]]; then
564 error
"no free VM found. do some cleanup (256 VMs created, are you insane?)"
568 if [[ -n "$vm_migrate_to" && -n "$vm_incoming" ]]; then
569 error
"'--incoming' and '--migrate-to' cannot be used together"
571 elif [[ -n "$vm_incoming" ]]; then
572 if [[ -n "$os_mode" ||
-n "$os" ]]; then
573 error
"'--incoming' can't be used together with '--os' nor '--os-mode'"
578 os
="$VM_DIR/$vm_incoming/os.qcow2"
579 elif [[ -n "$vm_migrate_to" ]]; then
580 [[ "$os_mode" != "backing" ]] && warning
"Using 'backing' mode for OS since '--migrate-to' is used"
584 notice
"Creating new VM in $vm_dir"
587 if [[ "$os_mode" == "backing" ]]; then
588 notice
"Creating backing file for OS image file: $os"
589 if ! $QEMU_IMG_BIN create
-f qcow2
-b $os $vm_dir/os.qcow2
; then
590 error
"Failed to create OS backing file in '$vm_dir/os.qcow2' using '$os'"
594 local os
=$vm_dir/os.qcow2
595 elif [[ "$os_mode" == "original" ]]; then
596 warning
"Using original OS image file: $os"
597 elif [[ "$os_mode" != "snapshot" ]]; then
598 if [[ -z "$os_mode" ]]; then
599 notice
"No '--os-mode' parameter provided - using 'snapshot'"
602 error
"Invalid '--os-mode=$os_mode'"
607 local qemu_mask_param
="VM_${vm_num}_qemu_mask"
608 local qemu_numa_node_param
="VM_${vm_num}_qemu_numa_node"
610 if [[ -z "${!qemu_mask_param}" ]] ||
[[ -z "${!qemu_numa_node_param}" ]]; then
611 error
"Parameters ${qemu_mask_param} or ${qemu_numa_node_param} not found in autotest.config file"
615 local task_mask
=${!qemu_mask_param}
617 notice
"TASK MASK: $task_mask"
618 local cmd
=(taskset
-a -c "$task_mask" "$QEMU_BIN")
619 local vm_socket_offset
=$
((10000 + 100 * vm_num
))
621 local ssh_socket
=$
((vm_socket_offset
+ 0))
622 local fio_socket
=$
((vm_socket_offset
+ 1))
623 local monitor_port
=$
((vm_socket_offset
+ 2))
624 local migration_port
=$
((vm_socket_offset
+ 3))
625 local gdbserver_socket
=$
((vm_socket_offset
+ 4))
626 local vnc_socket
=$
((100 + vm_num
))
627 local qemu_pid_file
="$vm_dir/qemu.pid"
631 # cpu list for taskset can be comma separated or range
632 # or both at the same time, so first split on commas
633 cpu_list
=$
(echo $task_mask |
tr "," "\n")
635 for c
in $cpu_list; do
636 # if range is detected - count how many cpus
637 if [[ $c =~
[0-9]+-[0-9]+ ]]; then
643 cpu_num
=$
((cpu_num
+ val
))
644 queue_number
=$
((queue_number
+ val
))
647 if [ -z $queue_number ]; then
648 queue_number
=$cpu_num
653 local node_num
=${!qemu_numa_node_param}
654 local boot_disk_present
=false
655 notice
"NUMA NODE: $node_num"
656 cmd
+=(-m "$guest_memory" --enable-kvm -cpu host -smp "$cpu_num" -vga std
-vnc ":$vnc_socket" -daemonize)
657 cmd
+=(-object "memory-backend-file,id=mem,size=${guest_memory}M,mem-path=/dev/hugepages,share=on,prealloc=yes,host-nodes=$node_num,policy=bind")
658 [[ $os_mode == snapshot
]] && cmd
+=(-snapshot)
659 [[ -n "$vm_incoming" ]] && cmd
+=(-incoming "tcp:0:$migration_port")
660 cmd
+=(-monitor "telnet:127.0.0.1:$monitor_port,server,nowait")
661 cmd
+=(-numa "node,memdev=mem")
662 cmd
+=(-pidfile "$qemu_pid_file")
663 cmd
+=(-serial "file:$vm_dir/serial.log")
664 cmd
+=(-D "$vm_dir/qemu.log")
665 cmd
+=(-chardev "file,path=$vm_dir/seabios.log,id=seabios" -device "isa-debugcon,iobase=0x402,chardev=seabios")
666 cmd
+=(-net "user,hostfwd=tcp::$ssh_socket-:22,hostfwd=tcp::$fio_socket-:8765")
668 if [[ -z "$boot_from" ]]; then
669 cmd
+=(-drive "file=$os,if=none,id=os_disk")
670 cmd
+=(-device "ide-hd,drive=os_disk,bootindex=0")
673 if ((${#disks[@]} == 0)) && [[ $disk_type_g == virtio
* ]]; then
674 disks
=("default_virtio.img")
675 elif ((${#disks[@]} == 0)); then
676 error
"No disks defined, aborting"
680 for disk
in "${disks[@]}"; do
681 # Each disk can define its type in a form of a disk_name,type. The remaining parts
682 # of the string are dropped.
683 IFS
="," read -r disk disk_type _
<<< "$disk"
684 [[ -z $disk_type ]] && disk_type
=$disk_type_g
688 local raw_name
="RAWSCSI"
689 local raw_disk
=$vm_dir/test.img
691 if [[ -n $disk ]]; then
692 [[ ! -b $disk ]] && touch $disk
694 raw_disk
=$
(readlink
-f $disk)
697 # Create disk file if it not exist or it is smaller than 1G
698 if { [[ -f $raw_disk ]] && [[ $
(stat
--printf="%s" $raw_disk) -lt $
((1024 * 1024 * 1024)) ]]; } \
699 ||
[[ ! -e $raw_disk ]]; then
700 if [[ $raw_disk =~
/dev
/.
* ]]; then
702 "ERROR: Virtio disk point to missing device ($raw_disk) -\n" \
703 " this is probably not what you want."
707 notice
"Creating Virtio disc $raw_disk"
708 dd if=/dev
/zero of
=$raw_disk bs
=1024k count
=1024
710 notice
"Using existing image $raw_disk"
713 cmd
+=(-device "virtio-scsi-pci,num_queues=$queue_number")
714 cmd
+=(-device "scsi-hd,drive=hd$i,vendor=$raw_name")
715 cmd
+=(-drive "if=none,id=hd$i,file=$raw_disk,format=raw$raw_cache")
718 notice
"using socket $vhost_dir/naa.$disk.$vm_num"
719 cmd
+=(-chardev "socket,id=char_$disk,path=$vhost_dir/naa.$disk.$vm_num")
720 cmd
+=(-device "vhost-user-scsi-pci,id=scsi_$disk,num_queues=$queue_number,chardev=char_$disk")
721 if [[ "$disk" == "$boot_from" ]]; then
722 cmd
[-1]+=,bootindex
=0
723 boot_disk_present
=true
727 notice
"using socket $vhost_dir/naa.$disk.$vm_num"
728 cmd
+=(-chardev "socket,id=char_$disk,path=$vhost_dir/naa.$disk.$vm_num")
729 cmd
+=(-device "vhost-user-blk-pci,num-queues=$queue_number,chardev=char_$disk")
730 if [[ "$disk" == "$boot_from" ]]; then
731 cmd
[-1]+=,bootindex
=0
732 boot_disk_present
=true
736 check_qemu_packedring_support
737 notice
"Enabling packed ring support for VM $vm_num, controller $vhost_dir/naa.$disk.$vm_num"
742 if [[ -z $disk ]]; then
743 error
"need WWN for $disk_type"
745 elif [[ ! $disk =~ ^
[[:alpha
:]]{3}[.
][[:xdigit
:]]+$
]]; then
746 error
"$disk_type - disk(wnn)=$disk does not look like WNN number"
749 notice
"Using kernel vhost disk wwn=$disk"
750 cmd
+=(-device "vhost-scsi-pci,wwpn=$disk,num_queues=$queue_number")
753 error
"unknown mode '$disk_type', use: virtio, spdk_vhost_scsi, spdk_vhost_blk or kernel_vhost"
759 if [[ -n $boot_from ]] && [[ $boot_disk_present == false
]]; then
760 error
"Boot from $boot_from is selected but device is not present"
764 ((${#qemu_args[@]})) && cmd
+=("${qemu_args[@]}")
765 notice
"Saving to $vm_dir/run.sh"
766 cat <<- RUN > "$vm_dir/run.sh"
769 echo "=== qemu.log ==="
770 [[ -s $vm_dir/qemu.log ]] && cat $vm_dir/qemu.log
771 echo "=== qemu.log ==="
774 if [[ \$EUID -ne 0 ]]; then
775 echo "Go away user come back as root"
781 qemu_cmd=($(printf '%s\n' "${cmd[@]}"))
783 echo "Running VM in $vm_dir"
787 echo "Waiting for QEMU pid file"
789 [[ ! -f $qemu_pid_file ]] && sleep 1
790 [[ ! -f $qemu_pid_file ]] && echo "ERROR: no qemu pid file found" && exit 1
794 chmod +x
$vm_dir/run.sh
796 # Save generated sockets redirection
797 echo $ssh_socket > $vm_dir/ssh_socket
798 echo $fio_socket > $vm_dir/fio_socket
799 echo $monitor_port > $vm_dir/monitor_port
801 rm -f $vm_dir/migration_port
802 [[ -z $vm_incoming ]] ||
echo $migration_port > $vm_dir/migration_port
804 echo $gdbserver_socket > $vm_dir/gdbserver_socket
805 echo $vnc_socket >> $vm_dir/vnc_socket
807 [[ -z $vm_incoming ]] ||
ln -fs $VM_DIR/$vm_incoming $vm_dir/vm_incoming
808 [[ -z $vm_migrate_to ]] ||
ln -fs $VM_DIR/$vm_migrate_to $vm_dir/vm_migrate_to
812 local OPTIND optchar vm
816 while getopts 'a-:' optchar
; do
820 error
"Unknown param $OPTARG"
827 vms_to_run
="$(vm_list_all)"
829 shift $
((OPTIND
- 1))
831 vm_num_is_valid
$1 ||
return 1
832 if [[ ! -x $VM_DIR/$vm/run.sh
]]; then
833 error
"VM$vm not defined - setup it first"
840 for vm
in $vms_to_run; do
841 if vm_is_running
$vm; then
842 warning
"VM$vm ($VM_DIR/$vm) already running"
846 notice
"running $VM_DIR/$vm/run.sh"
847 if ! $VM_DIR/$vm/run.sh
; then
848 error
"FAILED to run vm $vm"
# Dump qemu, serial and seabios logs of the given VM (or a warning when a
# log is missing).
# param $1 virtual machine number
function vm_print_logs() {
	vm_num=$1
	warning "================"
	warning "QEMU LOG:"
	if [[ -r $VM_DIR/$vm_num/qemu.log ]]; then
		cat $VM_DIR/$vm_num/qemu.log
	else
		warning "LOG qemu.log not found"
	fi

	warning "SERIAL LOG:"
	if [[ -r $VM_DIR/$vm_num/serial.log ]]; then
		cat $VM_DIR/$vm_num/serial.log
	else
		warning "LOG serial.log not found"
	fi

	warning "SEABIOS LOG:"
	if [[ -r $VM_DIR/$vm_num/seabios.log ]]; then
		cat $VM_DIR/$vm_num/seabios.log
	else
		warning "LOG seabios.log not found"
	fi
	warning "================"
}
880 # Wait for all created VMs to boot.
881 # param $1 max wait time
882 function vm_wait_for_boot
() {
887 local all_booted
=false
888 local timeout_time
=$1
889 [[ $timeout_time -lt 10 ]] && timeout_time
=10
891 timeout_time
=$
(date -d "+$timeout_time seconds" +%s
)
893 notice
"Waiting for VMs to boot"
895 if [[ "$*" == "" ]]; then
896 local vms_to_check
="$VM_DIR/[0-9]*"
898 local vms_to_check
=""
900 vms_to_check
+=" $VM_DIR/$vm"
904 for vm
in $vms_to_check; do
906 vm_num
=$
(basename $vm)
908 notice
"waiting for VM$vm_num ($vm)"
909 while ! vm_os_booted
$vm_num; do
910 if ! vm_is_running
$vm_num; then
911 warning
"VM $vm_num is not running"
912 vm_print_logs
$vm_num
917 if [[ $
(date +%s
) -gt $timeout_time ]]; then
918 warning
"timeout waiting for machines to boot"
919 vm_print_logs
$vm_num
931 notice
"VM$vm_num ready"
932 #Change Timeout for stopping services to prevent lengthy powerdowns
933 #Check that remote system is not Cygwin in case of Windows VMs
935 vm_os
=$
(vm_exec
$vm_num "uname -o")
936 if [[ "$vm_os" != "Cygwin" ]]; then
937 vm_exec
$vm_num "echo 'DefaultTimeoutStopSec=10' >> /etc/systemd/system.conf; systemctl daemon-reexec"
941 notice
"all VMs ready"
946 function vm_start_fio_server
() {
950 while getopts ':-:' optchar
; do
954 fio-bin
=*) local fio_bin
="${OPTARG#*=}" ;;
955 readonly) local readonly="--readonly" ;;
956 *) error
"Invalid argument '$OPTARG'" && return 1 ;;
959 *) error
"Invalid argument '$OPTARG'" && return 1 ;;
963 shift $
((OPTIND
- 1))
964 for vm_num
in "$@"; do
965 notice
"Starting fio server on VM$vm_num"
966 if [[ $fio_bin != "" ]]; then
967 vm_exec
$vm_num 'cat > /root/fio; chmod +x /root/fio' < $fio_bin
968 vm_exec
$vm_num /root
/fio
$readonly --eta=never
--server --daemonize=/root
/fio.pid
970 vm_exec
$vm_num fio
$readonly --eta=never
--server --daemonize=/root
/fio.pid
# Discover test SCSI disks inside the VM; sets SCSI_DISK.
# param $1 virtual machine number
# NOTE(review): tail of the in-guest script (echo of fname, closers) is
# reconstructed from a mangled extraction - verify against upstream.
function vm_check_scsi_location() {
	# Script to find wanted disc
	local script='shopt -s nullglob;
	for entry in /sys/block/sd*; do
		disk_type="$(cat $entry/device/vendor)";
		if [[ $disk_type == INTEL* ]] || [[ $disk_type == RAWSCSI* ]] || [[ $disk_type == LIO-ORG* ]]; then
			fname=$(basename $entry);
			echo -n " $fname";
		fi;
	done'
	SCSI_DISK="$(echo "$script" | vm_exec $1 bash -s)"

	if [[ -z "$SCSI_DISK" ]]; then
		error "no test disk found!"
		return 1
	fi
}
994 # Note: to use this function your VM should be run with
995 # appropriate memory and with SPDK source already cloned
996 # and compiled in /root/spdk.
997 function vm_check_virtio_location
() {
998 vm_exec
$1 NRHUGE
=512 /root
/spdk
/scripts
/setup.sh
999 vm_exec
$1 "cat > /root/bdev.conf" <<- EOF
1004 vm_exec
$1 "cat /root/bdev.conf"
1006 vm_exec
$1 "bash -s" <<- EOF
1008 rootdir="/root/spdk"
1009 source /root/spdk/test/common/autotest_common.sh
1010 discover_bdevs /root/spdk /root/bdev.conf | jq -r '[.[].name] | join(" ")' > /root/fio_bdev_filenames
1014 SCSI_DISK
=$
(vm_exec
$1 cat /root
/fio_bdev_filenames
)
1015 if [[ -z "$SCSI_DISK" ]]; then
1016 error
"no virtio test disk found!"
# Script to perform scsi device reset on all disks in VM
# param $1 VM number
# param $2..$n Disks to perform reset on
function vm_reset_scsi_devices() {
	for disk in "${@:2}"; do
		notice "VM$1 Performing device reset on disk $disk"
		vm_exec $1 sg_reset /dev/$disk -vNd
	done
}
# Discover virtio-blk test disks (vd*) inside the VM; sets SCSI_DISK.
# param $1 virtual machine number
function vm_check_blk_location() {
	local script='shopt -s nullglob; cd /sys/block; echo vd*'
	SCSI_DISK="$(echo "$script" | vm_exec $1 bash -s)"

	if [[ -z "$SCSI_DISK" ]]; then
		error "no blk test disk found!"
		return 1
	fi
}
1041 function run_fio
() {
1048 local run_server_mode
=true
1049 local run_plugin_mode
=false
1051 local fio_output_format
="normal"
1052 local fio_gtod_reduce
=false
1053 local wait_for_fio
=true
1057 --job-file=*) local job_file
="${arg#*=}" ;;
1058 --fio-bin=*) local fio_bin
="${arg#*=}" ;;
1059 --vm=*) vms
+=("${arg#*=}") ;;
1061 local out
="${arg#*=}"
1064 --local) run_server_mode
=false
;;
1066 notice
"Using plugin mode. Disabling server mode."
1067 run_plugin_mode
=true
1068 run_server_mode
=false
1070 --json) fio_output_format
="json" ;;
1071 --hide-results) hide_results
=true
;;
1072 --no-wait-for-fio) wait_for_fio
=false
;;
1073 --gtod-reduce) fio_gtod_reduce
=true
;;
1075 error
"Invalid argument '$arg'"
1081 if [[ -n "$fio_bin" && ! -r "$fio_bin" ]]; then
1082 error
"FIO binary '$fio_bin' does not exist"
1086 if [[ -z "$fio_bin" ]]; then
1090 if [[ ! -r "$job_file" ]]; then
1091 error
"Fio job '$job_file' does not exist"
1095 fio_start_cmd
="$fio_bin --eta=never "
1098 job_fname
=$
(basename "$job_file")
1099 log_fname
="${job_fname%%.*}.log"
1100 fio_start_cmd
+=" --output=$out/$log_fname --output-format=$fio_output_format "
1102 # prepare job file for each VM
1103 for vm
in "${vms[@]}"; do
1104 local vm_num
=${vm%%:*}
1105 local vmdisks
=${vm#*:}
1107 sed "s@filename=@filename=$vmdisks@" $job_file | vm_exec
$vm_num "cat > /root/$job_fname"
1109 if $fio_gtod_reduce; then
1110 vm_exec
$vm_num "echo 'gtod_reduce=1' >> /root/$job_fname"
1113 vm_exec
$vm_num cat /root
/$job_fname
1115 if $run_server_mode; then
1116 fio_start_cmd
+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/$job_fname "
1119 if ! $run_server_mode; then
1120 if [[ -n "$fio_bin" ]]; then
1121 if ! $run_plugin_mode; then
1122 vm_exec
$vm_num 'cat > /root/fio; chmod +x /root/fio' < $fio_bin
1123 vm_fio_bin
="/root/fio"
1125 vm_fio_bin
="/usr/src/fio/fio"
1129 notice
"Running local fio on VM $vm_num"
1130 vm_exec
$vm_num "$vm_fio_bin --output=/root/$log_fname --output-format=$fio_output_format /root/$job_fname & echo \$! > /root/fio.pid" &
1131 vm_exec_pids
+=("$!")
1135 if ! $run_server_mode; then
1136 if ! $wait_for_fio; then
1139 echo "Waiting for guest fio instances to finish.."
1140 wait "${vm_exec_pids[@]}"
1142 for vm
in "${vms[@]}"; do
1143 local vm_num
=${vm%%:*}
1144 vm_exec
$vm_num cat /root
/$log_fname > "$out/vm${vm_num}_${log_fname}"
1152 if [[ "$fio_output_format" == "json" ]]; then
1153 # Fio in client-server mode produces a lot of "trash" output
1154 # preceding JSON structure, making it not possible to parse.
1155 # Remove these lines from file.
1156 # shellcheck disable=SC2005
1157 echo "$(grep -vP '^[<\w]' "$out/$log_fname")" > "$out/$log_fname"
1160 if [[ ! $hide_results ]]; then
1165 # Parsing fio results for json output and client-server mode only!
1166 function parse_fio_results
() {
1167 local fio_log_dir
=$1
1168 local fio_log_filename
=$2
1169 local fio_csv_filename
1171 # Variables used in parsing loop
1173 local rwmode mixread mixwrite
1174 local lat_key lat_divisor
1175 local client_stats iops bw
1176 local read_avg_lat read_min_lat read_max_lat
1177 local write_avg_lat write_min_lat write_min_lat
1182 results
["avg_lat"]=0
1183 results
["min_lat"]=0
1184 results
["max_lat"]=0
1186 # Loop using the log filename to see if there are any other
1187 # matching files. This is in case we ran fio test multiple times.
1188 log_files
=("$fio_log_dir/$fio_log_filename"*)
1189 for log_file
in "${log_files[@]}"; do
1190 rwmode
=$
(jq
-r '.["client_stats"][0]["job options"]["rw"]' "$log_file")
1193 if [[ $rwmode = *"rw"* ]]; then
1194 mixread
=$
(jq
-r '.["client_stats"][0]["job options"]["rwmixread"]' "$log_file")
1195 mixread
=$
(bc -l <<< "scale=3; $mixread/100")
1196 mixwrite
=$
(bc -l <<< "scale=3; 1-$mixread")
1199 client_stats
=$
(jq
-r '.["client_stats"][] | select(.jobname == "All clients")' "$log_file")
1201 # Check latency unit and later normalize to microseconds
1204 if jq
-er '.read["lat_ns"]' &> /dev
/null
<<< $client_stats; then
1209 # Horrific bash float point arithmetic oprations below.
1210 # Viewer discretion is advised.
1211 iops
=$
(jq
-r '[.read["iops"],.write["iops"]] | add' <<< $client_stats)
1212 bw
=$
(jq
-r '[.read["bw"],.write["bw"]] | add' <<< $client_stats)
1213 read_avg_lat
=$
(jq
-r --arg lat_key
$lat_key '.read[$lat_key]["mean"]' <<< $client_stats)
1214 read_min_lat
=$
(jq
-r --arg lat_key
$lat_key '.read[$lat_key]["min"]' <<< $client_stats)
1215 read_max_lat
=$
(jq
-r --arg lat_key
$lat_key '.read[$lat_key]["max"]' <<< $client_stats)
1216 write_avg_lat
=$
(jq
-r --arg lat_key
$lat_key '.write[$lat_key]["mean"]' <<< $client_stats)
1217 write_min_lat
=$
(jq
-r --arg lat_key
$lat_key '.write[$lat_key]["min"]' <<< $client_stats)
1218 write_max_lat
=$
(jq
-r --arg lat_key
$lat_key '.write[$lat_key]["max"]' <<< $client_stats)
1220 results
["iops"]=$
(bc -l <<< "${results[iops]} + $iops")
1221 results
["bw"]=$
(bc -l <<< "${results[bw]} + $bw")
1222 results
["avg_lat"]=$
(bc -l <<< "${results[avg_lat]} + ($mixread*$read_avg_lat + $mixwrite*$write_avg_lat)/$lat_divisor")
1223 results
["min_lat"]=$
(bc -l <<< "${results[min_lat]} + ($mixread*$read_min_lat + $mixwrite*$write_min_lat)/$lat_divisor")
1224 results
["max_lat"]=$
(bc -l <<< "${results[max_lat]} + ($mixread*$read_max_lat + $mixwrite*$write_max_lat)/$lat_divisor")
1227 results
["iops"]=$
(bc -l <<< "scale=3; ${results[iops]} / ${#log_files[@]}")
1228 results
["bw"]=$
(bc -l <<< "scale=3; ${results[bw]} / ${#log_files[@]}")
1229 results
["avg_lat"]=$
(bc -l <<< "scale=3; ${results[avg_lat]} / ${#log_files[@]}")
1230 results
["min_lat"]=$
(bc -l <<< "scale=3; ${results[min_lat]} / ${#log_files[@]}")
1231 results
["max_lat"]=$
(bc -l <<< "scale=3; ${results[max_lat]} / ${#log_files[@]}")
1233 fio_csv_filename
="${fio_log_filename%%.*}.csv"
1234 cat <<- EOF > "$fio_log_dir/$fio_csv_filename"
1235 iops,bw,avg_lat,min_lat,max_lat
1236 ${results["iops"]},${results["bw"]},${results["avg_lat"]},${results["min_lat"]},${results["max_lat"]}
# Shutdown or kill any running VM and SPDK APP.
# NOTE(review): vm_kill_all call and final notice reconstructed from a
# mangled extraction - verify against upstream.
function at_app_exit() {
	local vhost_name

	notice "APP EXITING"
	notice "killing all VMs"
	vm_kill_all
	# Kill vhost application
	notice "killing vhost app"

	for vhost_name in "$TARGET_DIR"/*; do
		vhost_kill $vhost_name
	done

	notice "EXIT DONE"
}
1258 function error_exit
() {
1262 error
"Error on $1 $2"