]> git.proxmox.com Git - ceph.git/blob - ceph/src/spdk/test/vhost/common.sh
update source to Ceph Pacific 16.2.2
[ceph.git] / ceph / src / spdk / test / vhost / common.sh
# Defaults, overridable from the caller's environment.
: ${SPDK_VHOST_VERBOSE=false}
: ${VHOST_DIR="$HOME/vhost_test"}
: ${QEMU_BIN="qemu-system-x86_64"}
: ${QEMU_IMG_BIN="qemu-img"}

TEST_DIR=$(readlink -f $rootdir/..)
VM_DIR=$VHOST_DIR/vms
TARGET_DIR=$VHOST_DIR/vhost
VM_PASSWORD="root"

#TODO: Move vhost_vm_image.qcow2 into VHOST_DIR on test systems.
VM_IMAGE=$HOME/vhost_vm_image.qcow2

if ! hash $QEMU_IMG_BIN $QEMU_BIN; then
	# error() is defined further down this file and is not yet available
	# while the file is being sourced, so report directly to stderr here.
	echo 'ERROR: QEMU is not installed on this system. Unable to run vhost tests.' >&2
	exit 1
fi

mkdir -p $VHOST_DIR
mkdir -p $VM_DIR
mkdir -p $TARGET_DIR

#
# Source config describing QEMU and VHOST cores and NUMA
#
source $rootdir/test/vhost/common/autotest.config
27
# Prepare the environment for a vhost test run.
# In "iso" TEST_MODE: rebinds devices via setup.sh and offers to download the
# test VM image when missing. Always verifies that $VM_IMAGE exists and
# terminates the script otherwise.
function vhosttestinit() {
	if [ "$TEST_MODE" == "iso" ]; then
		$rootdir/scripts/setup.sh

		# Look for the VM image
		if [[ ! -f $VM_IMAGE ]]; then
			echo "VM image not found at $VM_IMAGE"
			echo "Download to $HOME? [yn]"
			read -r download
			if [ "$download" = "y" ]; then
				curl https://ci.spdk.io/download/test_resources/vhost_vm_image.tar.gz | tar xz -C $HOME
			fi
		fi
	fi

	# Look for the VM image (again - the download above may have been declined
	# or failed, and non-iso modes never attempt a download).
	if [[ ! -f $VM_IMAGE ]]; then
		error "VM image not found at $VM_IMAGE"
		exit 1
	fi
}
49
# Undo vhosttestinit(): in "iso" TEST_MODE restore devices to kernel drivers.
function vhosttestfini() {
	[[ "$TEST_MODE" != "iso" ]] || $rootdir/scripts/setup.sh reset
}
55
# Print a tagged message; in verbose mode append the caller's location.
# $1 - message tag (INFO, WARN, ...); remaining args - message text.
function message() {
	local ctx
	# FUNCNAME[2]/BASH_LINENO[1] describe the caller of our caller, since
	# the wrappers (notice/warning/error/fail) sit at stack index 1.
	if ! $SPDK_VHOST_VERBOSE; then
		ctx=""
	elif [[ ${FUNCNAME[2]} == "source" ]]; then
		ctx=" (file $(basename ${BASH_SOURCE[1]}):${BASH_LINENO[1]})"
	else
		ctx=" (function ${FUNCNAME[2]}:${BASH_LINENO[1]})"
	fi

	local tag="$1"
	shift
	echo -e "${tag}${ctx}: $*"
}
70
# Print a FAIL banner to stderr and terminate the whole script.
function fail() {
	{
		echo "==========="
		message "FAIL" "$@"
		echo "==========="
	} >&2
	exit 1
}
77
# Print an ERROR banner to stderr; evaluates to failure status (1).
function error() {
	{
		echo "==========="
		message "ERROR" "$@"
		echo "==========="
	} >&2
	# Don't 'return 1' since the stack trace will be incomplete (why?) missing upper command.
	false
}
85
# Print a WARN-tagged message to stderr.
function warning() {
	message "WARN" "$@" 1>&2
}
89
# Print an INFO-tagged message to stdout.
function notice() {
	message "INFO" "$@"
}
93
# Verify the QEMU binary supports packed virtqueues (requires QEMU >= 4.2.0).
# Calls error() when the detected version is too old.
# Side effect: sets the global $qemu_version.
function check_qemu_packedring_support() {
	qemu_version=$($QEMU_BIN -version | grep -Po "(?<=version )\d+.\d+.\d+")
	# Compare numerically with sort -V: a plain string comparison would
	# misorder multi-digit components, e.g. "10.0.0" < "4.2.0".
	if [[ "$(printf '%s\n' "$qemu_version" "4.2.0" | sort -V | head -n1)" != "4.2.0" ]]; then
		error "This qemu binary does not support packed ring"
	fi
}
100
# Print the runtime directory for the named vhost instance.
# $1 - vhost instance name (required); fails with error() when missing.
function get_vhost_dir() {
	local name="$1"

	if [[ -n "$name" ]]; then
		echo "$TARGET_DIR/${name}"
		return 0
	fi

	error "vhost name must be provided to get_vhost_dir"
	return 1
}
111
# Start the SPDK vhost app in the background for the named instance.
# $1            - vhost instance name (directory created under $TARGET_DIR)
# --no-gen-nvme - skip auto-configuring NVMe bdevs via gen_nvme.sh
# Remaining arguments are passed through to the vhost binary.
function vhost_run() {
	local vhost_name="$1"
	local run_gen_nvme=true

	if [[ -z "$vhost_name" ]]; then
		error "vhost name must be provided to vhost_run"
		return 1
	fi
	shift

	if [[ "$1" == "--no-gen-nvme" ]]; then
		notice "Skipping gen_nvmf.sh NVMe bdev configuration"
		run_gen_nvme=false
		shift
	fi

	local vhost_dir
	vhost_dir="$(get_vhost_dir $vhost_name)"
	local vhost_app="$SPDK_BIN_DIR/vhost"
	local vhost_log_file="$vhost_dir/vhost.log"
	local vhost_pid_file="$vhost_dir/vhost.pid"
	local vhost_socket="$vhost_dir/usvhost"
	notice "starting vhost app in background"
	# Stop any stale instance and start from a clean directory.
	[[ -r "$vhost_pid_file" ]] && vhost_kill $vhost_name
	[[ -d $vhost_dir ]] && rm -f $vhost_dir/*
	mkdir -p $vhost_dir

	if [[ ! -x $vhost_app ]]; then
		error "application not found: $vhost_app"
		return 1
	fi

	local cmd="$vhost_app -r $vhost_dir/rpc.sock $*"

	notice "Loging to:   $vhost_log_file"
	notice "Socket:      $vhost_socket"
	notice "Command:     $cmd"

	timing_enter vhost_start
	cd $vhost_dir
	$cmd &
	# NOTE: vhost_pid is intentionally global; the pid file below is the
	# durable record used by vhost_kill().
	vhost_pid=$!
	echo $vhost_pid > $vhost_pid_file

	notice "waiting for app to run..."
	waitforlisten "$vhost_pid" "$vhost_dir/rpc.sock"
	#do not generate nvmes if pci access is disabled
	if [[ "$cmd" != *"--no-pci"* ]] && [[ "$cmd" != *"-u"* ]] && $run_gen_nvme; then
		$rootdir/scripts/gen_nvme.sh "--json" | $rootdir/scripts/rpc.py -s $vhost_dir/rpc.sock load_subsystem_config
	fi

	notice "vhost started - pid=$vhost_pid"
	timing_exit vhost_start
}
166
# Stop a running vhost app instance.
# $1 - vhost instance name (as given to vhost_run).
# Sends SIGINT and waits up to 60 s; escalates to SIGABRT if still alive.
# Removes the instance directory; returns non-zero on unclean stop.
function vhost_kill() {
	local rc=0
	local vhost_name="$1"

	if [[ -z "$vhost_name" ]]; then
		error "Must provide vhost name to vhost_kill"
		return 0
	fi

	local vhost_dir
	vhost_dir="$(get_vhost_dir $vhost_name)"
	local vhost_pid_file="$vhost_dir/vhost.pid"

	if [[ ! -r $vhost_pid_file ]]; then
		warning "no vhost pid file found"
		return 0
	fi

	timing_enter vhost_kill
	local vhost_pid
	vhost_pid="$(cat $vhost_pid_file)"
	notice "killing vhost (PID $vhost_pid) app"

	if kill -INT $vhost_pid > /dev/null; then
		notice "sent SIGINT to vhost app - waiting 60 seconds to exit"
		for ((i = 0; i < 60; i++)); do
			if kill -0 $vhost_pid; then
				echo "."
				sleep 1
			else
				break
			fi
		done
		if kill -0 $vhost_pid; then
			# Still alive after 60 s: force an abort and report failure.
			error "ERROR: vhost was NOT killed - sending SIGABRT"
			kill -ABRT $vhost_pid
			rm $vhost_pid_file
			rc=1
		else
			# NOTE(review): kill -0 just failed above, so this loop cannot
			# iterate - it appears to be a leftover safety spin; confirm.
			while kill -0 $vhost_pid; do
				echo "."
			done
		fi
	elif kill -0 $vhost_pid; then
		# SIGINT could not be delivered but the process exists (e.g. owned
		# by another user) - manual intervention required.
		error "vhost NOT killed - you need to kill it manually"
		rc=1
	else
		notice "vhost was not running"
	fi

	timing_exit vhost_kill
	if [[ $rc == 0 ]]; then
		rm $vhost_pid_file
	fi

	rm -rf "$vhost_dir"

	return $rc
}
226
# Forward an RPC to the named vhost instance's rpc.sock.
# $1 - vhost instance name; remaining args - rpc.py arguments.
function vhost_rpc() {
	local name="$1"

	if [[ -z "$name" ]]; then
		error "vhost name must be provided to vhost_rpc"
		return 1
	fi
	shift

	$rootdir/scripts/rpc.py -s $(get_vhost_dir $name)/rpc.sock "$@"
}
238
239 ###
240 # Mgmt functions
241 ###
242
# Verify that $1 is a plain non-negative decimal number.
# Returns 0 when valid; otherwise reports via error() and returns 1.
function assert_number() {
	# Anchor the match: "12ab" or "ab12" must not pass as a number.
	[[ "$1" =~ ^[0-9]+$ ]] && return 0

	error "Invalid or missing parameter: need number but got '$1'"
	return 1
}
249
# Run command on vm with given password
# First argument - vm number
# Second argument - ssh password for vm
# Remaining arguments - the command to execute
#
function vm_sshpass() {
	vm_num_is_valid $1 || return 1

	local vm_num=$1
	local password=$2
	shift 2

	# Invoke directly instead of expanding a flat command string: a password
	# (or VM_SSH_OPTIONS value) containing spaces must not be word-split.
	sshpass -p "$password" ssh \
		-o UserKnownHostsFile=/dev/null \
		-o StrictHostKeyChecking=no \
		-o User=root \
		-p $(vm_ssh_socket $vm_num) $VM_SSH_OPTIONS 127.0.0.1 "$@"
}
267
# Helper to validate VM number
# param $1 VM number
# Returns 0 for a plain non-negative integer; otherwise error() and 1.
#
function vm_num_is_valid() {
	[[ "$1" =~ ^[0-9]+$ ]] && return 0

	error "Invalid or missing parameter: vm number '$1'"
	return 1
}
277
# Print the host TCP port forwarded to the given VM's ssh port.
# param $1 virtual machine number
#
function vm_ssh_socket() {
	vm_num_is_valid $1 || return 1

	cat "$VM_DIR/$1/ssh_socket"
}
287
# Print the host TCP port forwarded to the given VM's fio server port.
# param $1 virtual machine number
function vm_fio_socket() {
	vm_num_is_valid $1 || return 1

	cat "$VM_DIR/$1/fio_socket"
}
294
# Execute command on given VM
# param $1 virtual machine number
# remaining args - the command to run (as root, password $VM_PASSWORD)
#
function vm_exec() {
	vm_num_is_valid $1 || return 1

	local vm_num="$1"
	shift

	# ssh to 127.0.0.1 on the host port forwarded to the VM's port 22;
	# host-key checking is disabled since VMs are recreated between runs.
	sshpass -p "$VM_PASSWORD" ssh \
		-o UserKnownHostsFile=/dev/null \
		-o StrictHostKeyChecking=no \
		-o User=root \
		-p $(vm_ssh_socket $vm_num) $VM_SSH_OPTIONS 127.0.0.1 \
		"$@"
}
311
# Execute scp command on given VM
# param $1 virtual machine number
# remaining args - scp arguments (sources/destination)
#
function vm_scp() {
	vm_num_is_valid $1 || return 1

	local vm_num="$1"
	shift

	# Note: scp uses capital -P for the port, unlike ssh's lowercase -p.
	sshpass -p "$VM_PASSWORD" scp \
		-o UserKnownHostsFile=/dev/null \
		-o StrictHostKeyChecking=no \
		-o User=root \
		-P $(vm_ssh_socket $vm_num) $VM_SSH_OPTIONS \
		"$@"
}
328
# check if specified VM is running
# param $1 VM num
# Returns 0 if the QEMU process is alive (or if not running as root and the
# check is impossible); returns 1 and removes a stale pid file otherwise.
function vm_is_running() {
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	if [[ ! -r $vm_dir/qemu.pid ]]; then
		return 1
	fi

	local vm_pid
	vm_pid="$(cat $vm_dir/qemu.pid)"

	# Use /bin/kill explicitly (not the shell builtin) to probe the process.
	if /bin/kill -0 $vm_pid; then
		return 0
	else
		if [[ $EUID -ne 0 ]]; then
			# QEMU runs as root; a non-root probe gets EPERM even for a
			# live process, so err on the side of "running".
			warning "not root - assuming VM running since can't be checked"
			return 0
		fi

		# not running - remove pid file
		rm $vm_dir/qemu.pid
		return 1
	fi
}
355
# Check whether the OS inside the specified VM has booted far enough to
# accept an ssh connection.
# param $1 VM num
function vm_os_booted() {
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	if [[ ! -r $vm_dir/qemu.pid ]]; then
		error "VM $1 is not running"
		return 1
	fi

	# Probe with a fresh connection (no ssh multiplexing master), so a stale
	# master socket cannot fake a successful login.
	if ! VM_SSH_OPTIONS="-o ControlMaster=no" vm_exec $1 "true" 2> /dev/null; then
		# Shutdown existing master. Ignore errors as it might not exist.
		VM_SSH_OPTIONS="-O exit" vm_exec $1 "true" 2> /dev/null
		return 1
	fi

	return 0
}
375
# Shutdown given VM
# param $1 virtual machine number
# return non-zero in case of error.
# Issues a guest-side "shutdown -h -P now"; does not wait for completion
# (see vm_shutdown_all for the wait loop).
function vm_shutdown() {
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"
	if [[ ! -d "$vm_dir" ]]; then
		error "VM$1 ($vm_dir) not exist - setup it first"
		return 1
	fi

	if ! vm_is_running $1; then
		notice "VM$1 ($vm_dir) is not running"
		return 0
	fi

	# Temporarily disabling exit flag for next ssh command, since it will
	# "fail" due to shutdown
	notice "Shutting down virtual machine $vm_dir"
	set +e
	vm_exec $1 "nohup sh -c 'shutdown -h -P now'" || true
	notice "VM$1 is shutting down - wait a while to complete"
	set -e
}
400
# Kill given VM
# param $1 virtual machine number
#
function vm_kill() {
	vm_num_is_valid $1 || return 1
	local vm_dir="$VM_DIR/$1"

	# No pid file means nothing to kill - treat as success.
	if [[ ! -r $vm_dir/qemu.pid ]]; then
		return 0
	fi

	local vm_pid
	vm_pid="$(cat $vm_dir/qemu.pid)"

	notice "Killing virtual machine $vm_dir (pid=$vm_pid)"
	# If the TERM was delivered, clean up; otherwise only report an error
	# when the process is confirmed to still be running.
	if /bin/kill $vm_pid; then
		notice "process $vm_pid killed"
		rm $vm_dir/qemu.pid
		rm -rf $vm_dir
	elif vm_is_running $1; then
		error "Process $vm_pid NOT killed"
		return 1
	fi
}
426
# Print the numbers (directory basenames) of all VMs present in $VM_DIR.
#
function vm_list_all() {
	local dirs
	dirs="$(
		shopt -s nullglob
		echo $VM_DIR/[0-9]*
	)"
	# nullglob: empty string when no VM directories exist.
	[[ -z "$dirs" ]] || basename --multiple $dirs
}
439
# Kill every VM found in $VM_DIR, then remove the directory itself.
#
function vm_kill_all() {
	local vm
	while read -r vm; do
		[[ -z "$vm" ]] || vm_kill "$vm"
	done <<< "$(vm_list_all)"

	rm -rf "$VM_DIR"
}
450
# Shutdown all VM in $VM_DIR
# Requests a guest shutdown of every VM, then waits up to 30 s for all of
# them to stop. On timeout $VM_DIR is removed anyway.
# NOTE(review): the timeout path falls through without returning an error
# status - confirm callers do not rely on a failure indication here.
function vm_shutdown_all() {
	# XXX: temporarily disable to debug shutdown issue
	# xtrace_disable

	local vms
	vms=$(vm_list_all)
	local vm

	for vm in $vms; do
		vm_shutdown $vm
	done

	notice "Waiting for VMs to shutdown..."
	local timeo=30
	while [[ $timeo -gt 0 ]]; do
		local all_vms_down=1
		for vm in $vms; do
			if vm_is_running $vm; then
				all_vms_down=0
				break
			fi
		done

		if [[ $all_vms_down == 1 ]]; then
			notice "All VMs successfully shut down"
			xtrace_restore
			return 0
		fi

		((timeo -= 1))
		sleep 1
	done

	rm -rf $VM_DIR

	xtrace_restore
}
490
# Create (but do not start) a test VM: builds the QEMU command line and
# writes it to $VM_DIR/<num>/run.sh together with the socket/port files.
# Options (all long-form "--opt=value" unless noted):
#   --os=PATH          OS image file
#   --os-mode=MODE     original | backing | snapshot (default: snapshot)
#   --qemu-args=ARGS   extra arguments appended to the QEMU command line
#   --disk-type=TYPE   default disk type: virtio | spdk_vhost_scsi |
#                      spdk_vhost_blk | kernel_vhost
#   --read-only=BOOL   stored but not otherwise used here
#   --disks=A:B:...    colon-separated disk list; each entry may carry its
#                      own type as "name,type"
#   --raw-cache=MODE   cache mode suffix for raw (virtio) drives
#   --force=NUM        use this VM number instead of the first free one
#   --memory=MB        guest memory in MiB (default 1024)
#   --queue_num=N      virtqueue count (default: derived from CPU mask)
#   --incoming=NUM     prepare for live migration from VM NUM
#   --migrate-to=NUM   prepare as migration source towards VM NUM
#   --vhost-name=NAME  vhost instance providing the vhost-user sockets
#   --spdk-boot=DISK   boot from the given SPDK disk instead of the OS image
#   --packed           enable packed virtqueues (flag, no value)
function vm_setup() {
	xtrace_disable
	local OPTIND optchar vm_num

	local os=""
	local os_mode=""
	local qemu_args=()
	local disk_type_g=NOT_DEFINED
	local read_only="false"
	# List created of a strings separated with a ":"
	local disks=()
	local raw_cache=""
	local vm_incoming=""
	local vm_migrate_to=""
	local force_vm=""
	local guest_memory=1024
	local queue_number=""
	local vhost_dir
	local packed=false
	vhost_dir="$(get_vhost_dir 0)"
	while getopts ':-:' optchar; do
		case "$optchar" in
			-)
				case "$OPTARG" in
					os=*) os="${OPTARG#*=}" ;;
					os-mode=*) os_mode="${OPTARG#*=}" ;;
					qemu-args=*) qemu_args+=("${OPTARG#*=}") ;;
					disk-type=*) disk_type_g="${OPTARG#*=}" ;;
					read-only=*) read_only="${OPTARG#*=}" ;;
					disks=*) IFS=":" read -ra disks <<< "${OPTARG#*=}" ;;
					raw-cache=*) raw_cache=",cache${OPTARG#*=}" ;;
					force=*) force_vm=${OPTARG#*=} ;;
					memory=*) guest_memory=${OPTARG#*=} ;;
					queue_num=*) queue_number=${OPTARG#*=} ;;
					incoming=*) vm_incoming="${OPTARG#*=}" ;;
					migrate-to=*) vm_migrate_to="${OPTARG#*=}" ;;
					vhost-name=*) vhost_dir="$(get_vhost_dir ${OPTARG#*=})" ;;
					spdk-boot=*) local boot_from="${OPTARG#*=}" ;;
					packed) packed=true ;;
					*)
						error "unknown argument $OPTARG"
						return 1
						;;
				esac
				;;
			*)
				error "vm_create Unknown param $OPTARG"
				return 1
				;;
		esac
	done

	# Find next directory we can use
	if [[ -n $force_vm ]]; then
		vm_num=$force_vm

		vm_num_is_valid $vm_num || return 1
		local vm_dir="$VM_DIR/$vm_num"
		[[ -d $vm_dir ]] && warning "removing existing VM in '$vm_dir'"
	else
		local vm_dir=""

		set +x
		for ((i = 0; i <= 256; i++)); do
			local vm_dir="$VM_DIR/$i"
			[[ ! -d $vm_dir ]] && break
		done
		xtrace_restore

		vm_num=$i
	fi

	if [[ $vm_num -eq 256 ]]; then
		error "no free VM found. do some cleanup (256 VMs created, are you insane?)"
		return 1
	fi

	if [[ -n "$vm_migrate_to" && -n "$vm_incoming" ]]; then
		error "'--incoming' and '--migrate-to' cannot be used together"
		return 1
	elif [[ -n "$vm_incoming" ]]; then
		if [[ -n "$os_mode" || -n "$os" ]]; then
			error "'--incoming' can't be used together with '--os' nor '--os-mode'"
			return 1
		fi

		# Migration target reuses the source VM's image directly.
		os_mode="original"
		os="$VM_DIR/$vm_incoming/os.qcow2"
	elif [[ -n "$vm_migrate_to" ]]; then
		[[ "$os_mode" != "backing" ]] && warning "Using 'backing' mode for OS since '--migrate-to' is used"
		os_mode=backing
	fi

	notice "Creating new VM in $vm_dir"
	mkdir -p $vm_dir

	if [[ "$os_mode" == "backing" ]]; then
		notice "Creating backing file for OS image file: $os"
		if ! $QEMU_IMG_BIN create -f qcow2 -b $os $vm_dir/os.qcow2; then
			error "Failed to create OS backing file in '$vm_dir/os.qcow2' using '$os'"
			return 1
		fi

		local os=$vm_dir/os.qcow2
	elif [[ "$os_mode" == "original" ]]; then
		warning "Using original OS image file: $os"
	elif [[ "$os_mode" != "snapshot" ]]; then
		if [[ -z "$os_mode" ]]; then
			notice "No '--os-mode' parameter provided - using 'snapshot'"
			os_mode="snapshot"
		else
			error "Invalid '--os-mode=$os_mode'"
			return 1
		fi
	fi

	# Per-VM CPU mask and NUMA node come from autotest.config via indirect
	# expansion of VM_<num>_qemu_mask / VM_<num>_qemu_numa_node.
	local qemu_mask_param="VM_${vm_num}_qemu_mask"
	local qemu_numa_node_param="VM_${vm_num}_qemu_numa_node"

	if [[ -z "${!qemu_mask_param}" ]] || [[ -z "${!qemu_numa_node_param}" ]]; then
		error "Parameters ${qemu_mask_param} or ${qemu_numa_node_param} not found in autotest.config file"
		return 1
	fi

	local task_mask=${!qemu_mask_param}

	notice "TASK MASK: $task_mask"
	local cmd=(taskset -a -c "$task_mask" "$QEMU_BIN")
	# Each VM gets a block of 100 host ports starting at 10000 + 100*num.
	local vm_socket_offset=$((10000 + 100 * vm_num))

	local ssh_socket=$((vm_socket_offset + 0))
	local fio_socket=$((vm_socket_offset + 1))
	local monitor_port=$((vm_socket_offset + 2))
	local migration_port=$((vm_socket_offset + 3))
	local gdbserver_socket=$((vm_socket_offset + 4))
	local vnc_socket=$((100 + vm_num))
	local qemu_pid_file="$vm_dir/qemu.pid"
	local cpu_num=0

	set +x
	# cpu list for taskset can be comma separated or range
	# or both at the same time, so first split on commas
	cpu_list=$(echo $task_mask | tr "," "\n")
	queue_number=0
	for c in $cpu_list; do
		# if range is detected - count how many cpus
		if [[ $c =~ [0-9]+-[0-9]+ ]]; then
			# "A-B" evaluates arithmetically as A minus B; subtract 1 and
			# strip the sign to get the count of CPUs in the range.
			val=$((c - 1))
			val=${val#-}
		else
			val=1
		fi
		cpu_num=$((cpu_num + val))
		queue_number=$((queue_number + val))
	done

	if [ -z $queue_number ]; then
		queue_number=$cpu_num
	fi

	xtrace_restore

	local node_num=${!qemu_numa_node_param}
	local boot_disk_present=false
	notice "NUMA NODE: $node_num"
	cmd+=(-m "$guest_memory" --enable-kvm -cpu host -smp "$cpu_num" -vga std -vnc ":$vnc_socket" -daemonize)
	cmd+=(-object "memory-backend-file,id=mem,size=${guest_memory}M,mem-path=/dev/hugepages,share=on,prealloc=yes,host-nodes=$node_num,policy=bind")
	[[ $os_mode == snapshot ]] && cmd+=(-snapshot)
	[[ -n "$vm_incoming" ]] && cmd+=(-incoming "tcp:0:$migration_port")
	cmd+=(-monitor "telnet:127.0.0.1:$monitor_port,server,nowait")
	cmd+=(-numa "node,memdev=mem")
	cmd+=(-pidfile "$qemu_pid_file")
	cmd+=(-serial "file:$vm_dir/serial.log")
	cmd+=(-D "$vm_dir/qemu.log")
	cmd+=(-chardev "file,path=$vm_dir/seabios.log,id=seabios" -device "isa-debugcon,iobase=0x402,chardev=seabios")
	cmd+=(-net "user,hostfwd=tcp::$ssh_socket-:22,hostfwd=tcp::$fio_socket-:8765")
	cmd+=(-net nic)
	if [[ -z "$boot_from" ]]; then
		cmd+=(-drive "file=$os,if=none,id=os_disk")
		cmd+=(-device "ide-hd,drive=os_disk,bootindex=0")
	fi

	if ((${#disks[@]} == 0)) && [[ $disk_type_g == virtio* ]]; then
		disks=("default_virtio.img")
	elif ((${#disks[@]} == 0)); then
		error "No disks defined, aborting"
		return 1
	fi

	for disk in "${disks[@]}"; do
		# Each disk can define its type in a form of a disk_name,type. The remaining parts
		# of the string are dropped.
		IFS="," read -r disk disk_type _ <<< "$disk"
		[[ -z $disk_type ]] && disk_type=$disk_type_g

		case $disk_type in
			virtio)
				local raw_name="RAWSCSI"
				local raw_disk=$vm_dir/test.img

				if [[ -n $disk ]]; then
					[[ ! -b $disk ]] && touch $disk
					local raw_disk
					raw_disk=$(readlink -f $disk)
				fi

				# Create disk file if it not exist or it is smaller than 1G
				if { [[ -f $raw_disk ]] && [[ $(stat --printf="%s" $raw_disk) -lt $((1024 * 1024 * 1024)) ]]; } \
					|| [[ ! -e $raw_disk ]]; then
					if [[ $raw_disk =~ /dev/.* ]]; then
						error \
							"ERROR: Virtio disk point to missing device ($raw_disk) -\n" \
							"       this is probably not what you want."
						return 1
					fi

					notice "Creating Virtio disc $raw_disk"
					dd if=/dev/zero of=$raw_disk bs=1024k count=1024
				else
					notice "Using existing image $raw_disk"
				fi

				cmd+=(-device "virtio-scsi-pci,num_queues=$queue_number")
				cmd+=(-device "scsi-hd,drive=hd$i,vendor=$raw_name")
				cmd+=(-drive "if=none,id=hd$i,file=$raw_disk,format=raw$raw_cache")
				;;
			spdk_vhost_scsi)
				notice "using socket $vhost_dir/naa.$disk.$vm_num"
				cmd+=(-chardev "socket,id=char_$disk,path=$vhost_dir/naa.$disk.$vm_num")
				cmd+=(-device "vhost-user-scsi-pci,id=scsi_$disk,num_queues=$queue_number,chardev=char_$disk")
				if [[ "$disk" == "$boot_from" ]]; then
					# Append to the -device option just added above.
					cmd[-1]+=,bootindex=0
					boot_disk_present=true
				fi
				;;
			spdk_vhost_blk)
				notice "using socket $vhost_dir/naa.$disk.$vm_num"
				cmd+=(-chardev "socket,id=char_$disk,path=$vhost_dir/naa.$disk.$vm_num")
				cmd+=(-device "vhost-user-blk-pci,num-queues=$queue_number,chardev=char_$disk")
				if [[ "$disk" == "$boot_from" ]]; then
					cmd[-1]+=,bootindex=0
					boot_disk_present=true
				fi

				if $packed; then
					check_qemu_packedring_support
					notice "Enabling packed ring support for VM $vm_num, controller $vhost_dir/naa.$disk.$vm_num"
					cmd[-1]+=,packed=on
				fi
				;;
			kernel_vhost)
				if [[ -z $disk ]]; then
					error "need WWN for $disk_type"
					return 1
				elif [[ ! $disk =~ ^[[:alpha:]]{3}[.][[:xdigit:]]+$ ]]; then
					error "$disk_type - disk(wnn)=$disk does not look like WNN number"
					return 1
				fi
				notice "Using kernel vhost disk wwn=$disk"
				cmd+=(-device "vhost-scsi-pci,wwpn=$disk,num_queues=$queue_number")
				;;
			*)
				error "unknown mode '$disk_type', use: virtio, spdk_vhost_scsi, spdk_vhost_blk or kernel_vhost"
				return 1
				;;
		esac
	done

	if [[ -n $boot_from ]] && [[ $boot_disk_present == false ]]; then
		error "Boot from $boot_from is selected but device is not present"
		return 1
	fi

	((${#qemu_args[@]})) && cmd+=("${qemu_args[@]}")
	notice "Saving to $vm_dir/run.sh"
	cat <<- RUN > "$vm_dir/run.sh"
		#!/bin/bash
		qemu_log () {
			echo "=== qemu.log ==="
			[[ -s $vm_dir/qemu.log ]] && cat $vm_dir/qemu.log
			echo "=== qemu.log ==="
		}

		if [[ \$EUID -ne 0 ]]; then
			echo "Go away user come back as root"
			exit 1
		fi

		trap "qemu_log" EXIT

		qemu_cmd=($(printf '%s\n' "${cmd[@]}"))
		chmod +r $vm_dir/*
		echo "Running VM in $vm_dir"
		rm -f $qemu_pid_file
		"\${qemu_cmd[@]}"

		echo "Waiting for QEMU pid file"
		sleep 1
		[[ ! -f $qemu_pid_file ]] && sleep 1
		[[ ! -f $qemu_pid_file ]] && echo "ERROR: no qemu pid file found" && exit 1
		exit 0
		# EOF
	RUN
	chmod +x $vm_dir/run.sh

	# Save generated sockets redirection
	echo $ssh_socket > $vm_dir/ssh_socket
	echo $fio_socket > $vm_dir/fio_socket
	echo $monitor_port > $vm_dir/monitor_port

	rm -f $vm_dir/migration_port
	[[ -z $vm_incoming ]] || echo $migration_port > $vm_dir/migration_port

	echo $gdbserver_socket > $vm_dir/gdbserver_socket
	echo $vnc_socket >> $vm_dir/vnc_socket

	[[ -z $vm_incoming ]] || ln -fs $VM_DIR/$vm_incoming $vm_dir/vm_incoming
	[[ -z $vm_migrate_to ]] || ln -fs $VM_DIR/$vm_migrate_to $vm_dir/vm_migrate_to
}
810
# Start VMs previously created with vm_setup() by executing their run.sh.
# -a            - run all VMs found in $VM_DIR
# otherwise     - run the VM numbers given as positional arguments
function vm_run() {
	local OPTIND optchar vm
	local run_all=false
	local vms_to_run=""

	while getopts 'a-:' optchar; do
		case "$optchar" in
			a) run_all=true ;;
			*)
				error "Unknown param $OPTARG"
				return 1
				;;
		esac
	done

	if $run_all; then
		vms_to_run="$(vm_list_all)"
	else
		shift $((OPTIND - 1))
		for vm in "$@"; do
			# Validate each requested VM number (previously "$1" was
			# checked here, so only the first argument was ever validated).
			vm_num_is_valid $vm || return 1
			if [[ ! -x $VM_DIR/$vm/run.sh ]]; then
				error "VM$vm not defined - setup it first"
				return 1
			fi
			vms_to_run+=" $vm"
		done
	fi

	for vm in $vms_to_run; do
		if vm_is_running $vm; then
			warning "VM$vm ($VM_DIR/$vm) already running"
			continue
		fi

		notice "running $VM_DIR/$vm/run.sh"
		if ! $VM_DIR/$vm/run.sh; then
			error "FAILED to run vm $vm"
			return 1
		fi
	done
}
853
# Dump the QEMU, guest serial and SeaBIOS logs of VM $1 for debugging.
function vm_print_logs() {
	# Was a global assignment; keep the variable function-scoped.
	local vm_num=$1
	warning "================"
	warning "QEMU LOG:"
	if [[ -r $VM_DIR/$vm_num/qemu.log ]]; then
		cat $VM_DIR/$vm_num/qemu.log
	else
		warning "LOG qemu.log not found"
	fi

	warning "VM LOG:"
	if [[ -r $VM_DIR/$vm_num/serial.log ]]; then
		cat $VM_DIR/$vm_num/serial.log
	else
		warning "LOG serial.log not found"
	fi

	warning "SEABIOS LOG:"
	if [[ -r $VM_DIR/$vm_num/seabios.log ]]; then
		cat $VM_DIR/$vm_num/seabios.log
	else
		warning "LOG seabios.log not found"
	fi
	warning "================"
}
879
# Wait for all created VMs to boot.
# param $1 max wait time (seconds, clamped to a minimum of 10)
# remaining args - specific VM numbers to wait for (default: all in $VM_DIR)
# Returns 1 if any VM dies or the timeout expires.
function vm_wait_for_boot() {
	assert_number $1

	xtrace_disable

	local all_booted=false
	local timeout_time=$1
	[[ $timeout_time -lt 10 ]] && timeout_time=10
	# Convert the relative timeout into an absolute epoch deadline.
	local timeout_time
	timeout_time=$(date -d "+$timeout_time seconds" +%s)

	notice "Waiting for VMs to boot"
	shift
	if [[ "$*" == "" ]]; then
		local vms_to_check="$VM_DIR/[0-9]*"
	else
		local vms_to_check=""
		for vm in "$@"; do
			vms_to_check+=" $VM_DIR/$vm"
		done
	fi

	for vm in $vms_to_check; do
		local vm_num
		vm_num=$(basename $vm)
		local i=0
		notice "waiting for VM$vm_num ($vm)"
		while ! vm_os_booted $vm_num; do
			if ! vm_is_running $vm_num; then
				warning "VM $vm_num is not running"
				vm_print_logs $vm_num
				xtrace_restore
				return 1
			fi

			if [[ $(date +%s) -gt $timeout_time ]]; then
				warning "timeout waiting for machines to boot"
				vm_print_logs $vm_num
				xtrace_restore
				return 1
			fi
			# Wrap the progress dots every 30 columns.
			if ((i > 30)); then
				local i=0
				echo
			fi
			echo -n "."
			sleep 1
		done
		echo ""
		notice "VM$vm_num ready"
		#Change Timeout for stopping services to prevent lengthy powerdowns
		#Check that remote system is not Cygwin in case of Windows VMs
		local vm_os
		vm_os=$(vm_exec $vm_num "uname -o")
		if [[ "$vm_os" != "Cygwin" ]]; then
			vm_exec $vm_num "echo 'DefaultTimeoutStopSec=10' >> /etc/systemd/system.conf; systemctl daemon-reexec"
		fi
	done

	notice "all VMs ready"
	xtrace_restore
	return 0
}
945
# Start an fio server daemon inside each given VM.
# --fio-bin=PATH - host-side fio binary to copy into the VM and use
# --readonly     - pass --readonly to fio
# remaining args - VM numbers
function vm_start_fio_server() {
	local OPTIND optchar
	local readonly=''
	local fio_bin=''
	while getopts ':-:' optchar; do
		case "$optchar" in
			-)
				case "$OPTARG" in
					fio-bin=*) local fio_bin="${OPTARG#*=}" ;;
					readonly) local readonly="--readonly" ;;
					*) error "Invalid argument '$OPTARG'" && return 1 ;;
				esac
				;;
			*) error "Invalid argument '$OPTARG'" && return 1 ;;
		esac
	done

	shift $((OPTIND - 1))
	for vm_num in "$@"; do
		notice "Starting fio server on VM$vm_num"
		if [[ $fio_bin != "" ]]; then
			# Upload the host's fio binary into the guest first.
			vm_exec $vm_num 'cat > /root/fio; chmod +x /root/fio' < $fio_bin
			vm_exec $vm_num /root/fio $readonly --eta=never --server --daemonize=/root/fio.pid
		else
			vm_exec $vm_num fio $readonly --eta=never --server --daemonize=/root/fio.pid
		fi
	done
}
974
# Detect SCSI test disks inside VM $1 by vendor string; the space-separated
# device names (e.g. " sdb sdc") are stored in the global $SCSI_DISK.
function vm_check_scsi_location() {
	# Script to find wanted disc
	local script='shopt -s nullglob;
	for entry in /sys/block/sd*; do
		disk_type="$(cat $entry/device/vendor)";
		if [[ $disk_type == INTEL* ]] || [[ $disk_type == RAWSCSI* ]] || [[ $disk_type == LIO-ORG* ]]; then
			fname=$(basename $entry);
			echo -n " $fname";
		fi;
	done'

	SCSI_DISK="$(echo "$script" | vm_exec $1 bash -s)"

	if [[ -z "$SCSI_DISK" ]]; then
		error "no test disk found!"
		return 1
	fi
}
993
# Note: to use this function your VM should be run with
# appropriate memory and with SPDK source already cloned
# and compiled in /root/spdk.
# Discovers Virtio bdevs inside VM $1 and stores their names in the
# global $SCSI_DISK.
function vm_check_virtio_location() {
	vm_exec $1 NRHUGE=512 /root/spdk/scripts/setup.sh
	vm_exec $1 "cat > /root/bdev.conf" <<- EOF
		[VirtioPci]
			Enable Yes
	EOF

	vm_exec $1 "cat /root/bdev.conf"

	# Run bdev discovery inside the guest and collect the resulting names.
	vm_exec $1 "bash -s" <<- EOF
		set -e
		rootdir="/root/spdk"
		source /root/spdk/test/common/autotest_common.sh
		discover_bdevs /root/spdk /root/bdev.conf | jq -r '[.[].name] | join(" ")' > /root/fio_bdev_filenames
		exit 0
	EOF

	SCSI_DISK=$(vm_exec $1 cat /root/fio_bdev_filenames)
	if [[ -z "$SCSI_DISK" ]]; then
		error "no virtio test disk found!"
		return 1
	fi
}
1020
# Script to perform scsi device reset on all disks in VM
# param $1 VM num
# param $2..$n Disks to perform reset on
function vm_reset_scsi_devices() {
	# Keep the loop variable local; it previously leaked into global scope.
	local disk
	for disk in "${@:2}"; do
		notice "VM$1 Performing device reset on disk $disk"
		vm_exec $1 sg_reset /dev/$disk -vNd
	done
}
1030
# Detect virtio-blk test disks (vd*) inside VM $1; the device names are
# stored in the global $SCSI_DISK.
function vm_check_blk_location() {
	# Ask the guest for all virtio-blk block devices.
	SCSI_DISK="$(vm_exec $1 bash -s <<< 'shopt -s nullglob; cd /sys/block; echo vd*')"

	[[ -n "$SCSI_DISK" ]] && return 0

	error "no blk test disk found!"
	return 1
}
1040
# Run an fio job against one or more VMs.
# --job-file=PATH      - fio job file to upload and run (required)
# --fio-bin=PATH       - host fio binary (server mode) / binary to upload
# --vm=NUM:DISK[:...]  - target VM and its disks (repeatable)
# --out=DIR            - directory for fio log output
# --local              - run fio inside each VM instead of client/server mode
# --plugin             - use the in-guest SPDK fio plugin (implies --local)
# --json               - request JSON output format
# --hide-results       - do not cat the result log at the end
# --no-wait-for-fio    - with --local, don't wait for guest fio to finish
# --gtod-reduce        - append gtod_reduce=1 to the job file
function run_fio() {
	local arg
	local job_file=""
	local fio_bin=""
	local vms=()
	local out=""
	local vm
	local run_server_mode=true
	local run_plugin_mode=false
	local fio_start_cmd
	local fio_output_format="normal"
	local fio_gtod_reduce=false
	local wait_for_fio=true

	for arg in "$@"; do
		case "$arg" in
			--job-file=*) local job_file="${arg#*=}" ;;
			--fio-bin=*) local fio_bin="${arg#*=}" ;;
			--vm=*) vms+=("${arg#*=}") ;;
			--out=*)
				local out="${arg#*=}"
				mkdir -p $out
				;;
			--local) run_server_mode=false ;;
			--plugin)
				notice "Using plugin mode. Disabling server mode."
				run_plugin_mode=true
				run_server_mode=false
				;;
			--json) fio_output_format="json" ;;
			--hide-results) hide_results=true ;;
			--no-wait-for-fio) wait_for_fio=false ;;
			--gtod-reduce) fio_gtod_reduce=true ;;
			*)
				error "Invalid argument '$arg'"
				return 1
				;;
		esac
	done

	if [[ -n "$fio_bin" && ! -r "$fio_bin" ]]; then
		error "FIO binary '$fio_bin' does not exist"
		return 1
	fi

	if [[ -z "$fio_bin" ]]; then
		fio_bin="fio"
	fi

	if [[ ! -r "$job_file" ]]; then
		error "Fio job '$job_file' does not exist"
		return 1
	fi

	fio_start_cmd="$fio_bin --eta=never "

	local job_fname
	job_fname=$(basename "$job_file")
	log_fname="${job_fname%%.*}.log"
	fio_start_cmd+=" --output=$out/$log_fname --output-format=$fio_output_format "

	# prepare job file for each VM
	for vm in "${vms[@]}"; do
		# --vm entries look like "NUM:DISK[:DISK...]".
		local vm_num=${vm%%:*}
		local vmdisks=${vm#*:}

		# Substitute the target disks into the job file's "filename=" line.
		sed "s@filename=@filename=$vmdisks@" $job_file | vm_exec $vm_num "cat > /root/$job_fname"

		if $fio_gtod_reduce; then
			vm_exec $vm_num "echo 'gtod_reduce=1' >> /root/$job_fname"
		fi

		vm_exec $vm_num cat /root/$job_fname

		if $run_server_mode; then
			fio_start_cmd+="--client=127.0.0.1,$(vm_fio_socket $vm_num) --remote-config /root/$job_fname "
		fi

		if ! $run_server_mode; then
			if [[ -n "$fio_bin" ]]; then
				if ! $run_plugin_mode; then
					vm_exec $vm_num 'cat > /root/fio; chmod +x /root/fio' < $fio_bin
					vm_fio_bin="/root/fio"
				else
					vm_fio_bin="/usr/src/fio/fio"
				fi
			fi

			notice "Running local fio on VM $vm_num"
			vm_exec $vm_num "$vm_fio_bin --output=/root/$log_fname --output-format=$fio_output_format /root/$job_fname & echo \$! > /root/fio.pid" &
			vm_exec_pids+=("$!")
		fi
	done

	if ! $run_server_mode; then
		if ! $wait_for_fio; then
			return 0
		fi
		echo "Waiting for guest fio instances to finish.."
		wait "${vm_exec_pids[@]}"

		for vm in "${vms[@]}"; do
			local vm_num=${vm%%:*}
			vm_exec $vm_num cat /root/$log_fname > "$out/vm${vm_num}_${log_fname}"
		done
		return 0
	fi

	$fio_start_cmd
	sleep 1

	if [[ "$fio_output_format" == "json" ]]; then
		# Fio in client-server mode produces a lot of "trash" output
		# preceding JSON structure, making it not possible to parse.
		# Remove these lines from file.
		# shellcheck disable=SC2005
		echo "$(grep -vP '^[<\w]' "$out/$log_fname")" > "$out/$log_fname"
	fi

	if [[ ! $hide_results ]]; then
		cat $out/$log_fname
	fi
}
1164
# Parsing fio results for json output and client-server mode only!
# $1 - directory containing the fio log file(s)
# $2 - base log filename; all files matching "$2*" are averaged
# Writes iops/bw/avg_lat/min_lat/max_lat (latencies in microseconds) as a
# CSV file next to the logs.
function parse_fio_results() {
	local fio_log_dir=$1
	local fio_log_filename=$2
	local fio_csv_filename

	# Variables used in parsing loop
	local log_file
	local rwmode mixread mixwrite
	local lat_key lat_divisor
	local client_stats iops bw
	local read_avg_lat read_min_lat read_max_lat
	# Fixed: write_max_lat was missing here (write_min_lat was declared
	# twice), which made write_max_lat leak into global scope.
	local write_avg_lat write_min_lat write_max_lat

	declare -A results
	results["iops"]=0
	results["bw"]=0
	results["avg_lat"]=0
	results["min_lat"]=0
	results["max_lat"]=0

	# Loop using the log filename to see if there are any other
	# matching files. This is in case we ran fio test multiple times.
	log_files=("$fio_log_dir/$fio_log_filename"*)
	for log_file in "${log_files[@]}"; do
		rwmode=$(jq -r '.["client_stats"][0]["job options"]["rw"]' "$log_file")
		mixread=1
		mixwrite=1
		if [[ $rwmode = *"rw"* ]]; then
			mixread=$(jq -r '.["client_stats"][0]["job options"]["rwmixread"]' "$log_file")
			mixread=$(bc -l <<< "scale=3; $mixread/100")
			mixwrite=$(bc -l <<< "scale=3; 1-$mixread")
		fi

		client_stats=$(jq -r '.["client_stats"][] | select(.jobname == "All clients")' "$log_file")

		# Check latency unit and later normalize to microseconds
		lat_key="lat_us"
		lat_divisor=1
		if jq -er '.read["lat_ns"]' &> /dev/null <<< $client_stats; then
			lat_key="lat_ns"
			lat_divisor=1000
		fi

		# Horrific bash float point arithmetic oprations below.
		# Viewer discretion is advised.
		iops=$(jq -r '[.read["iops"],.write["iops"]] | add' <<< $client_stats)
		bw=$(jq -r '[.read["bw"],.write["bw"]] | add' <<< $client_stats)
		read_avg_lat=$(jq -r --arg lat_key $lat_key '.read[$lat_key]["mean"]' <<< $client_stats)
		read_min_lat=$(jq -r --arg lat_key $lat_key '.read[$lat_key]["min"]' <<< $client_stats)
		read_max_lat=$(jq -r --arg lat_key $lat_key '.read[$lat_key]["max"]' <<< $client_stats)
		write_avg_lat=$(jq -r --arg lat_key $lat_key '.write[$lat_key]["mean"]' <<< $client_stats)
		write_min_lat=$(jq -r --arg lat_key $lat_key '.write[$lat_key]["min"]' <<< $client_stats)
		write_max_lat=$(jq -r --arg lat_key $lat_key '.write[$lat_key]["max"]' <<< $client_stats)

		results["iops"]=$(bc -l <<< "${results[iops]} + $iops")
		results["bw"]=$(bc -l <<< "${results[bw]} + $bw")
		results["avg_lat"]=$(bc -l <<< "${results[avg_lat]} + ($mixread*$read_avg_lat + $mixwrite*$write_avg_lat)/$lat_divisor")
		results["min_lat"]=$(bc -l <<< "${results[min_lat]} + ($mixread*$read_min_lat + $mixwrite*$write_min_lat)/$lat_divisor")
		results["max_lat"]=$(bc -l <<< "${results[max_lat]} + ($mixread*$read_max_lat + $mixwrite*$write_max_lat)/$lat_divisor")
	done

	# Average the accumulated values over the number of log files.
	results["iops"]=$(bc -l <<< "scale=3; ${results[iops]} / ${#log_files[@]}")
	results["bw"]=$(bc -l <<< "scale=3; ${results[bw]} / ${#log_files[@]}")
	results["avg_lat"]=$(bc -l <<< "scale=3; ${results[avg_lat]} / ${#log_files[@]}")
	results["min_lat"]=$(bc -l <<< "scale=3; ${results[min_lat]} / ${#log_files[@]}")
	results["max_lat"]=$(bc -l <<< "scale=3; ${results[max_lat]} / ${#log_files[@]}")

	fio_csv_filename="${fio_log_filename%%.*}.csv"
	cat <<- EOF > "$fio_log_dir/$fio_csv_filename"
		iops,bw,avg_lat,min_lat,max_lat
		${results["iops"]},${results["bw"]},${results["avg_lat"]},${results["min_lat"]},${results["max_lat"]}
	EOF
}
1239
# Shutdown or kill any running VM and SPDK APP.
#
function at_app_exit() {
	local vhost_name

	notice "APP EXITING"
	notice "killing all VMs"
	vm_kill_all
	# Kill vhost application
	notice "killing vhost app"

	for vhost_name in "$TARGET_DIR"/*; do
		# vhost_kill expects the bare instance name - it re-prefixes it
		# with $TARGET_DIR itself (via get_vhost_dir), so passing the full
		# glob path here would produce a doubled path.
		vhost_kill "$(basename "$vhost_name")"
	done

	notice "EXIT DONE"
}
1257
# ERR-trap handler: report where the failure happened, clean up all VMs and
# vhost instances, and exit the script.
# $1 - source location (file/function), $2 - line number (from the trap).
function error_exit() {
	# Prevent recursive trap invocations while handling the error.
	trap - ERR
	print_backtrace
	set +e
	error "Error on $1 $2"

	at_app_exit
	exit 1
}