#!/usr/bin/env bash
set -e

vm_count=1
vm_memory=2048
vm_image="/home/sys_sgsw/vhost_vm_image.qcow2"
max_disks=""
ctrl_type="spdk_vhost_scsi"
use_split=false
throttle=false

lvol_stores=()
lvol_bdevs=()
used_vms=""

fio_bin="--fio-bin=/home/sys_sgsw/fio_ubuntu"

function usage()
{
    [[ ! -z $2 ]] && ( echo "$2"; echo ""; )
    echo "Shortcut script for running an automated vhost performance test"
    echo "Usage: $(basename $1) [OPTIONS]"
    echo
    echo "-h, --help                Print help and exit"
    echo "    --fio-bin=PATH        Path to FIO binary on host."
    echo "                          Binary will be copied to VM, static compilation"
    echo "                          of binary is recommended."
    echo "    --fio-job=PATH        Fio config to use for test."
    echo "    --vm-count=INT        Total number of virtual machines to launch in this test;"
    echo "                          Each VM will get one bdev (lvol or split vbdev)"
    echo "                          to run FIO test."
    echo "                          Default: 1"
    echo "    --vm-memory=INT       Amount of RAM memory (in MB) to pass to a single VM."
    echo "                          Default: 2048 MB"
    echo "    --vm-image=PATH       OS image to use for running the VMs."
    echo "                          Default: /home/sys_sgsw/vhost_vm_image.qcow2"
    echo "    --max-disks=INT       Maximum number of NVMe drives to use in test."
    echo "                          Default: will use all available NVMes."
    echo "    --ctrl-type=TYPE      Controller type to use for test:"
    echo "                          spdk_vhost_scsi - use spdk vhost scsi"
    echo "                          spdk_vhost_blk - use spdk vhost block"
    echo "                          Default: spdk_vhost_scsi"
    echo "    --use-split           Use split vbdevs instead of Logical Volumes"
    echo "    --throttle=INT        I/O throttle rate in IOPS for each device on the VMs."
    echo "    --custom-cpu-cfg=PATH Custom CPU config for test."
    echo "                          Default: spdk/test/vhost/common/autotest.config"
    echo "-x                        set -x for script debug"
    exit 0
}
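
# Example invocation (all paths and values below are illustrative only):
#   ./vhost_perf.sh --vm-count=4 --ctrl-type=spdk_vhost_blk \
#       --max-disks=2 --fio-job=/path/to/perf.job --fio-bin=/usr/local/bin/fio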

function cleanup_lvol_cfg()
{
    notice "Removing lvol bdevs"
    for lvol_bdev in "${lvol_bdevs[@]}"; do
        $rpc_py destroy_lvol_bdev $lvol_bdev
        notice "lvol bdev $lvol_bdev removed"
    done

    notice "Removing lvol stores"
    for lvol_store in "${lvol_stores[@]}"; do
        $rpc_py destroy_lvol_store -u $lvol_store
        notice "lvol store $lvol_store removed"
    done
}

function cleanup_split_cfg()
{
    notice "Removing split vbdevs"
    for (( i=0; i<$max_disks; i++ )); do
        $rpc_py destruct_split_vbdev Nvme${i}n1
    done
}

while getopts 'xh-:' optchar; do
    case "$optchar" in
        -)
            case "$OPTARG" in
                help) usage $0 ;;
                fio-bin=*) fio_bin="--fio-bin=${OPTARG#*=}" ;;
                fio-job=*) fio_job="${OPTARG#*=}" ;;
                vm-count=*) vm_count="${OPTARG#*=}" ;;
                vm-memory=*) vm_memory="${OPTARG#*=}" ;;
                vm-image=*) vm_image="${OPTARG#*=}" ;;
                max-disks=*) max_disks="${OPTARG#*=}" ;;
                ctrl-type=*) ctrl_type="${OPTARG#*=}" ;;
                use-split) use_split=true ;;
                throttle) throttle=true ;;
                custom-cpu-cfg=*) custom_cpu_cfg="${OPTARG#*=}" ;;
                thin-provisioning) thin=" -t " ;;
                multi-os) multi_os=true ;;
                *) usage $0 "Invalid argument '$OPTARG'" ;;
            esac
            ;;
        h) usage $0 ;;
        x) set -x
           x="-x" ;;
        *) usage $0 "Invalid argument '$OPTARG'"
    esac
done

. $(readlink -e "$(dirname $0)/../common/common.sh") || exit 1
. $(readlink -e "$(dirname $0)/../../../scripts/common.sh") || exit 1
COMMON_DIR="$(cd $(readlink -f $(dirname $0))/../common && pwd)"
rpc_py="$SPDK_BUILD_DIR/scripts/rpc.py -s $(get_vhost_dir)/rpc.sock"
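# All bdev, lvol and vhost-controller setup below is done through SPDK's JSON-RPC
# client (scripts/rpc.py), pointed at the rpc.sock of the vhost target that is
# started further down by spdk_vhost_run.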

if [[ -n $custom_cpu_cfg ]]; then
    source $custom_cpu_cfg
fi

if [[ -z $fio_job ]]; then
    warning "No FIO job specified! Will use default from common directory."
    fio_job="$COMMON_DIR/fio_jobs/default_integrity.job"
fi

trap 'error_exit "${FUNCNAME}" "${LINENO}"' INT ERR
notice "Get NVMe disks:"
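# PCI class 01 (mass storage), subclass 08 (non-volatile memory), prog-if 02
# identifies NVM Express controllers, so this collects the PCI addresses of all
# NVMe drives visible on the host.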
nvmes=($(iter_pci_class_code 01 08 02))

if [[ -z $max_disks ]]; then
    max_disks=${#nvmes[@]}
fi

if [[ ${#nvmes[@]} -lt max_disks ]]; then
    fail "Number of NVMe drives (${#nvmes[@]}) is lower than number of requested disks for test ($max_disks)"
fi

notice "running SPDK vhost"
spdk_vhost_run
notice "..."

# Calculate the number of needed splits per NVMe
# so that each VM gets its own bdev during the test
splits=()

# Start with the minimum number of splits on each disk
for i in $(seq 0 $((max_disks - 1))); do
    splits+=( $((vm_count / max_disks)) )
done

# Split up the remainder
for i in $(seq 0 $((vm_count % max_disks - 1))); do
    (( splits[i]++ ))
done
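# Example: with vm_count=5 and max_disks=2 the two loops above yield splits=(3 2),
# i.e. Nvme0n1 is divided into 3 bdevs and Nvme1n1 into 2 - one bdev per VM.
# When vm_count divides evenly by max_disks the remainder loop does nothing.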

notice "Preparing NVMe setup..."
notice "Using $max_disks physical NVMe drives"
notice "Nvme split list: ${splits[@]}"
# Prepare NVMes - Lvols or Splits
if [[ $use_split == true ]]; then
    notice "Using split vbdevs"
    trap 'cleanup_split_cfg; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR
    split_bdevs=()
    for (( i=0; i<$max_disks; i++ )); do
        out=$($rpc_py construct_split_vbdev Nvme${i}n1 ${splits[$i]})
        for s in $out; do
            split_bdevs+=("$s")
        done
    done
    bdevs=("${split_bdevs[@]}")
else
    notice "Using logical volumes"
    trap 'cleanup_lvol_cfg; error_exit "${FUNCNAME}" "${LINENO}"' INT ERR
    for (( i=0; i<$max_disks; i++ )); do
        ls_guid=$($rpc_py construct_lvol_store Nvme${i}n1 lvs_$i)
        lvol_stores+=("$ls_guid")
        for (( j=0; j<${splits[$i]}; j++ )); do
            free_mb=$(get_lvs_free_mb "$ls_guid")
            size=$((free_mb / (${splits[$i]} - j)))
            lb_name=$($rpc_py construct_lvol_bdev -u $ls_guid lbd_$j $size)
            lvol_bdevs+=("$lb_name")
        done
    done
    bdevs=("${lvol_bdevs[@]}")
fi
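# In the lvol case each bdev size is calculated as the lvol store's remaining
# free space divided by the number of bdevs still to be created on that NVMe,
# so the drive's capacity ends up split evenly between its VMs.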

# Prepare VMs and controllers
for (( i=0; i<$vm_count; i++ )); do
    vm="vm_$i"

    setup_cmd="vm_setup --disk-type=$ctrl_type --force=$i"
    setup_cmd+=" --os=$vm_image"

    if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
        $rpc_py construct_vhost_scsi_controller naa.0.$i
        $rpc_py add_vhost_scsi_lun naa.0.$i 0 ${bdevs[$i]}
        setup_cmd+=" --disks=0"
    elif [[ "$ctrl_type" == "spdk_vhost_blk" ]]; then
        $rpc_py construct_vhost_blk_controller naa.$i.$i ${bdevs[$i]}
        setup_cmd+=" --disks=$i"
    fi
    $setup_cmd
    used_vms+=" $i"
done
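# Controller naming above: every vhost-scsi VM gets controller naa.0.<vm_num> with
# its bdev attached as LUN 0, while every vhost-blk VM gets a dedicated controller
# naa.<vm_num>.<vm_num>. The --disks value passed to vm_setup mirrors the middle
# field of those names (presumably how vm_setup locates the matching vhost socket).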

# Start VMs and wait for them to boot
vm_run $used_vms
vm_wait_for_boot 300 $used_vms

# Prepare each VM: start the fio server and detect the test disk(s)
fio_disks=""
for vm_num in $used_vms; do
    vm_dir=$VM_BASE_DIR/$vm_num
    host_name="VM-$vm_num"
    vm_ssh $vm_num "hostname $host_name"
    vm_start_fio_server $fio_bin $vm_num

    if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
        vm_check_scsi_location $vm_num
    elif [[ "$ctrl_type" == "spdk_vhost_blk" ]]; then
        vm_check_blk_location $vm_num
    fi

    fio_disks+=" --vm=${vm_num}$(printf ':/dev/%s' $SCSI_DISK)"
done
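# At this point fio_disks holds one "--vm=<vm_num>:/dev/<disk>[:/dev/<disk>...]"
# entry per VM (both vm_check_*_location helpers report the detected devices via
# $SCSI_DISK); run_fio below uses it to target those devices inside the guests.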

# Run FIO traffic
run_fio $fio_bin --job-file="$fio_job" --out="$TEST_DIR/fio_results" --json $fio_disks

notice "Shutting down virtual machines..."
vm_shutdown_all

#notice "Shutting down SPDK vhost app..."
if [[ $use_split == true ]]; then
    cleanup_split_cfg
else
    cleanup_lvol_cfg
fi
spdk_vhost_kill