import os
import sys
import argparse
import multiprocessing
import subprocess
from subprocess import check_call, call, check_output, Popen, PIPE


def range_incl(a, b):
    return list(range(a, b + 1))


def list_spdk_used_cpus(cpus):
    cpu_list = []
    for chunk in cpus.split(","):
        if "-" in chunk:
            _ = chunk.split("-")
            _ = list(map(int, _))
            cpu_list.extend(list(range_incl(*_)))
        else:
            cpu_list.append(int(chunk))
    return cpu_list
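
# Illustrative example (not executed) of how the helper above expands a mixed
# CPU list string; ranges are inclusive:
#   list_spdk_used_cpus("0,2-5,9")  ->  [0, 2, 3, 4, 5, 9]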


def gen_cpu_mask_config(output_dir, spdk_cpu_list, vm_count, vm_cpu_num):
    spdk = gen_spdk_cpu_mask_config(spdk_cpu_list)
    qemu = gen_qemu_cpu_mask_config(spdk_cpu_list, vm_count, vm_cpu_num)
    file_path = os.path.join(output_dir, "mask_config")
    with open(file_path, "w") as fh:
        fh.write("".join([spdk, qemu]))
    return file_path


def gen_spdk_cpu_mask_config(spdk_cpu_list):
    cpus = "vhost_0_reactor_mask=[%s]" % (spdk_cpu_list)

    # Go through assigned CPUs and use the lowest CPU index as
    # the default primary core
    cpu_indexes = list_spdk_used_cpus(spdk_cpu_list)
    cpu_indexes.sort()
    print(cpu_indexes)

    pr_core = "vhost_0_master_core=%s" % (cpu_indexes[0])
    return "\n".join([cpus, pr_core, "\n"])
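
# Illustrative sketch (not executed) of the two config lines the function above
# returns for a hypothetical spdk_cpu_list of "0-3":
#   vhost_0_reactor_mask=[0-3]
#   vhost_0_master_core=0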


def get_host_cpus():
    cpu_num = multiprocessing.cpu_count()
    cpu_list = list(range(0, cpu_num))
    output = check_output("lscpu | grep 'per core'", shell=True)

    # Assuming 2-socket server
    if "2" in str(output):
        ht_enabled = True
        cpu_chunk = int(cpu_num/4)
        numa0_cpus = cpu_list[0:cpu_chunk]
        numa0_cpus.extend(cpu_list[2*cpu_chunk:3*cpu_chunk])
        numa1_cpus = cpu_list[cpu_chunk:2*cpu_chunk]
        numa1_cpus.extend(cpu_list[3*cpu_chunk:4*cpu_chunk])
    else:
        ht_enabled = False
        cpu_chunk = int(cpu_num/2)
        numa0_cpus = cpu_list[:cpu_chunk]
        numa1_cpus = cpu_list[cpu_chunk:]
    return [numa0_cpus, numa1_cpus]
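
# Illustrative example (an assumption about topology, not probed from a real
# host): on a 2-socket, 16-CPU box with hyper-threading the split above yields
#   numa0_cpus = [0, 1, 2, 3, 8, 9, 10, 11]
#   numa1_cpus = [4, 5, 6, 7, 12, 13, 14, 15]
# i.e. each node gets its physical cores plus their HT siblings.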


def gen_qemu_cpu_mask_config(spdk_cpu_list, vm_count, vm_cpu_num):
    print("Creating masks for QEMU")
    ret = ""

    # Exclude SPDK cores from the available CPU list
    numa0_cpus, numa1_cpus = get_host_cpus()
    spdk_cpus = list_spdk_used_cpus(spdk_cpu_list)
    spdk_cpus.sort()

    numa0_cpus = sorted(list(set(numa0_cpus) - set(spdk_cpus)))
    numa1_cpus = sorted(list(set(numa1_cpus) - set(spdk_cpus)))

    # Generate qemu cpu mask and numa param for VMs out of
    # remaining free CPU cores.
    # All CPUs assigned to a VM will come from the same NUMA node.
    # Assuming 2-socket server.
    used_numa = 0
    available = numa0_cpus
    for i in range(0, vm_count):
        cpus = [str(x) for x in available[0:vm_cpu_num]]

        # If there are not enough cores on the first NUMA node for a VM,
        # switch to the next NUMA node
        if len(cpus) < vm_cpu_num and used_numa == 0:
            available = numa1_cpus
            used_numa = 1
            cpus = [str(x) for x in available[0:vm_cpu_num]]

        # If there are not enough cores on the second NUMA node - break and exit
        if len(cpus) < vm_cpu_num and used_numa == 1:
            print("There are not enough CPU cores available on "
                  "NUMA node 1 to create VM %s" % i)
            break

        cpus = ",".join(cpus)
        cpus = "VM_%s_qemu_mask=%s" % (i, cpus)
        numa = "VM_%s_qemu_numa_node=%s\n" % (i, used_numa)

        # Remove used CPU cores from the available list
        available = available[vm_cpu_num:]
        ret = "\n".join([ret, cpus, numa])

    return ret
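
# Illustrative sketch (not executed) of the per-VM entries appended to the mask
# config by the function above, assuming vm_cpu_num=2 with cores 4,5 and 6,7
# still free on NUMA node 0:
#   VM_0_qemu_mask=4,5
#   VM_0_qemu_numa_node=0
#   VM_1_qemu_mask=6,7
#   VM_1_qemu_numa_node=0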


def create_fio_cfg(template_dir, output_dir, **kwargs):
    fio_template = os.path.join(template_dir, "fio_test.conf")
    with open(fio_template, "r") as fh:
        cfg = fh.read()
    cfg = cfg.format(**kwargs)

    file_path = os.path.join(output_dir, "fio_job.cfg")
    with open(file_path, "w") as fh:
        fh.write(cfg)
    return file_path
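
# A minimal sketch (an assumption, not the actual template shipped with the
# repo) of the str.format-style placeholders fio_test.conf is expected to
# contain, matching the parser arguments defined below:
#   bs={blksize}
#   iodepth={iodepth}
#   rw={rw}
#   rwmixread={rwmixread}
#   numjobs={numjobs}
#   runtime={runtime}
#   ramp_time={ramptime}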


script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
parser = argparse.ArgumentParser()

parser.add_argument('blksize', default="4k", type=str,
                    help="Block size param for FIO. Default: 4k")
parser.add_argument('iodepth', default="128", type=str,
                    help="Iodepth param for FIO. Default: 128")
parser.add_argument('rw', default="randread", type=str,
                    help="RW param for FIO. Default: randread")
parser.add_argument('-m', '--rwmixread', default="70", type=str,
                    help="Percentage of reads in read-write mode. Default: 70")
parser.add_argument('-n', '--numjobs', default="1", type=str,
                    help="Numjobs value for FIO job. Default: 1")
parser.add_argument('-r', '--runtime', default="10", type=str,
                    help="Run time param for FIO (in seconds). Default: 10")
parser.add_argument('-R', '--ramptime', default="10", type=str,
                    help="Ramp time param for FIO (in seconds). Default: 10")
parser.add_argument('-c', '--ctrl-type', default="spdk_vhost_scsi", type=str,
                    help="Type of vhost controller to use in test.\
                    Possible options: spdk_vhost_scsi, spdk_vhost_blk.\
                    Default: spdk_vhost_scsi")
parser.add_argument('-s', '--split', default=False, action="store_true",
                    help="Use split vbdevs instead of logical volumes. Default: false")
parser.add_argument('-d', '--max-disks', default=0, type=int,
                    help="How many physical disks to use in test. Default: all disks.\
                    Depending on the number of --vm-count disks may be split into\
                    smaller logical bdevs (splits or logical volumes) so that\
                    each virtual machine gets its own bdev to work on.")
parser.add_argument('-v', '--vm-count', default=1, type=int,
                    help="How many VMs to run in test. Default: 1")
parser.add_argument('-i', '--vm-image', default="/home/sys_sgsw/vhost_vm_image.qcow2",
                    type=str, help="VM image to use for running VMs.")

subparsers = parser.add_subparsers()
cpu_cfg_create = subparsers.add_parser('create_cpu_cfg',
                                       help="Generate a CPU config file for test.\
                                       This option will attempt to automatically\
                                       generate a config file with SPDK/QEMU cpu lists.\
                                       CPU cores on NUMA Node 0 will be used first\
                                       (including logical cores when HT is enabled)\
                                       and NUMA Node 1 will be used last.")
cpu_cfg_create.add_argument('spdk_cpu_list', default=None,
                            help="List of CPU cores to be used by SPDK vhost app.\
                            Accepted format examples:\
                            single cpus: 0,2,4\
                            ranges (inclusive!): 0-2\
                            mixed: 0,2-5,9")
cpu_cfg_create.add_argument('vm_cpu_num', default=None, type=int)

cpu_cfg_load = subparsers.add_parser('load_cpu_cfg',
                                     help="Load and use a CPU config file for test.\
                                     Example configuration files can be found in:\
                                     test/vhost/common/autotest.config")
cpu_cfg_load.add_argument('custom_mask_file', default=None,
                          help="Path to file with custom values for vhost's\
                          reactor mask and master core, and each VM's qemu mask\
                          and qemu numa node")

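# Example invocations (illustrative, assuming the script is run from the SPDK
# root directory, as the relative test paths below imply):
#   scripts/perf/vhost/run_vhost_test.py 4k 128 randrw -m 70 -v 4 create_cpu_cfg 0-3 2
#   scripts/perf/vhost/run_vhost_test.py 4k 128 randread load_cpu_cfg /path/to/mask_config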
args = parser.parse_args()
fio_cfg_path = create_fio_cfg(script_dir, script_dir, **vars(args))

cpu_cfg_arg = ""
disk_arg = ""
split_arg = ""
if "spdk_cpu_list" in args:
    cfg_path = gen_cpu_mask_config(script_dir, args.spdk_cpu_list, args.vm_count, args.vm_cpu_num)
    cpu_cfg_arg = "--custom-cpu-cfg=%s" % cfg_path
if "custom_mask_file" in args:
    cpu_cfg_arg = "--custom-cpu-cfg=%s" % args.custom_mask_file
if args.split is True:
    split_arg = "--use-split"
if args.max_disks > 0:
    disk_arg = "--max-disks=%s" % args.max_disks


command = " ".join(["test/vhost/perf_bench/vhost_perf.sh",
                    "--vm-image=%s" % args.vm_image,
                    "--vm-count=%s" % args.vm_count,
                    "--ctrl-type=%s" % args.ctrl_type,
                    "%s" % split_arg,
                    "%s" % disk_arg,
                    "--fio-job=%s" % fio_cfg_path,
                    "%s" % cpu_cfg_arg])
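
# Illustrative sketch (not executed) of the assembled command string, e.g. with
# the defaults and a hypothetical load_cpu_cfg run:
#   test/vhost/perf_bench/vhost_perf.sh --vm-image=/home/sys_sgsw/vhost_vm_image.qcow2
#       --vm-count=1 --ctrl-type=spdk_vhost_scsi --fio-job=.../fio_job.cfg
#       --custom-cpu-cfg=/path/to/mask_config
# (split_arg and disk_arg are joined in as empty strings when not requested).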
# TODO: Disabled for now.
# Reason: initially this script was supposed to be a wrapper for the .sh script
# and would:
# - generate FIO config
# - generate SPDK/QEMU CPU mask configuration file
# - run the test script
# Auto-generating the CPU mask configuration needs some more work,
# and the increasing number of params makes the .py script hard to use.
# Will clean up here soon.

# print("INFO: Running perf test with command:")
# print(command)
# pr = check_output(command, shell=True)