#!/usr/bin/env python3

# This script runs a fio benchmark test on the local NVMe devices using the SPDK NVMe driver.
# Prework: Run scripts/setup.sh to bind the SSDs to the SPDK driver.
# Prework: Change any fio configuration in the template fio config file fio_test.conf.
# Output: A csv file named <hostname>_<num ssds>ssds_perf_output.csv
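#
# Example invocation (paths are illustrative; the fio plugin is built inside the
# SPDK tree, e.g. examples/nvme/fio_plugin/fio_plugin):
#   ./run_fio_test.py fio_test.conf /path/to/spdk/examples/nvme/fio_plugin/fio_plugin 4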

import subprocess
from subprocess import check_output
import os
import sys
import itertools
from shutil import copyfile
import json

# Populate the test parameters into these lists to run different workloads.
# The configuration below runs QD 1 & 128. To add QD 32, set q_depth = ['1', '32', '128'].
q_depth = ['1', '128']
# io_size specifies the size in bytes of each IO.
# To also run 64K IOs, set io_size = ['4096', '65536'].
io_size = ['4096']
workload_type = ['randrw']
mix = ['100']
core_mask = ['0x1']
# run_time specifies how long to run each test, in seconds.
# Set run_time = ['600'] to run each test for 10 minutes.
run_time = ['60']
# iter_num is used to run each test multiple times.
# Set iter_num = ['1', '2', '3'] to repeat each test 3 times.
iter_num = ['1']
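
# The total number of fio runs is the product of the lengths of the lists
# above; with the defaults, that is two runs (QD 1 and QD 128).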


def run_fio(io_size_bytes, qd, rw_mix, cpu_mask, run_num, workload, run_time_sec):
    """Run a single fio job and write its json results to a per-run output file."""
    print("Running Test: IO Size={} QD={} Mix={} CPU Mask={}".format(io_size_bytes, qd, rw_mix, cpu_mask))
    string = "s_" + str(io_size_bytes) + "_q_" + str(qd) + "_m_" + str(rw_mix) \
        + "_c_" + str(cpu_mask) + "_run_" + str(run_num)

    # Call fio. The workload parameters are passed into the fio config file
    # through environment variables.
    path_to_fio_conf = config_file_for_test
    path_to_ioengine = sys.argv[2]
    command = "BLK_SIZE=" + str(io_size_bytes) + " RW=" + str(workload) + " MIX=" + str(rw_mix) \
        + " IODEPTH=" + str(qd) + " RUNTIME=" + str(run_time_sec) + " IOENGINE=" + path_to_ioengine \
        + " fio " + str(path_to_fio_conf) + " -output=" + string + " -output-format=json"
    subprocess.check_output(command, shell=True)

    print("Finished Test: IO Size={} QD={} Mix={} CPU Mask={}".format(io_size_bytes, qd, rw_mix, cpu_mask))
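
# For illustration, with the default parameter lists above the first generated
# command looks roughly like this (the ioengine path comes from argv[2]):
#   BLK_SIZE=4096 RW=randrw MIX=100 IODEPTH=1 RUNTIME=60 IOENGINE=<path_to_ioengine> \
#       fio fio_test.conf_<num_ssds>ssds -output=s_4096_q_1_m_100_c_0x1_run_0 -output-format=json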


def parse_results(io_size_bytes, qd, rw_mix, cpu_mask, run_num, workload, run_time_sec):
    """Parse the json results of one fio run and append a csv line to the result file."""
    # If the json file has results for multiple fio jobs, pick the results from the right job
    job_pos = 0

    # Generate the next result line that will be added to the output csv file
    results = str(io_size_bytes) + "," + str(qd) + "," + str(rw_mix) + "," \
        + str(workload) + "," + str(cpu_mask) + "," + str(run_time_sec) + "," + str(run_num)

    # Read the results of this run from the per-run json output file
    string = "s_" + str(io_size_bytes) + "_q_" + str(qd) + "_m_" + str(rw_mix) \
        + "_c_" + str(cpu_mask) + "_run_" + str(run_num)
    with open(string) as json_file:
        data = json.load(json_file)
        job_name = data['jobs'][job_pos]['jobname']
        # print("FIO job name:", job_name)
        # Newer fio versions report latency in nanoseconds under 'lat_ns';
        # older versions report it in microseconds under 'lat'.
        if 'lat_ns' in data['jobs'][job_pos]['read']:
            lat = 'lat_ns'
            lat_units = 'ns'
        else:
            lat = 'lat'
            lat_units = 'us'
        read_iops = float(data['jobs'][job_pos]['read']['iops'])
        read_bw = float(data['jobs'][job_pos]['read']['bw'])
        read_avg_lat = float(data['jobs'][job_pos]['read'][lat]['mean'])
        read_min_lat = float(data['jobs'][job_pos]['read'][lat]['min'])
        read_max_lat = float(data['jobs'][job_pos]['read'][lat]['max'])
        write_iops = float(data['jobs'][job_pos]['write']['iops'])
        write_bw = float(data['jobs'][job_pos]['write']['bw'])
        write_avg_lat = float(data['jobs'][job_pos]['write'][lat]['mean'])
        write_min_lat = float(data['jobs'][job_pos]['write'][lat]['min'])
        write_max_lat = float(data['jobs'][job_pos]['write'][lat]['max'])
        print("%-10s" % "IO Size", "%-10s" % "QD", "%-10s" % "Mix",
              "%-10s" % "Workload Type", "%-10s" % "CPU Mask",
              "%-10s" % "Run Time", "%-10s" % "Run Num",
              "%-15s" % "Read IOps",
              "%-10s" % "Read KiB/s", "%-15s" % ("Read Avg. Lat(" + lat_units + ")"),
              "%-15s" % ("Read Min. Lat(" + lat_units + ")"), "%-15s" % ("Read Max. Lat(" + lat_units + ")"),
              "%-15s" % "Write IOps",
              "%-10s" % "Write KiB/s", "%-15s" % ("Write Avg. Lat(" + lat_units + ")"),
              "%-15s" % ("Write Min. Lat(" + lat_units + ")"), "%-15s" % ("Write Max. Lat(" + lat_units + ")"))
        print("%-10s" % io_size_bytes, "%-10s" % qd, "%-10s" % rw_mix,
              "%-10s" % workload, "%-10s" % cpu_mask, "%-10s" % run_time_sec,
              "%-10s" % run_num, "%-15s" % read_iops, "%-10s" % read_bw,
              "%-15s" % read_avg_lat, "%-15s" % read_min_lat, "%-15s" % read_max_lat,
              "%-15s" % write_iops, "%-10s" % write_bw, "%-15s" % write_avg_lat,
              "%-15s" % write_min_lat, "%-15s" % write_max_lat)
        results = results + "," + str(read_iops) + "," + str(read_bw) + "," \
            + str(read_avg_lat) + "," + str(read_min_lat) + "," + str(read_max_lat) \
            + "," + str(write_iops) + "," + str(write_bw) + "," + str(write_avg_lat) \
            + "," + str(write_min_lat) + "," + str(write_max_lat)
        with open(result_file_name, "a") as result_file:
            result_file.write(results + "\n")
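
# For reference, the fields read above come from fio's json output, which is
# shaped roughly like this (abridged):
#   {"jobs": [{"jobname": "...",
#              "read":  {"iops": ..., "bw": ..., "lat_ns": {"min": ..., "max": ..., "mean": ...}},
#              "write": { ...same fields as "read"... }}]}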


def get_nvme_devices_count():
    # Count the NVMe controllers: lspci describes them as "Non-Volatile memory controller".
    output = check_output('lspci | grep -i Non | wc -l', shell=True)
    return int(output)


def get_nvme_devices_bdf():
    # Return the PCI bus:device.function addresses of the NVMe controllers.
    output = check_output('lspci | grep -i Non | awk \'{print $1}\'', shell=True).decode("utf-8")
    output = output.split()
    return output


def add_filename_to_conf(conf_file_name, bdf):
    """Append an SPDK PCIe filename line for the given BDF to the fio config file."""
    filestring = "filename=trtype=PCIe traddr=0000." + bdf.replace(":", ".") + " ns=1"
    with open(conf_file_name, "a") as conf_file:
        conf_file.write(filestring + "\n")
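
# For example, a device at BDF 01:00.0 produces this line in the fio config file:
#   filename=trtype=PCIe traddr=0000.01.00.0 ns=1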


if len(sys.argv) != 4:
    print("usage: {} path_to_fio_conf path_to_ioengine num_ssds".format(sys.argv[0]))
    sys.exit(1)

num_ssds = int(sys.argv[3])
if num_ssds > get_nvme_devices_count():
    print("System does not have {} NVMe SSDs.".format(num_ssds))
    sys.exit(1)

host_name = os.uname()[1]
result_file_name = host_name + "_" + sys.argv[3] + "ssds_perf_output.csv"

bdf = get_nvme_devices_bdf()
config_file_for_test = sys.argv[1] + "_" + sys.argv[3] + "ssds"
copyfile(sys.argv[1], config_file_for_test)

# Add the number of threads to the fio config file
with open(config_file_for_test, "a") as conf_file:
    conf_file.write("numjobs=1\n")

# Add one filename line per NVMe device to the fio config file
for i in range(num_ssds):
    add_filename_to_conf(config_file_for_test, bdf[i])
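
# After this step the copied config file ends with lines like the following
# (one filename line per SSD; the BDFs vary by host):
#   numjobs=1
#   filename=trtype=PCIe traddr=0000.01.00.0 ns=1
#   filename=trtype=PCIe traddr=0000.02.00.0 ns=1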

# Set up the csv output header. Note: the latency columns are in microseconds
# with older fio versions and in nanoseconds with newer ones; see parse_results().
columns = ("IO_Size,Q_Depth,Workload_Mix,Workload_Type,Core_Mask,Run_Time,Run,"
           "Read_IOPS,Read_bw(KiB/s),Read_Avg_lat(us),Read_Min_Lat(us),Read_Max_Lat(us),"
           "Write_IOPS,Write_bw(KiB/s),Write_Avg_lat(us),Write_Min_Lat(us),Write_Max_Lat(us)")

with open(result_file_name, "w") as result_file:
    result_file.write(columns + "\n")

# Run the full cross product of the test parameters. The enumerate index is
# used as the run number in the per-run output file names, and iter_num
# controls how many times each combination is repeated.
for i, (s, q, m, w, c, t, _) in enumerate(itertools.product(io_size, q_depth, mix,
                                                            workload_type, core_mask,
                                                            run_time, iter_num)):
    run_fio(s, q, m, c, i, w, t)
    parse_results(s, q, m, c, i, w, t)