1 """
2 Remotely run peering tests.
3 """
4 import logging
5 import time
6
7 log = logging.getLogger(__name__)
8
9 from teuthology.task.args import argify
10
11 POOLNAME = "POOLNAME"
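# Each ARGS entry is (name, description, default, type); the @argify
# decorator below uses the list to fill in defaults from the task's yaml
# config and expose each value as an attribute (e.g. config.num_pgs).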
ARGS = [
    ('num_pgs', 'number of pgs to create', 256, int),
    ('max_time', 'seconds allowed for peering (0 = no limit)', 0, int),
    ('runs', 'trials to run', 10, int),
    ('num_objects', 'objects to create', 256 * 1024, int),
    ('object_size', 'size in bytes for objects', 64, int),
    ('creation_time_limit', 'time limit for pool population', 60*60, int),
    ('create_threads', 'concurrent writes for create', 256, int)
]

def setup(ctx, config):
23 """
24 Setup peering test on remotes.
25 """
26 manager = ctx.managers['ceph']
27 manager.clear_pools()
28 manager.create_pool(POOLNAME, config.num_pgs)
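    # populate the pool so that later peering and recovery have real
    # data to handle rather than empty pgs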
    log.info("populating pool")
    manager.rados_write_objects(
        POOLNAME,
        config.num_objects,
        config.object_size,
        config.creation_time_limit,
        config.create_threads)
    log.info("done populating pool")

def do_run(ctx, config):
39 """
40 Perform the test.
41 """
42 start = time.time()
43 # mark in osd
44 manager = ctx.managers['ceph']
45 manager.mark_in_osd(0)
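    # the writes below block until each pg has re-peered and gone active
    # again, so their completion time approximates the time to active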
    log.info("writing out objects")
    manager.rados_write_objects(
        POOLNAME,
        config.num_pgs, # write 1 object per pg or so
        1,
        config.creation_time_limit,
        config.num_pgs, # lots of concurrency
        cleanup=True)
    peering_end = time.time()
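    # wait_for_clean blocks until every pg reports active+clean, so
    # recovery_end below captures the full time back to a clean state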
    log.info("peering done, waiting on recovery")
    manager.wait_for_clean()
    recovery_end = time.time()
    log.info("recovery done")
    if config.max_time:
        assert peering_end - start < config.max_time, \
            "peering took longer than max_time ({max}s)".format(max=config.max_time)
    manager.mark_out_osd(0)
    manager.wait_for_clean()
    return {
        'time_to_active': peering_end - start,
        'time_to_clean': recovery_end - start
    }

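# Example teuthology yaml fragment for this task (an illustrative sketch,
# not taken from a real job; all keys are optional and default to the
# values in ARGS above):
#
#   tasks:
#   - peering_speed_test:
#       num_pgs: 256
#       max_time: 120
#       runs: 10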
@argify("peering_speed_test", ARGS)
def task(ctx, config):
    """
    Peering speed test: measure how long pgs take to peer and then
    recover after osd.0 is marked back in.
    """
    setup(ctx, config)
    manager = ctx.managers['ceph']
    manager.mark_out_osd(0)
    manager.wait_for_clean()
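    # osd.0 starts each trial marked out; do_run marks it back in and
    # times how long the pool takes to go active and then clean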
    ret = []
    for i in range(config.runs):
        log.info("Run {i}".format(i=i))
        ret.append(do_run(ctx, config))

    manager.mark_in_osd(0)
    ctx.summary['recovery_times'] = {
        'runs': ret
    }