1 """
2 Rados benchmarking
3 """
4 import contextlib
5 import logging
6
7 from teuthology.orchestra import run
8 from teuthology import misc as teuthology
9
10
11 log = logging.getLogger(__name__)
12
@contextlib.contextmanager
def task(ctx, config):
    """
    Run radosbench

    The config should be as follows:

    radosbench:
        clients: [client list]
        time: <seconds to run>
        pool: <pool to use>
        size: <write size to use>
        concurrency: <max number of outstanding writes> (defaults to 16)
        objectsize: <object size to use>
        unique_pool: use a unique pool, defaults to False (not read by this
                     task; a uniquely named pool is created whenever 'pool'
                     is left unset)
        ec_pool: create an ec pool, defaults to False
        create_pool: create pool, defaults to True
        erasure_code_profile:
          name: teuthologyprofile
          k: 2
          m: 1
          crush-failure-domain: osd
        cleanup: false (defaults to true)
        write-omap: pass --write-omap to rados bench, defaults to False
        type: <write|seq|rand> (defaults to write)
    example:

    tasks:
    - ceph:
    - radosbench:
        clients: [client.0]
        time: 360
    - interactive:
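
    An erasure-coded pool variant (keys as documented above):

    tasks:
    - ceph:
    - radosbench:
        clients: [client.0]
        time: 360
        ec_pool: true
        erasure_code_profile:
          name: teuthologyprofile
          k: 2
          m: 1
          crush-failure-domain: osd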
45 """
    log.info('Beginning radosbench...')
    assert isinstance(config, dict), \
        "please list clients to run on"
    radosbench = {}

    testdir = teuthology.get_testdir(ctx)
    manager = ctx.managers['ceph']
    runtype = config.get('type', 'write')

    create_pool = config.get('create_pool', True)
    for role in config.get(
            'clients',
            list(map(lambda x: 'client.' + x,
                     teuthology.all_roles_of_type(ctx.cluster, 'client')))):
        assert isinstance(role, str)
        (_, id_) = role.split('.', 1)
        (remote,) = ctx.cluster.only(role).remotes.keys()
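        # e.g. role 'client.0' yields id_ '0'; the single remote mapped to
        # the role is the host that runs the rados CLI below.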

        if config.get('ec_pool', False):
            profile = config.get('erasure_code_profile', {})
            profile_name = profile.get('name', 'teuthologyprofile')
            manager.create_erasure_code_profile(profile_name, profile)
        else:
            profile_name = None

        cleanup = []
        if not config.get('cleanup', True):
            cleanup = ['--no-cleanup']
        write_to_omap = []
        if config.get('write-omap', False):
            write_to_omap = ['--write-omap']
            log.info('omap writes')

        pool = config.get('pool', 'data')
        if create_pool:
            if pool != 'data':
                manager.create_pool(pool, erasure_code_profile_name=profile_name)
            else:
                pool = manager.create_pool_with_unique_name(erasure_code_profile_name=profile_name)
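        # Leaving 'pool' unset takes the else branch above: a uniquely named
        # pool is created so that concurrent runs do not collide.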

        concurrency = config.get('concurrency', 16)
        osize = config.get('objectsize', 65536)
        if osize == 0:
            objectsize = []
        else:
            objectsize = ['--object-size', str(osize)]
        size = ['-b', str(config.get('size', 65536))]
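        # '-b' sets the write size and '--object-size' the object size;
        # an objectsize of 0 omits the flag, leaving rados bench's default.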
        # Read runs (seq/rand) need pre-existing objects, so populate the
        # pool with a short write pass first.
        if runtype != "write":
            proc = remote.run(
                args=[
                    "/bin/sh", "-c",
                    " ".join(['adjust-ulimits',
                              'ceph-coverage',
                              '{tdir}/archive/coverage',
                              'rados',
                              '--no-log-to-stderr',
                              '--name', role] +
                             ['-t', str(concurrency)] +
                             size + objectsize +
                             ['-p', pool,
                              'bench', str(60), "write", "--no-cleanup"
                              ]).format(tdir=testdir),
                    ],
                logger=log.getChild('radosbench.{id}'.format(id=id_)),
                wait=True
                )
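            # The write size flags don't apply to the timed read pass below.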
            size = []
            objectsize = []

        proc = remote.run(
            args=[
                "/bin/sh", "-c",
                " ".join(['adjust-ulimits',
                          'ceph-coverage',
                          '{tdir}/archive/coverage',
                          'rados',
                          '--no-log-to-stderr',
                          '--name', role] +
                         size + objectsize +
                         ['-p', pool,
                          'bench', str(config.get('time', 360)), runtype,
                          ] + write_to_omap + cleanup).format(tdir=testdir),
                ],
            logger=log.getChild('radosbench.{id}'.format(id=id_)),
            stdin=run.PIPE,
            wait=False
            )
        radosbench[id_] = proc

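    # The bench processes run in the background (wait=False); they are joined
    # on teardown, after the yield below hands control to subsequent tasks.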
    try:
        yield
    finally:
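        # Heuristic grace period: thirty times the configured bench time plus
        # five minutes of slack before the join is abandoned.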
        timeout = config.get('time', 360) * 30 + 300
        log.info('joining radosbench (timing out after %ss)', timeout)
        run.wait(radosbench.values(), timeout=timeout)

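        # Note: only the pool bound in the last loop iteration is removed;
        # with several clients on uniquely named pools, earlier ones remain.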
        if pool != 'data' and create_pool:
            manager.remove_pool(pool)