1 """
2 Rados benchmarking
3 """
4 import contextlib
5 import logging
6
7 from teuthology.orchestra import run
8 from teuthology import misc as teuthology
9
10 import six
11
12 log = logging.getLogger(__name__)
13
14 @contextlib.contextmanager
15 def task(ctx, config):
16 """
17 Run radosbench
18
19 The config should be as follows:
20
21 radosbench:
22 clients: [client list]
23 time: <seconds to run>
24 pool: <pool to use>
25 size: write size to use
26 concurrency: max number of outstanding writes (16)
27 objectsize: object size to use
28 unique_pool: use a unique pool, defaults to False
29 ec_pool: create an ec pool, defaults to False
30 create_pool: create pool, defaults to True
31 erasure_code_profile:
32 name: teuthologyprofile
33 k: 2
34 m: 1
35 crush-failure-domain: osd
36 cleanup: false (defaults to true)
37 type: <write|seq|rand> (defaults to write)
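        write-omap: pass --write-omap to rados bench, defaults to False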
    example:

    tasks:
    - ceph:
    - radosbench:
        clients: [client.0]
        time: 360
    - interactive:
    """
    log.info('Beginning radosbench...')
    assert isinstance(config, dict), \
        "please list clients to run on"
    radosbench = {}

    testdir = teuthology.get_testdir(ctx)
    manager = ctx.managers['ceph']
    runtype = config.get('type', 'write')

    create_pool = config.get('create_pool', True)
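    # Launch one rados bench process per configured client role.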
    for role in config.get('clients', ['client.0']):
        assert isinstance(role, six.string_types)
        PREFIX = 'client.'
        assert role.startswith(PREFIX)
        id_ = role[len(PREFIX):]
        (remote,) = ctx.cluster.only(role).remotes.keys()

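        # For an EC run, make sure the requested erasure code profile
        # exists before the pool gets created below.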
        if config.get('ec_pool', False):
            profile = config.get('erasure_code_profile', {})
            profile_name = profile.get('name', 'teuthologyprofile')
            manager.create_erasure_code_profile(profile_name, profile)
        else:
            profile_name = None

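        # Translate config flags into extra 'rados bench' options.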
        cleanup = []
        if not config.get('cleanup', True):
            cleanup = ['--no-cleanup']
        write_to_omap = []
        if config.get('write-omap', False):
            write_to_omap = ['--write-omap']
            log.info('omap writes')

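        # Create the benchmark pool: the named pool if one was requested,
        # otherwise a uniquely named pool.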
        pool = config.get('pool', 'data')
        if create_pool:
            if pool != 'data':
                manager.create_pool(pool, erasure_code_profile_name=profile_name)
            else:
                pool = manager.create_pool_with_unique_name(erasure_code_profile_name=profile_name)

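        # Size arguments for 'rados bench': -b sets the write size,
        # -O the object size (only passed when it differs, see below).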
        size = config.get('size', 65536)
        concurrency = config.get('concurrency', 16)
        osize = config.get('objectsize', 65536)
        sizeargs = ['-b', str(size)]
        if osize != 0 and osize != size:
            # only use -O if this varies from size: a kludgey workaround for
            # the fact that -O was -o in older releases.
            sizeargs.extend(['-O', str(osize)])

        # If doing a read run, first populate the pool so there is data
        # to read back.
        if runtype != "write":
            proc = remote.run(
                args=[
                    "/bin/sh", "-c",
                    " ".join(['adjust-ulimits',
                              'ceph-coverage',
                              '{tdir}/archive/coverage',
                              'rados',
                              '--no-log-to-stderr',
                              '--name', role]
                             + sizeargs +
                             ['-t', str(concurrency)] +
                             ['-p', pool,
                              'bench', str(60), "write", "--no-cleanup"
                              ]).format(tdir=testdir),
                    ],
                logger=log.getChild('radosbench.{id}'.format(id=id_)),
                wait=True
                )
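            # The size arguments only apply to write benchmarks, so drop
            # them for the seq/rand run below.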
            sizeargs = []

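        # Kick off the main benchmark in the background; the task yields
        # while it runs and joins it in the finally block below.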
        proc = remote.run(
            args=[
                "/bin/sh", "-c",
                " ".join(['adjust-ulimits',
                          'ceph-coverage',
                          '{tdir}/archive/coverage',
                          'rados',
                          '--no-log-to-stderr',
                          '--name', role]
                         + sizeargs +
                         ['-p', pool,
                          'bench', str(config.get('time', 360)), runtype,
                          ] + write_to_omap + cleanup).format(tdir=testdir),
                ],
            logger=log.getChild('radosbench.{id}'.format(id=id_)),
            stdin=run.PIPE,
            wait=False
            )
        radosbench[id_] = proc

    try:
        yield
    finally:
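        # Give the bench processes generous slack (30x the configured run
        # time plus five minutes) before declaring the join a failure.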
        timeout = config.get('time', 360) * 30 + 300
        log.info('joining radosbench (timing out after %ss)', timeout)
        run.wait(radosbench.values(), timeout=timeout)

        if pool != 'data' and create_pool:
            manager.remove_pool(pool)