]> git.proxmox.com Git - ceph.git/blame - ceph/qa/tasks/radosbench.py
import 15.2.2 octopus source
[ceph.git] / ceph / qa / tasks / radosbench.py
CommitLineData
7c673cae
FG
"""
Rados benchmarking
"""
import contextlib
import logging

from teuthology.orchestra import run
from teuthology import misc as teuthology

import six

log = logging.getLogger(__name__)


@contextlib.contextmanager
def task(ctx, config):
    """
    Run radosbench

    The config should be as follows:

    radosbench:
        clients: [client list]
        time: <seconds to run>
        pool: <pool to use>
        size: write size to use
        concurrency: max number of outstanding writes (16)
        objectsize: object size to use
        unique_pool: use a unique pool, defaults to False
        ec_pool: create an ec pool, defaults to False
        create_pool: create pool, defaults to True
        erasure_code_profile:
          name: teuthologyprofile
          k: 2
          m: 1
          crush-failure-domain: osd
        cleanup: false (defaults to true)
        type: <write|seq|rand> (defaults to write)

    NOTE(review): the ``unique_pool`` option above is documented but never
    read by this task; pool uniqueness is actually controlled by the
    ``pool``/``create_pool`` options (a unique-named pool is created when
    ``pool`` is left at its default 'data'). Confirm against callers before
    relying on it.

    example:

    tasks:
    - ceph:
    - radosbench:
        clients: [client.0]
        time: 360
    - interactive:
    """
    log.info('Beginning radosbench...')
    assert isinstance(config, dict), \
        "please list clients to run on"
    # Maps client id -> the running (non-waited) rados bench process so the
    # finally block can join them all after the yield.
    radosbench = {}

    testdir = teuthology.get_testdir(ctx)
    manager = ctx.managers['ceph']
    runtype = config.get('type', 'write')

    create_pool = config.get('create_pool', True)
    for role in config.get('clients', ['client.0']):
        assert isinstance(role, six.string_types)
        PREFIX = 'client.'
        assert role.startswith(PREFIX)
        id_ = role[len(PREFIX):]
        # Exactly one remote is expected to hold this client role.
        (remote,) = ctx.cluster.only(role).remotes.keys()

        # Optionally create an erasure-code profile so the pool created
        # below can be an EC pool.
        if config.get('ec_pool', False):
            profile = config.get('erasure_code_profile', {})
            profile_name = profile.get('name', 'teuthologyprofile')
            manager.create_erasure_code_profile(profile_name, profile)
        else:
            profile_name = None

        cleanup = []
        if not config.get('cleanup', True):
            cleanup = ['--no-cleanup']

        write_to_omap = []
        if config.get('write-omap', False):
            write_to_omap = ['--write-omap']
            log.info('omap writes')

        pool = config.get('pool', 'data')
        if create_pool:
            if pool != 'data':
                manager.create_pool(pool, erasure_code_profile_name=profile_name)
            else:
                # Default pool name: create a uniquely-named pool instead,
                # so concurrent/repeated runs don't collide.
                pool = manager.create_pool_with_unique_name(erasure_code_profile_name=profile_name)

        size = config.get('size', 65536)
        concurrency = config.get('concurrency', 16)
        osize = config.get('objectsize', 65536)
        sizeargs = ['-b', str(size)]
        if osize != 0 and osize != size:
            # only use -O if this varies from size. kludgey workaround the
            # fact that -O was -o in older releases.
            sizeargs.extend(['-O', str(osize)])

        # If doing a reading run then populate data: seq/rand benchmarks
        # need objects to read, so do a short synchronous write pass first.
        # (The returned process handle is not needed -- wait=True blocks
        # until completion.)
        if runtype != "write":
            remote.run(
                args=[
                    "/bin/sh", "-c",
                    " ".join(['adjust-ulimits',
                              'ceph-coverage',
                              '{tdir}/archive/coverage',
                              'rados',
                              '--no-log-to-stderr',
                              '--name', role]
                             + sizeargs +
                             ['-t', str(concurrency)] +
                             ['-p' , pool,
                              'bench', str(60), "write", "--no-cleanup"
                              ]).format(tdir=testdir),
                ],
                logger=log.getChild('radosbench.{id}'.format(id=id_)),
                wait=True
                )
            # Size args only apply to writes; the actual read benchmark
            # below must not pass them.
            sizeargs = []

        # Launch the actual benchmark asynchronously; it is joined in the
        # finally block after the yield.
        proc = remote.run(
            args=[
                "/bin/sh", "-c",
                " ".join(['adjust-ulimits',
                          'ceph-coverage',
                          '{tdir}/archive/coverage',
                          'rados',
                          '--no-log-to-stderr',
                          '--name', role]
                         + sizeargs +
                         ['-p' , pool,
                          'bench', str(config.get('time', 360)), runtype,
                          ] + write_to_omap + cleanup).format(tdir=testdir),
            ],
            logger=log.getChild('radosbench.{id}'.format(id=id_)),
            stdin=run.PIPE,
            wait=False
            )
        radosbench[id_] = proc

    try:
        yield
    finally:
        # Generous join timeout: allow for cluster slowness (30x the bench
        # duration) plus a fixed 5-minute grace period.
        timeout = config.get('time', 360) * 30 + 300
        log.info('joining radosbench (timing out after %ss)', timeout)
        run.wait(radosbench.values(), timeout=timeout)

        # Only remove a pool this task created itself; 'data' means the
        # caller supplied no pool name and create_pool handling replaced it
        # with a unique name above.
        if pool != 'data' and create_pool:
            manager.remove_pool(pool)