1 """
2 Rados benchmarking
3 """
4 import contextlib
5 import logging
6
7 from teuthology.orchestra import run
8 from teuthology import misc as teuthology
9
10 log = logging.getLogger(__name__)

@contextlib.contextmanager
def task(ctx, config):
    """
    Run radosbench

    The config should be as follows:

    radosbench:
        clients: [client list]
        time: <seconds to run>
        pool: <pool to use>
        size: <write size to use>
        objectsize: <object size to use>
        unique_pool: use a unique pool, defaults to False
        ec_pool: create an ec pool, defaults to False
        create_pool: create pool, defaults to True
        erasure_code_profile:
          name: teuthologyprofile
          k: 2
          m: 1
          crush-failure-domain: osd
        cleanup: false (defaults to true)
        type: <write|seq|rand> (defaults to write)

    example:

    tasks:
    - ceph:
    - radosbench:
        clients: [client.0]
        time: 360
    - interactive:
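
    An illustrative (hypothetical) second example using the options above:
    a sequential-read run that keeps its objects afterwards:

    tasks:
    - ceph:
    - radosbench:
        clients: [client.0]
        time: 300
        type: seq
        cleanup: false
    - interactive: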
43 """
    log.info('Beginning radosbench...')
    assert isinstance(config, dict), \
        "please list clients to run on"
    radosbench = {}

    testdir = teuthology.get_testdir(ctx)
    manager = ctx.managers['ceph']
    runtype = config.get('type', 'write')

    create_pool = config.get('create_pool', True)
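    # Start one backgrounded 'rados bench' process per configured client.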
    for role in config.get('clients', ['client.0']):
        assert isinstance(role, str)
        PREFIX = 'client.'
        assert role.startswith(PREFIX)
        id_ = role[len(PREFIX):]
        (remote,) = ctx.cluster.only(role).remotes.keys()

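        # Create the erasure-code profile first when an EC pool is requested.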
        if config.get('ec_pool', False):
            profile = config.get('erasure_code_profile', {})
            profile_name = profile.get('name', 'teuthologyprofile')
            manager.create_erasure_code_profile(profile_name, profile)
        else:
            profile_name = None

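        # 'rados bench' deletes the objects it wrote when it finishes unless
        # --no-cleanup is passed.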
        cleanup = []
        if not config.get('cleanup', True):
            cleanup = ['--no-cleanup']

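        # Create the target pool unless create_pool is disabled; the default
        # 'data' pool is swapped for a uniquely named one.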
        pool = config.get('pool', 'data')
        if create_pool:
            if pool != 'data':
                manager.create_pool(pool, erasure_code_profile_name=profile_name)
            else:
                pool = manager.create_pool_with_unique_name(erasure_code_profile_name=profile_name)

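        # Map the size/objectsize options onto the -b/-o flags passed to
        # 'rados bench' below.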
        osize = config.get('objectsize', 0)
        if osize == 0:
            objectsize = []
        else:
            objectsize = ['-o', str(osize)]
        size = ['-b', str(config.get('size', 4 << 20))]
        # For read benchmarks, run a write pass first so there is data to read.
        if runtype != "write":
            proc = remote.run(
                args=[
                    "/bin/sh", "-c",
                    " ".join(['adjust-ulimits',
                              'ceph-coverage',
                              '{tdir}/archive/coverage',
                              'rados',
                              '--no-log-to-stderr',
                              '--name', role]
                             + size + objectsize +
                             ['-p', pool,
                              'bench', str(60), "write", "--no-cleanup"
                              ]).format(tdir=testdir),
                    ],
                logger=log.getChild('radosbench.{id}'.format(id=id_)),
                wait=True
                )
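            # The read run reuses the objects just written, so drop the
            # explicit size flags.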
            size = []
            objectsize = []

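        # Launch the benchmark itself in the background; it is joined, with a
        # timeout, in the finally block below.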
        proc = remote.run(
            args=[
                "/bin/sh", "-c",
                " ".join(['adjust-ulimits',
                          'ceph-coverage',
                          '{tdir}/archive/coverage',
                          'rados',
                          '--no-log-to-stderr',
                          '--name', role]
                         + size + objectsize +
                         ['-p', pool,
                          'bench', str(config.get('time', 360)), runtype,
                          ] + cleanup).format(tdir=testdir),
                ],
            logger=log.getChild('radosbench.{id}'.format(id=id_)),
            stdin=run.PIPE,
            wait=False
            )
        radosbench[id_] = proc

    try:
        yield
    finally:
        timeout = config.get('time', 360) * 30 + 300
        log.info('joining radosbench (timing out after %ss)', timeout)
        run.wait(radosbench.values(), timeout=timeout)

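        # Remove the benchmark pool if this task created it.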
        if pool != 'data' and create_pool:
            manager.remove_pool(pool)