]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | """ |
2 | Rados benchmarking | |
3 | """ | |
4 | import contextlib | |
5 | import logging | |
6 | ||
7 | from teuthology.orchestra import run | |
8 | from teuthology import misc as teuthology | |
9 | ||
9f95a23c TL |
10 | import six |
11 | ||
7c673cae FG |
# Module-level logger; per-client child loggers are derived from it in task().
log = logging.getLogger(__name__)

def _bench_shell_cmd(role, opts, testdir):
    """
    Build the shell command string for one ``rados bench`` invocation.

    :param role: client role name, e.g. ``client.0`` (passed to ``--name``)
    :param opts: list of extra command-line tokens (pool, bench subcommand,
                 sizes, cleanup flags, ...)
    :param testdir: teuthology test dir, substituted into the coverage path

    The ``.format(tdir=...)`` is applied to the *joined* string, matching the
    historical behavior (so ``{tdir}`` in the coverage path is expanded after
    all tokens are concatenated).
    """
    return " ".join(['adjust-ulimits',
                     'ceph-coverage',
                     '{tdir}/archive/coverage',
                     'rados',
                     '--no-log-to-stderr',
                     '--name', role] + opts).format(tdir=testdir)


@contextlib.contextmanager
def task(ctx, config):
    """
    Run radosbench

    The config should be as follows:

    radosbench:
        clients: [client list]
        time: <seconds to run>
        pool: <pool to use>
        size: write size to use
        concurrency: max number of outstanding writes (16)
        objectsize: object size to use
        unique_pool: use a unique pool, defaults to False
        ec_pool: create an ec pool, defaults to False
        create_pool: create pool, defaults to True
        erasure_code_profile:
          name: teuthologyprofile
          k: 2
          m: 1
          crush-failure-domain: osd
        cleanup: false (defaults to true)
        type: <write|seq|rand> (defaults to write)
    example:

    tasks:
    - ceph:
    - radosbench:
        clients: [client.0]
        time: 360
    - interactive:
    """
    log.info('Beginning radosbench...')
    assert isinstance(config, dict), \
        "please list clients to run on"
    radosbench = {}

    testdir = teuthology.get_testdir(ctx)
    manager = ctx.managers['ceph']
    runtype = config.get('type', 'write')

    create_pool = config.get('create_pool', True)
    # Bind 'pool' up front so the cleanup in the finally block below cannot
    # raise NameError when the configured clients list is empty (the loop
    # rebinds it on every iteration, so this does not change behavior).
    pool = config.get('pool', 'data')

    for role in config.get('clients', ['client.0']):
        assert isinstance(role, six.string_types)
        PREFIX = 'client.'
        assert role.startswith(PREFIX)
        id_ = role[len(PREFIX):]
        (remote,) = ctx.cluster.only(role).remotes.keys()

        if config.get('ec_pool', False):
            profile = config.get('erasure_code_profile', {})
            profile_name = profile.get('name', 'teuthologyprofile')
            manager.create_erasure_code_profile(profile_name, profile)
        else:
            profile_name = None

        # By default 'rados bench' deletes its objects when done; pass
        # --no-cleanup only when the caller asked to keep them.
        cleanup = []
        if not config.get('cleanup', True):
            cleanup = ['--no-cleanup']

        write_to_omap = []
        if config.get('write-omap', False):
            write_to_omap = ['--write-omap']
            log.info('omap writes')

        pool = config.get('pool', 'data')
        if create_pool:
            if pool != 'data':
                manager.create_pool(pool, erasure_code_profile_name=profile_name)
            else:
                pool = manager.create_pool_with_unique_name(erasure_code_profile_name=profile_name)

        concurrency = config.get('concurrency', 16)
        osize = config.get('objectsize', 65536)
        # objectsize == 0 means "let rados bench use its default object size".
        if osize == 0:
            objectsize = []
        else:
            objectsize = ['--object-size', str(osize)]
        size = ['-b', str(config.get('size', 65536))]

        # For read benchmarks (seq/rand) there must be data to read back:
        # run a synchronous 60-second write pass first, keeping the objects.
        if runtype != "write":
            remote.run(
                args=[
                    "/bin/sh", "-c",
                    _bench_shell_cmd(
                        role,
                        ['-t', str(concurrency)] + size + objectsize +
                        ['-p', pool,
                         'bench', str(60), "write", "--no-cleanup"],
                        testdir),
                ],
                logger=log.getChild('radosbench.{id}'.format(id=id_)),
                wait=True
            )
            # 'rados bench seq/rand' does not accept -b / --object-size.
            size = []
            objectsize = []

        # The benchmark proper runs in the background; it is joined in the
        # finally block once the nested tasks complete.
        proc = remote.run(
            args=[
                "/bin/sh", "-c",
                _bench_shell_cmd(
                    role,
                    size + objectsize +
                    ['-p', pool,
                     'bench', str(config.get('time', 360)), runtype,
                     ] + write_to_omap + cleanup,
                    testdir),
            ],
            logger=log.getChild('radosbench.{id}'.format(id=id_)),
            stdin=run.PIPE,
            wait=False
        )
        radosbench[id_] = proc

    try:
        yield
    finally:
        # Generous timeout: benchmarks can badly overrun their nominal time
        # on loaded clusters.
        timeout = config.get('time', 360) * 30 + 300
        log.info('joining radosbench (timing out after %ss)', timeout)
        run.wait(radosbench.values(), timeout=timeout)

        # NOTE(review): with multiple clients using unique pools only the
        # last-created pool is removed here; earlier ones leak. Pre-existing
        # behavior, left unchanged.
        if pool != 'data' and create_pool:
            manager.remove_pool(pool)