]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | """ |
2 | Rados model-based integration tests | |
3 | """ | |
4 | import contextlib | |
5 | import logging | |
6 | import gevent | |
7 | from teuthology import misc as teuthology | |
8 | ||
9 | from teuthology.orchestra import run | |
10 | ||
11 | log = logging.getLogger(__name__) | |
12 | ||
@contextlib.contextmanager
def task(ctx, config):
    """
    Run RadosModel-based integration tests.

    The config should be as follows::

        rados:
          clients: [client list]
          ops: <number of ops>
          objects: <number of objects to use>
          max_in_flight: <max number of operations in flight>
          object_size: <size of objects in bytes>
          min_stride_size: <minimum write stride size in bytes>
          max_stride_size: <maximum write stride size in bytes>
          op_weights: <dictionary mapping operation type to integer weight>
          runs: <number of times to run> - the pool is remade between runs
          ec_pool: use an ec pool
          erasure_code_profile: profile to use with the erasure coded pool
          fast_read: enable ec_pool's fast_read
          min_size: set the min_size of created pool
          pool_snaps: use pool snapshots instead of selfmanaged snapshots
          write_fadvise_dontneed: write behavior like with
              LIBRADOS_OP_FLAG_FADVISE_DONTNEED. This means the data is not
              expected to be accessed again in the near future, so the OSD
              backend need not keep it in its cache.

    For example::

        tasks:
        - ceph:
        - rados:
            clients: [client.0]
            ops: 1000
            max_seconds: 0   # 0 for no limit
            objects: 25
            max_in_flight: 16
            object_size: 4000000
            min_stride_size: 1024
            max_stride_size: 4096
            op_weights:
              read: 20
              write: 10
              delete: 2
              snap_create: 3
              rollback: 2
              snap_remove: 0
            ec_pool: create an ec pool, defaults to False
            erasure_code_use_overwrites: test overwrites, default false
            erasure_code_profile:
              name: teuthologyprofile
              k: 2
              m: 1
              ruleset-failure-domain: osd
            pool_snaps: true
            write_fadvise_dontneed: true
            runs: 10
        - interactive:

    Optionally, you can provide the pool name to run against::

        tasks:
        - ceph:
        - exec:
            client.0:
              - ceph osd pool create foo
        - rados:
            clients: [client.0]
            pools: [foo]
        ...

    Alternatively, you can provide a pool prefix::

        tasks:
        - ceph:
        - exec:
            client.0:
              - ceph osd pool create foo.client.0
        - rados:
            clients: [client.0]
            pool_prefix: foo
        ...

    The tests are run asynchronously; they are not complete when the task
    returns. For instance::

        - rados:
            clients: [client.0]
            pools: [ecbase]
            ops: 4000
            objects: 500
            op_weights:
              read: 100
              write: 100
              delete: 50
              copy_from: 50
        - print: "**** done rados ec-cache-agent (part 2)"

    will run the print task immediately after the rados task begins but
    not after it completes. To make the rados task a blocking / sequential
    task, use::

        - sequential:
          - rados:
              clients: [client.0]
              pools: [ecbase]
              ops: 4000
              objects: 500
              op_weights:
                read: 100
                write: 100
                delete: 50
                copy_from: 50
        - print: "**** done rados ec-cache-agent (part 2)"

    """
    log.info('Beginning rados...')
    assert isinstance(config, dict), \
        "please list clients to run on"

    # basestring exists only on Python 2; fall back to str on Python 3 so the
    # role-type assertion below works on either interpreter.
    try:
        string_types = basestring
    except NameError:
        string_types = str

    object_size = int(config.get('object_size', 4000000))
    op_weights = config.get('op_weights', {})
    testdir = teuthology.get_testdir(ctx)
    args = [
        'adjust-ulimits',
        'ceph-coverage',
        '{tdir}/archive/coverage'.format(tdir=testdir),
        'ceph_test_rados']
    if config.get('ec_pool', False):
        args.extend(['--no-omap'])
        if not config.get('erasure_code_use_overwrites', False):
            args.extend(['--ec-pool'])
    if config.get('write_fadvise_dontneed', False):
        args.extend(['--write-fadvise-dontneed'])
    if config.get('pool_snaps', False):
        args.extend(['--pool-snaps'])
    # NOTE: floor division (//) keeps the stride defaults integral on
    # Python 3; true division would put e.g. "400000.0" on the command line.
    args.extend([
        '--max-ops', str(config.get('ops', 10000)),
        '--objects', str(config.get('objects', 500)),
        '--max-in-flight', str(config.get('max_in_flight', 16)),
        '--size', str(object_size),
        '--min-stride-size', str(config.get('min_stride_size', object_size // 10)),
        '--max-stride-size', str(config.get('max_stride_size', object_size // 5)),
        '--max-seconds', str(config.get('max_seconds', 0))
        ])

    # Default weights; any entry in op_weights overrides these.
    weights = {}
    weights['read'] = 100
    weights['write'] = 100
    weights['delete'] = 10
    # Parallel of the op_types in test/osd/TestRados.cc
    for field in [
            # read handled above
            # write handled above
            # delete handled above
            "snap_create",
            "snap_remove",
            "rollback",
            "setattr",
            "rmattr",
            "watch",
            "copy_from",
            "hit_set_list",
            "is_dirty",
            "undirty",
            "cache_flush",
            "cache_try_flush",
            "cache_evict",
            "append",
            "write",
            "read",
            "delete"
            ]:
        if field in op_weights:
            weights[field] = op_weights[field]

    if config.get('write_append_excl', True):
        # Split each write/append weight evenly with its *_excl variant.
        # Floor division keeps the weights integral on Python 3.
        if 'write' in weights:
            weights['write'] = weights['write'] // 2
            weights['write_excl'] = weights['write']

        if 'append' in weights:
            weights['append'] = weights['append'] // 2
            weights['append_excl'] = weights['append']

    for op, weight in weights.items():
        args.extend([
            '--op', op, str(weight)
            ])

    def thread():
        """Thread spawned by gevent: pick/create pools, run the test
        binary on every client, then tear down any pools we created."""
        clients = ['client.{id}'.format(id=id_)
                   for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
        log.info('clients are %s', clients)
        manager = ctx.managers['ceph']
        if config.get('ec_pool', False):
            profile = config.get('erasure_code_profile', {})
            profile_name = profile.get('name', 'teuthologyprofile')
            manager.create_erasure_code_profile(profile_name, profile)
        else:
            profile_name = None
        for i in range(int(config.get('runs', '1'))):
            log.info("starting run %s out of %s", str(i),
                     config.get('runs', '1'))
            tests = {}
            existing_pools = config.get('pools', [])
            created_pools = []
            for role in config.get('clients', clients):
                assert isinstance(role, string_types)
                PREFIX = 'client.'
                assert role.startswith(PREFIX)
                id_ = role[len(PREFIX):]

                # Pool selection: an explicit 'pool' wins, then one of the
                # pre-created 'pools', and only as a last resort do we create
                # a fresh uniquely-named pool.  (Previously a configured
                # 'pool' was silently ignored: the else-branch created and
                # later deleted a brand-new pool instead.)
                pool = config.get('pool', None)
                if not pool:
                    if existing_pools:
                        pool = existing_pools.pop()
                    else:
                        pool = manager.create_pool_with_unique_name(
                            erasure_code_profile_name=profile_name,
                            erasure_code_use_overwrites=
                              config.get('erasure_code_use_overwrites', False)
                        )
                        created_pools.append(pool)
                if config.get('fast_read', False):
                    manager.raw_cluster_cmd(
                        'osd', 'pool', 'set', pool, 'fast_read', 'true')
                min_size = config.get('min_size', None)
                if min_size is not None:
                    manager.raw_cluster_cmd(
                        'osd', 'pool', 'set', pool, 'min_size', str(min_size))

                (remote,) = ctx.cluster.only(role).remotes.keys()
                proc = remote.run(
                    args=["CEPH_CLIENT_ID={id_}".format(id_=id_)] + args +
                    ["--pool", pool],
                    logger=log.getChild("rados.{id}".format(id=id_)),
                    stdin=run.PIPE,
                    wait=False
                    )
                tests[id_] = proc
            run.wait(tests.values())

            # Only pools we created are removed; caller-supplied pools are
            # left in place.
            for pool in created_pools:
                manager.wait_snap_trimming_complete(pool)
                manager.remove_pool(pool)

    running = gevent.spawn(thread)

    try:
        yield
    finally:
        log.info('joining rados')
        running.get()