Rados model-based integration tests
7 from teuthology
import misc
as teuthology
11 from teuthology
.orchestra
import run
13 log
= logging
.getLogger(__name__
)
15 @contextlib.contextmanager
16 def task(ctx
, config
):
18 Run RadosModel-based integration tests.
20 The config should be as follows::
23 clients: [client list]
25 objects: <number of objects to use>
26 max_in_flight: <max number of operations in flight>
27 object_size: <size of objects in bytes>
28 min_stride_size: <minimum write stride size in bytes>
29 max_stride_size: <maximum write stride size in bytes>
30 op_weights: <dictionary mapping operation type to integer weight>
31 runs: <number of times to run> - the pool is remade between runs
32 ec_pool: use an ec pool
33 erasure_code_profile: profile to use with the erasure coded pool
34 fast_read: enable ec_pool's fast_read
35 min_size: set the min_size of created pool
36 pool_snaps: use pool snapshots instead of selfmanaged snapshots
write_fadvise_dontneed: write behavior like with LIBRADOS_OP_FLAG_FADVISE_DONTNEED.
    This means the data will not be accessed again in the near future,
    so the OSD backend need not keep it in cache.
48 max_seconds: 0 # 0 for no limit
61 ec_pool: create an ec pool, defaults to False
62 erasure_code_use_overwrites: test overwrites, default false
64 name: teuthologyprofile
67 crush-failure-domain: osd
69 write_fadvise_dontneed: true
73 Optionally, you can provide the pool name to run against:
79 - ceph osd pool create foo
85 Alternatively, you can provide a pool prefix:
91 - ceph osd pool create foo.client.0
The tests are run asynchronously; they are not complete when the task
returns. For instance:
110 - print: "**** done rados ec-cache-agent (part 2)"
will run the print task immediately after the rados task begins, but
not after it completes. To make the rados task a blocking / sequential
127 - print: "**** done rados ec-cache-agent (part 2)"
130 log
.info('Beginning rados...')
131 assert isinstance(config
, dict), \
132 "please list clients to run on"
134 object_size
= int(config
.get('object_size', 4000000))
135 op_weights
= config
.get('op_weights', {})
136 testdir
= teuthology
.get_testdir(ctx
)
140 '{tdir}/archive/coverage'.format(tdir
=testdir
),
142 if config
.get('ec_pool', False):
143 args
.extend(['--no-omap'])
144 if not config
.get('erasure_code_use_overwrites', False):
145 args
.extend(['--ec-pool'])
146 if config
.get('write_fadvise_dontneed', False):
147 args
.extend(['--write-fadvise-dontneed'])
148 if config
.get('set_redirect', False):
149 args
.extend(['--set_redirect'])
150 if config
.get('set_chunk', False):
151 args
.extend(['--set_chunk'])
152 if config
.get('enable_dedup', False):
153 args
.extend(['--enable_dedup'])
154 if config
.get('low_tier_pool', None):
155 args
.extend(['--low_tier_pool', config
.get('low_tier_pool', None)])
156 if config
.get('pool_snaps', False):
157 args
.extend(['--pool-snaps'])
158 if config
.get('balance_reads', False):
159 args
.extend(['--balance-reads'])
160 if config
.get('localize_reads', False):
161 args
.extend(['--localize-reads'])
163 '--max-ops', str(config
.get('ops', 10000)),
164 '--objects', str(config
.get('objects', 500)),
165 '--max-in-flight', str(config
.get('max_in_flight', 16)),
166 '--size', str(object_size
),
167 '--min-stride-size', str(config
.get('min_stride_size', object_size
// 10)),
168 '--max-stride-size', str(config
.get('max_stride_size', object_size
// 5)),
169 '--max-seconds', str(config
.get('max_seconds', 0))
173 weights
['read'] = 100
174 weights
['write'] = 100
175 weights
['delete'] = 10
176 # Parallel of the op_types in test/osd/TestRados.cc
179 # write handled above
180 # delete handled above
199 if field
in op_weights
:
200 weights
[field
] = op_weights
[field
]
202 if config
.get('write_append_excl', True):
203 if 'write' in weights
:
204 weights
['write'] = weights
['write'] // 2
205 weights
['write_excl'] = weights
['write']
207 if 'append' in weights
:
208 weights
['append'] = weights
['append'] // 2
209 weights
['append_excl'] = weights
['append']
211 for op
, weight
in weights
.items():
213 '--op', op
, str(weight
)
218 """Thread spawned by gevent"""
219 clients
= ['client.{id}'.format(id=id_
) for id_
in teuthology
.all_roles_of_type(ctx
.cluster
, 'client')]
220 log
.info('clients are %s' % clients
)
221 manager
= ctx
.managers
['ceph']
222 if config
.get('ec_pool', False):
223 profile
= config
.get('erasure_code_profile', {})
224 profile_name
= profile
.get('name', 'teuthologyprofile')
225 manager
.create_erasure_code_profile(profile_name
, profile
)
228 for i
in range(int(config
.get('runs', '1'))):
229 log
.info("starting run %s out of %s", str(i
), config
.get('runs', '1'))
231 existing_pools
= config
.get('pools', [])
233 for role
in config
.get('clients', clients
):
234 assert isinstance(role
, six
.string_types
)
236 assert role
.startswith(PREFIX
)
237 id_
= role
[len(PREFIX
):]
239 pool
= config
.get('pool', None)
240 if not pool
and existing_pools
:
241 pool
= existing_pools
.pop()
243 pool
= manager
.create_pool_with_unique_name(
244 erasure_code_profile_name
=profile_name
,
245 erasure_code_use_overwrites
=
246 config
.get('erasure_code_use_overwrites', False)
248 created_pools
.append(pool
)
249 if config
.get('fast_read', False):
250 manager
.raw_cluster_cmd(
251 'osd', 'pool', 'set', pool
, 'fast_read', 'true')
252 min_size
= config
.get('min_size', None);
253 if min_size
is not None:
254 manager
.raw_cluster_cmd(
255 'osd', 'pool', 'set', pool
, 'min_size', str(min_size
))
257 (remote
,) = ctx
.cluster
.only(role
).remotes
.keys()
259 args
=["CEPH_CLIENT_ID={id_}".format(id_
=id_
)] + args
+
261 logger
=log
.getChild("rados.{id}".format(id=id_
)),
266 run
.wait(tests
.values())
268 for pool
in created_pools
:
269 manager
.wait_snap_trimming_complete(pool
);
270 manager
.remove_pool(pool
)
272 running
= gevent
.spawn(thread
)
277 log
.info('joining rados')