"""
Rados model-based integration tests
"""
import contextlib
import logging

import gevent
from teuthology import misc as teuthology
from teuthology.orchestra import run

log = logging.getLogger(__name__)


@contextlib.contextmanager
def task(ctx, config):
    """
    Run RadosModel-based integration tests.

    The config should be as follows::

        clients: [client list]
        ops: <number of ops>
        objects: <number of objects to use>
        max_in_flight: <max number of operations in flight>
        object_size: <size of objects in bytes>
        min_stride_size: <minimum write stride size in bytes>
        max_stride_size: <maximum write stride size in bytes>
        op_weights: <dictionary mapping operation type to integer weight>
        runs: <number of times to run> - the pool is remade between runs
        ec_pool: use an ec pool
        erasure_code_profile: profile to use with the erasure coded pool
        fast_read: enable ec_pool's fast_read
        min_size: set the min_size of created pool
        pool_snaps: use pool snapshots instead of self-managed snapshots
        write_fadvise_dontneed: write with LIBRADOS_OP_FLAG_FADVISE_DONTNEED.
                                This hints that the data will not be accessed
                                in the near future, so the OSD backend need
                                not keep it in its cache.

    For example::

        tasks:
        - ceph:
        - rados:
            clients: [client.0]
            max_seconds: 0   # 0 for no limit
            ec_pool: create an ec pool, defaults to False
            erasure_code_use_overwrites: test overwrites, default false
            erasure_code_profile:
              name: teuthologyprofile
              ruleset-failure-domain: osd
            write_fadvise_dontneed: true

    Optionally, you can provide the pool name to run against:

        tasks:
        - ceph:
        - exec:
            client.0:
              - ceph osd pool create foo
        - rados:
            clients: [client.0]
            pools: [foo]

    Alternatively, you can provide a pool prefix:

        tasks:
        - ceph:
        - exec:
            client.0:
              - ceph osd pool create foo.client.0
        - rados:
            clients: [client.0]
            pool_prefix: foo

    The tests are run asynchronously; they are not complete when the task
    returns. For instance:

        - rados:
            clients: [client.0]
        - print: "**** done rados ec-cache-agent (part 2)"

    will run the print task immediately after the rados task begins, but
    not after it completes. To make the rados task a blocking / sequential
    task, use:

        - sequential:
          - rados:
              clients: [client.0]
        - print: "**** done rados ec-cache-agent (part 2)"

    """
    log.info('Beginning rados...')
    assert isinstance(config, dict), \
        "please list clients to run on"

    object_size = int(config.get('object_size', 4000000))
    op_weights = config.get('op_weights', {})
    testdir = teuthology.get_testdir(ctx)
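
    # Build the command line for the RadosModel test binary from the task
    # config; each client role runs it against its own pool below.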
    args = [
        'adjust-ulimits',
        'ceph-coverage',
        '{tdir}/archive/coverage'.format(tdir=testdir),
        'ceph_test_rados']
    if config.get('ec_pool', False):
        args.extend(['--no-omap'])
        if not config.get('erasure_code_use_overwrites', False):
            args.extend(['--ec-pool'])
    if config.get('write_fadvise_dontneed', False):
        args.extend(['--write-fadvise-dontneed'])
    if config.get('pool_snaps', False):
        args.extend(['--pool-snaps'])
    args.extend([
        '--max-ops', str(config.get('ops', 10000)),
        '--objects', str(config.get('objects', 500)),
        '--max-in-flight', str(config.get('max_in_flight', 16)),
        '--size', str(object_size),
        '--min-stride-size', str(config.get('min_stride_size', object_size / 10)),
        '--max-stride-size', str(config.get('max_stride_size', object_size / 5)),
        '--max-seconds', str(config.get('max_seconds', 0))
        ])
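
    # Default operation weights; entries in the 'op_weights' config section
    # override or extend these below.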
    weights = {}
    weights['read'] = 100
    weights['write'] = 100
    weights['delete'] = 10
    # Parallel of the op_types in test/osd/TestRados.cc
    for field in [
        # defaults for read, write and delete are set above
        "snap_create", "snap_remove", "rollback", "setattr", "rmattr",
        "watch", "copy_from", "hit_set_list", "is_dirty", "undirty",
        "cache_flush", "cache_try_flush", "cache_evict",
        "append", "write", "read", "delete",
        ]:
        if field in op_weights:
            weights[field] = op_weights[field]
    if config.get('write_append_excl', True):
        if 'write' in weights:
            weights['write'] = weights['write'] / 2
            weights['write_excl'] = weights['write']

        if 'append' in weights:
            weights['append'] = weights['append'] / 2
            weights['append_excl'] = weights['append']
    for op, weight in weights.iteritems():
        args.extend([
            '--op', op, str(weight)
            ])

204 """Thread spawned by gevent"""
205 clients
= ['client.{id}'.format(id=id_
) for id_
in teuthology
.all_roles_of_type(ctx
.cluster
, 'client')]
206 log
.info('clients are %s' % clients
)
        manager = ctx.managers['ceph']
        if config.get('ec_pool', False):
            profile = config.get('erasure_code_profile', {})
            profile_name = profile.get('name', 'teuthologyprofile')
            manager.create_erasure_code_profile(profile_name, profile)
        else:
            profile_name = None
        for i in range(int(config.get('runs', '1'))):
            log.info("starting run %s out of %s", str(i), config.get('runs', '1'))
            tests = {}
            existing_pools = config.get('pools', [])
            created_pools = []
            for role in config.get('clients', clients):
                assert isinstance(role, basestring)
                PREFIX = 'client.'
                assert role.startswith(PREFIX)
                id_ = role[len(PREFIX):]

                pool = config.get('pool', None)
                if not pool and existing_pools:
                    pool = existing_pools.pop()
                if not pool:
                    pool = manager.create_pool_with_unique_name(
                        erasure_code_profile_name=profile_name,
                        erasure_code_use_overwrites=config.get(
                            'erasure_code_use_overwrites', False)
                    )
                    created_pools.append(pool)
                    if config.get('fast_read', False):
                        manager.raw_cluster_cmd(
                            'osd', 'pool', 'set', pool, 'fast_read', 'true')
                    min_size = config.get('min_size', None)
                    if min_size is not None:
                        manager.raw_cluster_cmd(
                            'osd', 'pool', 'set', pool, 'min_size', str(min_size))

                (remote,) = ctx.cluster.only(role).remotes.iterkeys()
                proc = remote.run(
                    args=["CEPH_CLIENT_ID={id_}".format(id_=id_)] + args +
                         ["--pool", pool],
                    logger=log.getChild("rados.{id}".format(id=id_)),
                    wait=False
                    )
                tests[id_] = proc
            run.wait(tests.itervalues())

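            # All clients for this run have finished; clean up the pools that
            # were created for it before the next run.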
            for pool in created_pools:
                manager.wait_snap_trimming_complete(pool)
                manager.remove_pool(pool)

    running = gevent.spawn(thread)

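    # The workload runs in the background greenlet; yield control back to
    # teuthology and join the greenlet when the task is torn down.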
    try:
        yield
    finally:
        log.info('joining rados')
        running.get()