1"""
2Rados modle-based integration tests
3"""
4import contextlib
5import logging
6import gevent
7from teuthology import misc as teuthology
8
9from teuthology.orchestra import run
10
11log = logging.getLogger(__name__)
12
@contextlib.contextmanager
def task(ctx, config):
    """
    Run RadosModel-based integration tests.

    The config should be as follows::

        rados:
          clients: [client list]
          ops: <number of ops>
          objects: <number of objects to use>
          max_in_flight: <max number of operations in flight>
          object_size: <size of objects in bytes>
          min_stride_size: <minimum write stride size in bytes>
          max_stride_size: <maximum write stride size in bytes>
          op_weights: <dictionary mapping operation type to integer weight>
          runs: <number of times to run> - the pool is remade between runs
          ec_pool: use an ec pool
          erasure_code_profile: profile to use with the erasure coded pool
          fast_read: enable ec_pool's fast_read
          min_size: set the min_size of the created pool
          pool_snaps: use pool snapshots instead of self-managed snapshots
          write_fadvise_dontneed: write with LIBRADOS_OP_FLAG_FADVISE_DONTNEED set.
                                  This hints that the data will not be accessed in
                                  the near future, so the OSD backend need not keep
                                  it in cache.

    For example::

        tasks:
        - ceph:
        - rados:
            clients: [client.0]
            ops: 1000
            max_seconds: 0   # 0 for no limit
            objects: 25
            max_in_flight: 16
            object_size: 4000000
            min_stride_size: 1024
            max_stride_size: 4096
            op_weights:
              read: 20
              write: 10
              delete: 2
              snap_create: 3
              rollback: 2
              snap_remove: 0
            ec_pool: create an ec pool, defaults to False
            erasure_code_use_overwrites: test overwrites, default false
            erasure_code_profile:
              name: teuthologyprofile
              k: 2
              m: 1
              crush-failure-domain: osd
            pool_snaps: true
            write_fadvise_dontneed: true
            runs: 10
        - interactive:

    Optionally, you can provide the pool name to run against:

        tasks:
        - ceph:
        - exec:
            client.0:
              - ceph osd pool create foo
        - rados:
            clients: [client.0]
            pools: [foo]
            ...

    Alternatively, you can provide a pool prefix:

        tasks:
        - ceph:
        - exec:
            client.0:
              - ceph osd pool create foo.client.0
        - rados:
            clients: [client.0]
            pool_prefix: foo
            ...

    The tests are run asynchronously; they are not complete when the task
    returns. For instance:

        - rados:
            clients: [client.0]
            pools: [ecbase]
            ops: 4000
            objects: 500
            op_weights:
              read: 100
              write: 100
              delete: 50
              copy_from: 50
        - print: "**** done rados ec-cache-agent (part 2)"

    will run the print task immediately after the rados task begins, but
    not after it completes. To make the rados task a blocking / sequential
    task, use:

        - sequential:
          - rados:
              clients: [client.0]
              pools: [ecbase]
              ops: 4000
              objects: 500
              op_weights:
                read: 100
                write: 100
                delete: 50
                copy_from: 50
          - print: "**** done rados ec-cache-agent (part 2)"

    """
    log.info('Beginning rados...')
    assert isinstance(config, dict), \
        "please list clients to run on"

    object_size = int(config.get('object_size', 4000000))
    op_weights = config.get('op_weights', {})
    testdir = teuthology.get_testdir(ctx)
    args = [
        'adjust-ulimits',
        'ceph-coverage',
        '{tdir}/archive/coverage'.format(tdir=testdir),
        'ceph_test_rados']
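    # Translate boolean config options into ceph_test_rados command-line flags;
    # note that --ec-pool is omitted when erasure_code_use_overwrites is set.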
    if config.get('ec_pool', False):
        args.extend(['--no-omap'])
        if not config.get('erasure_code_use_overwrites', False):
            args.extend(['--ec-pool'])
    if config.get('write_fadvise_dontneed', False):
        args.extend(['--write-fadvise-dontneed'])
    if config.get('set_redirect', False):
        args.extend(['--set_redirect'])
    if config.get('set_chunk', False):
        args.extend(['--set_chunk'])
    if config.get('low_tier_pool', None):
        args.extend(['--low_tier_pool', config.get('low_tier_pool', None)])
    if config.get('pool_snaps', False):
        args.extend(['--pool-snaps'])
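    # Numeric limits; the write stride size defaults to a band between one
    # tenth and one fifth of the configured object size.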
    args.extend([
        '--max-ops', str(config.get('ops', 10000)),
        '--objects', str(config.get('objects', 500)),
        '--max-in-flight', str(config.get('max_in_flight', 16)),
        '--size', str(object_size),
        '--min-stride-size', str(config.get('min_stride_size', object_size / 10)),
        '--max-stride-size', str(config.get('max_stride_size', object_size / 5)),
        '--max-seconds', str(config.get('max_seconds', 0))
        ])

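    # Default op weights for read/write/delete; any op listed in op_weights
    # (including these three) overrides or extends the defaults below.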
    weights = {}
    weights['read'] = 100
    weights['write'] = 100
    weights['delete'] = 10
    # Parallel of the op_types in test/osd/TestRados.cc
    for field in [
        # read handled above
        # write handled above
        # delete handled above
        "snap_create",
        "snap_remove",
        "rollback",
        "setattr",
        "rmattr",
        "watch",
        "copy_from",
        "hit_set_list",
        "is_dirty",
        "undirty",
        "cache_flush",
        "cache_try_flush",
        "cache_evict",
        "append",
        "write",
        "read",
        "delete"
        ]:
        if field in op_weights:
            weights[field] = op_weights[field]

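    # With write_append_excl (the default), split the write and append weights
    # evenly between the regular op and its exclusive-create (*_excl) variant.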
    if config.get('write_append_excl', True):
        if 'write' in weights:
            weights['write'] = weights['write'] / 2
            weights['write_excl'] = weights['write']

        if 'append' in weights:
            weights['append'] = weights['append'] / 2
            weights['append_excl'] = weights['append']

    for op, weight in weights.iteritems():
        args.extend([
            '--op', op, str(weight)
        ])


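    # The workload runs in a background greenlet (spawned at the bottom of this
    # task), so the task can yield while ceph_test_rados is still executing.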
    def thread():
        """Thread spawned by gevent"""
        clients = ['client.{id}'.format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
        log.info('clients are %s' % clients)
        manager = ctx.managers['ceph']
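        # For EC runs, register the erasure-code profile once so every pool
        # created below can be built from it by name.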
        if config.get('ec_pool', False):
            profile = config.get('erasure_code_profile', {})
            profile_name = profile.get('name', 'teuthologyprofile')
            manager.create_erasure_code_profile(profile_name, profile)
        else:
            profile_name = None
        for i in range(int(config.get('runs', '1'))):
            log.info("starting run %s out of %s", str(i), config.get('runs', '1'))
            tests = {}
            existing_pools = config.get('pools', [])
            created_pools = []
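            # One ceph_test_rados process is started per client role, each against
            # either a pre-existing pool from 'pools' or a freshly created one.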
            for role in config.get('clients', clients):
                assert isinstance(role, basestring)
                PREFIX = 'client.'
                assert role.startswith(PREFIX)
                id_ = role[len(PREFIX):]

                pool = config.get('pool', None)
                if not pool and existing_pools:
                    pool = existing_pools.pop()
                else:
                    pool = manager.create_pool_with_unique_name(
                        erasure_code_profile_name=profile_name,
                        erasure_code_use_overwrites=
                          config.get('erasure_code_use_overwrites', False)
                    )
                    created_pools.append(pool)
                    if config.get('fast_read', False):
                        manager.raw_cluster_cmd(
                            'osd', 'pool', 'set', pool, 'fast_read', 'true')
                    min_size = config.get('min_size', None)
                    if min_size is not None:
                        manager.raw_cluster_cmd(
                            'osd', 'pool', 'set', pool, 'min_size', str(min_size))

                (remote,) = ctx.cluster.only(role).remotes.iterkeys()
                proc = remote.run(
                    args=["CEPH_CLIENT_ID={id_}".format(id_=id_)] + args +
                    ["--pool", pool],
                    logger=log.getChild("rados.{id}".format(id=id_)),
                    stdin=run.PIPE,
                    wait=False
                    )
                tests[id_] = proc
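            # Block until every client's ceph_test_rados process has exited, then
            # clean up any pools this run created.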
            run.wait(tests.itervalues())

            for pool in created_pools:
                manager.wait_snap_trimming_complete(pool)
                manager.remove_pool(pool)

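    # Spawn the workload greenlet and yield back to teuthology; the finally
    # block joins it on teardown and re-raises any failure from the run.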
    running = gevent.spawn(thread)

    try:
        yield
    finally:
        log.info('joining rados')
        running.get()