1"""
2Rados modle-based integration tests
3"""
4import contextlib
5import logging
6import gevent
7from teuthology import misc as teuthology
8
9f95a23c
TL
9import six
10
7c673cae
FG
11from teuthology.orchestra import run
12
13log = logging.getLogger(__name__)
14
@contextlib.contextmanager
def task(ctx, config):
    """
    Run RadosModel-based integration tests.

    The config should be as follows::

        rados:
          clients: [client list]
          ops: <number of ops>
          objects: <number of objects to use>
          max_in_flight: <max number of operations in flight>
          object_size: <size of objects in bytes>
          min_stride_size: <minimum write stride size in bytes>
          max_stride_size: <maximum write stride size in bytes>
          op_weights: <dictionary mapping operation type to integer weight>
          runs: <number of times to run> - the pool is remade between runs
          ec_pool: use an ec pool
          erasure_code_profile: profile to use with the erasure coded pool
          fast_read: enable fast_read on the ec pool
          min_size: set the min_size of the created pool
          pool_snaps: use pool snapshots instead of self-managed snapshots
          write_fadvise_dontneed: write with LIBRADOS_OP_FLAG_FADVISE_DONTNEED,
                                  hinting that the data will not be accessed
                                  again soon, so the OSD backend should not
                                  keep it in its cache.

    For example::

        tasks:
        - ceph:
        - rados:
            clients: [client.0]
            ops: 1000
            max_seconds: 0   # 0 for no limit
            objects: 25
            max_in_flight: 16
            object_size: 4000000
            min_stride_size: 1024
            max_stride_size: 4096
            op_weights:
              read: 20
              write: 10
              delete: 2
              snap_create: 3
              rollback: 2
              snap_remove: 0
            ec_pool: create an ec pool, defaults to False
            erasure_code_use_overwrites: test overwrites, default false
            erasure_code_profile:
              name: teuthologyprofile
              k: 2
              m: 1
              crush-failure-domain: osd
            pool_snaps: true
            write_fadvise_dontneed: true
            runs: 10
        - interactive:

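    Any option not given in the config falls back to the defaults hard-coded
    in this task (for example, ops defaults to 10000, objects to 500,
    max_in_flight to 16 and object_size to 4000000).
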
    Optionally, you can provide the pool name to run against:

        tasks:
        - ceph:
        - exec:
            client.0:
              - ceph osd pool create foo
        - rados:
            clients: [client.0]
            pools: [foo]
            ...

    Alternatively, you can provide a pool prefix:

        tasks:
        - ceph:
        - exec:
            client.0:
              - ceph osd pool create foo.client.0
        - rados:
            clients: [client.0]
            pool_prefix: foo
            ...

    The tests are run asynchronously; they are not complete when the task
    returns. For instance:

        - rados:
            clients: [client.0]
            pools: [ecbase]
            ops: 4000
            objects: 500
            op_weights:
              read: 100
              write: 100
              delete: 50
              copy_from: 50
        - print: "**** done rados ec-cache-agent (part 2)"

    will run the print task immediately after the rados task begins, but
    not after it completes. To make the rados task a blocking / sequential
    task, use:

        - sequential:
          - rados:
              clients: [client.0]
              pools: [ecbase]
              ops: 4000
              objects: 500
              op_weights:
                read: 100
                write: 100
                delete: 50
                copy_from: 50
        - print: "**** done rados ec-cache-agent (part 2)"

    """
    log.info('Beginning rados...')
    assert isinstance(config, dict), \
        "please list clients to run on"

    object_size = int(config.get('object_size', 4000000))
    op_weights = config.get('op_weights', {})
    testdir = teuthology.get_testdir(ctx)
    args = [
        'adjust-ulimits',
        'ceph-coverage',
        '{tdir}/archive/coverage'.format(tdir=testdir),
        'ceph_test_rados']
    if config.get('ec_pool', False):
        args.extend(['--no-omap'])
        if not config.get('erasure_code_use_overwrites', False):
            args.extend(['--ec-pool'])
    if config.get('write_fadvise_dontneed', False):
        args.extend(['--write-fadvise-dontneed'])
    if config.get('set_redirect', False):
        args.extend(['--set_redirect'])
    if config.get('set_chunk', False):
        args.extend(['--set_chunk'])
    if config.get('enable_dedup', False):
        args.extend(['--enable_dedup'])
    if config.get('low_tier_pool', None):
        args.extend(['--low_tier_pool', config.get('low_tier_pool', None)])
    if config.get('pool_snaps', False):
        args.extend(['--pool-snaps'])
    if config.get('balance_reads', False):
        args.extend(['--balance-reads'])
    if config.get('localize_reads', False):
        args.extend(['--localize-reads'])
    args.extend([
        '--max-ops', str(config.get('ops', 10000)),
        '--objects', str(config.get('objects', 500)),
        '--max-in-flight', str(config.get('max_in_flight', 16)),
        '--size', str(object_size),
        '--min-stride-size', str(config.get('min_stride_size', object_size // 10)),
        '--max-stride-size', str(config.get('max_stride_size', object_size // 5)),
        '--max-seconds', str(config.get('max_seconds', 0))
    ])
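    # Illustrative only (assumes an empty config; not executed): with no
    # overrides, the arguments assembled above amount to roughly
    #   ceph_test_rados --max-ops 10000 --objects 500 --max-in-flight 16 \
    #       --size 4000000 --min-stride-size 400000 --max-stride-size 800000 \
    #       --max-seconds 0
    # plus the '--op <type> <weight>' pairs appended below.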

    weights = {}
    weights['read'] = 100
    weights['write'] = 100
    weights['delete'] = 10
    # Parallel of the op_types in test/osd/TestRados.cc
    for field in [
        # read handled above
        # write handled above
        # delete handled above
        "snap_create",
        "snap_remove",
        "rollback",
        "setattr",
        "rmattr",
        "watch",
        "copy_from",
        "hit_set_list",
        "is_dirty",
        "undirty",
        "cache_flush",
        "cache_try_flush",
        "cache_evict",
        "append",
        "write",
        "read",
        "delete"
        ]:
        if field in op_weights:
            weights[field] = op_weights[field]

    if config.get('write_append_excl', True):
        if 'write' in weights:
            weights['write'] = weights['write'] // 2
            weights['write_excl'] = weights['write']

        if 'append' in weights:
            weights['append'] = weights['append'] // 2
            weights['append_excl'] = weights['append']

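    # Illustrative only, assuming the op_weights from the docstring example
    # (read: 20, write: 10, delete: 2): after the merge and the halving above,
    # the loop below emits '--op read 20 --op write 5 --op write_excl 5
    # --op delete 2' (plus the snap_* and rollback weights).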
    for op, weight in weights.items():
        args.extend([
            '--op', op, str(weight)
        ])


    def thread():
        """Thread spawned by gevent"""
        clients = ['client.{id}'.format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
        log.info('clients are %s' % clients)
        manager = ctx.managers['ceph']
        if config.get('ec_pool', False):
            profile = config.get('erasure_code_profile', {})
            profile_name = profile.get('name', 'teuthologyprofile')
            manager.create_erasure_code_profile(profile_name, profile)
        else:
            profile_name = None
        for i in range(int(config.get('runs', '1'))):
            log.info("starting run %s out of %s", str(i), config.get('runs', '1'))
            tests = {}
            existing_pools = config.get('pools', [])
            created_pools = []
            for role in config.get('clients', clients):
                assert isinstance(role, six.string_types)
                PREFIX = 'client.'
                assert role.startswith(PREFIX)
                id_ = role[len(PREFIX):]

                pool = config.get('pool', None)
                if not pool and existing_pools:
                    pool = existing_pools.pop()
                else:
                    pool = manager.create_pool_with_unique_name(
                        erasure_code_profile_name=profile_name,
                        erasure_code_use_overwrites=
                          config.get('erasure_code_use_overwrites', False)
                    )
                    created_pools.append(pool)
                    if config.get('fast_read', False):
                        manager.raw_cluster_cmd(
                            'osd', 'pool', 'set', pool, 'fast_read', 'true')
                    min_size = config.get('min_size', None)
                    if min_size is not None:
                        manager.raw_cluster_cmd(
                            'osd', 'pool', 'set', pool, 'min_size', str(min_size))

                (remote,) = ctx.cluster.only(role).remotes.keys()
                proc = remote.run(
                    args=["CEPH_CLIENT_ID={id_}".format(id_=id_)] + args +
                    ["--pool", pool],
                    logger=log.getChild("rados.{id}".format(id=id_)),
                    stdin=run.PIPE,
                    wait=False
                    )
                tests[id_] = proc
            # wait for every client's ceph_test_rados process to finish
            run.wait(tests.values())

            for pool in created_pools:
                manager.wait_snap_trimming_complete(pool)
                manager.remove_pool(pool)

    # The workload runs in a background greenlet; the task yields immediately
    # and only joins the greenlet (re-raising any failure) on teardown.
    running = gevent.spawn(thread)

    try:
        yield
    finally:
        log.info('joining rados')
        running.get()