1"""
2Rados modle-based integration tests
3"""
4import contextlib
5import logging
6import gevent
7from teuthology import misc as teuthology
8
9f95a23c 9
7c673cae
FG
10from teuthology.orchestra import run
11
12log = logging.getLogger(__name__)
13
@contextlib.contextmanager
def task(ctx, config):
    """
    Run RadosModel-based integration tests.

    The config should be as follows::

        rados:
          clients: [client list]
          ops: <number of ops>
          objects: <number of objects to use>
          max_in_flight: <max number of operations in flight>
          object_size: <size of objects in bytes>
          min_stride_size: <minimum write stride size in bytes>
          max_stride_size: <maximum write stride size in bytes>
          op_weights: <dictionary mapping operation type to integer weight>
          runs: <number of times to run> - the pool is remade between runs
          ec_pool: use an ec pool
          erasure_code_profile: profile to use with the erasure coded pool
          fast_read: enable ec_pool's fast_read
          min_size: set the min_size of created pool
          pool_snaps: use pool snapshots instead of selfmanaged snapshots
          write_fadvise_dontneed: write with the LIBRADOS_OP_FLAG_FADVISE_DONTNEED
                                  flag, hinting that the data will not be accessed
                                  again soon and that the OSD backend need not keep
                                  it in cache.

    For example::

        tasks:
        - ceph:
        - rados:
            clients: [client.0]
            ops: 1000
            max_seconds: 0   # 0 for no limit
            objects: 25
            max_in_flight: 16
            object_size: 4000000
            min_stride_size: 1024
            max_stride_size: 4096
            op_weights:
              read: 20
              write: 10
              delete: 2
              snap_create: 3
              rollback: 2
              snap_remove: 0
            ec_pool: create an ec pool, defaults to False
            erasure_code_use_overwrites: test overwrites, default false
            erasure_code_profile:
              name: teuthologyprofile
              k: 2
              m: 1
              crush-failure-domain: osd
            pool_snaps: true
            write_fadvise_dontneed: true
            runs: 10
        - interactive:

    Optionally, you can provide the pool name to run against:

        tasks:
        - ceph:
        - exec:
            client.0:
              - ceph osd pool create foo
        - rados:
            clients: [client.0]
            pools: [foo]
            ...

    Alternatively, you can provide a pool prefix:

        tasks:
        - ceph:
        - exec:
            client.0:
              - ceph osd pool create foo.client.0
        - rados:
            clients: [client.0]
            pool_prefix: foo
            ...

    The tests are run asynchronously; they are not complete when the task
    returns. For instance:

    - rados:
        clients: [client.0]
        pools: [ecbase]
        ops: 4000
        objects: 500
        op_weights:
          read: 100
          write: 100
          delete: 50
          copy_from: 50
    - print: "**** done rados ec-cache-agent (part 2)"

    will run the print task immediately after the rados task begins, not
    after it completes. To make the rados task a blocking / sequential
    task, use:

    - sequential:
      - rados:
          clients: [client.0]
          pools: [ecbase]
          ops: 4000
          objects: 500
          op_weights:
            read: 100
            write: 100
            delete: 50
            copy_from: 50
    - print: "**** done rados ec-cache-agent (part 2)"

    """
    log.info('Beginning rados...')
    assert isinstance(config, dict), \
        "please list clients to run on"

    object_size = int(config.get('object_size', 4000000))
    op_weights = config.get('op_weights', {})
    testdir = teuthology.get_testdir(ctx)
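    # Build the ceph_test_rados command line; adjust-ulimits and
    # ceph-coverage are the usual teuthology wrappers around the test binary.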
    args = [
        'adjust-ulimits',
        'ceph-coverage',
        '{tdir}/archive/coverage'.format(tdir=testdir),
        'ceph_test_rados']
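    # Optional test modes, each forwarded to ceph_test_rados as a flag.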
    if config.get('ec_pool', False):
        args.extend(['--no-omap'])
        if not config.get('erasure_code_use_overwrites', False):
            args.extend(['--ec-pool'])
    if config.get('write_fadvise_dontneed', False):
        args.extend(['--write-fadvise-dontneed'])
    if config.get('set_redirect', False):
        args.extend(['--set_redirect'])
    if config.get('set_chunk', False):
        args.extend(['--set_chunk'])
    if config.get('enable_dedup', False):
        args.extend(['--enable_dedup'])
    if config.get('low_tier_pool', None):
        args.extend(['--low_tier_pool', config.get('low_tier_pool', None)])
    if config.get('pool_snaps', False):
        args.extend(['--pool-snaps'])
    if config.get('balance_reads', False):
        args.extend(['--balance-reads'])
    if config.get('localize_reads', False):
        args.extend(['--localize-reads'])
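    # Core workload parameters: op budget, object count, queue depth, object
    # size, and write stride bounds (strides default to fractions of the object size).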
    args.extend([
        '--max-ops', str(config.get('ops', 10000)),
        '--objects', str(config.get('objects', 500)),
        '--max-in-flight', str(config.get('max_in_flight', 16)),
        '--size', str(object_size),
        '--min-stride-size', str(config.get('min_stride_size', object_size // 10)),
        '--max-stride-size', str(config.get('max_stride_size', object_size // 5)),
        '--max-seconds', str(config.get('max_seconds', 0))
    ])

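    # Default op weights; any entry in the task's op_weights dict overrides
    # the corresponding default below.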
    weights = {}
    weights['read'] = 100
    weights['write'] = 100
    weights['delete'] = 10
    # Parallel of the op_types in test/osd/TestRados.cc
    for field in [
        # read handled above
        # write handled above
        # delete handled above
        "snap_create",
        "snap_remove",
        "rollback",
        "setattr",
        "rmattr",
        "watch",
        "copy_from",
        "hit_set_list",
        "is_dirty",
        "undirty",
        "cache_flush",
        "cache_try_flush",
        "cache_evict",
        "append",
        "write",
        "read",
        "delete"
    ]:
        if field in op_weights:
            weights[field] = op_weights[field]

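    # With write_append_excl (the default), split the write/append weight
    # evenly between the plain op and its exclusive-create variant.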
    if config.get('write_append_excl', True):
        if 'write' in weights:
            weights['write'] = weights['write'] // 2
            weights['write_excl'] = weights['write']

        if 'append' in weights:
            weights['append'] = weights['append'] // 2
            weights['append_excl'] = weights['append']

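    # Pass each op and its weight to ceph_test_rados as repeated
    # '--op <name> <weight>' arguments.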
    for op, weight in weights.items():
        args.extend([
            '--op', op, str(weight)
        ])

    def thread():
        """Thread spawned by gevent"""
        clients = ['client.{id}'.format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
        log.info('clients are %s' % clients)
        manager = ctx.managers['ceph']
        if config.get('ec_pool', False):
            profile = config.get('erasure_code_profile', {})
            profile_name = profile.get('name', 'teuthologyprofile')
            manager.create_erasure_code_profile(profile_name, profile)
        else:
            profile_name = None
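        # Each run creates (or reuses) a pool per client, drives one
        # ceph_test_rados process per client, then tears down the pools it created.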
        for i in range(int(config.get('runs', '1'))):
            log.info("starting run %s out of %s", str(i), config.get('runs', '1'))
            tests = {}
            existing_pools = config.get('pools', [])
            created_pools = []
            for role in config.get('clients', clients):
                assert isinstance(role, str)
                PREFIX = 'client.'
                assert role.startswith(PREFIX)
                id_ = role[len(PREFIX):]

                pool = config.get('pool', None)
                if not pool and existing_pools:
                    pool = existing_pools.pop()
                else:
                    pool = manager.create_pool_with_unique_name(
                        erasure_code_profile_name=profile_name,
                        erasure_code_use_overwrites=
                          config.get('erasure_code_use_overwrites', False)
                    )
                    created_pools.append(pool)
                    if config.get('fast_read', False):
                        manager.raw_cluster_cmd(
                            'osd', 'pool', 'set', pool, 'fast_read', 'true')
                    min_size = config.get('min_size', None)
                    if min_size is not None:
                        manager.raw_cluster_cmd(
                            'osd', 'pool', 'set', pool, 'min_size', str(min_size))

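                # Launch ceph_test_rados on the client's remote host; the
                # client id is passed via the CEPH_CLIENT_ID environment variable.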
                (remote,) = ctx.cluster.only(role).remotes.keys()
                proc = remote.run(
                    args=["CEPH_CLIENT_ID={id_}".format(id_=id_)] + args +
                    ["--pool", pool],
                    logger=log.getChild("rados.{id}".format(id=id_)),
                    stdin=run.PIPE,
                    wait=False
                )
                tests[id_] = proc
            run.wait(tests.values())

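            # Wait for snapshot trimming to finish before removing the pools
            # created for this run.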
            for pool in created_pools:
                manager.wait_snap_trimming_complete(pool)
                manager.remove_pool(pool)

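    # Run the workload in the background so the task yields immediately; the
    # greenlet is joined (and any exception re-raised) when the task unwinds.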
    running = gevent.spawn(thread)

    try:
        yield
    finally:
        log.info('joining rados')
        running.get()