7 from teuthology
.orchestra
import run
8 from teuthology
import misc
as teuthology
11 log
= logging
.getLogger(__name__
)
# NOTE(review): This block is a mangled extraction of a teuthology task
# (it matches the shape of qa/tasks/radosbench.py in the Ceph project).
# Original source line numbers are fused into the text and statements are
# wrapped across physical lines; the embedded numbering jumps (14 -> 18,
# 94 -> 98, 108 -> 119, ...), so interior statements are MISSING from this
# fragment (e.g. the dict that `radosbench[id_] = proc` writes into, the
# remote.run(...) call headers, the yield, and the try/finally cleanup).
# The code below is therefore reproduced byte-for-byte; only review
# comments are added.  TODO: recover the intact file before changing code.
13 @contextlib.contextmanager
# Context-manager task entry point: takes the teuthology run context and a
# per-task config dict.
14 def task(ctx
, config
):
# --- Fragment of the task docstring: the accepted config keys ------------
18 The config should be as follows:
21 clients: [client list]
22 time: <seconds to run>
24 size: write size to use
25 concurrency: max number of outstanding writes (16)
26 objectsize: object size to use
27 unique_pool: use a unique pool, defaults to False
28 ec_pool: create an ec pool, defaults to False
29 create_pool: create pool, defaults to True
31 name: teuthologyprofile
34 crush-failure-domain: osd
35 cleanup: false (defaults to true)
36 type: <write|seq|rand> (defaults to write)
# --- Body begins ---------------------------------------------------------
46 log
.info('Beginning radosbench...')
# Config must be a dict of options; NOTE(review): `assert` is stripped
# under -O, so this validation is best-effort only.
47 assert isinstance(config
, dict), \
48 "please list clients to run on"
# Resolve the per-run test directory and the ceph cluster manager, then
# read the bench mode ('write' | 'seq' | 'rand'; defaults to 'write') and
# whether this task owns pool creation.
51 testdir
= teuthology
.get_testdir(ctx
)
52 manager
= ctx
.managers
['ceph']
53 runtype
= config
.get('type', 'write')
55 create_pool
= config
.get('create_pool', True)
# Iterate the configured client roles; when the config supplies none, fall
# back to every 'client.<x>' role found in the cluster.
56 for role
in config
.get(
58 list(map(lambda x
: 'client.' + x
,
59 teuthology
.all_roles_of_type(ctx
.cluster
, 'client')))):
60 assert isinstance(role
, str)
# Split 'client.<id>' into its type and id parts; keep only the id.
61 (_
, id_
) = role
.split('.', 1)
# Exactly one remote host must hold this role (1-tuple unpack enforces it).
62 (remote
,) = ctx
.cluster
.only(role
).remotes
.keys()
# Optionally register an erasure-code profile before pool creation; the
# profile name defaults to 'teuthologyprofile'.
64 if config
.get('ec_pool', False):
65 profile
= config
.get('erasure_code_profile', {})
66 profile_name
= profile
.get('name', 'teuthologyprofile')
67 manager
.create_erasure_code_profile(profile_name
, profile
)
# cleanup defaults to true; only the override branch is visible here —
# the default initialization (presumably `cleanup = []`) is in a missing
# line of this fragment.  Same applies to `write_to_omap` below.
72 if not config
.get('cleanup', True):
73 cleanup
= ['--no-cleanup']
75 if config
.get('write-omap', False):
76 write_to_omap
= ['--write-omap']
77 log
.info('omap writes')
# Pool selection: named pool (default 'data') or a uniquely-named pool;
# the branching between these two create calls sits on missing lines.
79 pool
= config
.get('pool', 'data')
82 manager
.create_pool(pool
, erasure_code_profile_name
=profile_name
)
84 pool
= manager
.create_pool_with_unique_name(erasure_code_profile_name
=profile_name
)
# Bench knobs: outstanding-op concurrency (-t), object size, write size.
86 concurrency
= config
.get('concurrency', 16)
87 osize
= config
.get('objectsize', 65536)
91 objectsize
= ['--object-size', str(osize
)]
92 size
= ['-b', str(config
.get('size', 65536))]
93 # If doing a reading run then populate data
# For 'seq'/'rand' runs, first run a fixed 60s write with --no-cleanup so
# there are objects to read back.  The surrounding remote.run(...) call
# is on lines missing from this fragment.
94 if runtype
!= "write":
98 " ".join(['adjust-ulimits',
100 '{tdir}/archive/coverage',
102 '--no-log-to-stderr',
104 ['-t', str(concurrency
)]
105 + size
+ objectsize
+
107 'bench', str(60), "write", "--no-cleanup"
]).format(tdir
=testdir
),
# Per-client child logger, e.g. 'radosbench.0'.
110 logger
=log
.getChild('radosbench.{id}'.format(id=id_
)),
# The main bench invocation: runs for config['time'] (default 360s) in the
# requested mode, appending the omap/cleanup flag lists built above.
119 " ".join(['adjust-ulimits',
121 '{tdir}/archive/coverage',
123 '--no-log-to-stderr',
125 + size
+ objectsize
+
127 'bench', str(config
.get('time', 360)), runtype
,
128 ] + write_to_omap
+ cleanup
).format(tdir
=testdir
),
130 logger
=log
.getChild('radosbench.{id}'.format(id=id_
)),
# Collect the spawned process per client id; the `radosbench = {}`
# initialization and `proc = remote.run(...)` are on missing lines.
134 radosbench
[id_
] = proc
# Join all bench processes with a generous timeout: 30x the configured
# runtime plus a 300s grace period.
139 timeout
= config
.get('time', 360) * 30 + 300
140 log
.info('joining radosbench (timing out after %ss)', timeout
)
141 run
.wait(radosbench
.values(), timeout
=timeout
)
# Tear down the pool only if this task created a non-default one.
143 if pool
!= 'data' and create_pool
:
144 manager
.remove_pool(pool
)