import quincy beta 17.1.0

diff --git a/ceph/qa/tasks/radosbench.py b/ceph/qa/tasks/radosbench.py
index dd1f85dee7af023ddd8ada364bf9d3d3bb28eeeb..3a5aee2e265b0cfd03dfab30c4d91809f4c9ac1d 100644
--- a/ceph/qa/tasks/radosbench.py
+++ b/ceph/qa/tasks/radosbench.py
@@ -7,6 +7,7 @@ import logging
 from teuthology.orchestra import run
 from teuthology import misc as teuthology
 
+
 log = logging.getLogger(__name__)
 
 @contextlib.contextmanager
@@ -21,6 +22,7 @@ def task(ctx, config):
         time: <seconds to run>
         pool: <pool to use>
         size: write size to use
+        concurrency: max number of outstanding writes, defaults to 16
         objectsize: object size to use
         unique_pool: use a unique pool, defaults to False
         ec_pool: create an ec pool, defaults to False
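
For context, the options above arrive as an ordinary config dict when the task runs. A minimal sketch of what a job might pass, not part of the diff; every value shown is illustrative:

    # Illustrative config for the radosbench task; 'concurrency' is the new
    # knob added by this change and falls back to 16 when omitted.
    config = {
        'clients': ['client.0'],   # which client roles run the bench
        'time': 360,               # seconds to run
        'pool': 'data',            # pool to use
        'size': 65536,             # write size (-b)
        'objectsize': 65536,       # object size (--object-size); 0 omits the flag
        'concurrency': 32,         # max outstanding writes (-t)
        'write-omap': True,        # pass --write-omap to rados bench
        'cleanup': False,          # False adds --no-cleanup
    }
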
@@ -51,12 +53,13 @@ def task(ctx, config):
     runtype = config.get('type', 'write')
 
     create_pool = config.get('create_pool', True)
-    for role in config.get('clients', ['client.0']):
-        assert isinstance(role, basestring)
-        PREFIX = 'client.'
-        assert role.startswith(PREFIX)
-        id_ = role[len(PREFIX):]
-        (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+    for role in config.get(
+            'clients',
+            list(map(lambda x: 'client.' + x,
+                     teuthology.all_roles_of_type(ctx.cluster, 'client')))):
+        assert isinstance(role, str)
+        (_, id_) = role.split('.', 1)
+        (remote,) = ctx.cluster.only(role).remotes.keys()
 
         if config.get('ec_pool', False):
             profile = config.get('erasure_code_profile', {})
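
For context, the new default for 'clients' enumerates every client role in the cluster instead of assuming client.0. A sketch of the same expression written as a comprehension, not part of the diff; it assumes teuthology.all_roles_of_type yields bare ids, which is why the code prepends 'client.' (default_clients is an illustrative name):

    # Equivalent of the map/lambda default above, as a comprehension.
    default_clients = ['client.' + x
                       for x in teuthology.all_roles_of_type(ctx.cluster, 'client')]
    # e.g. a cluster with roles client.0 and client.1 -> ['client.0', 'client.1']
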
@@ -68,6 +71,10 @@ def task(ctx, config):
         cleanup = []
         if not config.get('cleanup', True):
             cleanup = ['--no-cleanup']
+        write_to_omap = []
+        if config.get('write-omap', False):
+            write_to_omap = ['--write-omap']
+            log.info('omap writes')
 
         pool = config.get('pool', 'data')
         if create_pool:
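
For context, the cleanup and write-omap handling above follows the task's pattern of turning boolean config keys into optional argv fragments that are concatenated into the rados command later. A condensed sketch, not part of the diff:

    # Boolean config keys become optional argv fragments (condensed sketch).
    cleanup = [] if config.get('cleanup', True) else ['--no-cleanup']
    write_to_omap = ['--write-omap'] if config.get('write-omap', False) else []
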
@@ -76,11 +83,12 @@ def task(ctx, config):
             else:
                 pool = manager.create_pool_with_unique_name(erasure_code_profile_name=profile_name)
 
+        concurrency = config.get('concurrency', 16)
         osize = config.get('objectsize', 65536)
-        if osize is 0:
+        if osize == 0:
             objectsize = []
         else:
-            objectsize = ['-o', str(osize)]
+            objectsize = ['--object-size', str(osize)]
         size = ['-b', str(config.get('size', 65536))]
         # If doing a reading run then populate data
         if runtype != "write":
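
For context, '-b' sets the per-write size while '--object-size' (the long spelling that replaces the old '-o') sets the object size, and an objectsize of 0 simply drops the flag so rados falls back to its default. A minimal sketch of how the new concurrency knob joins that assembly; values are illustrative and not part of the diff:

    # Argument assembly sketch; defaults mirror the ones used in the task.
    concurrency = config.get('concurrency', 16)     # -> ['-t', str(concurrency)]
    osize = config.get('objectsize', 65536)
    objectsize = [] if osize == 0 else ['--object-size', str(osize)]
    size = ['-b', str(config.get('size', 65536))]
    # roughly: rados --name client.0 -t 16 -b 65536 --object-size 65536 -p data bench ...
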
@@ -92,7 +100,8 @@ def task(ctx, config):
                               '{tdir}/archive/coverage',
                               'rados',
                               '--no-log-to-stderr',
-                              '--name', role]
+                              '--name', role] +
+                              ['-t', str(concurrency)]
                               + size + objectsize +
                               ['-p' , pool,
                           'bench', str(60), "write", "--no-cleanup"
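
For a read-type run the pool is first populated with a fixed 60-second write; the statement above continues past the hunk boundary. A rough sketch of the argv that step builds, not part of the diff; names and values are placeholders, and the real task wraps the command in the coverage/{tdir} helpers shown above:

    # Prefill argv sketch for read-type runs (placeholders only).
    prefill = (['rados', '--no-log-to-stderr', '--name', 'client.0',
                '-t', str(16)] + ['-b', str(65536)] +
               ['--object-size', str(65536)] +
               ['-p', 'data', 'bench', str(60), 'write', '--no-cleanup'])
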
@@ -116,7 +125,7 @@ def task(ctx, config):
                           + size + objectsize +
                           ['-p' , pool,
                           'bench', str(config.get('time', 360)), runtype,
-                          ] + cleanup).format(tdir=testdir),
+                          ] + write_to_omap + cleanup).format(tdir=testdir),
                 ],
             logger=log.getChild('radosbench.{id}'.format(id=id_)),
             stdin=run.PIPE,
@@ -129,7 +138,7 @@ def task(ctx, config):
     finally:
         timeout = config.get('time', 360) * 30 + 300
         log.info('joining radosbench (timing out after %ss)', timeout)
-        run.wait(radosbench.itervalues(), timeout=timeout)
+        run.wait(radosbench.values(), timeout=timeout)
 
-        if pool is not 'data' and create_pool:
+        if pool != 'data' and create_pool:
             manager.remove_pool(pool)
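
For context, the join timeout scales with the configured bench time: with the default of 360 seconds it comes to 360 * 30 + 300 = 11100 seconds. A condensed sketch of the teardown, not part of the diff, assuming radosbench is the dict of background bench processes:

    # Teardown sketch: join every background bench proc, then drop the pool
    # if the task created one (the shared 'data' pool is never removed).
    timeout = config.get('time', 360) * 30 + 300   # 11100 s with defaults
    run.wait(radosbench.values(), timeout=timeout)
    if pool != 'data' and create_pool:
        manager.remove_pool(pool)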