OpenZFS 8199 - multi-threaded dmu_object_alloc()
author     Matthew Ahrens <mahrens@delphix.com>
           Fri, 13 May 2016 04:16:36 +0000 (21:16 -0700)
committer  Brian Behlendorf <behlendorf1@llnl.gov>
           Fri, 9 Jun 2017 16:43:26 +0000 (09:43 -0700)
dmu_object_alloc() is single-threaded, so when multiple threads are
creating files in a single filesystem, they spend a lot of time waiting
for the os_obj_lock.  To improve performance of multi-threaded file
creation, we must make dmu_object_alloc() typically not grab any
filesystem-wide locks.

The solution is to have a "next object to allocate" for each CPU. Each
of these "next object"s is in a different block of the dnode object, so
that concurrent allocation holds dnodes in different dbufs.  When a
thread's "next object" reaches the end of a chunk of objects (by default
4 blocks worth -- 128 dnodes), it will be reset to the per-objset
os_obj_next, which will be increased by a chunk of objects (128).  Only
when manipulating the os_obj_next will we need to grab the os_obj_lock.
This decreases lock contention dramatically, because each thread only
needs to grab the os_obj_lock briefly, once per 128 allocations.
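
A minimal, self-contained sketch of the per-CPU chunking scheme described
above, with hypothetical names (alloc_state_t, next_object) standing in for
objset_t and dmu_object_alloc(); it assumes one thread per per-CPU slot and a
chunk-aligned starting cursor, and omits the dnode holds, hole-finding
rescans, and multi-slot dnodes handled by the actual change in the diff below:

        #include <pthread.h>
        #include <stdint.h>

        #define CHUNK_SHIFT     7               /* 2^7 = 128 objects per chunk */
        #define CHUNK_SIZE      (1ULL << CHUNK_SHIFT)

        typedef struct {
                pthread_mutex_t obj_lock;       /* role of os_obj_lock */
                uint64_t next_chunk;            /* role of os_obj_next_chunk */
                uint64_t *percpu_next;          /* role of os_obj_next_percpu */
                int percpu_len;
        } alloc_state_t;

        /*
         * Hand out the next object number for the calling CPU/thread.  The
         * shared lock is taken only when this slot's chunk is exhausted, so
         * contention drops to roughly once per 128 allocations.
         */
        static uint64_t
        next_object(alloc_state_t *st, int cpu)
        {
                uint64_t *cursor = &st->percpu_next[cpu % st->percpu_len];
                uint64_t obj = *cursor;

                if ((obj & (CHUNK_SIZE - 1)) == 0) {
                        /* Chunk boundary: claim a fresh chunk under the lock. */
                        pthread_mutex_lock(&st->obj_lock);
                        obj = st->next_chunk;
                        st->next_chunk += CHUNK_SIZE;
                        pthread_mutex_unlock(&st->obj_lock);
                }
                *cursor = obj + 1;              /* real code advances by dn_slots */
                return (obj);
        }

        int
        main(void)
        {
                uint64_t cursors[4] = { 0 };
                alloc_state_t st = {
                        .obj_lock = PTHREAD_MUTEX_INITIALIZER,
                        .next_chunk = 0,        /* must start chunk-aligned */
                        .percpu_next = cursors,
                        .percpu_len = 4,
                };

                /* Single-threaded demo: allocate a few objects from "CPU" 0. */
                for (int i = 0; i < 5; i++)
                        (void) next_object(&st, 0);
                return (0);
        }

The common path touches only the per-CPU cursor; the shared state is consulted
once per chunk.  Keeping each chunk at least one dnode block in size (as the
real code enforces) also means concurrent allocators work in different dbufs.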

This results in a 70% performance improvement to multi-threaded object
creation (where each thread is creating objects in its own directory),
from 67,000/sec to 115,000/sec, with 8 CPUs.

Work sponsored by Intel Corp.

Authored by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Ned Bass <bass6@llnl.gov>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Ported-by: Matthew Ahrens <mahrens@delphix.com>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
OpenZFS-issue: https://www.illumos.org/issues/8199
OpenZFS-commit: https://github.com/openzfs/openzfs/pull/374
Closes #4703
Closes #6117

include/sys/dmu_objset.h
module/zfs/dmu_object.c
module/zfs/dmu_objset.c
module/zfs/zfs_znode.c

diff --git a/include/sys/dmu_objset.h b/include/sys/dmu_objset.h
index 636b0e2493367fdc8ae59c24b96c6c096453e338..a836e03722c32fa9feffbce170cb2d73b3be9b11 100644
@@ -120,7 +120,11 @@ struct objset {
 
        /* Protected by os_obj_lock */
        kmutex_t os_obj_lock;
-       uint64_t os_obj_next;
+       uint64_t os_obj_next_chunk;
+
+       /* Per-CPU next object to allocate, protected by atomic ops. */
+       uint64_t *os_obj_next_percpu;
+       int os_obj_next_percpu_len;
 
        /* Protected by os_lock */
        kmutex_t os_lock;
diff --git a/module/zfs/dmu_object.c b/module/zfs/dmu_object.c
index 8ca699ebff4a1b7383e7c3b8b190042c3d122622..cb861a196558aeba102c6d4d2b23a3f98944c16c 100644
 #include <sys/zfeature.h>
 #include <sys/dsl_dataset.h>
 
+/*
+ * Each of the concurrent object allocators will grab
+ * 2^dmu_object_alloc_chunk_shift dnode slots at a time.  The default is to
+ * grab 128 slots, which is 4 blocks worth.  This was experimentally
+ * determined to be the lowest value that eliminates the measurable effect
+ * of lock contention from this code path.
+ */
+int dmu_object_alloc_chunk_shift = 7;
+
 uint64_t
 dmu_object_alloc(objset_t *os, dmu_object_type_t ot, int blocksize,
     dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
@@ -50,6 +59,9 @@ dmu_object_alloc_dnsize(objset_t *os, dmu_object_type_t ot, int blocksize,
        dnode_t *dn = NULL;
        int dn_slots = dnodesize >> DNODE_SHIFT;
        boolean_t restarted = B_FALSE;
+       uint64_t *cpuobj = &os->os_obj_next_percpu[CPU_SEQID %
+           os->os_obj_next_percpu_len];
+       int dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift;
 
        if (dn_slots == 0) {
                dn_slots = DNODE_MIN_SLOTS;
@@ -58,54 +70,88 @@ dmu_object_alloc_dnsize(objset_t *os, dmu_object_type_t ot, int blocksize,
                ASSERT3S(dn_slots, <=, DNODE_MAX_SLOTS);
        }
 
-       mutex_enter(&os->os_obj_lock);
+       /*
+        * The "chunk" of dnodes that is assigned to a CPU-specific
+        * allocator needs to be at least one block's worth, to avoid
+        * lock contention on the dbuf.  It can be at most one L1 block's
+        * worth, so that the "rescan after polishing off a L1's worth"
+        * logic below will be sure to kick in.
+        */
+       if (dnodes_per_chunk < DNODES_PER_BLOCK)
+               dnodes_per_chunk = DNODES_PER_BLOCK;
+       if (dnodes_per_chunk > L1_dnode_count)
+               dnodes_per_chunk = L1_dnode_count;
+
+       object = *cpuobj;
        for (;;) {
-               object = os->os_obj_next;
                /*
-                * Each time we polish off a L1 bp worth of dnodes (2^12
-                * objects), move to another L1 bp that's still
-                * reasonably sparse (at most 1/4 full). Look from the
-                * beginning at most once per txg. If we still can't
-                * allocate from that L1 block, search for an empty L0
-                * block, which will quickly skip to the end of the
-                * metadnode if the no nearby L0 blocks are empty. This
-                * fallback avoids a pathology where full dnode blocks
-                * containing large dnodes appear sparse because they
-                * have a low blk_fill, leading to many failed
-                * allocation attempts. In the long term a better
-                * mechanism to search for sparse metadnode regions,
-                * such as spacemaps, could be implemented.
-                *
-                * os_scan_dnodes is set during txg sync if enough objects
-                * have been freed since the previous rescan to justify
-                * backfilling again.
-                *
-                * Note that dmu_traverse depends on the behavior that we use
-                * multiple blocks of the dnode object before going back to
-                * reuse objects.  Any change to this algorithm should preserve
-                * that property or find another solution to the issues
-                * described in traverse_visitbp.
+                * If we finished a chunk of dnodes, get a new one from
+                * the global allocator.
                 */
-               if (P2PHASE(object, L1_dnode_count) == 0) {
-                       uint64_t offset;
-                       uint64_t blkfill;
-                       int minlvl;
-                       int error;
-                       if (os->os_rescan_dnodes) {
-                               offset = 0;
-                               os->os_rescan_dnodes = B_FALSE;
-                       } else {
-                               offset = object << DNODE_SHIFT;
+               if (P2PHASE(object, dnodes_per_chunk) == 0) {
+                       mutex_enter(&os->os_obj_lock);
+                       ASSERT0(P2PHASE(os->os_obj_next_chunk,
+                           dnodes_per_chunk));
+                       object = os->os_obj_next_chunk;
+
+                       /*
+                        * Each time we polish off a L1 bp worth of dnodes
+                        * (2^12 objects), move to another L1 bp that's
+                        * still reasonably sparse (at most 1/4 full). Look
+                        * from the beginning at most once per txg. If we
+                        * still can't allocate from that L1 block, search
+                        * for an empty L0 block, which will quickly skip
+                        * to the end of the metadnode if no nearby L0
+                        * blocks are empty. This fallback avoids a
+                        * pathology where full dnode blocks containing
+                        * large dnodes appear sparse because they have a
+                        * low blk_fill, leading to many failed allocation
+                        * attempts. In the long term a better mechanism to
+                        * search for sparse metadnode regions, such as
+                        * spacemaps, could be implemented.
+                        *
+                        * os_scan_dnodes is set during txg sync if enough
+                        * objects have been freed since the previous
+                        * rescan to justify backfilling again.
+                        *
+                        * Note that dmu_traverse depends on the behavior
+                        * that we use multiple blocks of the dnode object
+                        * before going back to reuse objects.  Any change
+                        * to this algorithm should preserve that property
+                        * or find another solution to the issues described
+                        * in traverse_visitbp.
+                        */
+                       if (P2PHASE(object, L1_dnode_count) == 0) {
+                               uint64_t offset;
+                               uint64_t blkfill;
+                               int minlvl;
+                               int error;
+                               if (os->os_rescan_dnodes) {
+                                       offset = 0;
+                                       os->os_rescan_dnodes = B_FALSE;
+                               } else {
+                                       offset = object << DNODE_SHIFT;
+                               }
+                               blkfill = restarted ? 1 : DNODES_PER_BLOCK >> 2;
+                               minlvl = restarted ? 1 : 2;
+                               restarted = B_TRUE;
+                               error = dnode_next_offset(DMU_META_DNODE(os),
+                                   DNODE_FIND_HOLE, &offset, minlvl,
+                                   blkfill, 0);
+                               if (error == 0) {
+                                       object = offset >> DNODE_SHIFT;
+                               }
                        }
-                       blkfill = restarted ? 1 : DNODES_PER_BLOCK >> 2;
-                       minlvl = restarted ? 1 : 2;
-                       restarted = B_TRUE;
-                       error = dnode_next_offset(DMU_META_DNODE(os),
-                           DNODE_FIND_HOLE, &offset, minlvl, blkfill, 0);
-                       if (error == 0)
-                               object = offset >> DNODE_SHIFT;
+                       /*
+                        * Note: if "restarted", we may find a L0 that
+                        * is not suitably aligned.
+                        */
+                       os->os_obj_next_chunk =
+                           P2ALIGN(object, dnodes_per_chunk) +
+                           dnodes_per_chunk;
+                       (void) atomic_swap_64(cpuobj, object);
+                       mutex_exit(&os->os_obj_lock);
                }
-               os->os_obj_next = object + dn_slots;
 
                /*
                 * XXX We should check for an i/o error here and return
@@ -113,28 +159,38 @@ dmu_object_alloc_dnsize(objset_t *os, dmu_object_type_t ot, int blocksize,
                 * dmu_tx_assign(), but there is currently no mechanism
                 * to do so.
                 */
-               (void) dnode_hold_impl(os, object, DNODE_MUST_BE_FREE, dn_slots,
-                   FTAG, &dn);
-               if (dn)
-                       break;
-
-               if (dmu_object_next(os, &object, B_TRUE, 0) == 0)
-                       os->os_obj_next = object;
-               else
+               (void) dnode_hold_impl(os, object, DNODE_MUST_BE_FREE,
+                   dn_slots, FTAG, &dn);
+               if (dn != NULL) {
+                       rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
                        /*
-                        * Skip to next known valid starting point for a dnode.
+                        * Another thread could have allocated it; check
+                        * again now that we have the struct lock.
                         */
-                       os->os_obj_next = P2ROUNDUP(object + 1,
-                           DNODES_PER_BLOCK);
-       }
-
-       dnode_allocate(dn, ot, blocksize, 0, bonustype, bonuslen, dn_slots, tx);
-       mutex_exit(&os->os_obj_lock);
-
-       dmu_tx_add_new_object(tx, dn);
-       dnode_rele(dn, FTAG);
+                       if (dn->dn_type == DMU_OT_NONE) {
+                               dnode_allocate(dn, ot, blocksize, 0,
+                                   bonustype, bonuslen, dn_slots, tx);
+                               rw_exit(&dn->dn_struct_rwlock);
+                               dmu_tx_add_new_object(tx, dn);
+                               dnode_rele(dn, FTAG);
+
+                               (void) atomic_swap_64(cpuobj,
+                                   object + dn_slots);
+                               return (object);
+                       }
+                       rw_exit(&dn->dn_struct_rwlock);
+                       dnode_rele(dn, FTAG);
+               }
 
-       return (object);
+               if (dmu_object_next(os, &object, B_TRUE, 0) != 0) {
+                       /*
+                        * Skip to next known valid starting point for a
+                        * dnode.
+                        */
+                       object = P2ROUNDUP(object + 1, DNODES_PER_BLOCK);
+               }
+               (void) atomic_swap_64(cpuobj, object);
+       }
 }
 
 int
@@ -341,4 +397,10 @@ EXPORT_SYMBOL(dmu_object_free);
 EXPORT_SYMBOL(dmu_object_next);
 EXPORT_SYMBOL(dmu_object_zapify);
 EXPORT_SYMBOL(dmu_object_free_zapified);
+
+/* BEGIN CSTYLED */
+module_param(dmu_object_alloc_chunk_shift, int, 0644);
+MODULE_PARM_DESC(dmu_object_alloc_chunk_shift,
+       "CPU-specific allocator grabs 2^N objects at once");
+/* END CSTYLED */
 #endif
diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c
index a50f8dcb15bdf141f82757d7ca4de4613d69d45f..9a7a6968d6319163b39ab2e5ea2be6f023ae731c 100644
@@ -547,6 +547,9 @@ dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
        mutex_init(&os->os_userused_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);
+       os->os_obj_next_percpu_len = boot_ncpus;
+       os->os_obj_next_percpu = kmem_zalloc(os->os_obj_next_percpu_len *
+           sizeof (os->os_obj_next_percpu[0]), KM_SLEEP);
 
        dnode_special_open(os, &os->os_phys->os_meta_dnode,
            DMU_META_DNODE_OBJECT, &os->os_meta_dnode);
@@ -842,6 +845,9 @@ dmu_objset_evict_done(objset_t *os)
        rw_enter(&os_lock, RW_READER);
        rw_exit(&os_lock);
 
+       kmem_free(os->os_obj_next_percpu,
+           os->os_obj_next_percpu_len * sizeof (os->os_obj_next_percpu[0]));
+
        mutex_destroy(&os->os_lock);
        mutex_destroy(&os->os_userused_lock);
        mutex_destroy(&os->os_obj_lock);
diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c
index 52262166866b3d2d15eaca66b04c0bffbd991b44..1ec5618e08ab0b2644afd5eaf8c24903b7c756c9 100644
@@ -1779,14 +1779,6 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
            DMU_OT_NONE, 0, tx);
        ASSERT(error == 0);
 
-       /*
-        * Give dmu_object_alloc() a hint about where to start
-        * allocating new objects. Otherwise, since the metadnode's
-        * dnode_phys_t structure isn't initialized yet, dmu_object_next()
-        * would fail and we'd have to skip to the next dnode block.
-        */
-       os->os_obj_next = moid + 1;
-
        /*
         * Set starting attributes.
         */