bcache: finish incremental GC
author     Tang Junhui <tang.junhui@zte.com.cn>
           Thu, 26 Jul 2018 04:17:34 +0000 (12:17 +0800)
committer  Jens Axboe <axboe@kernel.dk>
           Fri, 27 Jul 2018 15:15:46 +0000 (09:15 -0600)
In the GC thread, we record the latest GC key in gc_done, which is expected
to be used for incremental GC, but the current code does not make use of it.
When GC runs, front side I/O is blocked until GC finishes, which can take a
long time if there are a lot of btree nodes.

This patch implements incremental GC. The main idea is that, when there are
front side I/Os, after GC has processed some nodes (100), we stop GC, release
the lock on the btree node, and let the front side I/Os run for a while
(100 ms), then go back to GC again.
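
Below is a small user-space sketch (not the kernel code) of this pattern: a
counter of inflight front side I/Os, and a GC loop that pauses for GC_SLEEP_MS
after every MIN_GC_NODES nodes while that counter is non-zero. The names
MIN_GC_NODES, GC_SLEEP_MS, nodes_pre and search_inflight mirror the diff
below; gc_one_node() and msleep_ms() are illustrative helpers only. The
kernel version instead returns -EAGAIN from btree_gc_recurse() and sleeps in
bch_btree_gc() before retrying; the sketch folds that into a single loop for
brevity.

/*
 * Illustrative sketch of incremental GC: stop after MIN_GC_NODES nodes
 * whenever front side I/O is inflight, sleep GC_SLEEP_MS, then resume
 * from where we left off.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <time.h>

#define MIN_GC_NODES	100
#define GC_SLEEP_MS	100

static atomic_int search_inflight;	/* incremented per front side I/O */

struct gc_stat {
	size_t nodes;			/* nodes processed so far */
	size_t nodes_pre;		/* node count at the last pause */
};

/* Placeholder for the real per-node GC work. */
static void gc_one_node(size_t node) { (void)node; }

static void msleep_ms(long ms)
{
	struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };
	nanosleep(&ts, NULL);
}

/* Walk all nodes, yielding to front side I/O every MIN_GC_NODES nodes. */
static void incremental_gc(struct gc_stat *gc, size_t total_nodes)
{
	while (gc->nodes < total_nodes) {
		gc_one_node(gc->nodes++);

		if (atomic_load(&search_inflight) &&
		    gc->nodes >= gc->nodes_pre + MIN_GC_NODES) {
			gc->nodes_pre = gc->nodes;
			msleep_ms(GC_SLEEP_MS);	/* let front side I/O run */
		}
	}
}

int main(void)
{
	struct gc_stat gc = { 0, 0 };

	atomic_store(&search_inflight, 1);	/* pretend one I/O is inflight */
	incremental_gc(&gc, 1000);
	printf("GC done after %zu nodes\n", gc.nodes);
	return 0;
}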

With this patch, I/Os are no longer blocked for the whole duration of GC, and
the problem of I/O throughput dropping to zero during GC is gone.

Patch v2: Rename some variables and macros as Coly suggested.

Signed-off-by: Tang Junhui <tang.junhui@zte.com.cn>
Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/md/bcache/bcache.h
drivers/md/bcache/btree.c
drivers/md/bcache/request.c

diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 3226d38bf8594e385002bdea5fee1896ea6acc9d..872ef4d677115f60c7ba6cf8608e5d5b981bc006 100644
@@ -474,6 +474,7 @@ struct cache {
 
 struct gc_stat {
        size_t                  nodes;
+       size_t                  nodes_pre;
        size_t                  key_bytes;
 
        size_t                  nkeys;
@@ -603,6 +604,10 @@ struct cache_set {
         * rescale; when it hits 0 we rescale all the bucket priorities.
         */
        atomic_t                rescale;
+       /*
+        * used for GC, identify if any front side I/Os is inflight
+        */
+       atomic_t                search_inflight;
        /*
         * When we invalidate buckets, we use both the priority and the amount
         * of good data to determine which buckets to reuse first - to weight
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 547c9eedc2f4fa3e90cde0149e4be4b48fd93fbf..b4407ba12667d2fce91063e0a86d5be686bce310 100644
@@ -90,6 +90,8 @@
 
 #define MAX_NEED_GC            64
 #define MAX_SAVE_PRIO          72
+#define MIN_GC_NODES           100
+#define GC_SLEEP_MS            100
 
 #define PTR_DIRTY_BIT          (((uint64_t) 1 << 36))
 
@@ -1585,6 +1587,13 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
                memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
                r->b = NULL;
 
+               if (atomic_read(&b->c->search_inflight) &&
+                   gc->nodes >= gc->nodes_pre + MIN_GC_NODES) {
+                       gc->nodes_pre =  gc->nodes;
+                       ret = -EAGAIN;
+                       break;
+               }
+
                if (need_resched()) {
                        ret = -EAGAIN;
                        break;
@@ -1753,7 +1762,10 @@ static void bch_btree_gc(struct cache_set *c)
                closure_sync(&writes);
                cond_resched();
 
-               if (ret && ret != -EAGAIN)
+               if (ret == -EAGAIN)
+                       schedule_timeout_interruptible(msecs_to_jiffies
+                                                      (GC_SLEEP_MS));
+               else if (ret)
                        pr_warn("gc failed!");
        } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
 
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 97707b0c54ce05b65f9c3e6ae26111fe4788915c..43af905920f54531b4df0ee7e836c3a3ab64f2f6 100644
@@ -701,6 +701,8 @@ static void search_free(struct closure *cl)
 {
        struct search *s = container_of(cl, struct search, cl);
 
+       atomic_dec(&s->d->c->search_inflight);
+
        if (s->iop.bio)
                bio_put(s->iop.bio);
 
@@ -718,6 +720,7 @@ static inline struct search *search_alloc(struct bio *bio,
 
        closure_init(&s->cl, NULL);
        do_bio_hook(s, bio, request_endio);
+       atomic_inc(&d->c->search_inflight);
 
        s->orig_bio             = bio;
        s->cache_miss           = NULL;