writeback: move backing_dev_info->bdi_stat[] into bdi_writeback
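
The statistics counters and the registered/congested state bits move from struct backing_dev_info into the embedded struct bdi_writeback, so callers switch from bdi_stat(bdi, BDI_*) and bdi->state to wb_stat(wb, WB_*) and bdi->wb.state. A minimal sketch of the assumed companion change in include/linux/backing-dev.h (that header is not part of this diff; details beyond the names visible below are illustrative):

    enum wb_state {
            WB_async_congested,     /* the async (write) queue is getting full */
            WB_sync_congested,      /* the sync queue is getting full */
            WB_registered,          /* bdi_register() was done */
    };

    enum wb_stat_item {
            WB_RECLAIMABLE,
            WB_WRITEBACK,
            WB_DIRTIED,
            WB_WRITTEN,
            NR_WB_STAT_ITEMS
    };

    struct bdi_writeback {
            struct backing_dev_info *bdi;   /* our parent bdi */
            unsigned long state;            /* WB_* flags, was bdi->state */
            struct percpu_counter stat[NR_WB_STAT_ITEMS]; /* was bdi->bdi_stat[] */
            /* ... lists, list_lock, dwork unchanged ... */
    };

    static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
    {
            return percpu_counter_read_positive(&wb->stat[item]);
    }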
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 6dc4580df2af040b10bc10a5f9c423becc3ff47e..7b1d1917b6583ebf7ed65b4fd4e530162259f2f7 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -84,19 +84,19 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
                   "b_dirty_time:       %10lu\n"
                   "bdi_list:           %10u\n"
                   "state:              %10lx\n",
-                  (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
-                  (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
+                  (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
+                  (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
                   K(bdi_thresh),
                   K(dirty_thresh),
                   K(background_thresh),
-                  (unsigned long) K(bdi_stat(bdi, BDI_DIRTIED)),
-                  (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
+                  (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
+                  (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
                   (unsigned long) K(bdi->write_bandwidth),
                   nr_dirty,
                   nr_io,
                   nr_more_io,
                   nr_dirty_time,
-                  !list_empty(&bdi->bdi_list), bdi->state);
+                  !list_empty(&bdi->bdi_list), bdi->wb.state);
 #undef K
 
        return 0;
@@ -280,7 +280,7 @@ void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
 
        timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
        spin_lock_bh(&bdi->wb_lock);
-       if (test_bit(BDI_registered, &bdi->state))
+       if (test_bit(WB_registered, &bdi->wb.state))
                queue_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
        spin_unlock_bh(&bdi->wb_lock);
 }
@@ -315,7 +315,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
        bdi->dev = dev;
 
        bdi_debug_register(bdi, dev_name(dev));
-       set_bit(BDI_registered, &bdi->state);
+       set_bit(WB_registered, &bdi->wb.state);
 
        spin_lock_bh(&bdi_lock);
        list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
@@ -339,7 +339,7 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 {
        /* Make sure nobody queues further work */
        spin_lock_bh(&bdi->wb_lock);
-       if (!test_and_clear_bit(BDI_registered, &bdi->state)) {
+       if (!test_and_clear_bit(WB_registered, &bdi->wb.state)) {
                spin_unlock_bh(&bdi->wb_lock);
                return;
        }
@@ -376,8 +376,10 @@ void bdi_unregister(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL(bdi_unregister);
 
-static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
+static int bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
 {
+       int i, err;
+
        memset(wb, 0, sizeof(*wb));
 
        wb->bdi = bdi;
@@ -388,6 +390,27 @@ static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
        INIT_LIST_HEAD(&wb->b_dirty_time);
        spin_lock_init(&wb->list_lock);
        INIT_DELAYED_WORK(&wb->dwork, bdi_writeback_workfn);
+
+       for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
+               err = percpu_counter_init(&wb->stat[i], 0, GFP_KERNEL);
+               if (err) {
+                       while (i--)
+                               percpu_counter_destroy(&wb->stat[i]);
+                       return err;
+               }
+       }
+
+       return 0;
+}
+
+static void bdi_wb_exit(struct bdi_writeback *wb)
+{
+       int i;
+
+       WARN_ON(delayed_work_pending(&wb->dwork));
+
+       for (i = 0; i < NR_WB_STAT_ITEMS; i++)
+               percpu_counter_destroy(&wb->stat[i]);
 }
 
 /*
@@ -397,7 +420,7 @@ static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
 
 int bdi_init(struct backing_dev_info *bdi)
 {
-       int i, err;
+       int err;
 
        bdi->dev = NULL;
 
@@ -408,13 +431,9 @@ int bdi_init(struct backing_dev_info *bdi)
        INIT_LIST_HEAD(&bdi->bdi_list);
        INIT_LIST_HEAD(&bdi->work_list);
 
-       bdi_wb_init(&bdi->wb, bdi);
-
-       for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
-               err = percpu_counter_init(&bdi->bdi_stat[i], 0, GFP_KERNEL);
-               if (err)
-                       goto err;
-       }
+       err = bdi_wb_init(&bdi->wb, bdi);
+       if (err)
+               return err;
 
        bdi->dirty_exceeded = 0;
 
@@ -427,25 +446,20 @@ int bdi_init(struct backing_dev_info *bdi)
        bdi->avg_write_bandwidth = INIT_BW;
 
        err = fprop_local_init_percpu(&bdi->completions, GFP_KERNEL);
-
        if (err) {
-err:
-               while (i--)
-                       percpu_counter_destroy(&bdi->bdi_stat[i]);
+               bdi_wb_exit(&bdi->wb);
+               return err;
        }
 
-       return err;
+       return 0;
 }
 EXPORT_SYMBOL(bdi_init);
 
 void bdi_destroy(struct backing_dev_info *bdi)
 {
-       int i;
-
        bdi_wb_shutdown(bdi);
 
        WARN_ON(!list_empty(&bdi->work_list));
-       WARN_ON(delayed_work_pending(&bdi->wb.dwork));
 
        if (bdi->dev) {
                bdi_debug_unregister(bdi);
@@ -453,8 +467,8 @@ void bdi_destroy(struct backing_dev_info *bdi)
                bdi->dev = NULL;
        }
 
-       for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
-               percpu_counter_destroy(&bdi->bdi_stat[i]);
+       bdi_wb_exit(&bdi->wb);
+
        fprop_local_destroy_percpu(&bdi->completions);
 }
 EXPORT_SYMBOL(bdi_destroy);
@@ -492,11 +506,11 @@ static atomic_t nr_bdi_congested[2];
 
 void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
 {
-       enum bdi_state bit;
+       enum wb_state bit;
        wait_queue_head_t *wqh = &congestion_wqh[sync];
 
-       bit = sync ? BDI_sync_congested : BDI_async_congested;
-       if (test_and_clear_bit(bit, &bdi->state))
+       bit = sync ? WB_sync_congested : WB_async_congested;
+       if (test_and_clear_bit(bit, &bdi->wb.state))
                atomic_dec(&nr_bdi_congested[sync]);
        smp_mb__after_atomic();
        if (waitqueue_active(wqh))
@@ -506,10 +520,10 @@ EXPORT_SYMBOL(clear_bdi_congested);
 
 void set_bdi_congested(struct backing_dev_info *bdi, int sync)
 {
-       enum bdi_state bit;
+       enum wb_state bit;
 
-       bit = sync ? BDI_sync_congested : BDI_async_congested;
-       if (!test_and_set_bit(bit, &bdi->state))
+       bit = sync ? WB_sync_congested : WB_async_congested;
+       if (!test_and_set_bit(bit, &bdi->wb.state))
                atomic_inc(&nr_bdi_congested[sync]);
 }
 EXPORT_SYMBOL(set_bdi_congested);
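
With the counters now allocated and torn down by bdi_wb_init()/bdi_wb_exit(), users of the bdi API are unchanged. A hedged usage sketch of a caller that embeds a backing_dev_info (the surrounding "foo" structure and functions are hypothetical):

    struct foo_device {
            struct backing_dev_info bdi;
            /* ... */
    };

    static int foo_setup(struct foo_device *foo)
    {
            int err;

            /* bdi_init() now also sets up bdi->wb.stat[] via bdi_wb_init() */
            err = bdi_init(&foo->bdi);
            if (err)
                    return err;
            /* ... */
            return 0;
    }

    static void foo_teardown(struct foo_device *foo)
    {
            /* bdi_destroy() unwinds through bdi_wb_exit(), freeing the counters */
            bdi_destroy(&foo->bdi);
    }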