// SPDX-License-Identifier: GPL-2.0-only

#include <linux/wait.h>
#include <linux/rbtree.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

struct backing_dev_info noop_backing_dev_info;
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;
static const char *bdi_unknown_name = "(unknown)";

/*
 * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
 * reader side locking.
 */
DEFINE_SPINLOCK(bdi_lock);
static u64 bdi_id_cursor;
static struct rb_root bdi_tree = RB_ROOT;
LIST_HEAD(bdi_list);
/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#define K(x) ((x) << (PAGE_SHIFT - 10))
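/*
 * Note (illustrative, not in the original source): K() converts a page count
 * to KiB; with 4 KiB pages (PAGE_SHIFT == 12) it is x << 2, so K(256) == 1024.
 */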
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long wb_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_io_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
		nr_more_io++;
	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
		if (inode->i_state & I_DIRTY_TIME)
			nr_dirty_time++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	wb_thresh = wb_calc_thresh(wb, dirty_thresh);

	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "b_dirty_time:       %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
		   K(wb_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
		   (unsigned long) K(wb->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   nr_dirty_time,
		   !list_empty(&bdi->bdi_list), bdi->wb.state);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);

	debugfs_create_file("stats", 0444, bdi->debug_dir, bdi,
			    &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove_recursive(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif
static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}
#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return sysfs_emit(buf, "%lld\n", (long long)expr);		\
}									\
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
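/*
 * Illustrative note (not part of the original source): BDI_SHOW() pairs the
 * generated show routine with the matching *_store function via
 * DEVICE_ATTR_RW().  The line above expands roughly to:
 *
 *	static ssize_t read_ahead_kb_show(struct device *dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *		return sysfs_emit(buf, "%lld\n", (long long)K(bdi->ra_pages));
 *	}
 *	static DEVICE_ATTR_RW(read_ahead_kb);
 *
 * which backs the read_ahead_kb file under /sys/class/bdi/.
 */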
static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)
static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev,
		"the stable_pages_required attribute has been removed. Use the stable_writes queue attribute instead.\n");
	return sysfs_emit(buf, "%d\n", 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_stable_pages_required.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);
static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_groups = bdi_dev_groups;
	bdi_debug_init();

	return 0;
}
postcore_initcall(bdi_class_init);

static int bdi_init(struct backing_dev_info *bdi);

static int __init default_bdi_init(void)
{
	int err;

	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
				 WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;

	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);
/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out starts
 * only 'dirty_writeback_interval' centisecs from now anyway, we just set up
 * a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
	spin_unlock_bh(&wb->work_lock);
}
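/*
 * Illustrative sketch (not from this file): __mark_inode_dirty() in
 * fs/fs-writeback.c uses this roughly as
 *
 *	if (wakeup_bdi && (wb->bdi->capabilities & BDI_CAP_WRITEBACK))
 *		wb_wakeup_delayed(wb);
 *
 * so dirtying the first inode only arms a timer instead of waking the
 * flusher immediately.
 */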
/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))
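/*
 * Note (illustrative, not in the original source): INIT_BW is in pages per
 * second: 100 MB/s is 100 << 20 bytes/s divided by the page size
 * (>> PAGE_SHIFT), i.e. 25600 pages/s with 4 KiB pages.
 */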
static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   gfp_t gfp)
{
	int i, err;

	memset(wb, 0, sizeof(*wb));

	if (wb != &bdi->wb)
		bdi_get(bdi);
	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	INIT_LIST_HEAD(&wb->b_dirty_time);
	spin_lock_init(&wb->list_lock);

	wb->bw_time_stamp = jiffies;
	wb->balanced_dirty_ratelimit = INIT_BW;
	wb->dirty_ratelimit = INIT_BW;
	wb->write_bandwidth = INIT_BW;
	wb->avg_write_bandwidth = INIT_BW;

	spin_lock_init(&wb->work_lock);
	INIT_LIST_HEAD(&wb->work_list);
	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
	wb->dirty_sleep = jiffies;

	err = fprop_local_init_percpu(&wb->completions, gfp);
	if (err)
		goto out_put_bdi;

	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
		err = percpu_counter_init(&wb->stat[i], 0, gfp);
		if (err)
			goto out_destroy_stat;
	}

	return 0;

out_destroy_stat:
	while (i--)
		percpu_counter_destroy(&wb->stat[i]);
	fprop_local_destroy_percpu(&wb->completions);
out_put_bdi:
	if (wb != &bdi->wb)
		bdi_put(bdi);
	return err;
}
static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_bh(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_bh(&wb->work_lock);
		return;
	}
	spin_unlock_bh(&wb->work_lock);

	cgwb_remove_from_bdi_list(wb);
	/*
	 * Drain work list and shutdown the delayed_work.  !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));
}

static void wb_exit(struct bdi_writeback *wb)
{
	int i;

	WARN_ON(delayed_work_pending(&wb->dwork));

	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
		percpu_counter_destroy(&wb->stat[i]);

	fprop_local_destroy_percpu(&wb->completions);
	if (wb != &wb->bdi->wb)
		bdi_put(wb->bdi);
}
#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, offline_cgwbs and
 * memcg->cgwb_list.  bdi->cgwb_tree is also RCU protected.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static struct workqueue_struct *cgwb_release_wq;

static LIST_HEAD(offline_cgwbs);
static void cleanup_offline_cgwbs_workfn(struct work_struct *work);
static DECLARE_WORK(cleanup_offline_cgwbs_work, cleanup_offline_cgwbs_workfn);

static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);
	struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css);

	mutex_lock(&wb->bdi->cgwb_release_mutex);
	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);
	mutex_unlock(&wb->bdi->cgwb_release_mutex);

	/* triggers blkg destruction if no online users left */
	blkcg_unpin_online(blkcg);

	fprop_local_destroy_percpu(&wb->memcg_completions);

	spin_lock_irq(&cgwb_lock);
	list_del(&wb->offline_node);
	spin_unlock_irq(&cgwb_lock);

	percpu_ref_exit(&wb->refcnt);
	wb_exit(wb);
	WARN_ON_ONCE(!list_empty(&wb->b_attached));
	kfree_rcu(wb, rcu);
}
static void cgwb_release(struct percpu_ref *refcnt)
{
	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
						refcnt);
	queue_work(cgwb_release_wq, &wb->release_work);
}

static void cgwb_kill(struct bdi_writeback *wb)
{
	lockdep_assert_held(&cgwb_lock);

	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
	list_del(&wb->memcg_node);
	list_del(&wb->blkcg_node);
	list_add(&wb->offline_node, &offline_cgwbs);
	percpu_ref_kill(&wb->refcnt);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	spin_lock_irq(&cgwb_lock);
	list_del_rcu(&wb->bdi_node);
	spin_unlock_irq(&cgwb_lock);
}
static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct cgroup_subsys_state *blkcg_css;
	struct blkcg *blkcg;
	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
	struct bdi_writeback *wb;
	unsigned long flags;
	int ret = 0;

	memcg = mem_cgroup_from_css(memcg_css);
	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
	blkcg = css_to_blkcg(blkcg_css);
	memcg_cgwb_list = &memcg->cgwb_list;
	blkcg_cgwb_list = &blkcg->cgwb_list;

	/* look up again under lock and discard on blkcg mismatch */
	spin_lock_irqsave(&cgwb_lock, flags);
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb && wb->blkcg_css != blkcg_css) {
		cgwb_kill(wb);
		wb = NULL;
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (wb)
		goto out_put;

	/* need to create a new one */
	wb = kmalloc(sizeof(*wb), gfp);
	if (!wb) {
		ret = -ENOMEM;
		goto out_put;
	}

	ret = wb_init(wb, bdi, gfp);
	if (ret)
		goto err_free;

	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
	if (ret)
		goto err_wb_exit;

	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
	if (ret)
		goto err_ref_exit;

	wb->memcg_css = memcg_css;
	wb->blkcg_css = blkcg_css;
	INIT_LIST_HEAD(&wb->b_attached);
	INIT_WORK(&wb->release_work, cgwb_release_workfn);
	set_bit(WB_registered, &wb->state);

	/*
	 * The root wb determines the registered state of the whole bdi and
	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
	 * whether they're still online.  Don't link @wb if any is dead.
	 * See wb_memcg_offline() and wb_blkcg_offline().
	 */
	ret = -ENODEV;
	spin_lock_irqsave(&cgwb_lock, flags);
	if (test_bit(WB_registered, &bdi->wb.state) &&
	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
		/* we might have raced another instance of this function */
		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
		if (!ret) {
			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
			list_add(&wb->memcg_node, memcg_cgwb_list);
			list_add(&wb->blkcg_node, blkcg_cgwb_list);
			blkcg_pin_online(blkcg);
			css_get(memcg_css);
			css_get(blkcg_css);
		}
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (ret) {
		if (ret == -EEXIST)
			ret = 0;
		goto err_fprop_exit;
	}
	goto out_put;

err_fprop_exit:
	fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
	percpu_ref_exit(&wb->refcnt);
err_wb_exit:
	wb_exit(wb);
err_free:
	kfree(wb);
out_put:
	css_put(blkcg_css);
	return ret;
}
/**
 * wb_get_lookup - get wb for a given memcg
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 *
 * Try to get the wb for @memcg_css on @bdi.  The returned wb has its
 * refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css)
{
	struct bdi_writeback *wb;

	if (!memcg_css->parent)
		return &bdi->wb;

	rcu_read_lock();
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb) {
		struct cgroup_subsys_state *blkcg_css;

		/* see whether the blkcg association has changed */
		blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
		if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb)))
			wb = NULL;
		css_put(blkcg_css);
	}
	rcu_read_unlock();

	return wb;
}
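/*
 * Illustrative usage sketch (not part of this file): a caller that already
 * holds a reference on the memcg css can do, roughly,
 *
 *	struct bdi_writeback *wb = wb_get_lookup(bdi, memcg_css);
 *
 *	if (wb) {
 *		...use wb...
 *		wb_put(wb);
 *	}
 *
 * A NULL return means no wb exists yet (or the blkcg association changed);
 * callers that must have one use wb_get_create() below instead.
 */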
/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  See wb_get_lookup() for more details.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_alloc(gfp);

	if (!memcg_css->parent)
		return &bdi->wb;

	do {
		wb = wb_get_lookup(bdi, memcg_css);
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}
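/*
 * Illustrative usage sketch (not part of this file): with a pinned memcg
 * css, a caller can do, roughly,
 *
 *	wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 *	if (!wb)
 *		wb = &bdi->wb;	// creation failed, fall back to the root wb
 *	...
 *	wb_put(wb);
 *
 * which mirrors how __inode_attach_wb() in fs/fs-writeback.c falls back to
 * bdi->wb when allocation fails.
 */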
static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
	mutex_init(&bdi->cgwb_release_mutex);
	init_rwsem(&bdi->wb_switch_rwsem);

	ret = wb_init(&bdi->wb, bdi, GFP_KERNEL);
	if (!ret) {
		bdi->wb.memcg_css = &root_mem_cgroup->css;
		bdi->wb.blkcg_css = blkcg_root_css;
	}
	return ret;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
{
	struct radix_tree_iter iter;
	void **slot;
	struct bdi_writeback *wb;

	WARN_ON(test_bit(WB_registered, &bdi->wb.state));

	spin_lock_irq(&cgwb_lock);
	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
		cgwb_kill(*slot);
	spin_unlock_irq(&cgwb_lock);

	mutex_lock(&bdi->cgwb_release_mutex);
	spin_lock_irq(&cgwb_lock);
	while (!list_empty(&bdi->wb_list)) {
		wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
				      bdi_node);
		spin_unlock_irq(&cgwb_lock);
		wb_shutdown(wb);
		spin_lock_irq(&cgwb_lock);
	}
	spin_unlock_irq(&cgwb_lock);
	mutex_unlock(&bdi->cgwb_release_mutex);
}
/*
 * cleanup_offline_cgwbs_workfn - try to release dying cgwbs
 *
 * Try to release dying cgwbs by switching attached inodes to the nearest
 * living ancestor's writeback. Processed wbs are placed at the end
 * of the list to guarantee forward progress.
 */
static void cleanup_offline_cgwbs_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb;
	LIST_HEAD(processed);

	spin_lock_irq(&cgwb_lock);

	while (!list_empty(&offline_cgwbs)) {
		wb = list_first_entry(&offline_cgwbs, struct bdi_writeback,
				      offline_node);
		list_move(&wb->offline_node, &processed);

		/*
		 * If wb is dirty, cleaning up the writeback by switching
		 * attached inodes will result in an effective removal of any
		 * bandwidth restrictions, which isn't the goal.  Instead,
		 * it can be postponed until the next time, when all io
		 * is likely to have completed.  If some inodes get re-dirtied
		 * in the meantime, they will eventually be switched to
		 * a new wb.
		 */
		if (wb_has_dirty_io(wb))
			continue;

		if (!wb_tryget(wb))
			continue;

		spin_unlock_irq(&cgwb_lock);
		while (cleanup_offline_cgwb(wb))
			cond_resched();
		spin_lock_irq(&cgwb_lock);

		wb_put(wb);
	}

	if (!list_empty(&processed))
		list_splice_tail(&processed, &offline_cgwbs);

	spin_unlock_irq(&cgwb_lock);
}
/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
	struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
		cgwb_kill(wb);
	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);

	queue_work(system_unbound_wq, &cleanup_offline_cgwbs_work);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
		cgwb_kill(wb);
	blkcg->cgwb_list.next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}
static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	spin_lock_irq(&cgwb_lock);
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
	spin_unlock_irq(&cgwb_lock);
}

static int __init cgwb_init(void)
{
	/*
	 * There can be many concurrent release work items overwhelming
	 * system_wq.  Put them in a separate wq and limit concurrency.
	 * There's no point in executing many of these in parallel.
	 */
	cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
	if (!cgwb_release_wq)
		return -ENOMEM;

	return 0;
}
subsys_initcall(cgwb_init);
#else	/* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	return wb_init(&bdi->wb, bdi, GFP_KERNEL);
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	list_del_rcu(&wb->bdi_node);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
static int bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	bdi->dev = NULL;

	kref_init(&bdi->refcnt);
	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	init_waitqueue_head(&bdi->wb_waitq);

	ret = cgwb_bdi_init(bdi);

	return ret;
}
struct backing_dev_info *bdi_alloc(int node_id)
{
	struct backing_dev_info *bdi;

	bdi = kzalloc_node(sizeof(*bdi), GFP_KERNEL, node_id);
	if (!bdi)
		return NULL;

	if (bdi_init(bdi)) {
		kfree(bdi);
		return NULL;
	}
	bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
	bdi->ra_pages = VM_READAHEAD_PAGES;
	bdi->io_pages = VM_READAHEAD_PAGES;
	return bdi;
}
EXPORT_SYMBOL(bdi_alloc);
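/*
 * Illustrative lifecycle sketch (not part of this file): a driver or
 * filesystem that manages its own bdi typically does, roughly,
 *
 *	bdi = bdi_alloc(NUMA_NO_NODE);
 *	if (!bdi)
 *		return -ENOMEM;
 *	ret = bdi_register(bdi, "mydev-%d", id);	// "mydev" is made up
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 *
 * bdi_put() drops the reference taken by bdi_alloc(); the structure is
 * freed from release_bdi() once the last reference goes away.
 */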
static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp)
{
	struct rb_node **p = &bdi_tree.rb_node;
	struct rb_node *parent = NULL;
	struct backing_dev_info *bdi;

	lockdep_assert_held(&bdi_lock);

	while (*p) {
		parent = *p;
		bdi = rb_entry(parent, struct backing_dev_info, rb_node);

		if (bdi->id > id)
			p = &(*p)->rb_left;
		else if (bdi->id < id)
			p = &(*p)->rb_right;
		else
			break;
	}

	if (parentp)
		*parentp = parent;
	return p;
}
/**
 * bdi_get_by_id - lookup and get bdi from its id
 * @id: bdi id to lookup
 *
 * Find bdi matching @id and get it.  Returns NULL if the matching bdi
 * doesn't exist or is already unregistered.
 */
struct backing_dev_info *bdi_get_by_id(u64 id)
{
	struct backing_dev_info *bdi = NULL;
	struct rb_node **p;

	spin_lock_bh(&bdi_lock);
	p = bdi_lookup_rb_node(id, NULL);
	if (*p) {
		bdi = rb_entry(*p, struct backing_dev_info, rb_node);
		bdi_get(bdi);
	}
	spin_unlock_bh(&bdi_lock);

	return bdi;
}
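/*
 * Illustrative usage sketch (not part of this file): callers resolving a
 * bdi id (for example one previously reported to userspace) do, roughly,
 *
 *	struct backing_dev_info *bdi = bdi_get_by_id(id);
 *
 *	if (bdi) {
 *		...use bdi...
 *		bdi_put(bdi);
 *	}
 *
 * The reference obtained here must be dropped with bdi_put().
 */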
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
{
	struct device *dev;
	struct rb_node *parent, **p;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args);
	dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	cgwb_bdi_register(bdi);
	bdi->dev = dev;

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(WB_registered, &bdi->wb.state);

	spin_lock_bh(&bdi_lock);

	bdi->id = ++bdi_id_cursor;

	p = bdi_lookup_rb_node(bdi->id, &parent);
	rb_link_node(&bdi->rb_node, parent, p);
	rb_insert_color(&bdi->rb_node, &bdi_tree);

	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);

	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}

int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = bdi_register_va(bdi, fmt, args);
	va_end(args);
	return ret;
}
EXPORT_SYMBOL(bdi_register);

void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner)
{
	WARN_ON_ONCE(bdi->owner);
	bdi->owner = owner;
	get_device(owner);
}
/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	rb_erase(&bdi->rb_node, &bdi_tree);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	/* make sure nobody finds us on the bdi_list anymore */
	bdi_remove_from_list(bdi);
	wb_shutdown(&bdi->wb);
	cgwb_bdi_unregister(bdi);

	if (bdi->dev) {
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}

	if (bdi->owner) {
		put_device(bdi->owner);
		bdi->owner = NULL;
	}
}
static void release_bdi(struct kref *ref)
{
	struct backing_dev_info *bdi =
			container_of(ref, struct backing_dev_info, refcnt);

	if (test_bit(WB_registered, &bdi->wb.state))
		bdi_unregister(bdi);
	WARN_ON_ONCE(bdi->dev);
	wb_exit(&bdi->wb);
	kfree(bdi);
}

void bdi_put(struct backing_dev_info *bdi)
{
	kref_put(&bdi->refcnt, release_bdi);
}
EXPORT_SYMBOL(bdi_put);

const char *bdi_dev_name(struct backing_dev_info *bdi)
{
	if (!bdi || !bdi->dev)
		return bdi_unknown_name;
	return bdi->dev_name;
}
EXPORT_SYMBOL_GPL(bdi_dev_name);
static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};
static atomic_t nr_wb_congested[2];

void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	wait_queue_head_t *wqh = &congestion_wqh[sync];
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (test_and_clear_bit(bit, &bdi->wb.congested))
		atomic_dec(&nr_wb_congested[sync]);
	smp_mb__after_atomic();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (!test_and_set_bit(bit, &bdi->wb.congested))
		atomic_inc(&nr_wb_congested[sync]);
}
EXPORT_SYMBOL(set_bdi_congested);
/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(congestion_wait);
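/*
 * Illustrative usage sketch (not part of this file): reclaim and filesystem
 * code throttles itself with, roughly,
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ / 10);
 *
 * i.e. sleep for up to 100 ms unless a congested backing_dev becomes
 * uncongested or a write completes first.
 */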
/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * In the event of a congested backing_dev (any backing_dev) this waits
 * for up to @timeout jiffies for either a BDI to exit congestion of the
 * given @sync queue or a write to complete.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_wb_congested[sync]) == 0) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(wait_iff_congested);
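/*
 * Illustrative usage sketch (not part of this file): unlike congestion_wait(),
 * this only sleeps while some backing_dev is actually congested, so a reclaim
 * loop can do, roughly,
 *
 *	wait_iff_congested(BLK_RW_ASYNC, HZ / 10);
 *
 * and merely cond_resched() when nothing is congested, instead of always
 * stalling for the full timeout.
 */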