// SPDX-License-Identifier: GPL-2.0-only

#include <linux/wait.h>
#include <linux/rbtree.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

struct backing_dev_info noop_backing_dev_info;
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;
static const char *bdi_unknown_name = "(unknown)";

/*
 * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
 * reader side locking.
 */
DEFINE_SPINLOCK(bdi_lock);
static u64 bdi_id_cursor;
static struct rb_root bdi_tree = RB_ROOT;
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#define K(x) ((x) << (PAGE_SHIFT - 10))

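/*
 * Worked example of K(): it converts a page count to kilobytes.  With the
 * common 4 KiB page size (PAGE_SHIFT == 12), K(x) == x << 2, so e.g.
 * K(25) == 100 kB.
 */
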
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
        bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
        struct backing_dev_info *bdi = m->private;
        struct bdi_writeback *wb = &bdi->wb;
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long wb_thresh;
        unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
        struct inode *inode;

        nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
        spin_lock(&wb->list_lock);
        list_for_each_entry(inode, &wb->b_dirty, i_io_list)
                nr_dirty++;
        list_for_each_entry(inode, &wb->b_io, i_io_list)
                nr_io++;
        list_for_each_entry(inode, &wb->b_more_io, i_io_list)
                nr_more_io++;
        list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
                if (inode->i_state & I_DIRTY_TIME)
                        nr_dirty_time++;
        spin_unlock(&wb->list_lock);

        global_dirty_limits(&background_thresh, &dirty_thresh);
        wb_thresh = wb_calc_thresh(wb, dirty_thresh);

        seq_printf(m,
                   "BdiWriteback:       %10lu kB\n"
                   "BdiReclaimable:     %10lu kB\n"
                   "BdiDirtyThresh:     %10lu kB\n"
                   "DirtyThresh:        %10lu kB\n"
                   "BackgroundThresh:   %10lu kB\n"
                   "BdiDirtied:         %10lu kB\n"
                   "BdiWritten:         %10lu kB\n"
                   "BdiWriteBandwidth:  %10lu kBps\n"
                   "b_dirty:            %10lu\n"
                   "b_io:               %10lu\n"
                   "b_more_io:          %10lu\n"
                   "b_dirty_time:       %10lu\n"
                   "bdi_list:           %10u\n"
                   "state:              %10lx\n",
                   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
                   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
                   K(wb_thresh),
                   K(dirty_thresh),
                   K(background_thresh),
                   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
                   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
                   (unsigned long) K(wb->write_bandwidth),
                   nr_dirty,
                   nr_io,
                   nr_more_io,
                   nr_dirty_time,
                   !list_empty(&bdi->bdi_list), bdi->wb.state);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
        bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);

        debugfs_create_file("stats", 0444, bdi->debug_dir, bdi,
                            &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
        debugfs_remove_recursive(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
                                      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned long read_ahead_kb;
        ssize_t ret;

        ret = kstrtoul(buf, 10, &read_ahead_kb);
        if (ret < 0)
                return ret;

        bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

        return count;
}
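
/*
 * Sysfs usage example (illustrative sketch; assumes a bdi named "8:0" and
 * 4 KiB pages):
 *
 *      echo 512 > /sys/class/bdi/8:0/read_ahead_kb
 *
 * stores 512 >> (PAGE_SHIFT - 10) == 128 pages in bdi->ra_pages.
 */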

#define BDI_SHOW(name, expr)                                            \
static ssize_t name##_show(struct device *dev,                          \
                           struct device_attribute *attr, char *buf)   \
{                                                                       \
        struct backing_dev_info *bdi = dev_get_drvdata(dev);            \
                                                                        \
        return sysfs_emit(buf, "%lld\n", (long long)expr);              \
}                                                                       \
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
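
/*
 * The invocation above expands (roughly) to:
 *
 *      static ssize_t read_ahead_kb_show(struct device *dev,
 *                         struct device_attribute *attr, char *buf)
 *      {
 *              struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *              return sysfs_emit(buf, "%lld\n", (long long)K(bdi->ra_pages));
 *      }
 *      static DEVICE_ATTR_RW(read_ahead_kb);
 */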

static ssize_t min_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned int ratio;
        ssize_t ret;

        ret = kstrtouint(buf, 10, &ratio);
        if (ret < 0)
                return ret;

        ret = bdi_set_min_ratio(bdi, ratio);
        if (!ret)
                ret = count;

        return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned int ratio;
        ssize_t ret;

        ret = kstrtouint(buf, 10, &ratio);
        if (ret < 0)
                return ret;

        ret = bdi_set_max_ratio(bdi, ratio);
        if (!ret)
                ret = count;

        return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

static ssize_t stable_pages_required_show(struct device *dev,
                                          struct device_attribute *attr,
                                          char *buf)
{
        dev_warn_once(dev,
                "the stable_pages_required attribute has been removed. Use the stable_writes queue attribute instead.\n");
        return sysfs_emit(buf, "%d\n", 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
        &dev_attr_read_ahead_kb.attr,
        &dev_attr_min_ratio.attr,
        &dev_attr_max_ratio.attr,
        &dev_attr_stable_pages_required.attr,
        NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static __init int bdi_class_init(void)
{
        bdi_class = class_create(THIS_MODULE, "bdi");
        if (IS_ERR(bdi_class))
                return PTR_ERR(bdi_class);

        bdi_class->dev_groups = bdi_dev_groups;
        bdi_debug_init();

        return 0;
}
postcore_initcall(bdi_class_init);

static int bdi_init(struct backing_dev_info *bdi);

static int __init default_bdi_init(void)
{
        int err;

        bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
                                 WQ_SYSFS, 0);
        if (!bdi_wq)
                return -ENOMEM;

        err = bdi_init(&noop_backing_dev_info);

        return err;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread, which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
        unsigned long timeout;

        timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
        spin_lock_bh(&wb->work_lock);
        if (test_bit(WB_registered, &wb->state))
                queue_delayed_work(bdi_wq, &wb->dwork, timeout);
        spin_unlock_bh(&wb->work_lock);
}
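
/*
 * Illustrative call site (a sketch of the fast path, not code from this
 * file): __mark_inode_dirty() effectively ends with
 *
 *      if (wakeup_bdi &&
 *          (wb->bdi->capabilities & BDI_CAP_WRITEBACK))
 *              wb_wakeup_delayed(wb);
 *
 * so the flusher is kicked at most once per dirtying burst.
 */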

static void wb_update_bandwidth_workfn(struct work_struct *work)
{
        struct bdi_writeback *wb = container_of(to_delayed_work(work),
                                                struct bdi_writeback, bw_dwork);

        wb_update_bandwidth(wb);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW         (100 << (20 - PAGE_SHIFT))
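
/*
 * Worked out: INIT_BW is in pages per second.  With 4 KiB pages
 * (PAGE_SHIFT == 12), 100 << (20 - 12) == 25600 pages/s, i.e. 100 MB/s.
 */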

static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
                   gfp_t gfp)
{
        int i, err;

        memset(wb, 0, sizeof(*wb));

        if (wb != &bdi->wb)
                bdi_get(bdi);
        wb->bdi = bdi;
        wb->last_old_flush = jiffies;
        INIT_LIST_HEAD(&wb->b_dirty);
        INIT_LIST_HEAD(&wb->b_io);
        INIT_LIST_HEAD(&wb->b_more_io);
        INIT_LIST_HEAD(&wb->b_dirty_time);
        spin_lock_init(&wb->list_lock);

        atomic_set(&wb->writeback_inodes, 0);
        wb->bw_time_stamp = jiffies;
        wb->balanced_dirty_ratelimit = INIT_BW;
        wb->dirty_ratelimit = INIT_BW;
        wb->write_bandwidth = INIT_BW;
        wb->avg_write_bandwidth = INIT_BW;

        spin_lock_init(&wb->work_lock);
        INIT_LIST_HEAD(&wb->work_list);
        INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
        INIT_DELAYED_WORK(&wb->bw_dwork, wb_update_bandwidth_workfn);
        wb->dirty_sleep = jiffies;

        err = fprop_local_init_percpu(&wb->completions, gfp);
        if (err)
                goto out_put_bdi;

        for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
                err = percpu_counter_init(&wb->stat[i], 0, gfp);
                if (err)
                        goto out_destroy_stat;
        }

        return 0;

out_destroy_stat:
        while (i--)
                percpu_counter_destroy(&wb->stat[i]);
        fprop_local_destroy_percpu(&wb->completions);
out_put_bdi:
        if (wb != &bdi->wb)
                bdi_put(bdi);
        return err;
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);

/*
 * Remove bdi from the global list and shut down any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
        /* Make sure nobody queues further work */
        spin_lock_bh(&wb->work_lock);
        if (!test_and_clear_bit(WB_registered, &wb->state)) {
                spin_unlock_bh(&wb->work_lock);
                return;
        }
        spin_unlock_bh(&wb->work_lock);

        cgwb_remove_from_bdi_list(wb);
        /*
         * Drain work list and shutdown the delayed_work. !WB_registered
         * tells wb_workfn() that @wb is dying and its work_list needs to
         * be drained no matter what.
         */
        mod_delayed_work(bdi_wq, &wb->dwork, 0);
        flush_delayed_work(&wb->dwork);
        WARN_ON(!list_empty(&wb->work_list));
        flush_delayed_work(&wb->bw_dwork);
}

static void wb_exit(struct bdi_writeback *wb)
{
        int i;

        WARN_ON(delayed_work_pending(&wb->dwork));

        for (i = 0; i < NR_WB_STAT_ITEMS; i++)
                percpu_counter_destroy(&wb->stat[i]);

        fprop_local_destroy_percpu(&wb->completions);
        if (wb != &wb->bdi->wb)
                bdi_put(wb->bdi);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, offline_cgwbs and
 * memcg->cgwb_list.  bdi->cgwb_tree is also RCU protected.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static struct workqueue_struct *cgwb_release_wq;

static LIST_HEAD(offline_cgwbs);
static void cleanup_offline_cgwbs_workfn(struct work_struct *work);
static DECLARE_WORK(cleanup_offline_cgwbs_work, cleanup_offline_cgwbs_workfn);

static void cgwb_release_workfn(struct work_struct *work)
{
        struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
                                                release_work);
        struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css);

        mutex_lock(&wb->bdi->cgwb_release_mutex);
        wb_shutdown(wb);

        css_put(wb->memcg_css);
        css_put(wb->blkcg_css);
        mutex_unlock(&wb->bdi->cgwb_release_mutex);

        /* triggers blkg destruction if no online users left */
        blkcg_unpin_online(blkcg);

        fprop_local_destroy_percpu(&wb->memcg_completions);

        spin_lock_irq(&cgwb_lock);
        list_del(&wb->offline_node);
        spin_unlock_irq(&cgwb_lock);

        percpu_ref_exit(&wb->refcnt);
        wb_exit(wb);
        WARN_ON_ONCE(!list_empty(&wb->b_attached));
        kfree_rcu(wb, rcu);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
        struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
                                                refcnt);
        queue_work(cgwb_release_wq, &wb->release_work);
}

static void cgwb_kill(struct bdi_writeback *wb)
{
        lockdep_assert_held(&cgwb_lock);

        WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
        list_del(&wb->memcg_node);
        list_del(&wb->blkcg_node);
        list_add(&wb->offline_node, &offline_cgwbs);
        percpu_ref_kill(&wb->refcnt);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
        spin_lock_irq(&cgwb_lock);
        list_del_rcu(&wb->bdi_node);
        spin_unlock_irq(&cgwb_lock);
}

static int cgwb_create(struct backing_dev_info *bdi,
                       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
        struct mem_cgroup *memcg;
        struct cgroup_subsys_state *blkcg_css;
        struct blkcg *blkcg;
        struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
        struct bdi_writeback *wb;
        unsigned long flags;
        int ret = 0;

        memcg = mem_cgroup_from_css(memcg_css);
        blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
        blkcg = css_to_blkcg(blkcg_css);
        memcg_cgwb_list = &memcg->cgwb_list;
        blkcg_cgwb_list = &blkcg->cgwb_list;

        /* look up again under lock and discard on blkcg mismatch */
        spin_lock_irqsave(&cgwb_lock, flags);
        wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
        if (wb && wb->blkcg_css != blkcg_css) {
                cgwb_kill(wb);
                wb = NULL;
        }
        spin_unlock_irqrestore(&cgwb_lock, flags);
        if (wb)
                goto out_put;

        /* need to create a new one */
        wb = kmalloc(sizeof(*wb), gfp);
        if (!wb) {
                ret = -ENOMEM;
                goto out_put;
        }

        ret = wb_init(wb, bdi, gfp);
        if (ret)
                goto err_free;

        ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
        if (ret)
                goto err_wb_exit;

        ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
        if (ret)
                goto err_ref_exit;

        wb->memcg_css = memcg_css;
        wb->blkcg_css = blkcg_css;
        INIT_LIST_HEAD(&wb->b_attached);
        INIT_WORK(&wb->release_work, cgwb_release_workfn);
        set_bit(WB_registered, &wb->state);

        /*
         * The root wb determines the registered state of the whole bdi and
         * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
         * whether they're still online.  Don't link @wb if any is dead.
         * See wb_memcg_offline() and wb_blkcg_offline().
         */
        ret = -ENODEV;
        spin_lock_irqsave(&cgwb_lock, flags);
        if (test_bit(WB_registered, &bdi->wb.state) &&
            blkcg_cgwb_list->next && memcg_cgwb_list->next) {
                /* we might have raced another instance of this function */
                ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
                if (!ret) {
                        list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
                        list_add(&wb->memcg_node, memcg_cgwb_list);
                        list_add(&wb->blkcg_node, blkcg_cgwb_list);
                        blkcg_pin_online(blkcg);
                        css_get(memcg_css);
                        css_get(blkcg_css);
                }
        }
        spin_unlock_irqrestore(&cgwb_lock, flags);
        if (ret) {
                if (ret == -EEXIST)
                        ret = 0;
                goto err_fprop_exit;
        }
        goto out_put;

err_fprop_exit:
        fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
        percpu_ref_exit(&wb->refcnt);
err_wb_exit:
        wb_exit(wb);
err_free:
        kfree(wb);
out_put:
        css_put(blkcg_css);
        return ret;
}
542
52ebea74 543/**
ed288dc0 544 * wb_get_lookup - get wb for a given memcg
52ebea74
TH
545 * @bdi: target bdi
546 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
52ebea74 547 *
ed288dc0
TH
548 * Try to get the wb for @memcg_css on @bdi. The returned wb has its
549 * refcount incremented.
52ebea74
TH
550 *
551 * This function uses css_get() on @memcg_css and thus expects its refcnt
552 * to be positive on invocation. IOW, rcu_read_lock() protection on
553 * @memcg_css isn't enough. try_get it before calling this function.
554 *
555 * A wb is keyed by its associated memcg. As blkcg implicitly enables
556 * memcg on the default hierarchy, memcg association is guaranteed to be
557 * more specific (equal or descendant to the associated blkcg) and thus can
558 * identify both the memcg and blkcg associations.
559 *
560 * Because the blkcg associated with a memcg may change as blkcg is enabled
561 * and disabled closer to root in the hierarchy, each wb keeps track of
562 * both the memcg and blkcg associated with it and verifies the blkcg on
563 * each lookup. On mismatch, the existing wb is discarded and a new one is
564 * created.
565 */
ed288dc0
TH
566struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
567 struct cgroup_subsys_state *memcg_css)
568{
569 struct bdi_writeback *wb;
570
571 if (!memcg_css->parent)
572 return &bdi->wb;
573
574 rcu_read_lock();
575 wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
576 if (wb) {
577 struct cgroup_subsys_state *blkcg_css;
578
579 /* see whether the blkcg association has changed */
580 blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
581 if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb)))
582 wb = NULL;
583 css_put(blkcg_css);
584 }
585 rcu_read_unlock();
586
587 return wb;
588}
589
590/**
591 * wb_get_create - get wb for a given memcg, create if necessary
592 * @bdi: target bdi
593 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
594 * @gfp: allocation mask to use
595 *
596 * Try to get the wb for @memcg_css on @bdi. If it doesn't exist, try to
597 * create one. See wb_get_lookup() for more details.
598 */
52ebea74
TH
599struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
600 struct cgroup_subsys_state *memcg_css,
601 gfp_t gfp)
6467716a 602{
52ebea74
TH
603 struct bdi_writeback *wb;
604
c1ca59a1 605 might_alloc(gfp);
52ebea74
TH
606
607 if (!memcg_css->parent)
608 return &bdi->wb;
609
610 do {
ed288dc0 611 wb = wb_get_lookup(bdi, memcg_css);
52ebea74
TH
612 } while (!wb && !cgwb_create(bdi, memcg_css, gfp));
613
614 return wb;
615}
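
/*
 * Typical usage (sketch; assumes the caller already holds a positive css
 * reference on memcg_css, as required above):
 *
 *      struct bdi_writeback *wb;
 *
 *      wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 *      if (wb) {
 *              ... issue writeback against wb ...
 *              wb_put(wb);
 *      }
 */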

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
        int ret;

        INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
        mutex_init(&bdi->cgwb_release_mutex);
        init_rwsem(&bdi->wb_switch_rwsem);

        ret = wb_init(&bdi->wb, bdi, GFP_KERNEL);
        if (!ret) {
                bdi->wb.memcg_css = &root_mem_cgroup->css;
                bdi->wb.blkcg_css = blkcg_root_css;
        }
        return ret;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
{
        struct radix_tree_iter iter;
        void **slot;
        struct bdi_writeback *wb;

        WARN_ON(test_bit(WB_registered, &bdi->wb.state));

        spin_lock_irq(&cgwb_lock);
        radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
                cgwb_kill(*slot);
        spin_unlock_irq(&cgwb_lock);

        mutex_lock(&bdi->cgwb_release_mutex);
        spin_lock_irq(&cgwb_lock);
        while (!list_empty(&bdi->wb_list)) {
                wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
                                      bdi_node);
                spin_unlock_irq(&cgwb_lock);
                wb_shutdown(wb);
                spin_lock_irq(&cgwb_lock);
        }
        spin_unlock_irq(&cgwb_lock);
        mutex_unlock(&bdi->cgwb_release_mutex);
}

/*
 * cleanup_offline_cgwbs_workfn - try to release dying cgwbs
 *
 * Try to release dying cgwbs by switching attached inodes to the nearest
 * living ancestor's writeback.  Processed wbs are placed at the end of the
 * list to guarantee forward progress.
 */
static void cleanup_offline_cgwbs_workfn(struct work_struct *work)
{
        struct bdi_writeback *wb;
        LIST_HEAD(processed);

        spin_lock_irq(&cgwb_lock);

        while (!list_empty(&offline_cgwbs)) {
                wb = list_first_entry(&offline_cgwbs, struct bdi_writeback,
                                      offline_node);
                list_move(&wb->offline_node, &processed);

                /*
                 * If wb is dirty, cleaning up the writeback by switching
                 * attached inodes would effectively remove any bandwidth
                 * restrictions, which isn't the goal.  Instead, it can be
                 * postponed until the next run, by which time all of the
                 * IO will likely have completed.  If some inodes get
                 * re-dirtied in the meantime, they will eventually be
                 * switched to a new cgwb.
                 */
                if (wb_has_dirty_io(wb))
                        continue;

                if (!wb_tryget(wb))
                        continue;

                spin_unlock_irq(&cgwb_lock);
                while (cleanup_offline_cgwb(wb))
                        cond_resched();
                spin_lock_irq(&cgwb_lock);

                wb_put(wb);
        }

        if (!list_empty(&processed))
                list_splice_tail(&processed, &offline_cgwbs);

        spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
        struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
        struct bdi_writeback *wb, *next;

        spin_lock_irq(&cgwb_lock);
        list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
                cgwb_kill(wb);
        memcg_cgwb_list->next = NULL;   /* prevent new wb's */
        spin_unlock_irq(&cgwb_lock);

        queue_work(system_unbound_wq, &cleanup_offline_cgwbs_work);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
        struct bdi_writeback *wb, *next;

        spin_lock_irq(&cgwb_lock);
        list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
                cgwb_kill(wb);
        blkcg->cgwb_list.next = NULL;   /* prevent new wb's */
        spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
        spin_lock_irq(&cgwb_lock);
        list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
        spin_unlock_irq(&cgwb_lock);
}

static int __init cgwb_init(void)
{
        /*
         * There can be many concurrent release work items overwhelming
         * system_wq.  Put them in a separate wq and limit concurrency.
         * There's no point in executing many of these in parallel.
         */
        cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
        if (!cgwb_release_wq)
                return -ENOMEM;

        return 0;
}
subsys_initcall(cgwb_init);

#else   /* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
        return wb_init(&bdi->wb, bdi, GFP_KERNEL);
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
        list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
        list_del_rcu(&wb->bdi_node);
}

#endif  /* CONFIG_CGROUP_WRITEBACK */

static int bdi_init(struct backing_dev_info *bdi)
{
        int ret;

        bdi->dev = NULL;

        kref_init(&bdi->refcnt);
        bdi->min_ratio = 0;
        bdi->max_ratio = 100;
        bdi->max_prop_frac = FPROP_FRAC_BASE;
        INIT_LIST_HEAD(&bdi->bdi_list);
        INIT_LIST_HEAD(&bdi->wb_list);
        init_waitqueue_head(&bdi->wb_waitq);

        ret = cgwb_bdi_init(bdi);

        return ret;
}

struct backing_dev_info *bdi_alloc(int node_id)
{
        struct backing_dev_info *bdi;

        bdi = kzalloc_node(sizeof(*bdi), GFP_KERNEL, node_id);
        if (!bdi)
                return NULL;

        if (bdi_init(bdi)) {
                kfree(bdi);
                return NULL;
        }
        bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
        bdi->ra_pages = VM_READAHEAD_PAGES;
        bdi->io_pages = VM_READAHEAD_PAGES;
        timer_setup(&bdi->laptop_mode_wb_timer, laptop_mode_timer_fn, 0);
        return bdi;
}
EXPORT_SYMBOL(bdi_alloc);

static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp)
{
        struct rb_node **p = &bdi_tree.rb_node;
        struct rb_node *parent = NULL;
        struct backing_dev_info *bdi;

        lockdep_assert_held(&bdi_lock);

        while (*p) {
                parent = *p;
                bdi = rb_entry(parent, struct backing_dev_info, rb_node);

                if (bdi->id > id)
                        p = &(*p)->rb_left;
                else if (bdi->id < id)
                        p = &(*p)->rb_right;
                else
                        break;
        }

        if (parentp)
                *parentp = parent;
        return p;
}

/**
 * bdi_get_by_id - lookup and get bdi from its id
 * @id: bdi id to lookup
 *
 * Find bdi matching @id and get it.  Returns NULL if the matching bdi
 * doesn't exist or is already unregistered.
 */
struct backing_dev_info *bdi_get_by_id(u64 id)
{
        struct backing_dev_info *bdi = NULL;
        struct rb_node **p;

        spin_lock_bh(&bdi_lock);
        p = bdi_lookup_rb_node(id, NULL);
        if (*p) {
                bdi = rb_entry(*p, struct backing_dev_info, rb_node);
                bdi_get(bdi);
        }
        spin_unlock_bh(&bdi_lock);

        return bdi;
}

int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
{
        struct device *dev;
        struct rb_node *parent, **p;

        if (bdi->dev)   /* The driver needs to use separate queues per device */
                return 0;

        vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args);
        dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name);
        if (IS_ERR(dev))
                return PTR_ERR(dev);

        cgwb_bdi_register(bdi);
        bdi->dev = dev;

        bdi_debug_register(bdi, dev_name(dev));
        set_bit(WB_registered, &bdi->wb.state);

        spin_lock_bh(&bdi_lock);

        bdi->id = ++bdi_id_cursor;

        p = bdi_lookup_rb_node(bdi->id, &parent);
        rb_link_node(&bdi->rb_node, parent, p);
        rb_insert_color(&bdi->rb_node, &bdi_tree);

        list_add_tail_rcu(&bdi->bdi_list, &bdi_list);

        spin_unlock_bh(&bdi_lock);

        trace_writeback_bdi_register(bdi);
        return 0;
}

int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
{
        va_list args;
        int ret;

        va_start(args, fmt);
        ret = bdi_register_va(bdi, fmt, args);
        va_end(args);
        return ret;
}
EXPORT_SYMBOL(bdi_register);
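
/*
 * Typical owner-side lifecycle (sketch, not code from this file): a block
 * device driver allocates, registers and eventually drops a bdi roughly as
 *
 *      struct backing_dev_info *bdi = bdi_alloc(NUMA_NO_NODE);
 *
 *      if (!bdi)
 *              return -ENOMEM;
 *      err = bdi_register(bdi, "%u:%u", MAJOR(devt), MINOR(devt));
 *      ...
 *      bdi_unregister(bdi);
 *      bdi_put(bdi);
 */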

void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner)
{
        WARN_ON_ONCE(bdi->owner);
        bdi->owner = owner;
        get_device(owner);
}

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
        spin_lock_bh(&bdi_lock);
        rb_erase(&bdi->rb_node, &bdi_tree);
        list_del_rcu(&bdi->bdi_list);
        spin_unlock_bh(&bdi_lock);

        synchronize_rcu_expedited();
}

void bdi_unregister(struct backing_dev_info *bdi)
{
        del_timer_sync(&bdi->laptop_mode_wb_timer);

        /* make sure nobody finds us on the bdi_list anymore */
        bdi_remove_from_list(bdi);
        wb_shutdown(&bdi->wb);
        cgwb_bdi_unregister(bdi);

        if (bdi->dev) {
                bdi_debug_unregister(bdi);
                device_unregister(bdi->dev);
                bdi->dev = NULL;
        }

        if (bdi->owner) {
                put_device(bdi->owner);
                bdi->owner = NULL;
        }
}

static void release_bdi(struct kref *ref)
{
        struct backing_dev_info *bdi =
                container_of(ref, struct backing_dev_info, refcnt);

        if (test_bit(WB_registered, &bdi->wb.state))
                bdi_unregister(bdi);
        WARN_ON_ONCE(bdi->dev);
        wb_exit(&bdi->wb);
        kfree(bdi);
}

void bdi_put(struct backing_dev_info *bdi)
{
        kref_put(&bdi->refcnt, release_bdi);
}
EXPORT_SYMBOL(bdi_put);

const char *bdi_dev_name(struct backing_dev_info *bdi)
{
        if (!bdi || !bdi->dev)
                return bdi_unknown_name;
        return bdi->dev_name;
}
EXPORT_SYMBOL_GPL(bdi_dev_name);

static wait_queue_head_t congestion_wqh[2] = {
        __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
        __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
};
static atomic_t nr_wb_congested[2];

void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        wait_queue_head_t *wqh = &congestion_wqh[sync];
        enum wb_congested_state bit;

        bit = sync ? WB_sync_congested : WB_async_congested;
        if (test_and_clear_bit(bit, &bdi->wb.congested))
                atomic_dec(&nr_wb_congested[sync]);
        smp_mb__after_atomic();
        if (waitqueue_active(wqh))
                wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        enum wb_congested_state bit;

        bit = sync ? WB_sync_congested : WB_async_congested;
        if (!test_and_set_bit(bit, &bdi->wb.congested))
                atomic_inc(&nr_wb_congested[sync]);
}
EXPORT_SYMBOL(set_bdi_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

        trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
                                        jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(congestion_wait);
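
/*
 * Example caller (sketch, not code from this file): memory reclaim
 * throttles itself with
 *
 *      congestion_wait(BLK_RW_ASYNC, HZ/10);
 *
 * i.e. it sleeps for up to a tenth of a second, or until a write completes
 * on some congested backing device.
 */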

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a write to complete
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * In the event of a congested backing_dev (any backing_dev) this waits
 * for up to @timeout jiffies for either a BDI to exit congestion of the
 * given @sync queue or a write to complete.
 *
 * The return value is 0 if the sleep is for the full timeout.  Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned.  return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        /*
         * If there is no congestion, yield if necessary instead
         * of sleeping on the congestion queue
         */
        if (atomic_read(&nr_wb_congested[sync]) == 0) {
                cond_resched();

                /* In case we scheduled, work out time remaining */
                ret = timeout - (jiffies - start);
                if (ret < 0)
                        ret = 0;

                goto out;
        }

        /* Sleep until uncongested or a write happens */
        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

out:
        trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
                                           jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(wait_iff_congested);