#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

struct backing_dev_info noop_backing_dev_info = {
	.name		= "noop",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long wb_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_io_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
		nr_more_io++;
	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
		if (inode->i_state & I_DIRTY_TIME)
			nr_dirty_time++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	wb_thresh = wb_calc_thresh(wb, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "b_dirty_time:       %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
		   K(wb_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
		   (unsigned long) K(wb->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   nr_dirty_time,
		   !list_empty(&bdi->bdi_list), bdi->wb.state);
#undef K

	return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif
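
/*
 * Reading the per-bdi debugfs file created above, e.g.
 * /sys/kernel/debug/bdi/8:0/stats, yields output in the format built by
 * bdi_debug_stats_show().  A sample, with illustrative values only:
 *
 *	BdiWriteback:             0 kB
 *	BdiReclaimable:         856 kB
 *	BdiDirtyThresh:      145368 kB
 *	DirtyThresh:         145368 kB
 *	BackgroundThresh:     72684 kB
 *	BdiDirtied:          104556 kB
 *	BdiWritten:          103700 kB
 *	BdiWriteBandwidth:   102400 kBps
 *	b_dirty:                  2
 *	b_io:                     0
 *	b_more_io:                0
 *	b_dirty_time:             0
 *	bdi_list:                 1
 *	state:                    2
 */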

static ssize_t read_ahead_kb_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}									\
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
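
/*
 * For clarity, the BDI_SHOW() invocation above expands (modulo
 * whitespace) to:
 *
 *	static ssize_t read_ahead_kb_show(struct device *dev,
 *			struct device_attribute *attr, char *page)
 *	{
 *		struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *		return snprintf(page, PAGE_SIZE-1, "%lld\n",
 *				(long long)K(bdi->ra_pages));
 *	}
 *	static DEVICE_ATTR_RW(read_ahead_kb);
 *
 * DEVICE_ATTR_RW(read_ahead_kb) pairs this generated show routine with
 * the read_ahead_kb_store() writer defined above.
 */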

static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)
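
/*
 * These knobs surface under the device created by bdi_register(), i.e.
 * /sys/class/bdi/<name>/.  Illustrative shell usage (device name
 * assumed):
 *
 *	echo 512 > /sys/class/bdi/8:0/read_ahead_kb
 *	echo 10  > /sys/class/bdi/8:0/min_ratio
 *	echo 60  > /sys/class/bdi/8:0/max_ratio
 *
 * bdi_set_min_ratio() and bdi_set_max_ratio() (mm/page-writeback.c)
 * return an error for inconsistent values, e.g. a min above the current
 * max, and the store routines above propagate that error to the writer.
 */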

static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n",
			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_stable_pages_required.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_groups = bdi_dev_groups;
	bdi_debug_init();
	return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
	int err;

	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
					      WQ_UNBOUND | WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;

	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
	spin_unlock_bh(&wb->work_lock);
}
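
/*
 * A simplified sketch of the fast-path caller mentioned above: when
 * __mark_inode_dirty() (fs/fs-writeback.c) moves the first dirty inode
 * onto an idle wb, it ends with roughly:
 *
 *	if (wakeup_bdi &&
 *	    (wb->bdi->capabilities & BDI_CAP_WRITEBACK_DIRTY))
 *		wb_wakeup_delayed(wb);
 *
 * The deferred wakeup batches work: by the time the timer fires,
 * 'dirty_writeback_interval' centisecs later, more inodes have usually
 * been dirtied and can be written back in a single pass.
 */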

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))

static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   int blkcg_id, gfp_t gfp)
{
	int i, err;

	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	INIT_LIST_HEAD(&wb->b_dirty_time);
	spin_lock_init(&wb->list_lock);

	wb->bw_time_stamp = jiffies;
	wb->balanced_dirty_ratelimit = INIT_BW;
	wb->dirty_ratelimit = INIT_BW;
	wb->write_bandwidth = INIT_BW;
	wb->avg_write_bandwidth = INIT_BW;

	spin_lock_init(&wb->work_lock);
	INIT_LIST_HEAD(&wb->work_list);
	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
	wb->dirty_sleep = jiffies;

	wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
	if (!wb->congested)
		return -ENOMEM;

	err = fprop_local_init_percpu(&wb->completions, gfp);
	if (err)
		goto out_put_cong;

	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
		err = percpu_counter_init(&wb->stat[i], 0, gfp);
		if (err)
			goto out_destroy_stat;
	}

	return 0;

out_destroy_stat:
	while (i--)
		percpu_counter_destroy(&wb->stat[i]);
	fprop_local_destroy_percpu(&wb->completions);
out_put_cong:
	wb_congested_put(wb->congested);
	return err;
}

/*
 * Remove bdi from the global list and shut down any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_bh(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_bh(&wb->work_lock);
		return;
	}
	spin_unlock_bh(&wb->work_lock);

	/*
	 * Drain the work list and shut down the delayed_work.  !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));
}

static void wb_exit(struct bdi_writeback *wb)
{
	int i;

	WARN_ON(delayed_work_pending(&wb->dwork));

	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
		percpu_counter_destroy(&wb->stat[i]);

	fprop_local_destroy_percpu(&wb->completions);
	wb_congested_put(wb->congested);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
 * blkcg->cgwb_list, and memcg->cgwb_list.  bdi->cgwb_tree is also RCU
 * protected.  cgwb_release_wait is used to wait for the completion of cgwb
 * releases from the bdi destruction path.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static DECLARE_WAIT_QUEUE_HEAD(cgwb_release_wait);

/**
 * wb_congested_get_create - get or create a wb_congested
 * @bdi: associated bdi
 * @blkcg_id: ID of the associated blkcg
 * @gfp: allocation mask
 *
 * Look up the wb_congested for @blkcg_id on @bdi.  If missing, create one.
 * The returned wb_congested has its reference count incremented.  Returns
 * NULL on failure.
 */
struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	struct bdi_writeback_congested *new_congested = NULL, *congested;
	struct rb_node **node, *parent;
	unsigned long flags;
retry:
	spin_lock_irqsave(&cgwb_lock, flags);

	node = &bdi->cgwb_congested_tree.rb_node;
	parent = NULL;

	while (*node != NULL) {
		parent = *node;
		congested = container_of(parent, struct bdi_writeback_congested,
					 rb_node);
		if (congested->blkcg_id < blkcg_id)
			node = &parent->rb_left;
		else if (congested->blkcg_id > blkcg_id)
			node = &parent->rb_right;
		else
			goto found;
	}

	if (new_congested) {
		/* !found and storage for new one already allocated, insert */
		congested = new_congested;
		new_congested = NULL;
		rb_link_node(&congested->rb_node, parent, node);
		rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
		goto found;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);

	/* allocate storage for new one and retry */
	new_congested = kzalloc(sizeof(*new_congested), gfp);
	if (!new_congested)
		return NULL;

	atomic_set(&new_congested->refcnt, 0);
	new_congested->bdi = bdi;
	new_congested->blkcg_id = blkcg_id;
	goto retry;

found:
	atomic_inc(&congested->refcnt);
	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(new_congested);
	return congested;
}
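
/*
 * The retry loop above is the usual "allocate outside the lock" shape:
 * the rbtree walk runs under cgwb_lock, but kzalloc(gfp) may sleep, so
 * on a miss the lock is dropped, storage is allocated, and the lookup
 * restarts from scratch.  Stripped to its skeleton (names hypothetical,
 * irq flags elided):
 *
 *	retry:
 *		spin_lock(&lock);
 *		obj = lookup(key);
 *		if (!obj && spare) {
 *			insert(spare);
 *			obj = spare;
 *			spare = NULL;
 *		}
 *		spin_unlock(&lock);
 *		if (!obj) {
 *			spare = alloc();
 *			if (!spare)
 *				return NULL;
 *			goto retry;
 *		}
 *		kfree(spare);	(no-op unless we lost a race)
 *		return obj;
 *
 * Losing a race merely wastes one allocation; the tree stays consistent
 * because insertion only ever happens under the lock.
 */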

/**
 * wb_congested_put - put a wb_congested
 * @congested: wb_congested to put
 *
 * Put @congested and destroy it if the refcnt reaches zero.
 */
void wb_congested_put(struct bdi_writeback_congested *congested)
{
	unsigned long flags;

	local_irq_save(flags);
	if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
		local_irq_restore(flags);
		return;
	}

	/* bdi might already have been destroyed leaving @congested unlinked */
	if (congested->bdi) {
		rb_erase(&congested->rb_node,
			 &congested->bdi->cgwb_congested_tree);
		congested->bdi = NULL;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(congested);
}

static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);
	struct backing_dev_info *bdi = wb->bdi;

	spin_lock_irq(&cgwb_lock);
	list_del_rcu(&wb->bdi_node);
	spin_unlock_irq(&cgwb_lock);

	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);

	fprop_local_destroy_percpu(&wb->memcg_completions);
	percpu_ref_exit(&wb->refcnt);
	wb_exit(wb);
	kfree_rcu(wb, rcu);

	if (atomic_dec_and_test(&bdi->usage_cnt))
		wake_up_all(&cgwb_release_wait);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
						refcnt);
	schedule_work(&wb->release_work);
}

static void cgwb_kill(struct bdi_writeback *wb)
{
	lockdep_assert_held(&cgwb_lock);

	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
	list_del(&wb->memcg_node);
	list_del(&wb->blkcg_node);
	percpu_ref_kill(&wb->refcnt);
}

static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct cgroup_subsys_state *blkcg_css;
	struct blkcg *blkcg;
	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
	struct bdi_writeback *wb;
	unsigned long flags;
	int ret = 0;

	memcg = mem_cgroup_from_css(memcg_css);
	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
	blkcg = css_to_blkcg(blkcg_css);
	memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
	blkcg_cgwb_list = &blkcg->cgwb_list;

	/* look up again under lock and discard on blkcg mismatch */
	spin_lock_irqsave(&cgwb_lock, flags);
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb && wb->blkcg_css != blkcg_css) {
		cgwb_kill(wb);
		wb = NULL;
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (wb)
		goto out_put;

	/* need to create a new one */
	wb = kmalloc(sizeof(*wb), gfp);
	if (!wb)
		return -ENOMEM;

	ret = wb_init(wb, bdi, blkcg_css->id, gfp);
	if (ret)
		goto err_free;

	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
	if (ret)
		goto err_wb_exit;

	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
	if (ret)
		goto err_ref_exit;

	wb->memcg_css = memcg_css;
	wb->blkcg_css = blkcg_css;
	INIT_WORK(&wb->release_work, cgwb_release_workfn);
	set_bit(WB_registered, &wb->state);

	/*
	 * The root wb determines the registered state of the whole bdi and
	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
	 * whether they're still online.  Don't link @wb if any is dead.
	 * See wb_memcg_offline() and wb_blkcg_offline().
	 */
	ret = -ENODEV;
	spin_lock_irqsave(&cgwb_lock, flags);
	if (test_bit(WB_registered, &bdi->wb.state) &&
	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
		/* we might have raced another instance of this function */
		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
		if (!ret) {
			atomic_inc(&bdi->usage_cnt);
			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
			list_add(&wb->memcg_node, memcg_cgwb_list);
			list_add(&wb->blkcg_node, blkcg_cgwb_list);
			css_get(memcg_css);
			css_get(blkcg_css);
		}
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (ret) {
		if (ret == -EEXIST)
			ret = 0;
		goto err_fprop_exit;
	}
	goto out_put;

err_fprop_exit:
	fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
	percpu_ref_exit(&wb->refcnt);
err_wb_exit:
	wb_exit(wb);
err_free:
	kfree(wb);
out_put:
	css_put(blkcg_css);
	return ret;
}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  The returned wb has its refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (!memcg_css->parent)
		return &bdi->wb;

	do {
		rcu_read_lock();
		wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
		if (wb) {
			struct cgroup_subsys_state *blkcg_css;

			/* see whether the blkcg association has changed */
			blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
						     &io_cgrp_subsys);
			if (unlikely(wb->blkcg_css != blkcg_css ||
				     !wb_tryget(wb)))
				wb = NULL;
			css_put(blkcg_css);
		}
		rcu_read_unlock();
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}
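
/*
 * A hypothetical caller, to show the expected calling convention (the
 * real writeback paths live in fs/fs-writeback.c): map an inode's memcg
 * to its per-cgroup wb, fall back to the root wb if creation fails, and
 * drop the reference when done:
 *
 *	struct bdi_writeback *wb;
 *
 *	wb = wb_get_create(inode_to_bdi(inode), memcg_css, GFP_NOFS);
 *	if (!wb)
 *		wb = &inode_to_bdi(inode)->wb;	(creation failed)
 *	... issue writeback against wb ...
 *	wb_put(wb);
 *
 * The caller must already hold a reference on memcg_css, per the
 * kernel-doc above.
 */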

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
	bdi->cgwb_congested_tree = RB_ROOT;
	atomic_set(&bdi->usage_cnt, 1);

	ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
	if (!ret) {
		bdi->wb.memcg_css = &root_mem_cgroup->css;
		bdi->wb.blkcg_css = blkcg_root_css;
	}
	return ret;
}

static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
{
	struct radix_tree_iter iter;
	struct rb_node *rbn;
	void **slot;

	WARN_ON(test_bit(WB_registered, &bdi->wb.state));

	spin_lock_irq(&cgwb_lock);

	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
		cgwb_kill(*slot);

	while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
		struct bdi_writeback_congested *congested =
			rb_entry(rbn, struct bdi_writeback_congested, rb_node);

		rb_erase(rbn, &bdi->cgwb_congested_tree);
		congested->bdi = NULL;	/* mark @congested unlinked */
	}

	spin_unlock_irq(&cgwb_lock);

	/*
	 * All cgwb's and their congested states must be shutdown and
	 * released before returning.  Drain the usage counter to wait for
	 * all cgwb's and cgwb_congested's ever created on @bdi.
	 */
	atomic_dec(&bdi->usage_cnt);
	wait_event(cgwb_release_wait, !atomic_read(&bdi->usage_cnt));
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
	LIST_HEAD(to_destroy);
	struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
		cgwb_kill(wb);
	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
	LIST_HEAD(to_destroy);
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
		cgwb_kill(wb);
	blkcg->cgwb_list.next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int err;

	bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
	if (!bdi->wb_congested)
		return -ENOMEM;

	atomic_set(&bdi->wb_congested->refcnt, 1);

	err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
	if (err) {
		wb_congested_put(bdi->wb_congested);
		return err;
	}
	return 0;
}

static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
{
	wb_congested_put(bdi->wb_congested);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

int bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	bdi->dev = NULL;

	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	init_waitqueue_head(&bdi->wb_waitq);

	ret = cgwb_bdi_init(bdi);

	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);

	return ret;
}
EXPORT_SYMBOL(bdi_init);

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		 const char *fmt, ...)
{
	va_list args;
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	va_start(args, fmt);
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
	va_end(args);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	bdi->dev = dev;

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(WB_registered, &bdi->wb.state);

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);

int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
{
	int rc;

	rc = bdi_register(bdi, NULL, "%u:%u", MAJOR(owner->devt),
			  MINOR(owner->devt));
	if (rc)
		return rc;
	bdi->owner = owner;
	get_device(owner);
	return 0;
}
EXPORT_SYMBOL(bdi_register_owner);

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	/* make sure nobody finds us on the bdi_list anymore */
	bdi_remove_from_list(bdi);
	wb_shutdown(&bdi->wb);
	cgwb_bdi_destroy(bdi);

	if (bdi->dev) {
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}

	if (bdi->owner) {
		put_device(bdi->owner);
		bdi->owner = NULL;
	}
}

void bdi_exit(struct backing_dev_info *bdi)
{
	WARN_ON_ONCE(bdi->dev);
	wb_exit(&bdi->wb);
}

void bdi_destroy(struct backing_dev_info *bdi)
{
	bdi_unregister(bdi);
	bdi_exit(bdi);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
{
	int err;

	bdi->name = name;
	bdi->capabilities = 0;
	err = bdi_init(bdi);
	if (err)
		return err;

	err = bdi_register(bdi, NULL, "%.28s-%ld", name,
			   atomic_long_inc_return(&bdi_seq));
	if (err) {
		bdi_destroy(bdi);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);
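
/*
 * Illustrative filesystem-side usage (hypothetical fill_super, error
 * handling trimmed): a filesystem without a real backing device embeds
 * a bdi in its per-sb info and wires it up at mount time:
 *
 *	err = bdi_setup_and_register(&sbi->bdi, "myfs");
 *	if (err)
 *		return err;
 *	sb->s_bdi = &sbi->bdi;
 *
 * bdi_seq gives each instance a unique "myfs-N" name; the unmount path
 * is expected to call bdi_destroy() once the superblock is torn down.
 */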

static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};
static atomic_t nr_wb_congested[2];

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	wait_queue_head_t *wqh = &congestion_wqh[sync];
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (test_and_clear_bit(bit, &congested->state))
		atomic_dec(&nr_wb_congested[sync]);
	smp_mb__after_atomic();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_wb_congested);

void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (!test_and_set_bit(bit, &congested->state))
		atomic_inc(&nr_wb_congested[sync]);
}
EXPORT_SYMBOL(set_wb_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(congestion_wait);
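
/*
 * A typical caller, sketched from the reclaim path (mm/vmscan.c, exact
 * call sites vary by kernel version): when direct reclaim keeps running
 * into dirty or congested pages, it backs off with
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ/10);
 *
 * i.e. it sleeps until an async write completes somewhere or ~100ms
 * passes, whichever comes first.
 */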

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
 * @pgdat: A pgdat to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * If a backing_dev (any backing_dev) is congested and the given @pgdat has
 * experienced recent congestion, this waits for up to @timeout jiffies for
 * either a BDI to exit congestion of the given @sync queue or a write to
 * complete.
 *
 * In the absence of pgdat congestion, cond_resched() is called to yield
 * the processor if necessary, but the function otherwise does not sleep.
 *
 * The return value is 0 if the sleep is for the full timeout.  Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned.  return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, or heavy congestion is not being
	 * encountered in the current pgdat, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_wb_congested[sync]) == 0 ||
	    !test_bit(PGDAT_CONGESTED, &pgdat->flags)) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					   jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(wait_iff_congested);

int pdflush_proc_obsolete(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char kbuf[] = "0\n";

	if (*ppos || *lenp < sizeof(kbuf)) {
		*lenp = 0;
		return 0;
	}

	if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
		return -EFAULT;
	pr_warn_once("%s exported in /proc is scheduled for removal\n",
		     table->procname);

	*lenp = 2;
	*ppos += *lenp;
	return 2;
}
1047}