#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

struct backing_dev_info noop_backing_dev_info = {
	.name		= "noop",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
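
/*
 * Illustrative sketch (not from the original source): the RCU reader side
 * mentioned above. Writers modify bdi_list under bdi_lock; readers walk it
 * locklessly, e.g.:
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
 *		do_something(bdi);	(do_something() is a placeholder)
 *	rcu_read_unlock();
 */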

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long wb_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_io_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
		nr_more_io++;
	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
		if (inode->i_state & I_DIRTY_TIME)
			nr_dirty_time++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	wb_thresh = wb_calc_thresh(wb, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
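	/*
	 * Note (added for clarity): K() converts a page count to kilobytes;
	 * with 4 KiB pages (PAGE_SHIFT == 12) it shifts left by 2, so
	 * K(x) == x * 4.
	 */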
	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "b_dirty_time:       %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
		   K(wb_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
		   (unsigned long) K(wb->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   nr_dirty_time,
		   !list_empty(&bdi->bdi_list), bdi->wb.state);
#undef K

	return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}									\
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
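
/*
 * Illustrative note (not from the original source): BDI_SHOW(read_ahead_kb,
 * K(bdi->ra_pages)) expands to a read_ahead_kb_show() that prints the
 * device's readahead window in kB, and DEVICE_ATTR_RW(read_ahead_kb) pairs
 * it with read_ahead_kb_store() above to form the read_ahead_kb sysfs
 * attribute.
 */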

static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n",
			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_stable_pages_required.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_groups = bdi_dev_groups;
	bdi_debug_init();
	return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
	int err;

	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
					      WQ_UNBOUND | WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;

	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
	spin_unlock_bh(&wb->work_lock);
}
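
/*
 * Note (added for clarity): queue_delayed_work() is a no-op when @dwork is
 * already pending, whereas mod_delayed_work() would reset the timer; that
 * is what keeps an earlier-scheduled wake-up from being postponed here.
 */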

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))
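/*
 * Worked example (added for clarity): bandwidth is tracked in pages per
 * second, so 100 MB/s is (100 << 20) bytes >> PAGE_SHIFT; with 4 KiB pages
 * INIT_BW == 100 << (20 - 12) == 25600 pages/s.
 */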

static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   int blkcg_id, gfp_t gfp)
{
	int i, err;

	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	INIT_LIST_HEAD(&wb->b_dirty_time);
	spin_lock_init(&wb->list_lock);

	wb->bw_time_stamp = jiffies;
	wb->balanced_dirty_ratelimit = INIT_BW;
	wb->dirty_ratelimit = INIT_BW;
	wb->write_bandwidth = INIT_BW;
	wb->avg_write_bandwidth = INIT_BW;

	spin_lock_init(&wb->work_lock);
	INIT_LIST_HEAD(&wb->work_list);
	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
	wb->dirty_sleep = jiffies;

	wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
	if (!wb->congested)
		return -ENOMEM;

	err = fprop_local_init_percpu(&wb->completions, gfp);
	if (err)
		goto out_put_cong;

	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
		err = percpu_counter_init(&wb->stat[i], 0, gfp);
		if (err)
			goto out_destroy_stat;
	}

	return 0;

out_destroy_stat:
	while (i--)
		percpu_counter_destroy(&wb->stat[i]);
	fprop_local_destroy_percpu(&wb->completions);
out_put_cong:
	wb_congested_put(wb->congested);
	return err;
}

/*
 * Remove bdi from the global list and shut down any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_bh(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_bh(&wb->work_lock);
		return;
	}
	spin_unlock_bh(&wb->work_lock);

	/*
	 * Drain work list and shutdown the delayed_work. !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));
}

static void wb_exit(struct bdi_writeback *wb)
{
	int i;

	WARN_ON(delayed_work_pending(&wb->dwork));

	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
		percpu_counter_destroy(&wb->stat[i]);

	fprop_local_destroy_percpu(&wb->completions);
	wb_congested_put(wb->congested);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
 * blkcg->cgwb_list, and memcg->cgwb_list.  bdi->cgwb_tree is also RCU
 * protected.  cgwb_release_wait is used to wait for the completion of cgwb
 * releases from the bdi destruction path.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static DECLARE_WAIT_QUEUE_HEAD(cgwb_release_wait);

/**
 * wb_congested_get_create - get or create a wb_congested
 * @bdi: associated bdi
 * @blkcg_id: ID of the associated blkcg
 * @gfp: allocation mask
 *
 * Look up the wb_congested for @blkcg_id on @bdi.  If missing, create one.
 * The returned wb_congested has its reference count incremented.  Returns
 * NULL on failure.
 */
struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	struct bdi_writeback_congested *new_congested = NULL, *congested;
	struct rb_node **node, *parent;
	unsigned long flags;
retry:
	spin_lock_irqsave(&cgwb_lock, flags);

	node = &bdi->cgwb_congested_tree.rb_node;
	parent = NULL;

	while (*node != NULL) {
		parent = *node;
		congested = container_of(parent, struct bdi_writeback_congested,
					 rb_node);
		if (congested->blkcg_id < blkcg_id)
			node = &parent->rb_left;
		else if (congested->blkcg_id > blkcg_id)
			node = &parent->rb_right;
		else
			goto found;
	}

	if (new_congested) {
		/* !found and storage for new one already allocated, insert */
		congested = new_congested;
		new_congested = NULL;
		rb_link_node(&congested->rb_node, parent, node);
		rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
		goto found;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);

	/* allocate storage for new one and retry */
	new_congested = kzalloc(sizeof(*new_congested), gfp);
	if (!new_congested)
		return NULL;

	atomic_set(&new_congested->refcnt, 0);
	new_congested->bdi = bdi;
	new_congested->blkcg_id = blkcg_id;
	goto retry;

found:
	atomic_inc(&congested->refcnt);
	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(new_congested);
	return congested;
}
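
/*
 * Note (added for clarity): the function above uses the common
 * allocate-outside-the-lock pattern - drop cgwb_lock, kzalloc() a new node
 * (which may sleep depending on @gfp), then retry the lookup with the
 * preallocated node in hand.  On the found path, kfree(new_congested) is a
 * no-op when no allocation was made (new_congested is still NULL).
 */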

/**
 * wb_congested_put - put a wb_congested
 * @congested: wb_congested to put
 *
 * Put @congested and destroy it if the refcnt reaches zero.
 */
void wb_congested_put(struct bdi_writeback_congested *congested)
{
	unsigned long flags;

	local_irq_save(flags);
	if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
		local_irq_restore(flags);
		return;
	}

	/* bdi might already have been destroyed leaving @congested unlinked */
	if (congested->bdi) {
		rb_erase(&congested->rb_node,
			 &congested->bdi->cgwb_congested_tree);
		congested->bdi = NULL;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(congested);
}

static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);
	struct backing_dev_info *bdi = wb->bdi;

	spin_lock_irq(&cgwb_lock);
	list_del_rcu(&wb->bdi_node);
	spin_unlock_irq(&cgwb_lock);

	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);

	fprop_local_destroy_percpu(&wb->memcg_completions);
	percpu_ref_exit(&wb->refcnt);
	wb_exit(wb);
	kfree_rcu(wb, rcu);

	if (atomic_dec_and_test(&bdi->usage_cnt))
		wake_up_all(&cgwb_release_wait);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
						refcnt);
	schedule_work(&wb->release_work);
}

static void cgwb_kill(struct bdi_writeback *wb)
{
	lockdep_assert_held(&cgwb_lock);

	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
	list_del(&wb->memcg_node);
	list_del(&wb->blkcg_node);
	percpu_ref_kill(&wb->refcnt);
}

static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct cgroup_subsys_state *blkcg_css;
	struct blkcg *blkcg;
	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
	struct bdi_writeback *wb;
	unsigned long flags;
	int ret = 0;

	memcg = mem_cgroup_from_css(memcg_css);
	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
	blkcg = css_to_blkcg(blkcg_css);
	memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
	blkcg_cgwb_list = &blkcg->cgwb_list;

	/* look up again under lock and discard on blkcg mismatch */
	spin_lock_irqsave(&cgwb_lock, flags);
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb && wb->blkcg_css != blkcg_css) {
		cgwb_kill(wb);
		wb = NULL;
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (wb)
		goto out_put;

	/* need to create a new one */
	wb = kmalloc(sizeof(*wb), gfp);
	if (!wb)
		return -ENOMEM;

	ret = wb_init(wb, bdi, blkcg_css->id, gfp);
	if (ret)
		goto err_free;

	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
	if (ret)
		goto err_wb_exit;

	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
	if (ret)
		goto err_ref_exit;

	wb->memcg_css = memcg_css;
	wb->blkcg_css = blkcg_css;
	INIT_WORK(&wb->release_work, cgwb_release_workfn);
	set_bit(WB_registered, &wb->state);

	/*
	 * The root wb determines the registered state of the whole bdi and
	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
	 * whether they're still online.  Don't link @wb if any is dead.
	 * See wb_memcg_offline() and wb_blkcg_offline().
	 */
	ret = -ENODEV;
	spin_lock_irqsave(&cgwb_lock, flags);
	if (test_bit(WB_registered, &bdi->wb.state) &&
	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
		/* we might have raced another instance of this function */
		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
		if (!ret) {
			atomic_inc(&bdi->usage_cnt);
			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
			list_add(&wb->memcg_node, memcg_cgwb_list);
			list_add(&wb->blkcg_node, blkcg_cgwb_list);
			css_get(memcg_css);
			css_get(blkcg_css);
		}
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (ret) {
		if (ret == -EEXIST)
			ret = 0;
		goto err_fprop_exit;
	}
	goto out_put;

err_fprop_exit:
	fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
	percpu_ref_exit(&wb->refcnt);
err_wb_exit:
	wb_exit(wb);
err_free:
	kfree(wb);
out_put:
	css_put(blkcg_css);
	return ret;
}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  The returned wb has its refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (!memcg_css->parent)
		return &bdi->wb;

	do {
		rcu_read_lock();
		wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
		if (wb) {
			struct cgroup_subsys_state *blkcg_css;

			/* see whether the blkcg association has changed */
			blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
						     &io_cgrp_subsys);
			if (unlikely(wb->blkcg_css != blkcg_css ||
				     !wb_tryget(wb)))
				wb = NULL;
			css_put(blkcg_css);
		}
		rcu_read_unlock();
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}
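
/*
 * Illustrative usage (not from the original source): callers pair
 * wb_get_create() with wb_put() once they are done issuing writeback, e.g.:
 *
 *	wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 *	if (wb) {
 *		... issue writeback against wb ...
 *		wb_put(wb);
 *	}
 */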

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
	bdi->cgwb_congested_tree = RB_ROOT;
	atomic_set(&bdi->usage_cnt, 1);

	ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
	if (!ret) {
		bdi->wb.memcg_css = &root_mem_cgroup->css;
		bdi->wb.blkcg_css = blkcg_root_css;
	}
	return ret;
}

static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
{
	struct radix_tree_iter iter;
	struct rb_node *rbn;
	void **slot;

	WARN_ON(test_bit(WB_registered, &bdi->wb.state));

	spin_lock_irq(&cgwb_lock);

	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
		cgwb_kill(*slot);

	while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
		struct bdi_writeback_congested *congested =
			rb_entry(rbn, struct bdi_writeback_congested, rb_node);

		rb_erase(rbn, &bdi->cgwb_congested_tree);
		congested->bdi = NULL;	/* mark @congested unlinked */
	}

	spin_unlock_irq(&cgwb_lock);

	/*
	 * All cgwb's and their congested states must be shutdown and
	 * released before returning.  Drain the usage counter to wait for
	 * all cgwb's and cgwb_congested's ever created on @bdi.
	 */
	atomic_dec(&bdi->usage_cnt);
	wait_event(cgwb_release_wait, !atomic_read(&bdi->usage_cnt));
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
	LIST_HEAD(to_destroy);
	struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
		cgwb_kill(wb);
	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
	LIST_HEAD(to_destroy);
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
		cgwb_kill(wb);
	blkcg->cgwb_list.next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int err;

	bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
	if (!bdi->wb_congested)
		return -ENOMEM;

	err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
	if (err) {
		kfree(bdi->wb_congested);
		return err;
	}
	return 0;
}

static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }

#endif	/* CONFIG_CGROUP_WRITEBACK */

int bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	bdi->dev = NULL;

	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	init_waitqueue_head(&bdi->wb_waitq);

	ret = cgwb_bdi_init(bdi);

	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);

	return ret;
}
EXPORT_SYMBOL(bdi_init);

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...)
{
	va_list args;
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	va_start(args, fmt);
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
	va_end(args);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	bdi->dev = dev;

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(WB_registered, &bdi->wb.state);

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);

int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
{
	int rc;

	rc = bdi_register(bdi, NULL, "%u:%u", MAJOR(owner->devt),
			MINOR(owner->devt));
	if (rc)
		return rc;
	bdi->owner = owner;
	get_device(owner);
	return 0;
}
EXPORT_SYMBOL(bdi_register_owner);

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	/* make sure nobody finds us on the bdi_list anymore */
	bdi_remove_from_list(bdi);
	wb_shutdown(&bdi->wb);
	cgwb_bdi_destroy(bdi);

	if (bdi->dev) {
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}

	if (bdi->owner) {
		put_device(bdi->owner);
		bdi->owner = NULL;
	}
}

void bdi_exit(struct backing_dev_info *bdi)
{
	WARN_ON_ONCE(bdi->dev);
	wb_exit(&bdi->wb);
}

void bdi_destroy(struct backing_dev_info *bdi)
{
	bdi_unregister(bdi);
	bdi_exit(bdi);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
{
	int err;

	bdi->name = name;
	bdi->capabilities = 0;
	err = bdi_init(bdi);
	if (err)
		return err;

	err = bdi_register(bdi, NULL, "%.28s-%ld", name,
			   atomic_long_inc_return(&bdi_seq));
	if (err) {
		bdi_destroy(bdi);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);
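
/*
 * Illustrative usage (not from the original source; "examplefs" and sbi are
 * hypothetical): a filesystem embedding a backing_dev_info in its private
 * sb_info might do, at mount time:
 *
 *	err = bdi_setup_and_register(&sbi->bdi, "examplefs");
 *	if (err)
 *		return err;
 *	sb->s_bdi = &sbi->bdi;
 *
 * and call bdi_destroy(&sbi->bdi) on teardown.
 */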

static wait_queue_head_t congestion_wqh[2] = {
	__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
	__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
};
static atomic_t nr_wb_congested[2];

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	wait_queue_head_t *wqh = &congestion_wqh[sync];
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (test_and_clear_bit(bit, &congested->state))
		atomic_dec(&nr_wb_congested[sync]);
	smp_mb__after_atomic();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_wb_congested);

void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (!test_and_set_bit(bit, &congested->state))
		atomic_inc(&nr_wb_congested[sync]);
}
EXPORT_SYMBOL(set_wb_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(congestion_wait);
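
/*
 * Illustrative usage (not from the original source; too_much_dirty_io() is
 * a made-up placeholder): reclaim-style callers throttle on congestion like
 * this, where BLK_RW_ASYNC selects the async queue (index 0):
 *
 *	if (too_much_dirty_io())
 *		congestion_wait(BLK_RW_ASYNC, HZ/10);
 */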

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
 * @pgdat: A pgdat to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * If a backing_dev (any backing_dev) is congested and the given @pgdat has
 * experienced recent congestion, this waits for up to @timeout jiffies for
 * either a BDI to exit congestion of the given @sync queue or a write to
 * complete.
 *
 * In the absence of pgdat congestion, the function calls cond_resched() to
 * yield the processor if necessary but otherwise does not sleep.
 *
 * The return value is 0 if the sleep is for the full timeout.  Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned.  return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, or heavy congestion is not being
	 * encountered in the current pgdat, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_wb_congested[sync]) == 0 ||
	    !test_bit(PGDAT_CONGESTED, &pgdat->flags)) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					   jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(wait_iff_congested);

int pdflush_proc_obsolete(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char kbuf[] = "0\n";

	if (*ppos || *lenp < sizeof(kbuf)) {
		*lenp = 0;
		return 0;
	}

	if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
		return -EFAULT;
	pr_warn_once("%s exported in /proc is scheduled for removal\n",
		     table->procname);

	*lenp = 2;
	*ppos += *lenp;
	return 2;
}