#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

struct backing_dev_info noop_backing_dev_info = {
	.name = "noop",
	.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long wb_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_io_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
		nr_more_io++;
	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
		if (inode->i_state & I_DIRTY_TIME)
			nr_dirty_time++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	wb_thresh = wb_calc_thresh(wb, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "b_dirty_time:       %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
		   K(wb_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
		   (unsigned long) K(wb->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   nr_dirty_time,
		   !list_empty(&bdi->bdi_list), bdi->wb.state);
#undef K

	return 0;
}

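/*
 * Example (illustrative, values made up): for a registered bdi named
 * "8:0", reading /sys/kernel/debug/bdi/8:0/stats produces output shaped
 * by the format string above, e.g.:
 *
 *	BdiWriteback:            768 kB
 *	BdiReclaimable:        12416 kB
 *	BdiDirtyThresh:       102400 kB
 *	...
 *	b_dirty:                  13
 *	state:                     5
 */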
static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open = bdi_debug_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

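/*
 * Example (illustrative): with the common PAGE_SHIFT of 12 (4 KiB pages),
 * PAGE_SHIFT - 10 == 2, so K() converts pages to kilobytes by multiplying
 * by 4, and read_ahead_kb_store() above divides by 4 going the other way:
 *
 *	K(32)       == 32 << 2 == 128	(32 pages -> 128 kB)
 *	512 >> 2    == 128		(512 kB   -> 128 pages)
 */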
#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}									\
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

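/*
 * For reference, BDI_SHOW(read_ahead_kb, K(bdi->ra_pages)) above expands
 * to roughly the following (a sketch of the preprocessor output):
 *
 *	static ssize_t read_ahead_kb_show(struct device *dev,
 *			struct device_attribute *attr, char *page)
 *	{
 *		struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *		return snprintf(page, PAGE_SIZE-1, "%lld\n",
 *				(long long)K(bdi->ra_pages));
 *	}
 *	static DEVICE_ATTR_RW(read_ahead_kb);
 *
 * DEVICE_ATTR_RW(read_ahead_kb) pairs this _show with the
 * read_ahead_kb_store() defined earlier, making the sysfs file writable.
 */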
static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n",
			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_stable_pages_required.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_groups = bdi_dev_groups;
	bdi_debug_init();

	return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
	int err;

	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
					      WQ_UNBOUND | WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;

	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread, which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer and would wake the thread
 * immediately, but this function is on the fast path (used by
 * '__mark_inode_dirty()'), so we save a few context switches by delaying
 * the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work(), which leaves the timer of an
 * already-pending work item alone.
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
	spin_unlock_bh(&wb->work_lock);
}

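/*
 * Example (illustrative): dirty_writeback_interval defaults to 500
 * centisecs, so the computed delay is
 *
 *	msecs_to_jiffies(500 * 10) == msecs_to_jiffies(5000)
 *
 * i.e. the flusher is woken roughly five seconds after the first inode is
 * dirtied (5000 jiffies at HZ=1000, 1250 at HZ=250).
 */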
/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))

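/*
 * Example (illustrative): INIT_BW is expressed in pages per second. With
 * 4 KiB pages (PAGE_SHIFT == 12):
 *
 *	100 << (20 - 12) == 100 << 8 == 25600 pages/s
 *
 * and 25600 pages * 4 kB == 102400 kB == 100 MB/s, matching the comment
 * above.
 */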
static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   int blkcg_id, gfp_t gfp)
{
	int i, err;

	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	INIT_LIST_HEAD(&wb->b_dirty_time);
	spin_lock_init(&wb->list_lock);

	wb->bw_time_stamp = jiffies;
	wb->balanced_dirty_ratelimit = INIT_BW;
	wb->dirty_ratelimit = INIT_BW;
	wb->write_bandwidth = INIT_BW;
	wb->avg_write_bandwidth = INIT_BW;

	spin_lock_init(&wb->work_lock);
	INIT_LIST_HEAD(&wb->work_list);
	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
	wb->dirty_sleep = jiffies;

	wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
	if (!wb->congested)
		return -ENOMEM;

	err = fprop_local_init_percpu(&wb->completions, gfp);
	if (err)
		goto out_put_cong;

	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
		err = percpu_counter_init(&wb->stat[i], 0, gfp);
		if (err)
			goto out_destroy_stat;
	}

	return 0;

out_destroy_stat:
	while (i--)
		percpu_counter_destroy(&wb->stat[i]);
	fprop_local_destroy_percpu(&wb->completions);
out_put_cong:
	wb_congested_put(wb->congested);
	return err;
}

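/*
 * Note on the unwind above: if percpu_counter_init() fails at index i,
 * counters 0..i-1 have been initialized and must be torn down, which is
 * exactly what "while (i--)" walks. E.g. a failure at i == 2 destroys
 * stat[1] then stat[0], then falls through to the earlier cleanup steps
 * in reverse order of initialization.
 */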
/*
 * Shut down a wb: make sure no further work can be queued on it and drain
 * any writeback work that is still pending.
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_bh(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_bh(&wb->work_lock);
		return;
	}
	spin_unlock_bh(&wb->work_lock);

	/*
	 * Drain work list and shutdown the delayed_work. !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));
}

static void wb_exit(struct bdi_writeback *wb)
{
	int i;

	WARN_ON(delayed_work_pending(&wb->dwork));

	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
		percpu_counter_destroy(&wb->stat[i]);

	fprop_local_destroy_percpu(&wb->completions);
	wb_congested_put(wb->congested);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
 * blkcg->cgwb_list, and memcg->cgwb_list. bdi->cgwb_tree is also RCU
 * protected. cgwb_release_wait is used to wait for the completion of cgwb
 * releases from bdi destruction path.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static DECLARE_WAIT_QUEUE_HEAD(cgwb_release_wait);

/**
 * wb_congested_get_create - get or create a wb_congested
 * @bdi: associated bdi
 * @blkcg_id: ID of the associated blkcg
 * @gfp: allocation mask
 *
 * Look up the wb_congested for @blkcg_id on @bdi. If missing, create one.
 * The returned wb_congested has its reference count incremented. Returns
 * NULL on failure.
 */
struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	struct bdi_writeback_congested *new_congested = NULL, *congested;
	struct rb_node **node, *parent;
	unsigned long flags;
retry:
	spin_lock_irqsave(&cgwb_lock, flags);

	node = &bdi->cgwb_congested_tree.rb_node;
	parent = NULL;

	while (*node != NULL) {
		parent = *node;
		congested = rb_entry(parent, struct bdi_writeback_congested,
				     rb_node);
		if (congested->blkcg_id < blkcg_id)
			node = &parent->rb_left;
		else if (congested->blkcg_id > blkcg_id)
			node = &parent->rb_right;
		else
			goto found;
	}

	if (new_congested) {
		/* !found and storage for new one already allocated, insert */
		congested = new_congested;
		new_congested = NULL;
		rb_link_node(&congested->rb_node, parent, node);
		rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
		goto found;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);

	/* allocate storage for new one and retry */
	new_congested = kzalloc(sizeof(*new_congested), gfp);
	if (!new_congested)
		return NULL;

	atomic_set(&new_congested->refcnt, 0);
	new_congested->bdi = bdi;
	new_congested->blkcg_id = blkcg_id;
	goto retry;

found:
	atomic_inc(&congested->refcnt);
	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(new_congested);
	return congested;
}

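/*
 * The function above uses the classic "allocate outside the lock and
 * retry" pattern: the tree is searched under cgwb_lock; on a miss the
 * lock is dropped, a node is allocated (possibly sleeping, depending on
 * @gfp), and the search is redone, since another CPU may have inserted
 * the same blkcg_id in the meantime. Whichever copy loses the race is
 * freed by the kfree(new_congested) at "found:" (kfree(NULL) is a no-op).
 */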
/**
 * wb_congested_put - put a wb_congested
 * @congested: wb_congested to put
 *
 * Put @congested and destroy it if the refcnt reaches zero.
 */
void wb_congested_put(struct bdi_writeback_congested *congested)
{
	unsigned long flags;

	local_irq_save(flags);
	if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
		local_irq_restore(flags);
		return;
	}

	/* bdi might already have been destroyed leaving @congested unlinked */
	if (congested->bdi) {
		rb_erase(&congested->rb_node,
			 &congested->bdi->cgwb_congested_tree);
		congested->bdi = NULL;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(congested);
}

static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);
	struct backing_dev_info *bdi = wb->bdi;

	spin_lock_irq(&cgwb_lock);
	list_del_rcu(&wb->bdi_node);
	spin_unlock_irq(&cgwb_lock);

	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);

	fprop_local_destroy_percpu(&wb->memcg_completions);
	percpu_ref_exit(&wb->refcnt);
	wb_exit(wb);
	kfree_rcu(wb, rcu);

	if (atomic_dec_and_test(&bdi->usage_cnt))
		wake_up_all(&cgwb_release_wait);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
						refcnt);
	schedule_work(&wb->release_work);
}

static void cgwb_kill(struct bdi_writeback *wb)
{
	lockdep_assert_held(&cgwb_lock);

	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
	list_del(&wb->memcg_node);
	list_del(&wb->blkcg_node);
	percpu_ref_kill(&wb->refcnt);
}

static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct cgroup_subsys_state *blkcg_css;
	struct blkcg *blkcg;
	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
	struct bdi_writeback *wb;
	unsigned long flags;
	int ret = 0;

	memcg = mem_cgroup_from_css(memcg_css);
	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
	blkcg = css_to_blkcg(blkcg_css);
	memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
	blkcg_cgwb_list = &blkcg->cgwb_list;

	/* look up again under lock and discard on blkcg mismatch */
	spin_lock_irqsave(&cgwb_lock, flags);
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb && wb->blkcg_css != blkcg_css) {
		cgwb_kill(wb);
		wb = NULL;
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (wb)
		goto out_put;

	/* need to create a new one */
	wb = kmalloc(sizeof(*wb), gfp);
	if (!wb)
		return -ENOMEM;

	ret = wb_init(wb, bdi, blkcg_css->id, gfp);
	if (ret)
		goto err_free;

	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
	if (ret)
		goto err_wb_exit;

	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
	if (ret)
		goto err_ref_exit;

	wb->memcg_css = memcg_css;
	wb->blkcg_css = blkcg_css;
	INIT_WORK(&wb->release_work, cgwb_release_workfn);
	set_bit(WB_registered, &wb->state);

	/*
	 * The root wb determines the registered state of the whole bdi and
	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
	 * whether they're still online. Don't link @wb if any is dead.
	 * See wb_memcg_offline() and wb_blkcg_offline().
	 */
	ret = -ENODEV;
	spin_lock_irqsave(&cgwb_lock, flags);
	if (test_bit(WB_registered, &bdi->wb.state) &&
	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
		/* we might have raced another instance of this function */
		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
		if (!ret) {
			atomic_inc(&bdi->usage_cnt);
			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
			list_add(&wb->memcg_node, memcg_cgwb_list);
			list_add(&wb->blkcg_node, blkcg_cgwb_list);
			css_get(memcg_css);
			css_get(blkcg_css);
		}
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (ret) {
		if (ret == -EEXIST)
			ret = 0;
		goto err_fprop_exit;
	}
	goto out_put;

err_fprop_exit:
	fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
	percpu_ref_exit(&wb->refcnt);
err_wb_exit:
	wb_exit(wb);
err_free:
	kfree(wb);
out_put:
	css_put(blkcg_css);
	return ret;
}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi. If it doesn't exist, try to
 * create one. The returned wb has its refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation. IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough. try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg. As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup. On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (!memcg_css->parent)
		return &bdi->wb;

	do {
		rcu_read_lock();
		wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
		if (wb) {
			struct cgroup_subsys_state *blkcg_css;

			/* see whether the blkcg association has changed */
			blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
						     &io_cgrp_subsys);
			if (unlikely(wb->blkcg_css != blkcg_css ||
				     !wb_tryget(wb)))
				wb = NULL;
			css_put(blkcg_css);
		}
		rcu_read_unlock();
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}

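/*
 * Minimal usage sketch (illustrative, not taken from this file): a caller
 * that already holds a reference on the memcg css looks up the matching
 * wb, uses it, and drops both references. wb_tryget()/wb_put() come from
 * backing-dev.h.
 *
 *	struct bdi_writeback *wb;
 *
 *	css_get(memcg_css);			// caller must hold a ref
 *	wb = wb_get_create(bdi, memcg_css, GFP_NOIO);
 *	if (wb) {
 *		// ... issue writeback against wb ...
 *		wb_put(wb);
 *	}
 *	css_put(memcg_css);
 */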
static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
	bdi->cgwb_congested_tree = RB_ROOT;
	atomic_set(&bdi->usage_cnt, 1);

	ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
	if (!ret) {
		bdi->wb.memcg_css = &root_mem_cgroup->css;
		bdi->wb.blkcg_css = blkcg_root_css;
	}
	return ret;
}

static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
{
	struct radix_tree_iter iter;
	struct rb_node *rbn;
	void **slot;

	WARN_ON(test_bit(WB_registered, &bdi->wb.state));

	spin_lock_irq(&cgwb_lock);

	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
		cgwb_kill(*slot);

	while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
		struct bdi_writeback_congested *congested =
			rb_entry(rbn, struct bdi_writeback_congested, rb_node);

		rb_erase(rbn, &bdi->cgwb_congested_tree);
		congested->bdi = NULL;	/* mark @congested unlinked */
	}

	spin_unlock_irq(&cgwb_lock);

	/*
	 * All cgwb's and their congested states must be shutdown and
	 * released before returning. Drain the usage counter to wait for
	 * all cgwb's and cgwb_congested's ever created on @bdi.
	 */
	atomic_dec(&bdi->usage_cnt);
	wait_event(cgwb_release_wait, !atomic_read(&bdi->usage_cnt));
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
	LIST_HEAD(to_destroy);
	struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
		cgwb_kill(wb);
	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
	LIST_HEAD(to_destroy);
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
		cgwb_kill(wb);
	blkcg->cgwb_list.next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int err;

	bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
	if (!bdi->wb_congested)
		return -ENOMEM;

	atomic_set(&bdi->wb_congested->refcnt, 1);

	err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
	if (err) {
		wb_congested_put(bdi->wb_congested);
		return err;
	}
	return 0;
}

static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
{
	wb_congested_put(bdi->wb_congested);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

int bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	bdi->dev = NULL;

	kref_init(&bdi->refcnt);
	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	init_waitqueue_head(&bdi->wb_waitq);

	ret = cgwb_bdi_init(bdi);

	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);

	return ret;
}
EXPORT_SYMBOL(bdi_init);

struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
{
	struct backing_dev_info *bdi;

	bdi = kmalloc_node(sizeof(struct backing_dev_info),
			   gfp_mask | __GFP_ZERO, node_id);
	if (!bdi)
		return NULL;

	if (bdi_init(bdi)) {
		kfree(bdi);
		return NULL;
	}
	return bdi;
}

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		 const char *fmt, ...)
{
	va_list args;
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	va_start(args, fmt);
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
	va_end(args);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	bdi->dev = dev;

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(WB_registered, &bdi->wb.state);

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);

int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
{
	int rc;

	rc = bdi_register(bdi, NULL, "%u:%u", MAJOR(owner->devt),
			  MINOR(owner->devt));
	if (rc)
		return rc;
	bdi->owner = owner;
	get_device(owner);
	return 0;
}
EXPORT_SYMBOL(bdi_register_owner);

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	/* make sure nobody finds us on the bdi_list anymore */
	bdi_remove_from_list(bdi);
	wb_shutdown(&bdi->wb);
	cgwb_bdi_destroy(bdi);

	if (bdi->dev) {
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}

	if (bdi->owner) {
		put_device(bdi->owner);
		bdi->owner = NULL;
	}
}

static void bdi_exit(struct backing_dev_info *bdi)
{
	WARN_ON_ONCE(bdi->dev);
	wb_exit(&bdi->wb);
}

static void release_bdi(struct kref *ref)
{
	struct backing_dev_info *bdi =
		container_of(ref, struct backing_dev_info, refcnt);

	bdi_exit(bdi);
	kfree(bdi);
}

void bdi_put(struct backing_dev_info *bdi)
{
	kref_put(&bdi->refcnt, release_bdi);
}

void bdi_destroy(struct backing_dev_info *bdi)
{
	bdi_unregister(bdi);
	bdi_exit(bdi);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
{
	int err;

	bdi->name = name;
	bdi->capabilities = 0;
	err = bdi_init(bdi);
	if (err)
		return err;

	err = bdi_register(bdi, NULL, "%.28s-%ld", name,
			   atomic_long_inc_return(&bdi_seq));
	if (err) {
		bdi_destroy(bdi);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);

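/*
 * Typical usage sketch (illustrative; "myfs" and the helper names are
 * made up): a simple filesystem embeds a bdi in its per-sb info,
 * registers it at mount time and points sb->s_bdi at it:
 *
 *	struct myfs_sb_info {
 *		struct backing_dev_info bdi;
 *		// ...
 *	};
 *
 *	static int myfs_fill_super(struct super_block *sb, void *data)
 *	{
 *		struct myfs_sb_info *sbi = sb->s_fs_info;
 *		int err;
 *
 *		err = bdi_setup_and_register(&sbi->bdi, "myfs");
 *		if (err)
 *			return err;
 *		sb->s_bdi = &sbi->bdi;
 *		// ...
 *	}
 *
 * with a matching bdi_destroy(&sbi->bdi) on the put_super/kill_sb path.
 */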
static wait_queue_head_t congestion_wqh[2] = {
	__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
	__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
};
static atomic_t nr_wb_congested[2];

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	wait_queue_head_t *wqh = &congestion_wqh[sync];
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (test_and_clear_bit(bit, &congested->state))
		atomic_dec(&nr_wb_congested[sync]);
	smp_mb__after_atomic();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_wb_congested);

void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (!test_and_set_bit(bit, &congested->state))
		atomic_inc(&nr_wb_congested[sync]);
}
EXPORT_SYMBOL(set_wb_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion. If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(congestion_wait);

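/*
 * Usage sketch (illustrative): a reclaim or allocation retry loop backs
 * off while writeback is congested. The BLK_RW_ASYNC index and a short
 * (~20 ms) wait are the common pattern among callers of this interface;
 * my_alloc_page() below is a hypothetical helper:
 *
 *	while (!(page = my_alloc_page(...))) {
 *		if (too_many_retries)
 *			return -ENOMEM;
 *		congestion_wait(BLK_RW_ASYNC, HZ/50);
 *	}
 */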
/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
 * @pgdat: A pgdat to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * If a backing_dev (any backing_dev) is congested and the given @pgdat has
 * experienced recent congestion, this waits for up to @timeout jiffies for
 * either a BDI to exit congestion of the given @sync queue or for a write
 * to complete.
 *
 * In the absence of pgdat congestion, cond_resched() is called to yield
 * the processor if necessary, but the function otherwise does not sleep.
 *
 * The return value is 0 if the sleep lasted the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, or heavy congestion is not being
	 * encountered in the current pgdat, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_wb_congested[sync]) == 0 ||
	    !test_bit(PGDAT_CONGESTED, &pgdat->flags)) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					   jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(wait_iff_congested);

int pdflush_proc_obsolete(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char kbuf[] = "0\n";

	if (*ppos || *lenp < sizeof(kbuf)) {
		*lenp = 0;
		return 0;
	}

	if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
		return -EFAULT;
	pr_warn_once("%s exported in /proc is scheduled for removal\n",
		     table->procname);

	*lenp = 2;
	*ppos += *lenp;
	return 2;
}
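
/*
 * Illustrative behaviour: pdflush was replaced by per-bdi flusher
 * threads, so reading an obsolete knob wired to this handler (e.g.
 * /proc/sys/vm/nr_pdflush_threads) always returns "0\n" and logs the
 * one-time removal warning above.
 */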