/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>

int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs. See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void __inc_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline void __dec_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
				enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __wb_stat_sum(wb, item);
	local_irq_restore(flags);

	return sum;
}

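/*
 * Example (illustrative sketch, not kernel code): a writeback path would
 * typically bump these counters around page submission, e.g.
 *
 *	inc_wb_stat(wb, WB_WRITEBACK);		// page handed to the device
 *	...
 *	dec_wb_stat(wb, WB_WRITEBACK);		// on IO completion
 *	if (wb_stat(wb, WB_RECLAIMABLE) > thresh)
 *		;				// throttle the dirtier
 *
 * WB_WRITEBACK and WB_RECLAIMABLE are enum wb_stat_item values from
 * backing-dev-defs.h; "thresh" is a made-up threshold for illustration.
 */
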
extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}

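/*
 * Example (sketch): callers comparing an approximate wb_stat() value
 * against a limit can use wb_stat_error() to decide when the cheap read
 * is good enough and when the exact wb_stat_sum() is needed:
 *
 *	if (wb_stat(wb, WB_RECLAIMABLE) < limit - wb_stat_error(wb))
 *		return;				// clearly under the limit
 *	nr = wb_stat_sum(wb, WB_RECLAIMABLE);	// exact count near the edge
 *
 * "limit" and "nr" are illustrative; the dirty throttling code uses a
 * pattern along these lines.
 */
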
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately. BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK: Don't write pages back
 * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages
 * BDI_CAP_STABLE_WRITES: Pages must not be modified while under writeback
 * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_NO_ACCT_WB	0x00000004
#define BDI_CAP_STABLE_WRITES	0x00000008
#define BDI_CAP_STRICTLIMIT	0x00000010
#define BDI_CAP_CGROUP_WRITEBACK	0x00000020

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

extern struct backing_dev_info noop_backing_dev_info;

int writeback_in_progress(struct backing_dev_info *bdi);

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return blk_get_backing_dev_info(I_BDEV(inode));
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	struct backing_dev_info *bdi = wb->bdi;

	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, cong_bits);
	return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void __inode_attach_wb(struct inode *inode, struct page *page);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Test whether @inode has both.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_type->fs_flags & FS_CGROUP_WRITEBACK);
}

/**
 * wb_tryget - try to increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		return percpu_ref_tryget(&wb->refcnt);
	return true;
}

/**
 * wb_get - increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline void wb_get(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_get(&wb->refcnt);
}

/**
 * wb_put - decrement a wb's refcount
 * @wb: bdi_writeback to put
 */
static inline void wb_put(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_put(&wb->refcnt);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg. No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, blkio_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg. This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

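/*
 * Example (sketch): the returned wb carries a reference which the caller
 * is expected to drop with wb_put() when done, e.g.
 *
 *	wb = wb_get_create_current(bdi, GFP_ATOMIC);
 *	if (wb) {
 *		...			// account the dirtying against wb
 *		wb_put(wb);
 *	}
 */
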
/**
 * inode_attach_wb - associate an inode with its wb
 * @inode: inode of interest
 * @page: page being dirtied (may be NULL)
 *
 * If @inode doesn't have its wb, associate it with the wb matching the
 * memcg of @page or, if @page is NULL, %current. May be called w/ or w/o
 * @inode->i_lock.
 */
static inline void inode_attach_wb(struct inode *inode, struct page *page)
{
	if (!inode->i_wb)
		__inode_attach_wb(inode, page);
}

/**
 * inode_detach_wb - disassociate an inode from its wb
 * @inode: inode of interest
 *
 * @inode is being freed. Detach from its wb.
 */
static inline void inode_detach_wb(struct inode *inode)
{
	if (inode->i_wb) {
		wb_put(inode->i_wb);
		inode->i_wb = NULL;
	}
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.
 */
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return inode->i_wb;
}

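/*
 * Example (sketch): the usual life cycle of the inode<->wb association,
 * illustrative only - locking rules depend on the caller's context:
 *
 *	inode_attach_wb(inode, page);	// when a page is first dirtied
 *	wb = inode_to_wb(inode);	// while writing the inode back
 *	inode_detach_wb(inode);		// from the inode teardown path
 */
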
struct wb_iter {
	int start_blkcg_id;
	struct radix_tree_iter tree_iter;
	void **slot;
};

static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
						   struct backing_dev_info *bdi)
{
	struct radix_tree_iter *titer = &iter->tree_iter;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (iter->start_blkcg_id >= 0) {
		iter->slot = radix_tree_iter_init(titer, iter->start_blkcg_id);
		iter->start_blkcg_id = -1;
	} else {
		iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
	}

	if (!iter->slot)
		iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0);
	if (iter->slot)
		return *iter->slot;
	return NULL;
}

static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
						   struct backing_dev_info *bdi,
						   int start_blkcg_id)
{
	iter->start_blkcg_id = start_blkcg_id;

	if (start_blkcg_id)
		return __wb_iter_next(iter, bdi);
	else
		return &bdi->wb;
}

/**
 * bdi_for_each_wb - walk all wb's of a bdi in ascending blkcg ID order
 * @wb_cur: cursor struct bdi_writeback pointer
 * @bdi: bdi to walk wb's of
 * @iter: pointer to struct wb_iter to be used as iteration buffer
 * @start_blkcg_id: blkcg ID to start iteration from
 *
 * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
 * blkcg ID order starting from @start_blkcg_id. @iter is struct wb_iter
 * to be used as temp storage during iteration. rcu_read_lock() must be
 * held throughout iteration.
 */
#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id)		\
	for ((wb_cur) = __wb_iter_init(iter, bdi, start_blkcg_id);	\
	     (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))

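/*
 * Example (sketch): walk every wb of a bdi under RCU and kick the ones
 * with dirty IO; illustrative only:
 *
 *	struct bdi_writeback *wb;
 *	struct wb_iter iter;
 *
 *	rcu_read_lock();
 *	bdi_for_each_wb(wb, bdi, &iter, 0)
 *		if (wb_has_dirty_io(wb))
 *			wb_wakeup_delayed(wb);
 *	rcu_read_unlock();
 */
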
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	return bdi->wb.congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
}

static inline bool wb_tryget(struct bdi_writeback *wb)
{
	return true;
}

static inline void wb_get(struct bdi_writeback *wb)
{
}

static inline void wb_put(struct bdi_writeback *wb)
{
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline void inode_attach_wb(struct inode *inode, struct page *page)
{
}

static inline void inode_detach_wb(struct inode *inode)
{
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

struct wb_iter {
	int next_id;
};

#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id)		\
	for ((iter)->next_id = (start_blkcg_id);			\
	     ({ (wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); )

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}

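/*
 * Example (sketch): callers typically use these predicates to back off
 * when the device is busy, e.g.
 *
 *	if (inode_write_congested(inode))
 *		return;				// skip opportunistic writeback
 *	if (bdi_read_congested(bdi))
 *		congestion_wait(BLK_RW_ASYNC, HZ / 10);
 *
 * BLK_RW_ASYNC and HZ are standard kernel symbols; the exact backoff
 * policy is up to the caller.
 */
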
#endif	/* _LINUX_BACKING_DEV_H */