/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/backing-dev-defs.h>

int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
void wb_workfn(struct work_struct *work);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
void wb_wakeup_delayed(struct bdi_writeback *wb);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
	return !list_empty(&wb->b_dirty) ||
	       !list_empty(&wb->b_io) ||
	       !list_empty(&wb->b_more_io);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void __inc_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline void __dec_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
				enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __wb_stat_sum(wb, item);
	local_irq_restore(flags);

	return sum;
}
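
/*
 * Example (illustrative sketch, not part of this header): the helpers above
 * maintain approximate per-writeback statistics on top of percpu counters.
 * WB_WRITEBACK comes from <linux/backing-dev-defs.h>; "limit" and
 * throttle_writeout() are hypothetical.  A caller accounting a page entering
 * and leaving writeback might do:
 *
 *	inc_wb_stat(wb, WB_WRITEBACK);
 *	...
 *	dec_wb_stat(wb, WB_WRITEBACK);
 *
 *	if (wb_stat(wb, WB_WRITEBACK) > limit)
 *		throttle_writeout();
 *
 * wb_stat() is a cheap, possibly stale per-cpu read; wb_stat_sum() folds in
 * every CPU and is exact but more expensive.
 */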

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}
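
/*
 * Example (illustrative sketch): because wb_stat() can lag the true value by
 * up to wb_stat_error(), threshold comparisons usually leave that much slack
 * before paying for an exact wb_stat_sum().  "wb_thresh" and "reclaimable"
 * are hypothetical caller-side values:
 *
 *	if (wb_stat(wb, WB_RECLAIMABLE) > wb_thresh - wb_stat_error(wb))
 *		reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
 */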

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs)
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
 */
#define BDI_CAP_NO_ACCT_DIRTY		0x00000001
#define BDI_CAP_NO_WRITEBACK		0x00000002
#define BDI_CAP_NO_ACCT_WB		0x00000004
#define BDI_CAP_STABLE_WRITES		0x00000008
#define BDI_CAP_STRICTLIMIT		0x00000010
#define BDI_CAP_CGROUP_WRITEBACK	0x00000020

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
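
/*
 * Example (illustrative sketch): a hypothetical backing_dev_info for a purely
 * in-memory filesystem, whose pages are never written back and should not be
 * counted against the dirty limits.  "example_bdi" is made up; the in-tree
 * noop_backing_dev_info (declared below) is set up along these lines:
 *
 *	static struct backing_dev_info example_bdi = {
 *		.name		= "example",
 *		.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
 *	};
 *
 * A device that wants cgroup-aware writeback ORs in BDI_CAP_CGROUP_WRITEBACK
 * instead; see inode_cgwb_enabled() below for the filesystem side.
 */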

extern struct backing_dev_info noop_backing_dev_info;

int writeback_in_progress(struct backing_dev_info *bdi);

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return blk_get_backing_dev_info(I_BDEV(inode));
#endif
	return sb->s_bdi;
}

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, bdi_bits);
	return (bdi->wb.congested->state & bdi_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}
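
/*
 * Example (illustrative sketch): callers that can defer optional I/O, such as
 * readahead or opportunistic background cleaning, typically back off while
 * the device is congested:
 *
 *	if (bdi_write_congested(inode_to_bdi(inode)))
 *		return;
 *
 * i.e. skip the optional work for now and retry later.
 */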

long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos);
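
/*
 * Example (illustrative sketch): a throttling loop in the style of the mm/
 * and filesystem callers of congestion_wait().  "more_work" and "do_work"
 * are hypothetical; BLK_RW_ASYNC and HZ are standard kernel symbols:
 *
 *	while (more_work()) {
 *		if (bdi_write_congested(bdi))
 *			congestion_wait(BLK_RW_ASYNC, HZ / 10);
 *		do_work();
 *	}
 */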

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}
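
/*
 * Example (illustrative sketch): these mapping_cap_* predicates are what the
 * VM consults before doing dirty accounting or starting writeback.  A
 * simplified, hypothetical page-dirtying path might look like:
 *
 *	if (mapping_cap_account_dirty(mapping))
 *		inc_wb_stat(&inode_to_bdi(mapping->host)->wb, WB_RECLAIMABLE);
 *
 * whereas a ramfs-style mapping skips the accounting entirely.
 */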

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Test whether @inode has both.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_type->fs_flags & FS_CGROUP_WRITEBACK);
}
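
/*
 * Example (illustrative sketch): for inode_cgwb_enabled() to return true the
 * filesystem must opt in on both sides.  "examplefs" is made up:
 *
 *	static struct file_system_type examplefs_fs_type = {
 *		.name		= "examplefs",
 *		.fs_flags	= FS_REQUIRES_DEV | FS_CGROUP_WRITEBACK,
 *	};
 *
 * and its bdi must advertise BDI_CAP_CGROUP_WRITEBACK without
 * BDI_CAP_NO_ACCT_DIRTY, since cgroup writeback depends on per-bdi dirty
 * accounting.
 */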

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

#endif	/* _LINUX_BACKING_DEV_H */