/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <asm/atomic.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
        BDI_pending,            /* On its way to being activated */
        BDI_wb_alloc,           /* Default embedded wb allocated */
        BDI_async_congested,    /* The async (write) queue is getting full */
        BDI_sync_congested,     /* The sync queue is getting full */
        BDI_registered,         /* bdi_register() was done */
        BDI_unused,             /* Available bits start here */
};

typedef int (congested_fn)(void *, int);

enum bdi_stat_item {
        BDI_RECLAIMABLE,
        BDI_WRITEBACK,
        NR_BDI_STAT_ITEMS
};

#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

struct bdi_writeback {
        struct list_head list;                  /* hangs off the bdi */

        struct backing_dev_info *bdi;           /* our parent bdi */
        unsigned int nr;

        unsigned long last_old_flush;           /* last old data flush */

        struct task_struct *task;               /* writeback task */
        struct list_head b_dirty;               /* dirty inodes */
        struct list_head b_io;                  /* parked for writeback */
        struct list_head b_more_io;             /* parked for more writeback */
};

struct backing_dev_info {
        struct list_head bdi_list;
        struct rcu_head rcu_head;
        unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
        unsigned long state;    /* Always use atomic bitops on this */
        unsigned int capabilities; /* Device capabilities */
        congested_fn *congested_fn; /* Function pointer if device is md/dm */
        void *congested_data;   /* Pointer to aux data for congested func */
        void (*unplug_io_fn)(struct backing_dev_info *, struct page *);
        void *unplug_io_data;

        char *name;

        struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];

        struct prop_local_percpu completions;
        int dirty_exceeded;

        unsigned int min_ratio;
        unsigned int max_ratio, max_prop_frac;

        struct bdi_writeback wb;  /* default writeback info for this bdi */
        spinlock_t wb_lock;       /* protects update side of wb_list */
        struct list_head wb_list; /* the flusher threads hanging off this bdi */
        unsigned long wb_mask;    /* bitmask of registered tasks */
        unsigned int wb_cnt;      /* number of registered tasks */

        struct list_head work_list;

        struct device *dev;

#ifdef CONFIG_DEBUG_FS
        struct dentry *debug_dir;
        struct dentry *debug_stats;
#endif
};

int bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
                                long nr_pages);
int bdi_writeback_task(struct bdi_writeback *wb);
int bdi_has_dirty_io(struct backing_dev_info *bdi);

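/*
 * Example (illustrative sketch, not part of this header): a driver that
 * embeds a backing_dev_info typically runs it through the lifecycle
 * below.  "my_bdi" and "mydev" are hypothetical names; compare
 * default_backing_dev_info in mm/backing-dev.c for a real instance.
 *
 *      static struct backing_dev_info my_bdi = {
 *              .name           = "mydev",
 *              .capabilities   = BDI_CAP_MAP_COPY,
 *      };
 *
 *      err = bdi_init(&my_bdi);
 *      if (err)
 *              return err;
 *      err = bdi_register(&my_bdi, NULL, "mydev");
 *      if (err) {
 *              bdi_destroy(&my_bdi);
 *              return err;
 *      }
 *      ...
 *      bdi_unregister(&my_bdi);
 *      bdi_destroy(&my_bdi);
 */
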
extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
        return !list_empty(&wb->b_dirty) ||
               !list_empty(&wb->b_io) ||
               !list_empty(&wb->b_more_io);
}

static inline void __add_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item, s64 amount)
{
        __percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
}

static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        __add_bdi_stat(bdi, item, 1);
}

static inline void inc_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __inc_bdi_stat(bdi, item);
        local_irq_restore(flags);
}

static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        __add_bdi_stat(bdi, item, -1);
}

static inline void dec_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __dec_bdi_stat(bdi, item);
        local_irq_restore(flags);
}

static inline s64 bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        return percpu_counter_read_positive(&bdi->bdi_stat[item]);
}

static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
}

static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        s64 sum;
        unsigned long flags;

        local_irq_save(flags);
        sum = __bdi_stat_sum(bdi, item);
        local_irq_restore(flags);

        return sum;
}
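
/*
 * Example (illustrative only): the double-underscore variants assume the
 * caller already has interrupts disabled; the plain variants disable and
 * restore interrupts themselves.  A writeback path moving a page from
 * the reclaimable to the writeback state might do:
 *
 *      inc_bdi_stat(bdi, BDI_WRITEBACK);
 *      dec_bdi_stat(bdi, BDI_RECLAIMABLE);
 *
 * A cheap (possibly stale, per-CPU batched) readout is then:
 *
 *      s64 nr_wb = bdi_stat(bdi, BDI_WRITEBACK);
 *
 * while bdi_stat_sum() folds in every CPU's pending delta for an exact,
 * but more expensive, value.
 */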

extern void bdi_writeout_inc(struct backing_dev_info *bdi);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
{
#ifdef CONFIG_SMP
        return nr_cpu_ids * BDI_STAT_BATCH;
#else
        return 1;
#endif
}
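
/*
 * Worked example (hypothetical numbers): with nr_cpu_ids == 4,
 * BDI_STAT_BATCH is 8 * (1 + ilog2(4)) == 24, so each CPU may hold up
 * to a batch worth of uncommitted events and bdi_stat() can lag the
 * true value by as much as 4 * 24 == 96 on SMP.
 */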

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capabilities
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 *
 * These flags let !MMU mmap() govern direct device mapping vs immediate
 * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
 *
 * BDI_CAP_MAP_COPY:       Copy can be mapped (MAP_PRIVATE)
 * BDI_CAP_MAP_DIRECT:     Can be mapped directly (MAP_SHARED)
 * BDI_CAP_READ_MAP:       Can be mapped for reading
 * BDI_CAP_WRITE_MAP:      Can be mapped for writing
 * BDI_CAP_EXEC_MAP:       Can be mapped for execution
 *
 * BDI_CAP_SWAP_BACKED:    Count shmem/tmpfs objects as swap-backed.
 */
#define BDI_CAP_NO_ACCT_DIRTY   0x00000001
#define BDI_CAP_NO_WRITEBACK    0x00000002
#define BDI_CAP_MAP_COPY        0x00000004
#define BDI_CAP_MAP_DIRECT      0x00000008
#define BDI_CAP_READ_MAP        0x00000010
#define BDI_CAP_WRITE_MAP       0x00000020
#define BDI_CAP_EXEC_MAP        0x00000040
#define BDI_CAP_NO_ACCT_WB      0x00000080
#define BDI_CAP_SWAP_BACKED     0x00000100

#define BDI_CAP_VMFLAGS \
        (BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
        (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

#if defined(VM_MAYREAD) && \
        (BDI_CAP_READ_MAP != VM_MAYREAD || \
         BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
         BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif
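
/*
 * Example (illustrative only): a RAM-backed filesystem whose pages must
 * never be written back or dirty-accounted, with readahead disabled
 * (ra_pages == 0) but the full set of !MMU mappings allowed, would
 * declare its bdi roughly as below.  The name "myramfs" is
 * hypothetical; ramfs itself uses this flag combination.
 *
 *      static struct backing_dev_info myramfs_backing_dev_info = {
 *              .name           = "myramfs",
 *              .ra_pages       = 0,
 *              .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK |
 *                                BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY |
 *                                BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP |
 *                                BDI_CAP_EXEC_MAP,
 *      };
 */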

extern struct backing_dev_info default_backing_dev_info;
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page);

int writeback_in_progress(struct backing_dev_info *bdi);

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
        if (bdi->congested_fn)
                return bdi->congested_fn(bdi->congested_data, bdi_bits);
        return (bdi->state & bdi_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, 1 << BDI_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, 1 << BDI_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, (1 << BDI_sync_congested) |
                                  (1 << BDI_async_congested));
}

enum {
        BLK_RW_ASYNC    = 0,
        BLK_RW_SYNC     = 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);

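/*
 * Example (illustrative only): writers commonly back off while the
 * device's async queue is congested:
 *
 *      while (bdi_write_congested(bdi))
 *              congestion_wait(BLK_RW_ASYNC, HZ / 10);
 *
 * congestion_wait() sleeps until congestion on a queue of the given
 * kind clears or the timeout (here a tenth of a second, in jiffies)
 * expires, and returns the remaining jiffies.
 */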

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
        return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
        return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
        /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
        return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
                                      BDI_CAP_NO_WRITEBACK));
}

static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
{
        return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}

static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
{
        return bdi == &default_backing_dev_info;
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
        return bdi_cap_writeback_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
        return bdi_cap_account_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_swap_backed(struct address_space *mapping)
{
        return bdi_cap_swap_backed(mapping->backing_dev_info);
}
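
/*
 * Example (illustrative only): code that dirties a page typically gates
 * the VM's dirty accounting on the mapping's capabilities, roughly:
 *
 *      if (mapping_cap_account_dirty(mapping)) {
 *              inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
 *              ...
 *      }
 *
 * so that, for example, pages of a BDI_CAP_NO_ACCT_DIRTY backing device
 * (such as ramfs) never count against the dirty limits.
 */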

static inline int bdi_sched_wait(void *word)
{
        schedule();
        return 0;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
                                       struct page *page)
{
        if (bdi && bdi->unplug_io_fn)
                bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
        if (mapping)
                blk_run_backing_dev(mapping->backing_dev_info, NULL);
}
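
/*
 * Example (illustrative only): after submitting I/O and before sleeping
 * on its completion, a caller can kick the underlying queue so batched
 * requests are dispatched ("unplugged") instead of sitting idle:
 *
 *      blk_run_address_space(inode->i_mapping);
 *      wait_on_page_writeback(page);
 *
 * For a bare bdi, blk_run_backing_dev(bdi, NULL) invokes the device's
 * unplug_io_fn directly, if one is set.
 */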

#endif /* _LINUX_BACKING_DEV_H */