bdi: allow block devices to say that they require stable page writes
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/flex_proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/writeback.h>
#include <linux/atomic.h>
#include <linux/sysctl.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
	BDI_pending,		/* On its way to being activated */
	BDI_wb_alloc,		/* Default embedded wb allocated */
	BDI_async_congested,	/* The async (write) queue is getting full */
	BDI_sync_congested,	/* The sync queue is getting full */
	BDI_registered,		/* bdi_register() was done */
	BDI_writeback_running,	/* Writeback is in progress */
	BDI_unused,		/* Available bits start here */
};

typedef int (congested_fn)(void *, int);

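/*
 * Illustrative sketch (not part of the upstream header): a stacked driver
 * such as md/dm can supply its own congestion callback through
 * ->congested_fn/->congested_data.  The names my_dev, my_congested and
 * my_writes_backed_up below are hypothetical.
 *
 *	static int my_congested(void *data, int bdi_bits)
 *	{
 *		struct my_dev *dev = data;
 *		int ret = 0;
 *
 *		if ((bdi_bits & (1 << BDI_async_congested)) &&
 *		    my_writes_backed_up(dev))
 *			ret |= 1 << BDI_async_congested;
 *		return ret;
 *	}
 *
 *	dev->bdi.congested_fn   = my_congested;
 *	dev->bdi.congested_data = dev;
 */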
enum bdi_stat_item {
	BDI_RECLAIMABLE,
	BDI_WRITEBACK,
	BDI_DIRTIED,
	BDI_WRITTEN,
	NR_BDI_STAT_ITEMS
};

#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */
	unsigned int nr;

	unsigned long last_old_flush;	/* last old data flush */
	unsigned long last_active;	/* last time bdi thread was active */

	struct task_struct *task;	/* writeback thread */
	struct timer_list wakeup_timer; /* used for delayed bdi thread wakeup */
	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	spinlock_t list_lock;		/* protects the b_* lists */
};

struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;		/* max readahead in PAGE_CACHE_SIZE units */
	unsigned long state;		/* Always use atomic bitops on this */
	unsigned int capabilities;	/* Device capabilities */
	congested_fn *congested_fn;	/* Function pointer if device is md/dm */
	void *congested_data;		/* Pointer to aux data for congested func */

	char *name;

	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];

	unsigned long bw_time_stamp;	/* last time write bw was updated */
	unsigned long dirtied_stamp;
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw */

	/*
	 * The base dirty throttle rate, recalculated every 200ms.
	 * The dirtying rate of every task on this bdi is throttled below it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much smoother and more stable than the latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;
	int dirty_exceeded;

	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	struct bdi_writeback wb;	/* default writeback info for this bdi */
	spinlock_t wb_lock;		/* protects work_list */

	struct list_head work_list;

	struct device *dev;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

int bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
int bdi_writeback_thread(void *data);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);

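/*
 * Illustrative sketch (not part of the upstream header): a driver or
 * filesystem that embeds its own backing_dev_info typically brings it up
 * with bdi_setup_and_register() and tears it down with bdi_destroy().
 * The "mydrv" name and the my_dev structure are hypothetical.
 *
 *	err = bdi_setup_and_register(&dev->bdi, "mydrv", BDI_CAP_MAP_COPY);
 *	if (err)
 *		return err;
 *	...
 *	bdi_destroy(&dev->bdi);
 */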
extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
extern struct list_head bdi_pending_list;

static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
	return !list_empty(&wb->b_dirty) ||
	       !list_empty(&wb->b_io) ||
	       !list_empty(&wb->b_more_io);
}

static inline void __add_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item, s64 amount)
{
	__percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
}

static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, 1);
}

static inline void inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, -1);
}

static inline void dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline s64 bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_read_positive(&bdi->bdi_stat[item]);
}

static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
}

static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __bdi_stat_sum(bdi, item);
	local_irq_restore(flags);

	return sum;
}

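/*
 * Illustrative sketch (not part of the upstream header): the per-bdi
 * counters above are updated by the mm core as pages move through
 * writeback, roughly along the lines of:
 *
 *	inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
 *	...					// page is under writeback
 *	dec_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
 *	inc_bdi_stat(mapping->backing_dev_info, BDI_WRITTEN);
 *
 * inc_bdi_stat()/dec_bdi_stat() disable interrupts themselves; callers
 * that already run with interrupts off use the __-prefixed variants.
 */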
extern void bdi_writeout_inc(struct backing_dev_info *bdi);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * BDI_STAT_BATCH;
#else
	return 1;
#endif
}

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capabilities
 *
 * The first three flags listed below control whether dirty pages will
 * contribute to the VM's accounting and whether writepages() should be
 * called for dirty pages (something that would not, for example, be
 * appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:  Don't write pages back
 * BDI_CAP_NO_ACCT_WB:    Don't automatically account writeback pages
 *
 * The MAP flags let !MMU mmap() govern direct device mapping vs immediate
 * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
 *
 * BDI_CAP_MAP_COPY:    Copy can be mapped (MAP_PRIVATE)
 * BDI_CAP_MAP_DIRECT:  Can be mapped directly (MAP_SHARED)
 * BDI_CAP_READ_MAP:    Can be mapped for reading
 * BDI_CAP_WRITE_MAP:   Can be mapped for writing
 * BDI_CAP_EXEC_MAP:    Can be mapped for execution
 *
 * BDI_CAP_SWAP_BACKED:   Count shmem/tmpfs objects as swap-backed.
 *
 * BDI_CAP_STABLE_WRITES: The device requires that pages are not modified
 *			  while they are under writeback (stable page writes).
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_MAP_COPY	0x00000004
#define BDI_CAP_MAP_DIRECT	0x00000008
#define BDI_CAP_READ_MAP	0x00000010
#define BDI_CAP_WRITE_MAP	0x00000020
#define BDI_CAP_EXEC_MAP	0x00000040
#define BDI_CAP_NO_ACCT_WB	0x00000080
#define BDI_CAP_SWAP_BACKED	0x00000100
#define BDI_CAP_STABLE_WRITES	0x00000200

#define BDI_CAP_VMFLAGS \
	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

#if defined(VM_MAYREAD) && \
	(BDI_CAP_READ_MAP != VM_MAYREAD || \
	 BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
	 BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif

extern struct backing_dev_info default_backing_dev_info;
extern struct backing_dev_info noop_backing_dev_info;

int writeback_in_progress(struct backing_dev_info *bdi);

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, bdi_bits);
	return (bdi->state & bdi_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << BDI_sync_congested) |
				  (1 << BDI_async_congested));
}

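/*
 * Illustrative sketch (not part of the upstream header): callers issuing
 * optional I/O commonly back off when the bdi is congested, e.g. code
 * doing speculative readahead can simply give up for now:
 *
 *	if (bdi_read_congested(mapping->backing_dev_info))
 *		return;		// skip optional readahead while congested
 */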
enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos);

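/*
 * Illustrative sketch (not part of the upstream header): a driver that
 * cannot keep up marks its bdi congested and clears the state again once
 * the backlog drains, while submitters may throttle with congestion_wait().
 * dev and its embedded bdi are hypothetical names.
 *
 *	set_bdi_congested(&dev->bdi, BLK_RW_ASYNC);	// write queue full
 *	...
 *	clear_bdi_congested(&dev->bdi, BLK_RW_ASYNC);	// room again
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ / 50);		// caller backs off
 */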
static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

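/*
 * Illustrative sketch (not part of the upstream header): a block device
 * that checksums or computes integrity data over in-flight pages can ask
 * for stable page writes by setting the capability on its bdi; the mm core
 * queries it via bdi_cap_stable_pages_required() so that pages are not
 * modified while they are under writeback.  q is a hypothetical
 * request_queue owned by the driver.
 *
 *	q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
 */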
static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}

static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
{
	return bdi == &default_backing_dev_info;
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_swap_backed(struct address_space *mapping)
{
	return bdi_cap_swap_backed(mapping->backing_dev_info);
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#endif	/* _LINUX_BACKING_DEV_H */