/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/flex_proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/writeback.h>
#include <linux/atomic.h>
#include <linux/sysctl.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
        BDI_pending,            /* On its way to being activated */
        BDI_wb_alloc,           /* Default embedded wb allocated */
        BDI_async_congested,    /* The async (write) queue is getting full */
        BDI_sync_congested,     /* The sync queue is getting full */
        BDI_registered,         /* bdi_register() was done */
        BDI_writeback_running,  /* Writeback is in progress */
        BDI_unused,             /* Available bits start here */
};

typedef int (congested_fn)(void *, int);

enum bdi_stat_item {
        BDI_RECLAIMABLE,        /* dirty pages, reclaimable by writing them back */
        BDI_WRITEBACK,          /* pages currently under writeback */
        BDI_DIRTIED,            /* total pages dirtied on this bdi */
        BDI_WRITTEN,            /* total pages written back on this bdi */
        NR_BDI_STAT_ITEMS
};

#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

struct bdi_writeback {
        struct backing_dev_info *bdi;   /* our parent bdi */
        unsigned int nr;

        unsigned long last_old_flush;   /* last old data flush */
        unsigned long last_active;      /* last time bdi thread was active */

        struct task_struct *task;       /* writeback thread */
        struct timer_list wakeup_timer; /* used for delayed bdi thread wakeup */
        struct list_head b_dirty;       /* dirty inodes */
        struct list_head b_io;          /* parked for writeback */
        struct list_head b_more_io;     /* parked for more writeback */
        spinlock_t list_lock;           /* protects the b_* lists */
};

struct backing_dev_info {
        struct list_head bdi_list;
        unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
        unsigned long state;    /* Always use atomic bitops on this */
        unsigned int capabilities; /* Device capabilities */
        congested_fn *congested_fn; /* Function pointer if device is md/dm */
        void *congested_data;   /* Pointer to aux data for congested func */

        char *name;

        struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];

        unsigned long bw_time_stamp;    /* last time write bw was updated */
        unsigned long dirtied_stamp;    /* pages dirtied at bw_time_stamp */
        unsigned long written_stamp;    /* pages written at bw_time_stamp */
        unsigned long write_bandwidth;  /* the estimated write bandwidth */
        unsigned long avg_write_bandwidth; /* further smoothed write bw */

        /*
         * The base dirty throttle rate, recalculated every 200ms.
         * All the bdi tasks' dirty rates are curbed under it.
         * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
         * in small steps and is much smoother/more stable than the latter.
         */
        unsigned long dirty_ratelimit;
        unsigned long balanced_dirty_ratelimit;

        struct fprop_local_percpu completions;
        int dirty_exceeded;

        unsigned int min_ratio;
        unsigned int max_ratio, max_prop_frac;

        struct bdi_writeback wb;  /* default writeback info for this bdi */
        spinlock_t wb_lock;       /* protects work_list */

        struct list_head work_list;

        struct device *dev;

        struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
        struct dentry *debug_dir;
        struct dentry *debug_stats;
#endif
};
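
/*
 * Illustrative sketch only, not part of this header: a stacking driver
 * such as md/dm, which cannot rely on one underlying queue's state bits,
 * is expected to wire up the congestion callback along the lines of
 *
 *	bdi->congested_fn   = example_any_congested;
 *	bdi->congested_data = private_ctx;
 *
 * where example_any_congested and private_ctx are placeholder names.
 * bdi_congested() below then asks the driver instead of testing the
 * BDI_*_congested bits in ->state.
 */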

int bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
                enum wb_reason reason);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
int bdi_writeback_thread(void *data);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);
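
/*
 * Illustrative sketch only, not part of this header: a filesystem that
 * supplies its own bdi would typically do, at mount time, something like
 *
 *	err = bdi_setup_and_register(&sbi->bdi, "examplefs", BDI_CAP_MAP_COPY);
 *	if (err)
 *		return err;
 *	sb->s_bdi = &sbi->bdi;
 *
 * and call bdi_destroy(&sbi->bdi) on unmount.  "sbi" and "examplefs" are
 * placeholder names; the capability argument is filesystem-specific.
 */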

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
extern struct list_head bdi_pending_list;

static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
        return !list_empty(&wb->b_dirty) ||
               !list_empty(&wb->b_io) ||
               !list_empty(&wb->b_more_io);
}

static inline void __add_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item, s64 amount)
{
        __percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
}

static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        __add_bdi_stat(bdi, item, 1);
}

static inline void inc_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __inc_bdi_stat(bdi, item);
        local_irq_restore(flags);
}

static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        __add_bdi_stat(bdi, item, -1);
}

static inline void dec_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __dec_bdi_stat(bdi, item);
        local_irq_restore(flags);
}

static inline s64 bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        return percpu_counter_read_positive(&bdi->bdi_stat[item]);
}

static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
}

static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
                enum bdi_stat_item item)
{
        s64 sum;
        unsigned long flags;

        local_irq_save(flags);
        sum = __bdi_stat_sum(bdi, item);
        local_irq_restore(flags);

        return sum;
}

extern void bdi_writeout_inc(struct backing_dev_info *bdi);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
{
#ifdef CONFIG_SMP
        return nr_cpu_ids * BDI_STAT_BATCH;
#else
        return 1;
#endif
}
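
/*
 * Worked example (illustrative): with nr_cpu_ids == 4, ilog2(4) == 2, so
 * BDI_STAT_BATCH == 8 * (1 + 2) == 24.  Each CPU may accumulate up to the
 * batch value locally before folding it into the global counter, so a
 * cheap bdi_stat() read can deviate from the true value by up to
 * nr_cpu_ids * BDI_STAT_BATCH == 96 pages, which is what bdi_stat_error()
 * reports on SMP.  bdi_stat_sum() folds the per-cpu deltas and pays the
 * cost of an exact answer.
 */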

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capabilities
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 *
 * These flags let !MMU mmap() govern direct device mapping vs immediate
 * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
 *
 * BDI_CAP_MAP_COPY:       Copy can be mapped (MAP_PRIVATE)
 * BDI_CAP_MAP_DIRECT:     Can be mapped directly (MAP_SHARED)
 * BDI_CAP_READ_MAP:       Can be mapped for reading
 * BDI_CAP_WRITE_MAP:      Can be mapped for writing
 * BDI_CAP_EXEC_MAP:       Can be mapped for execution
 *
 * BDI_CAP_SWAP_BACKED:    Count shmem/tmpfs objects as swap-backed.
 */
#define BDI_CAP_NO_ACCT_DIRTY   0x00000001
#define BDI_CAP_NO_WRITEBACK    0x00000002
#define BDI_CAP_MAP_COPY        0x00000004
#define BDI_CAP_MAP_DIRECT      0x00000008
#define BDI_CAP_READ_MAP        0x00000010
#define BDI_CAP_WRITE_MAP       0x00000020
#define BDI_CAP_EXEC_MAP        0x00000040
#define BDI_CAP_NO_ACCT_WB      0x00000080
#define BDI_CAP_SWAP_BACKED     0x00000100

#define BDI_CAP_VMFLAGS \
        (BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
        (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

#if defined(VM_MAYREAD) && \
        (BDI_CAP_READ_MAP != VM_MAYREAD || \
         BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
         BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif
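
/*
 * Illustrative composition, not part of this header: a RAM-backed
 * filesystem whose pages should neither be dirty-accounted nor written
 * back, but which can still be mapped, might declare something like
 *
 *	static struct backing_dev_info example_bdi = {
 *		.name		= "examplefs",
 *		.ra_pages	= 0,
 *		.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK |
 *				  BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY |
 *				  BDI_CAP_VMFLAGS,
 *	};
 *
 * example_bdi and "examplefs" are placeholder names; the exact flag set
 * is filesystem-specific.  This only shows how the macros above combine.
 */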

extern struct backing_dev_info default_backing_dev_info;
extern struct backing_dev_info noop_backing_dev_info;

int writeback_in_progress(struct backing_dev_info *bdi);

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
        if (bdi->congested_fn)
                return bdi->congested_fn(bdi->congested_data, bdi_bits);
        return (bdi->state & bdi_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, 1 << BDI_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, 1 << BDI_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, (1 << BDI_sync_congested) |
                                  (1 << BDI_async_congested));
}
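
/*
 * Typical caller pattern (illustrative): writeback and readahead paths
 * poll these helpers so they can back off while a device's queues are
 * full, roughly
 *
 *	if (bdi_write_congested(bdi))
 *		return;		(skip the opportunistic writeback for now)
 *
 * Stacked devices such as md/dm answer through their congested_fn rather
 * than through the BDI_*_congested bits in ->state.
 */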

enum {
        BLK_RW_ASYNC    = 0,
        BLK_RW_SYNC     = 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
        return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
        return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
        /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
        return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
                                      BDI_CAP_NO_WRITEBACK));
}

static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
{
        return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}

static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
{
        return bdi == &default_backing_dev_info;
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
        return bdi_cap_writeback_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
        return bdi_cap_account_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_swap_backed(struct address_space *mapping)
{
        return bdi_cap_swap_backed(mapping->backing_dev_info);
}

static inline int bdi_sched_wait(void *word)
{
        schedule();
        return 0;
}

#endif  /* _LINUX_BACKING_DEV_H */