/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002    akpm@zip.com.au
 *              Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>

/*
 * The maximum number of pages to write out in a single bdflush/kupdate
 * operation.  We do this so we don't hold I_LOCK against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty state each time it has written this many pages.
 */
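/* With 4KB pages (illustrative), 1024 pages is 4MB per writeback chunk. */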
#define MAX_WRITEBACK_PAGES     1024

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

static long total_pages;        /* The total number of pages in the machine. */
static int dirty_exceeded __cacheline_aligned_in_smp;   /* Dirty mem may be over limit */

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than ratelimit_pages to ensure that reasonably
 * large amounts of I/O are submitted.
 */
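/*
 * Illustrative sizing (assuming 4KB pages): set_ratelimit() below caps
 * ratelimit_pages at 1024 (4MB), so the largest chunk this can return is
 * 1536 pages - the "six megabyte chunks" mentioned above set_ratelimit().
 */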
static inline long sync_writeback_pages(void)
{
        return ratelimit_pages + ratelimit_pages / 2;
}

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via pdflush) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 40;

/*
 * The interval between `kupdate'-style writebacks, in jiffies
 */
int dirty_writeback_interval = 5 * HZ;

/*
 * The longest number of jiffies for which data is allowed to remain dirty
 */
int dirty_expire_interval = 30 * HZ;

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */


static void background_writeout(unsigned long _min_pages);

struct writeback_state
{
        unsigned long nr_dirty;
        unsigned long nr_unstable;
        unsigned long nr_mapped;
        unsigned long nr_writeback;
};

static void get_writeback_state(struct writeback_state *wbs)
{
        wbs->nr_dirty = read_page_state(nr_dirty);
        wbs->nr_unstable = read_page_state(nr_unstable);
        wbs->nr_mapped = read_page_state(nr_mapped);
        wbs->nr_writeback = read_page_state(nr_writeback);
}

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around: it is better to clamp down on writers than to stress page
 * reclaim with lots of unreclaimable pages, start swapping and perform lots
 * of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */
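/*
 * Worked example (illustrative numbers only): on a machine with 1,000,000
 * pages of which 400,000 are mapped, unmapped_ratio is 60, so the default
 * vm_dirty_ratio of 40 is clamped to 30 while dirty_background_ratio stays
 * at 10.  That yields a dirty threshold of ~300,000 pages and a background
 * threshold of ~100,000 pages, before the PF_LESS_THROTTLE/rt_task boost.
 */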
static void
get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
                struct address_space *mapping)
{
        int background_ratio;           /* Percentages */
        int dirty_ratio;
        int unmapped_ratio;
        long background;
        long dirty;
        unsigned long available_memory = total_pages;
        struct task_struct *tsk;

        get_writeback_state(wbs);

#ifdef CONFIG_HIGHMEM
        /*
         * If this mapping can only allocate from low memory,
         * we exclude high memory from our count.
         */
        if (mapping && !(mapping_gfp_mask(mapping) & __GFP_HIGHMEM))
                available_memory -= totalhigh_pages;
#endif


        unmapped_ratio = 100 - (wbs->nr_mapped * 100) / total_pages;

        dirty_ratio = vm_dirty_ratio;
        if (dirty_ratio > unmapped_ratio / 2)
                dirty_ratio = unmapped_ratio / 2;

        if (dirty_ratio < 5)
                dirty_ratio = 5;

        background_ratio = dirty_background_ratio;
        if (background_ratio >= dirty_ratio)
                background_ratio = dirty_ratio / 2;

        background = (background_ratio * available_memory) / 100;
        dirty = (dirty_ratio * available_memory) / 100;
        tsk = current;
        if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
                background += background / 4;
                dirty += dirty / 4;
        }
        *pbackground = background;
        *pdirty = dirty;
}

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
static void balance_dirty_pages(struct address_space *mapping)
{
        struct writeback_state wbs;
        long nr_reclaimable;
        long background_thresh;
        long dirty_thresh;
        unsigned long pages_written = 0;
        unsigned long write_chunk = sync_writeback_pages();

        struct backing_dev_info *bdi = mapping->backing_dev_info;

        for (;;) {
                struct writeback_control wbc = {
                        .bdi            = bdi,
                        .sync_mode      = WB_SYNC_NONE,
                        .older_than_this = NULL,
                        .nr_to_write    = write_chunk,
                        .range_cyclic   = 1,
                };

                get_dirty_limits(&wbs, &background_thresh,
                                        &dirty_thresh, mapping);
                nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
                if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
                        break;

                if (!dirty_exceeded)
                        dirty_exceeded = 1;

                /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
                 * Unstable writes are a feature of certain networked
                 * filesystems (i.e. NFS) in which data may have been
                 * written to the server's write cache, but has not yet
                 * been flushed to permanent storage.
                 */
                if (nr_reclaimable) {
                        writeback_inodes(&wbc);
                        get_dirty_limits(&wbs, &background_thresh,
                                        &dirty_thresh, mapping);
                        nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
                        if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
                                break;
                        pages_written += write_chunk - wbc.nr_to_write;
                        if (pages_written >= write_chunk)
                                break;          /* We've done our duty */
                }
                blk_congestion_wait(WRITE, HZ/10);
        }

        if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh && dirty_exceeded)
                dirty_exceeded = 0;

        if (writeback_in_progress(bdi))
                return;         /* pdflush is already working this queue */

        /*
         * In laptop mode, we wait until hitting the higher threshold before
         * starting background writeout, and then write out all the way down
         * to the lower threshold.  So slow writers cause minimal disk activity.
         *
         * In normal mode, we start background writeout at the lower
         * background_thresh, to keep the amount of dirty memory low.
         */
        if ((laptop_mode && pages_written) ||
             (!laptop_mode && (nr_reclaimable > background_thresh)))
                pdflush_operation(background_writeout, 0);
}

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
                                        unsigned long nr_pages_dirtied)
{
        static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
        unsigned long ratelimit;
        unsigned long *p;

        ratelimit = ratelimit_pages;
        if (dirty_exceeded)
                ratelimit = 8;

        /*
         * Check the rate limiting. Also, we do not want to throttle real-time
         * tasks in balance_dirty_pages(). Period.
         */
        preempt_disable();
        p = &__get_cpu_var(ratelimits);
        *p += nr_pages_dirtied;
        if (unlikely(*p >= ratelimit)) {
                *p = 0;
                preempt_enable();
                balance_dirty_pages(mapping);
                return;
        }
        preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
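
/*
 * Typical call pattern (illustrative sketch, not a complete caller): a
 * process which has just dirtied a page through the page cache calls
 *
 *      balance_dirty_pages_ratelimited(mapping);
 *
 * the single-page wrapper declared in <linux/writeback.h>, which passes
 * nr_pages_dirtied == 1 to the function above.
 */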

void throttle_vm_writeout(void)
{
        struct writeback_state wbs;
        long background_thresh;
        long dirty_thresh;

        for ( ; ; ) {
                get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);

                /*
                 * Boost the allowable dirty threshold a bit for page
                 * allocators so they don't get DoS'ed by heavy writers
                 */
                dirty_thresh += dirty_thresh / 10;      /* wheeee... */

                if (wbs.nr_unstable + wbs.nr_writeback <= dirty_thresh)
                        break;
                blk_congestion_wait(WRITE, HZ/10);
        }
}


/*
 * Write back at least _min_pages, and keep writing until the amount of dirty
 * memory is less than the background threshold, or until we're all clean.
 */
static void background_writeout(unsigned long _min_pages)
{
        long min_pages = _min_pages;
        struct writeback_control wbc = {
                .bdi            = NULL,
                .sync_mode      = WB_SYNC_NONE,
                .older_than_this = NULL,
                .nr_to_write    = 0,
                .nonblocking    = 1,
                .range_cyclic   = 1,
        };

        for ( ; ; ) {
                struct writeback_state wbs;
                long background_thresh;
                long dirty_thresh;

                get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
                if (wbs.nr_dirty + wbs.nr_unstable < background_thresh
                                && min_pages <= 0)
                        break;
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                wbc.pages_skipped = 0;
                writeback_inodes(&wbc);
                min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
                if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
                        /* Wrote less than expected */
                        blk_congestion_wait(WRITE, HZ/10);
                        if (!wbc.encountered_congestion)
                                break;
                }
        }
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
 * -1 if all pdflush threads were busy.
 */
int wakeup_pdflush(long nr_pages)
{
        if (nr_pages == 0) {
                struct writeback_state wbs;

                get_writeback_state(&wbs);
                nr_pages = wbs.nr_dirty + wbs.nr_unstable;
        }
        return pdflush_operation(background_writeout, nr_pages);
}

static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);

static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);

/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than one dirty_writeback_interval, then leave a one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
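/*
 * Illustrative timing with the defaults above: wb_kupdate() runs roughly
 * every 5 seconds (dirty_writeback_interval = 5 * HZ) and writes back data
 * which has been dirty for longer than 30 seconds (dirty_expire_interval).
 */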
static void wb_kupdate(unsigned long arg)
{
        unsigned long oldest_jif;
        unsigned long start_jif;
        unsigned long next_jif;
        long nr_to_write;
        struct writeback_state wbs;
        struct writeback_control wbc = {
                .bdi            = NULL,
                .sync_mode      = WB_SYNC_NONE,
                .older_than_this = &oldest_jif,
                .nr_to_write    = 0,
                .nonblocking    = 1,
                .for_kupdate    = 1,
                .range_cyclic   = 1,
        };

        sync_supers();

        get_writeback_state(&wbs);
        oldest_jif = jiffies - dirty_expire_interval;
        start_jif = jiffies;
        next_jif = start_jif + dirty_writeback_interval;
        nr_to_write = wbs.nr_dirty + wbs.nr_unstable +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused);
        while (nr_to_write > 0) {
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                writeback_inodes(&wbc);
                if (wbc.nr_to_write > 0) {
                        if (wbc.encountered_congestion)
                                blk_congestion_wait(WRITE, HZ/10);
                        else
                                break;  /* All the old data is written */
                }
                nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
        }
        if (time_before(next_jif, jiffies + HZ))
                next_jif = jiffies + HZ;
        if (dirty_writeback_interval)
                mod_timer(&wb_timer, next_jif);
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
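/*
 * Example (illustrative): `echo 500 > /proc/sys/vm/dirty_writeback_centisecs'
 * is converted from centiseconds to 5*HZ jiffies by
 * proc_dointvec_userhz_jiffies() and re-arms wb_timer; writing 0 disables
 * the periodic kupdate-style writeback entirely.
 */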
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
        struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
        if (dirty_writeback_interval) {
                mod_timer(&wb_timer,
                        jiffies + dirty_writeback_interval);
        } else {
                del_timer(&wb_timer);
        }
        return 0;
}

static void wb_timer_fn(unsigned long unused)
{
        if (pdflush_operation(wb_kupdate, 0) < 0)
                mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
}

static void laptop_flush(unsigned long unused)
{
        sys_sync();
}

static void laptop_timer_fn(unsigned long unused)
{
        pdflush_operation(laptop_flush, 0);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(void)
{
        mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced.  The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
        del_timer(&laptop_mode_wb_timer);
}

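/*
 * Summary of the laptop-mode plumbing above: completion of disk I/O calls
 * laptop_io_completion(), which (re)arms laptop_mode_wb_timer to expire
 * `laptop_mode' jiffies later.  When the disk finally goes idle the timer
 * fires, laptop_timer_fn() hands laptop_flush() to a pdflush thread, and the
 * resulting sys_sync() writes everything out; laptop_sync_completion() then
 * cancels the timer which the sync's own I/O re-armed.
 */
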
/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high, because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */
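/*
 * Worked example (illustrative numbers, assuming 4KB pages): with 4 online
 * CPUs and 262144 pages (1GB) of pagecache-capable memory, total_pages /
 * (4 * 32) gives 2048 pages; that exceeds the 4MB cap, so ratelimit_pages
 * is clamped back down to 1024 pages.
 */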

static void set_ratelimit(void)
{
        ratelimit_pages = total_pages / (num_online_cpus() * 32);
        if (ratelimit_pages < 16)
                ratelimit_pages = 16;
        if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
                ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}

static int
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
        set_ratelimit();
        return 0;
}

static struct notifier_block ratelimit_nb = {
        .notifier_call  = ratelimit_handler,
        .next           = NULL,
};

/*
 * If the machine has a large highmem:lowmem ratio then scale back the default
 * dirty memory thresholds: allowing too much dirty highmem pins an excessive
 * number of buffer_heads.
 */
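/*
 * For example (illustrative numbers): if lowmem buffer pages are a quarter
 * of the pagecache-capable pages, correction is (100 * 4) / 4 = 100 and the
 * defaults are left untouched; if they are only an eighth, correction is 50
 * and both ratios are halved.
 */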
void __init page_writeback_init(void)
{
        long buffer_pages = nr_free_buffer_pages();
        long correction;

        total_pages = nr_free_pagecache_pages();

        correction = (100 * 4 * buffer_pages) / total_pages;

        if (correction < 100) {
                dirty_background_ratio *= correction;
                dirty_background_ratio /= 100;
                vm_dirty_ratio *= correction;
                vm_dirty_ratio /= 100;

                if (dirty_background_ratio <= 0)
                        dirty_background_ratio = 1;
                if (vm_dirty_ratio <= 0)
                        vm_dirty_ratio = 1;
        }
        mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
        set_ratelimit();
        register_cpu_notifier(&ratelimit_nb);
}

int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        int ret;

        if (wbc->nr_to_write <= 0)
                return 0;
        wbc->for_writepages = 1;
        if (mapping->a_ops->writepages)
                ret = mapping->a_ops->writepages(mapping, wbc);
        else
                ret = generic_writepages(mapping, wbc);
        wbc->for_writepages = 0;
        return ret;
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 *
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
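/*
 * Sketch of a typical call (illustrative, not taken from a real caller):
 *
 *      lock_page(page);
 *      err = write_one_page(page, 1);
 *
 * write_one_page() drops the page lock itself, so the caller must not
 * unlock_page() afterwards.
 */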
int write_one_page(struct page *page, int wait)
{
        struct address_space *mapping = page->mapping;
        int ret = 0;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 1,
        };

        BUG_ON(!PageLocked(page));

        if (wait)
                wait_on_page_writeback(page);

        if (clear_page_dirty_for_io(page)) {
                page_cache_get(page);
                ret = mapping->a_ops->writepage(page, &wbc);
                if (ret == 0 && wait) {
                        wait_on_page_writeback(page);
                        if (PageError(page))
                                ret = -EIO;
                }
                page_cache_release(page);
        } else {
                unlock_page(page);
        }
        return ret;
}
EXPORT_SYMBOL(write_one_page);

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * zap_pte_range() does not lock the page, but in that case the mapping is
 * pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
        if (!TestSetPageDirty(page)) {
                struct address_space *mapping = page_mapping(page);
                struct address_space *mapping2;

                if (mapping) {
                        write_lock_irq(&mapping->tree_lock);
                        mapping2 = page_mapping(page);
                        if (mapping2) { /* Race with truncate? */
                                BUG_ON(mapping2 != mapping);
                                if (mapping_cap_account_dirty(mapping))
                                        inc_page_state(nr_dirty);
                                radix_tree_tag_set(&mapping->page_tree,
                                        page_index(page), PAGECACHE_TAG_DIRTY);
                        }
                        write_unlock_irq(&mapping->tree_lock);
                        if (mapping->host) {
                                /* !PageAnon && !swapper_space */
                                __mark_inode_dirty(mapping->host,
                                                        I_DIRTY_PAGES);
                        }
                }
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0.
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
        wbc->pages_skipped++;
        return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);

/*
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int fastcall set_page_dirty(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (likely(mapping)) {
                int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
                if (spd)
                        return (*spd)(page);
                return __set_page_dirty_buffers(page);
        }
        if (!PageDirty(page)) {
                if (!TestSetPageDirty(page))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
        int ret;

        lock_page(page);
        ret = set_page_dirty(page);
        unlock_page(page);
        return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 */
int test_clear_page_dirty(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        unsigned long flags;

        if (mapping) {
                write_lock_irqsave(&mapping->tree_lock, flags);
                if (TestClearPageDirty(page)) {
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                        write_unlock_irqrestore(&mapping->tree_lock, flags);
                        if (mapping_cap_account_dirty(mapping))
                                dec_page_state(nr_dirty);
                        return 1;
                }
                write_unlock_irqrestore(&mapping->tree_lock, flags);
                return 0;
        }
        return TestClearPageDirty(page);
}
EXPORT_SYMBOL(test_clear_page_dirty);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (mapping) {
                if (TestClearPageDirty(page)) {
                        if (mapping_cap_account_dirty(mapping))
                                dec_page_state(nr_dirty);
                        return 1;
                }
                return 0;
        }
        return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

int test_clear_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                unsigned long flags;

                write_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestClearPageWriteback(page);
                if (ret)
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                write_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestClearPageWriteback(page);
        }
        return ret;
}

int test_set_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                unsigned long flags;

                write_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestSetPageWriteback(page);
                if (!ret)
                        radix_tree_tag_set(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                if (!PageDirty(page))
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                write_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestSetPageWriteback(page);
        }
        return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
        unsigned long flags;
        int ret;

        read_lock_irqsave(&mapping->tree_lock, flags);
        ret = radix_tree_tagged(&mapping->page_tree, tag);
        read_unlock_irqrestore(&mapping->tree_lock, flags);
        return ret;
}
EXPORT_SYMBOL(mapping_tagged);