git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - mm/page-writeback.c
writeback: avoid unnecessary calculation of bdi dirty thresholds
1da177e4 1/*
f30c2269 2 * mm/page-writeback.c
1da177e4
LT
3 *
4 * Copyright (C) 2002, Linus Torvalds.
04fbfdc1 5 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
1da177e4
LT
6 *
7 * Contains functions related to writing back dirty pages at the
8 * address_space level.
9 *
e1f8e874 10 * 10Apr2002 Andrew Morton
1da177e4
LT
11 * Initial version
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/spinlock.h>
17#include <linux/fs.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/slab.h>
21#include <linux/pagemap.h>
22#include <linux/writeback.h>
23#include <linux/init.h>
24#include <linux/backing-dev.h>
55e829af 25#include <linux/task_io_accounting_ops.h>
1da177e4
LT
26#include <linux/blkdev.h>
27#include <linux/mpage.h>
d08b3851 28#include <linux/rmap.h>
1da177e4
LT
29#include <linux/percpu.h>
30#include <linux/notifier.h>
31#include <linux/smp.h>
32#include <linux/sysctl.h>
33#include <linux/cpu.h>
34#include <linux/syscalls.h>
cf9a2ae8 35#include <linux/buffer_head.h>
811d736f 36#include <linux/pagevec.h>
028c2dd1 37#include <trace/events/writeback.h>
1da177e4 38
1da177e4
LT
39/*
40 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
41 * will look to see if it needs to force writeback or throttling.
42 */
43static long ratelimit_pages = 32;
44
1da177e4
LT
45/*
46 * When balance_dirty_pages decides that the caller needs to perform some
47 * non-background writeback, this is how many pages it will attempt to write.
3a2e9a5a 48 * It should be somewhat larger than the number of dirtied pages to ensure that reasonably
1da177e4
LT
49 * large amounts of I/O are submitted.
50 */
3a2e9a5a 51static inline long sync_writeback_pages(unsigned long dirtied)
1da177e4 52{
3a2e9a5a
WF
53 if (dirtied < ratelimit_pages)
54 dirtied = ratelimit_pages;
55
56 return dirtied + dirtied / 2;
1da177e4
LT
57}
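/*
 * Worked example (editor's illustration): with the default ratelimit_pages
 * of 32, a caller that has dirtied 10 pages is rounded up to 32 and asked
 * to write 32 + 32/2 = 48 pages; one that dirtied 100 pages writes
 * 100 + 50 = 150 pages, so writeback always outpaces the dirtying rate.
 */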
58
59/* The following parameters are exported via /proc/sys/vm */
60
61/*
5b0830cb 62 * Start background writeback (via writeback threads) at this percentage
1da177e4 63 */
1b5e62b4 64int dirty_background_ratio = 10;
1da177e4 65
2da02997
DR
66/*
67 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
68 * dirty_background_ratio * the amount of dirtyable memory
69 */
70unsigned long dirty_background_bytes;
71
195cf453
BG
72/*
73 * free highmem will not be subtracted from the total free memory
74 * for calculating free ratios if vm_highmem_is_dirtyable is true
75 */
76int vm_highmem_is_dirtyable;
77
1da177e4
LT
78/*
79 * The generator of dirty data starts writeback at this percentage
80 */
1b5e62b4 81int vm_dirty_ratio = 20;
1da177e4 82
2da02997
DR
83/*
84 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
85 * vm_dirty_ratio * the amount of dirtyable memory
86 */
87unsigned long vm_dirty_bytes;
88
1da177e4 89/*
704503d8 90 * The interval between `kupdate'-style writebacks
1da177e4 91 */
22ef37ee 92unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
1da177e4
LT
93
94/*
704503d8 95 * The longest time for which data is allowed to remain dirty
1da177e4 96 */
22ef37ee 97unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
1da177e4
LT
98
99/*
100 * Flag that makes the machine dump writes/reads and block dirtyings.
101 */
102int block_dump;
103
104/*
ed5b43f1
BS
105 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
106 * a full sync is triggered after this time elapses without any disk activity.
1da177e4
LT
107 */
108int laptop_mode;
109
110EXPORT_SYMBOL(laptop_mode);
111
112/* End of sysctl-exported parameters */
113
114
04fbfdc1
PZ
115/*
116 * Scale the writeback cache size proportional to the relative writeout speeds.
117 *
118 * We do this by keeping a floating proportion between BDIs, based on page
119 * writeback completions [end_page_writeback()]. Those devices that write out
120 * pages fastest will get the larger share, while the slower will get a smaller
121 * share.
122 *
123 * We use page writeout completions because we are interested in getting rid of
124 * dirty pages. Having them written out is the primary goal.
125 *
126 * We introduce a concept of time, a period over which we measure these events,
127 * because demand can/will vary over time. The length of this period itself is
128 * measured in page writeback completions.
129 *
130 */
131static struct prop_descriptor vm_completions;
3e26c149 132static struct prop_descriptor vm_dirties;
04fbfdc1 133
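/*
 * Illustration with assumed numbers: if, over one measurement period, a
 * BDI completes 600 of the 800 page writebacks seen globally, its
 * vm_completions fraction approaches 600/800 = 3/4, and older periods
 * decay away so the estimate keeps tracking the current writeout speed.
 */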
04fbfdc1
PZ
134/*
135 * couple the period to the dirty_ratio:
136 *
137 * period/2 ~ roundup_pow_of_two(dirty limit)
138 */
139static int calc_period_shift(void)
140{
141 unsigned long dirty_total;
142
2da02997
DR
143 if (vm_dirty_bytes)
144 dirty_total = vm_dirty_bytes / PAGE_SIZE;
145 else
146 dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) /
147 100;
04fbfdc1
PZ
148 return 2 + ilog2(dirty_total - 1);
149}
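/*
 * Worked example (assumed numbers): with 1,000,000 dirtyable pages and
 * vm_dirty_ratio = 20, dirty_total = 200,000, so the shift is
 * 2 + ilog2(199,999) = 2 + 17 = 19.  The resulting period of 2^19
 * completions gives period/2 = 262,144 ~= roundup_pow_of_two(200,000),
 * matching the relation described above.
 */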
150
151/*
2da02997 152 * update the period when the dirty threshold changes.
04fbfdc1 153 */
2da02997
DR
154static void update_completion_period(void)
155{
156 int shift = calc_period_shift();
157 prop_change_shift(&vm_completions, shift);
158 prop_change_shift(&vm_dirties, shift);
159}
160
161int dirty_background_ratio_handler(struct ctl_table *table, int write,
8d65af78 162 void __user *buffer, size_t *lenp,
2da02997
DR
163 loff_t *ppos)
164{
165 int ret;
166
8d65af78 167 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2da02997
DR
168 if (ret == 0 && write)
169 dirty_background_bytes = 0;
170 return ret;
171}
172
173int dirty_background_bytes_handler(struct ctl_table *table, int write,
8d65af78 174 void __user *buffer, size_t *lenp,
2da02997
DR
175 loff_t *ppos)
176{
177 int ret;
178
8d65af78 179 ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
2da02997
DR
180 if (ret == 0 && write)
181 dirty_background_ratio = 0;
182 return ret;
183}
184
04fbfdc1 185int dirty_ratio_handler(struct ctl_table *table, int write,
8d65af78 186 void __user *buffer, size_t *lenp,
04fbfdc1
PZ
187 loff_t *ppos)
188{
189 int old_ratio = vm_dirty_ratio;
2da02997
DR
190 int ret;
191
8d65af78 192 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
04fbfdc1 193 if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
2da02997
DR
194 update_completion_period();
195 vm_dirty_bytes = 0;
196 }
197 return ret;
198}
199
200
201int dirty_bytes_handler(struct ctl_table *table, int write,
8d65af78 202 void __user *buffer, size_t *lenp,
2da02997
DR
203 loff_t *ppos)
204{
fc3501d4 205 unsigned long old_bytes = vm_dirty_bytes;
2da02997
DR
206 int ret;
207
8d65af78 208 ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
2da02997
DR
209 if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
210 update_completion_period();
211 vm_dirty_ratio = 0;
04fbfdc1
PZ
212 }
213 return ret;
214}
215
216/*
217 * Increment the BDI's writeout completion count and the global writeout
218 * completion count. Called from test_clear_page_writeback().
219 */
220static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
221{
a42dde04
PZ
222 __prop_inc_percpu_max(&vm_completions, &bdi->completions,
223 bdi->max_prop_frac);
04fbfdc1
PZ
224}
225
dd5656e5
MS
226void bdi_writeout_inc(struct backing_dev_info *bdi)
227{
228 unsigned long flags;
229
230 local_irq_save(flags);
231 __bdi_writeout_inc(bdi);
232 local_irq_restore(flags);
233}
234EXPORT_SYMBOL_GPL(bdi_writeout_inc);
235
1cf6e7d8 236void task_dirty_inc(struct task_struct *tsk)
3e26c149
PZ
237{
238 prop_inc_single(&vm_dirties, &tsk->dirties);
239}
240
04fbfdc1
PZ
241/*
242 * Obtain an accurate fraction of the BDI's portion.
243 */
244static void bdi_writeout_fraction(struct backing_dev_info *bdi,
245 long *numerator, long *denominator)
246{
247 if (bdi_cap_writeback_dirty(bdi)) {
248 prop_fraction_percpu(&vm_completions, &bdi->completions,
249 numerator, denominator);
250 } else {
251 *numerator = 0;
252 *denominator = 1;
253 }
254}
255
3e26c149
PZ
256static inline void task_dirties_fraction(struct task_struct *tsk,
257 long *numerator, long *denominator)
258{
259 prop_fraction_single(&vm_dirties, &tsk->dirties,
260 numerator, denominator);
261}
262
263/*
264 * scale the dirty limit
265 *
266 * task specific dirty limit:
267 *
268 * dirty -= (dirty/8) * p_{t}
269 */
16c4042f
WF
270static unsigned long task_dirty_limit(struct task_struct *tsk,
271 unsigned long bdi_dirty)
3e26c149
PZ
272{
273 long numerator, denominator;
16c4042f 274 unsigned long dirty = bdi_dirty;
3e26c149
PZ
275 u64 inv = dirty >> 3;
276
277 task_dirties_fraction(tsk, &numerator, &denominator);
278 inv *= numerator;
279 do_div(inv, denominator);
280
281 dirty -= inv;
3e26c149 282
16c4042f 283 return max(dirty, bdi_dirty/2);
3e26c149
PZ
284}
285
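/*
 * Worked example (assumed numbers): with bdi_dirty = 80,000 pages, a task
 * responsible for essentially all recent dirtying (fraction ~= 1) has its
 * limit lowered by 80,000/8 = 10,000 to 70,000 pages, while a task that
 * dirtied nothing keeps the full 80,000.  The max() above guarantees the
 * result never drops below bdi_dirty/2.
 */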
189d3c4a
PZ
286/*
287 * bdi_min_ratio is the sum of all BDIs' guaranteed minimum shares of the dirty limit; bdi_set_min_ratio() below keeps this sum strictly below 100%.
288 */
189d3c4a
PZ
289static unsigned int bdi_min_ratio;
290
291int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
292{
293 int ret = 0;
189d3c4a 294
cfc4ba53 295 spin_lock_bh(&bdi_lock);
a42dde04 296 if (min_ratio > bdi->max_ratio) {
189d3c4a 297 ret = -EINVAL;
a42dde04
PZ
298 } else {
299 min_ratio -= bdi->min_ratio;
300 if (bdi_min_ratio + min_ratio < 100) {
301 bdi_min_ratio += min_ratio;
302 bdi->min_ratio += min_ratio;
303 } else {
304 ret = -EINVAL;
305 }
306 }
cfc4ba53 307 spin_unlock_bh(&bdi_lock);
a42dde04
PZ
308
309 return ret;
310}
311
312int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
313{
a42dde04
PZ
314 int ret = 0;
315
316 if (max_ratio > 100)
317 return -EINVAL;
318
cfc4ba53 319 spin_lock_bh(&bdi_lock);
a42dde04
PZ
320 if (bdi->min_ratio > max_ratio) {
321 ret = -EINVAL;
322 } else {
323 bdi->max_ratio = max_ratio;
324 bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
325 }
cfc4ba53 326 spin_unlock_bh(&bdi_lock);
189d3c4a
PZ
327
328 return ret;
329}
a42dde04 330EXPORT_SYMBOL(bdi_set_max_ratio);
189d3c4a 331
1da177e4
LT
332/*
333 * Work out the current dirty-memory clamping and background writeout
334 * thresholds.
335 *
336 * The main aim here is to lower them aggressively if there is a lot of mapped
337 * memory around, to avoid stressing page reclaim with lots of unreclaimable
338 * pages: it is better to clamp down on writers than to start swapping and
339 * performing lots of scanning.
340 *
341 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
342 *
343 * We don't permit the clamping level to fall below 5% - that is getting rather
344 * excessive.
345 *
346 * We make sure that the background writeout level is below the adjusted
347 * clamping level.
348 */
1b424464
CL
349
350static unsigned long highmem_dirtyable_memory(unsigned long total)
351{
352#ifdef CONFIG_HIGHMEM
353 int node;
354 unsigned long x = 0;
355
37b07e41 356 for_each_node_state(node, N_HIGH_MEMORY) {
1b424464
CL
357 struct zone *z =
358 &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
359
adea02a1
WF
360 x += zone_page_state(z, NR_FREE_PAGES) +
361 zone_reclaimable_pages(z);
1b424464
CL
362 }
363 /*
364 * Make sure that the number of highmem pages is never larger
365 * than the number of the total dirtyable memory. This can only
366 * occur in very strange VM situations but we want to make sure
367 * that this does not occur.
368 */
369 return min(x, total);
370#else
371 return 0;
372#endif
373}
374
3eefae99
SR
375/**
376 * determine_dirtyable_memory - amount of memory that may be used
377 *
378 * Returns the number of pages that can currently be freed and used
379 * by the kernel for direct mappings.
380 */
381unsigned long determine_dirtyable_memory(void)
1b424464
CL
382{
383 unsigned long x;
384
adea02a1 385 x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
195cf453
BG
386
387 if (!vm_highmem_is_dirtyable)
388 x -= highmem_dirtyable_memory(x);
389
1b424464
CL
390 return x + 1; /* Ensure that we never return 0 */
391}
392
16c4042f 393void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
1da177e4 394{
364aeb28
DR
395 unsigned long background;
396 unsigned long dirty;
1b424464 397 unsigned long available_memory = determine_dirtyable_memory();
1da177e4
LT
398 struct task_struct *tsk;
399
2da02997
DR
400 if (vm_dirty_bytes)
401 dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
402 else {
403 int dirty_ratio;
404
405 dirty_ratio = vm_dirty_ratio;
406 if (dirty_ratio < 5)
407 dirty_ratio = 5;
408 dirty = (dirty_ratio * available_memory) / 100;
409 }
1da177e4 410
2da02997
DR
411 if (dirty_background_bytes)
412 background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
413 else
414 background = (dirty_background_ratio * available_memory) / 100;
1da177e4 415
2da02997
DR
416 if (background >= dirty)
417 background = dirty / 2;
1da177e4
LT
418 tsk = current;
419 if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
420 background += background / 4;
421 dirty += dirty / 4;
422 }
423 *pbackground = background;
424 *pdirty = dirty;
16c4042f 425}
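/*
 * Worked example (assumed numbers): with 1,000,000 dirtyable pages,
 * vm_dirty_ratio = 20 and dirty_background_ratio = 10, this yields
 * dirty = 200,000 and background = 100,000 pages.  A PF_LESS_THROTTLE or
 * real-time caller gets both limits raised by 25%, to 250,000 and 125,000.
 */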
04fbfdc1 426
16c4042f
WF
427unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
428 unsigned long dirty)
429{
430 u64 bdi_dirty;
431 long numerator, denominator;
04fbfdc1 432
16c4042f
WF
433 /*
434 * Calculate this BDI's share of the dirty ratio.
435 */
436 bdi_writeout_fraction(bdi, &numerator, &denominator);
04fbfdc1 437
16c4042f
WF
438 bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
439 bdi_dirty *= numerator;
440 do_div(bdi_dirty, denominator);
04fbfdc1 441
16c4042f
WF
442 bdi_dirty += (dirty * bdi->min_ratio) / 100;
443 if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
444 bdi_dirty = dirty * bdi->max_ratio / 100;
445
446 return bdi_dirty;
1da177e4
LT
447}
448
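/*
 * Worked example (assumed numbers): with a global limit of 200,000 dirty
 * pages, bdi_min_ratio = 0 and a completion fraction of 3/4 for this BDI,
 * bdi_dirty = 200,000 * 3/4 = 150,000 pages.  A non-zero bdi->min_ratio
 * adds a guaranteed share on top, and bdi->max_ratio (100 by default)
 * caps the result.
 */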
449/*
450 * balance_dirty_pages() must be called by processes which are generating dirty
451 * data. It looks at the number of dirty pages in the machine and will force
452 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
5b0830cb
JA
453 * If we're over `background_thresh' then the writeback threads are woken to
454 * perform some writeout.
1da177e4 455 */
3a2e9a5a
WF
456static void balance_dirty_pages(struct address_space *mapping,
457 unsigned long write_chunk)
1da177e4 458{
5fce25a9
PZ
459 long nr_reclaimable, bdi_nr_reclaimable;
460 long nr_writeback, bdi_nr_writeback;
364aeb28
DR
461 unsigned long background_thresh;
462 unsigned long dirty_thresh;
463 unsigned long bdi_thresh;
1da177e4 464 unsigned long pages_written = 0;
87c6a9b2 465 unsigned long pause = 1;
e50e3720 466 bool dirty_exceeded = false;
1da177e4
LT
467 struct backing_dev_info *bdi = mapping->backing_dev_info;
468
469 for (;;) {
470 struct writeback_control wbc = {
1da177e4
LT
471 .sync_mode = WB_SYNC_NONE,
472 .older_than_this = NULL,
473 .nr_to_write = write_chunk,
111ebb6e 474 .range_cyclic = 1,
1da177e4
LT
475 };
476
5fce25a9
PZ
477 nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
478 global_page_state(NR_UNSTABLE_NFS);
479 nr_writeback = global_page_state(NR_WRITEBACK);
480
16c4042f
WF
481 global_dirty_limits(&background_thresh, &dirty_thresh);
482
483 /*
484 * Throttle it only when the background writeback cannot
485 * catch-up. This avoids (excessively) small writeouts
486 * when the bdi limits are ramping up.
487 */
488 if (nr_reclaimable + nr_writeback <
489 (background_thresh + dirty_thresh) / 2)
490 break;
491
492 bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
493 bdi_thresh = task_dirty_limit(current, bdi_thresh);
494
e50e3720
WF
495 /*
496 * In order to avoid the stacked BDI deadlock we need
497 * to ensure we accurately count the 'dirty' pages when
498 * the threshold is low.
499 *
500 * Otherwise it would be possible to get thresh+n pages
501 * reported dirty, even though there are thresh-m pages
502 * actually dirty; with m+n sitting in the percpu
503 * deltas.
504 */
505 if (bdi_thresh < 2*bdi_stat_error(bdi)) {
506 bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
507 bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
508 } else {
509 bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
510 bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
511 }
5fce25a9 512
e50e3720
WF
513 /*
514 * The bdi threshold is a somewhat "soft" limit derived from the
515 * global "hard" limit. The former helps prevent a heavy-IO bdi or
516 * process from holding back light ones; the latter is the last
517 * resort safeguard.
518 */
519 dirty_exceeded =
520 (bdi_nr_reclaimable + bdi_nr_writeback >= bdi_thresh)
521 || (nr_reclaimable + nr_writeback >= dirty_thresh);
522
523 if (!dirty_exceeded)
04fbfdc1 524 break;
1da177e4 525
04fbfdc1
PZ
526 if (!bdi->dirty_exceeded)
527 bdi->dirty_exceeded = 1;
1da177e4
LT
528
529 /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
530 * Unstable writes are a feature of certain networked
531 * filesystems (e.g. NFS) in which data may have been
532 * written to the server's write cache, but has not yet
533 * been flushed to permanent storage.
d7831a0b
RK
534 * Only move pages to writeback if this bdi is over its
535 * threshold otherwise wait until the disk writes catch
536 * up.
1da177e4 537 */
028c2dd1 538 trace_wbc_balance_dirty_start(&wbc, bdi);
d7831a0b 539 if (bdi_nr_reclaimable > bdi_thresh) {
9c3a8ee8 540 writeback_inodes_wb(&bdi->wb, &wbc);
1da177e4 541 pages_written += write_chunk - wbc.nr_to_write;
028c2dd1 542 trace_wbc_balance_dirty_written(&wbc, bdi);
e50e3720
WF
543 if (pages_written >= write_chunk)
544 break; /* We've done our duty */
04fbfdc1 545 }
028c2dd1 546 trace_wbc_balance_dirty_wait(&wbc, bdi);
d25105e8
WF
547 __set_current_state(TASK_INTERRUPTIBLE);
548 io_schedule_timeout(pause);
87c6a9b2
JA
549
550 /*
551 * Increase the delay for each loop, up to our previous
552 * default of taking a 100ms nap.
553 */
554 pause <<= 1;
555 if (pause > HZ / 10)
556 pause = HZ / 10;
1da177e4
LT
557 }
558
e50e3720 559 if (!dirty_exceeded && bdi->dirty_exceeded)
04fbfdc1 560 bdi->dirty_exceeded = 0;
1da177e4
LT
561
562 if (writeback_in_progress(bdi))
5b0830cb 563 return;
1da177e4
LT
564
565 /*
566 * In laptop mode, we wait until hitting the higher threshold before
567 * starting background writeout, and then write out all the way down
568 * to the lower threshold. So slow writers cause minimal disk activity.
569 *
570 * In normal mode, we start background writeout at the lower
571 * background_thresh, to keep the amount of dirty memory low.
572 */
573 if ((laptop_mode && pages_written) ||
e50e3720 574 (!laptop_mode && (nr_reclaimable > background_thresh)))
c5444198 575 bdi_start_background_writeback(bdi);
1da177e4
LT
576}
577
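/*
 * Note on the pause above (editor's illustration, assuming HZ = 1000):
 * the throttled task first sleeps 1 jiffy (1 ms), then 2, 4, 8, ... ms on
 * each further loop, capped at HZ/10 = 100 ms, so light overshoot causes
 * only short stalls while sustained overshoot converges on 100 ms naps.
 */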
a200ee18 578void set_page_dirty_balance(struct page *page, int page_mkwrite)
edc79b2a 579{
a200ee18 580 if (set_page_dirty(page) || page_mkwrite) {
edc79b2a
PZ
581 struct address_space *mapping = page_mapping(page);
582
583 if (mapping)
584 balance_dirty_pages_ratelimited(mapping);
585 }
586}
587
245b2e70
TH
588static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;
589
1da177e4 590/**
fa5a734e 591 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
67be2dd1 592 * @mapping: address_space which was dirtied
a580290c 593 * @nr_pages_dirtied: number of pages which the caller has just dirtied
1da177e4
LT
594 *
595 * Processes which are dirtying memory should call in here once for each page
596 * which was newly dirtied. The function will periodically check the system's
597 * dirty state and will initiate writeback if needed.
598 *
599 * On really big machines, get_writeback_state is expensive, so try to avoid
600 * calling it too often (ratelimiting). But once we're over the dirty memory
601 * limit we decrease the ratelimiting by a lot, to prevent individual processes
602 * from overshooting the limit by (ratelimit_pages) each.
603 */
fa5a734e
AM
604void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
605 unsigned long nr_pages_dirtied)
1da177e4 606{
fa5a734e
AM
607 unsigned long ratelimit;
608 unsigned long *p;
1da177e4
LT
609
610 ratelimit = ratelimit_pages;
04fbfdc1 611 if (mapping->backing_dev_info->dirty_exceeded)
1da177e4
LT
612 ratelimit = 8;
613
614 /*
615 * Check the rate limiting. Also, we do not want to throttle real-time
616 * tasks in balance_dirty_pages(). Period.
617 */
fa5a734e 618 preempt_disable();
245b2e70 619 p = &__get_cpu_var(bdp_ratelimits);
fa5a734e
AM
620 *p += nr_pages_dirtied;
621 if (unlikely(*p >= ratelimit)) {
3a2e9a5a 622 ratelimit = sync_writeback_pages(*p);
fa5a734e
AM
623 *p = 0;
624 preempt_enable();
3a2e9a5a 625 balance_dirty_pages(mapping, ratelimit);
1da177e4
LT
626 return;
627 }
fa5a734e 628 preempt_enable();
1da177e4 629}
fa5a734e 630EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
1da177e4 631
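/*
 * Typical usage (sketch, assuming the balance_dirty_pages_ratelimited()
 * wrapper in <linux/writeback.h>, which passes nr_pages_dirtied == 1):
 *
 *	set_page_dirty(page);
 *	balance_dirty_pages_ratelimited(mapping);
 *
 * i.e. a write path calls in once per newly dirtied page, and this
 * function only drops into balance_dirty_pages() roughly every
 * ratelimit_pages pages per CPU.
 */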
232ea4d6 632void throttle_vm_writeout(gfp_t gfp_mask)
1da177e4 633{
364aeb28
DR
634 unsigned long background_thresh;
635 unsigned long dirty_thresh;
1da177e4
LT
636
637 for ( ; ; ) {
16c4042f 638 global_dirty_limits(&background_thresh, &dirty_thresh);
1da177e4
LT
639
640 /*
641 * Boost the allowable dirty threshold a bit for page
642 * allocators so they don't get DoS'ed by heavy writers
643 */
644 dirty_thresh += dirty_thresh / 10; /* wheeee... */
645
c24f21bd
CL
646 if (global_page_state(NR_UNSTABLE_NFS) +
647 global_page_state(NR_WRITEBACK) <= dirty_thresh)
648 break;
8aa7e847 649 congestion_wait(BLK_RW_ASYNC, HZ/10);
369f2389
FW
650
651 /*
652 * The caller might hold locks which can prevent IO completion
653 * or progress in the filesystem. So we cannot just sit here
654 * waiting for IO to complete.
655 */
656 if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
657 break;
1da177e4
LT
658 }
659}
660
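/*
 * Worked example (assumed numbers): with a dirty threshold of 200,000
 * pages, the boost above lets page allocators proceed until unstable +
 * writeback pages exceed 220,000, otherwise waiting in 100 ms steps
 * (unless the gfp_mask forbids IO/FS, in which case we bail out).
 */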
1da177e4
LT
661/*
662 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
663 */
664int dirty_writeback_centisecs_handler(ctl_table *table, int write,
8d65af78 665 void __user *buffer, size_t *length, loff_t *ppos)
1da177e4 666{
8d65af78 667 proc_dointvec(table, write, buffer, length, ppos);
6423104b 668 bdi_arm_supers_timer();
1da177e4
LT
669 return 0;
670}
671
c2c4986e 672#ifdef CONFIG_BLOCK
31373d09 673void laptop_mode_timer_fn(unsigned long data)
1da177e4 674{
31373d09
MG
675 struct request_queue *q = (struct request_queue *)data;
676 int nr_pages = global_page_state(NR_FILE_DIRTY) +
677 global_page_state(NR_UNSTABLE_NFS);
1da177e4 678
31373d09
MG
679 /*
680 * We want to write everything out, not just down to the dirty
681 * threshold
682 */
31373d09 683 if (bdi_has_dirty_io(&q->backing_dev_info))
c5444198 684 bdi_start_writeback(&q->backing_dev_info, nr_pages);
1da177e4
LT
685}
686
687/*
688 * We've spun up the disk and we're in laptop mode: schedule writeback
689 * of all dirty data a few seconds from now. If the flush is already scheduled
690 * then push it back - the user is still using the disk.
691 */
31373d09 692void laptop_io_completion(struct backing_dev_info *info)
1da177e4 693{
31373d09 694 mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
1da177e4
LT
695}
696
697/*
698 * We're in laptop mode and we've just synced. The sync's writes will have
699 * caused another writeback to be scheduled by laptop_io_completion.
700 * Nothing needs to be written back anymore, so we unschedule the writeback.
701 */
702void laptop_sync_completion(void)
703{
31373d09
MG
704 struct backing_dev_info *bdi;
705
706 rcu_read_lock();
707
708 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
709 del_timer(&bdi->laptop_mode_wb_timer);
710
711 rcu_read_unlock();
1da177e4 712}
c2c4986e 713#endif
1da177e4
LT
714
715/*
716 * If ratelimit_pages is too high then we can get into dirty-data overload
717 * if a large number of processes all perform writes at the same time.
718 * If it is too low then SMP machines will call the (expensive)
719 * get_writeback_state too often.
720 *
721 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
722 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
723 * thresholds before writeback cuts in.
724 *
725 * But the limit should not be set too high. Because it also controls the
726 * amount of memory which the balance_dirty_pages() caller has to write back.
727 * If this is too large then the caller will block on the IO queue all the
728 * time. So limit it to four megabytes - the balance_dirty_pages() caller
729 * will write six megabyte chunks, max.
730 */
731
2d1d43f6 732void writeback_set_ratelimit(void)
1da177e4 733{
40c99aae 734 ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
1da177e4
LT
735 if (ratelimit_pages < 16)
736 ratelimit_pages = 16;
737 if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
738 ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
739}
740
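/*
 * Worked example (assuming 4 KiB pages): a machine with 4 GiB of RAM
 * (~1,048,576 pages) and 4 CPUs gets 1,048,576 / (4 * 32) = 8,192, which
 * the 4 MB cap then reduces to 1,024 pages, so each CPU may dirty at most
 * ~4 MB between balance_dirty_pages() checks.
 */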
26c2143b 741static int __cpuinit
1da177e4
LT
742ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
743{
2d1d43f6 744 writeback_set_ratelimit();
aa0f0303 745 return NOTIFY_DONE;
1da177e4
LT
746}
747
74b85f37 748static struct notifier_block __cpuinitdata ratelimit_nb = {
1da177e4
LT
749 .notifier_call = ratelimit_handler,
750 .next = NULL,
751};
752
753/*
dc6e29da
LT
754 * Called early on to tune the page writeback dirty limits.
755 *
756 * We used to scale dirty pages according to how total memory
757 * related to pages that could be allocated for buffers (by
758 * comparing nr_free_buffer_pages() to vm_total_pages).
759 *
760 * However, that was when we used "dirty_ratio" to scale with
761 * all memory, and we don't do that any more. "dirty_ratio"
762 * is now applied to total non-HIGHPAGE memory (by subtracting
763 * totalhigh_pages from vm_total_pages), and as such we can't
764 * get into the old insane situation any more where we had
765 * large amounts of dirty pages compared to a small amount of
766 * non-HIGHMEM memory.
767 *
768 * But we might still want to scale the dirty_ratio by how
769 * much memory the box has...
1da177e4
LT
770 */
771void __init page_writeback_init(void)
772{
04fbfdc1
PZ
773 int shift;
774
2d1d43f6 775 writeback_set_ratelimit();
1da177e4 776 register_cpu_notifier(&ratelimit_nb);
04fbfdc1
PZ
777
778 shift = calc_period_shift();
779 prop_descriptor_init(&vm_completions, shift);
3e26c149 780 prop_descriptor_init(&vm_dirties, shift);
1da177e4
LT
781}
782
f446daae
JK
783/**
784 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
785 * @mapping: address space structure to write
786 * @start: starting page index
787 * @end: ending page index (inclusive)
788 *
789 * This function scans the page range from @start to @end (inclusive) and tags
790 * all pages that have the DIRTY tag set with a special TOWRITE tag. The idea is
791 * that write_cache_pages (or whoever calls this function) will then use the
792 * TOWRITE tag to identify pages eligible for writeback. This mechanism is
793 * used to avoid livelocking of writeback by a process steadily creating new
794 * dirty pages in the file (thus it is important for this function to be quick
795 * so that it can tag pages faster than a dirtying process can create them).
796 */
797/*
798 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
799 */
f446daae
JK
800void tag_pages_for_writeback(struct address_space *mapping,
801 pgoff_t start, pgoff_t end)
802{
3c111a07 803#define WRITEBACK_TAG_BATCH 4096
f446daae
JK
804 unsigned long tagged;
805
806 do {
807 spin_lock_irq(&mapping->tree_lock);
808 tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
809 &start, end, WRITEBACK_TAG_BATCH,
810 PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
811 spin_unlock_irq(&mapping->tree_lock);
812 WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
813 cond_resched();
814 } while (tagged >= WRITEBACK_TAG_BATCH);
815}
816EXPORT_SYMBOL(tag_pages_for_writeback);
817
811d736f 818/**
0ea97180 819 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
811d736f
DH
820 * @mapping: address space structure to write
821 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
0ea97180
MS
822 * @writepage: function called for each page
823 * @data: data passed to writepage function
811d736f 824 *
0ea97180 825 * If a page is already under I/O, write_cache_pages() skips it, even
811d736f
DH
826 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
827 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
828 * and msync() need to guarantee that all the data which was dirty at the time
829 * the call was made get new I/O started against them. If wbc->sync_mode is
830 * WB_SYNC_ALL then we were called for data integrity and we must wait for
831 * existing IO to complete.
f446daae
JK
832 *
833 * To avoid livelocks (when another process dirties new pages), we first tag
834 * pages which should be written back with TOWRITE tag and only then start
835 * writing them. For data-integrity sync we have to be careful so that we do
836 * not miss some pages (e.g., because some other process has cleared TOWRITE
837 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
838 * by the process clearing the DIRTY tag (and submitting the page for IO).
811d736f 839 */
0ea97180
MS
840int write_cache_pages(struct address_space *mapping,
841 struct writeback_control *wbc, writepage_t writepage,
842 void *data)
811d736f 843{
811d736f
DH
844 int ret = 0;
845 int done = 0;
811d736f
DH
846 struct pagevec pvec;
847 int nr_pages;
31a12666 848 pgoff_t uninitialized_var(writeback_index);
811d736f
DH
849 pgoff_t index;
850 pgoff_t end; /* Inclusive */
bd19e012 851 pgoff_t done_index;
31a12666 852 int cycled;
811d736f 853 int range_whole = 0;
f446daae 854 int tag;
811d736f 855
811d736f
DH
856 pagevec_init(&pvec, 0);
857 if (wbc->range_cyclic) {
31a12666
NP
858 writeback_index = mapping->writeback_index; /* prev offset */
859 index = writeback_index;
860 if (index == 0)
861 cycled = 1;
862 else
863 cycled = 0;
811d736f
DH
864 end = -1;
865 } else {
866 index = wbc->range_start >> PAGE_CACHE_SHIFT;
867 end = wbc->range_end >> PAGE_CACHE_SHIFT;
868 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
869 range_whole = 1;
31a12666 870 cycled = 1; /* ignore range_cyclic tests */
811d736f 871 }
f446daae
JK
872 if (wbc->sync_mode == WB_SYNC_ALL)
873 tag = PAGECACHE_TAG_TOWRITE;
874 else
875 tag = PAGECACHE_TAG_DIRTY;
811d736f 876retry:
f446daae
JK
877 if (wbc->sync_mode == WB_SYNC_ALL)
878 tag_pages_for_writeback(mapping, index, end);
bd19e012 879 done_index = index;
5a3d5c98
NP
880 while (!done && (index <= end)) {
881 int i;
882
f446daae 883 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
5a3d5c98
NP
884 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
885 if (nr_pages == 0)
886 break;
811d736f 887
811d736f
DH
888 for (i = 0; i < nr_pages; i++) {
889 struct page *page = pvec.pages[i];
890
891 /*
d5482cdf
NP
892 * At this point, the page may be truncated or
893 * invalidated (changing page->mapping to NULL), or
894 * even swizzled back from swapper_space to tmpfs file
895 * mapping. However, page->index will not change
896 * because we have a reference on the page.
811d736f 897 */
d5482cdf
NP
898 if (page->index > end) {
899 /*
900 * can't be range_cyclic (1st pass) because
901 * end == -1 in that case.
902 */
903 done = 1;
904 break;
905 }
906
907 done_index = page->index + 1;
908
811d736f
DH
909 lock_page(page);
910
5a3d5c98
NP
911 /*
912 * Page truncated or invalidated. We can freely skip it
913 * then, even for data integrity operations: the page
914 * has disappeared concurrently, so there could be no
915 * real expectation of this data integrity operation
916 * even if there is now a new, dirty page at the same
917 * pagecache address.
918 */
811d736f 919 if (unlikely(page->mapping != mapping)) {
5a3d5c98 920continue_unlock:
811d736f
DH
921 unlock_page(page);
922 continue;
923 }
924
515f4a03
NP
925 if (!PageDirty(page)) {
926 /* someone wrote it for us */
927 goto continue_unlock;
928 }
929
930 if (PageWriteback(page)) {
931 if (wbc->sync_mode != WB_SYNC_NONE)
932 wait_on_page_writeback(page);
933 else
934 goto continue_unlock;
935 }
811d736f 936
515f4a03
NP
937 BUG_ON(PageWriteback(page));
938 if (!clear_page_dirty_for_io(page))
5a3d5c98 939 goto continue_unlock;
811d736f 940
9e094383 941 trace_wbc_writepage(wbc, mapping->backing_dev_info);
0ea97180 942 ret = (*writepage)(page, wbc, data);
00266770
NP
943 if (unlikely(ret)) {
944 if (ret == AOP_WRITEPAGE_ACTIVATE) {
945 unlock_page(page);
946 ret = 0;
947 } else {
948 /*
949 * done_index is set past this page,
950 * so media errors will not choke
951 * background writeout for the entire
952 * file. This has consequences for
953 * range_cyclic semantics (ie. it may
954 * not be suitable for data integrity
955 * writeout).
956 */
957 done = 1;
958 break;
959 }
0b564927 960 }
00266770 961
0b564927
DC
962 if (wbc->nr_to_write > 0) {
963 if (--wbc->nr_to_write == 0 &&
89e12190
FC
964 wbc->sync_mode == WB_SYNC_NONE) {
965 /*
966 * We stop writing back only if we are
967 * not doing integrity sync. In case of
968 * integrity sync we have to keep going
969 * because someone may be concurrently
970 * dirtying pages, and we might have
971 * synced a lot of newly appeared dirty
972 * pages, but have not synced all of the
973 * old dirty pages.
974 */
975 done = 1;
976 break;
977 }
05fe478d 978 }
811d736f
DH
979 }
980 pagevec_release(&pvec);
981 cond_resched();
982 }
3a4c6800 983 if (!cycled && !done) {
811d736f 984 /*
31a12666 985 * range_cyclic:
811d736f
DH
986 * We hit the last page and there is more work to be done: wrap
987 * back to the start of the file
988 */
31a12666 989 cycled = 1;
811d736f 990 index = 0;
31a12666 991 end = writeback_index - 1;
811d736f
DH
992 goto retry;
993 }
0b564927
DC
994 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
995 mapping->writeback_index = done_index;
06d6cf69 996
811d736f
DH
997 return ret;
998}
0ea97180
MS
999EXPORT_SYMBOL(write_cache_pages);
1000
1001/*
1002 * Function used by generic_writepages to call the real writepage
1003 * function and set the mapping flags on error
1004 */
1005static int __writepage(struct page *page, struct writeback_control *wbc,
1006 void *data)
1007{
1008 struct address_space *mapping = data;
1009 int ret = mapping->a_ops->writepage(page, wbc);
1010 mapping_set_error(mapping, ret);
1011 return ret;
1012}
1013
1014/**
1015 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
1016 * @mapping: address space structure to write
1017 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1018 *
1019 * This is a library function, which implements the writepages()
1020 * address_space_operation.
1021 */
1022int generic_writepages(struct address_space *mapping,
1023 struct writeback_control *wbc)
1024{
1025 /* deal with chardevs and other special file */
1026 if (!mapping->a_ops->writepage)
1027 return 0;
1028
1029 return write_cache_pages(mapping, wbc, __writepage, mapping);
1030}
811d736f
DH
1031
1032EXPORT_SYMBOL(generic_writepages);
1033
1da177e4
LT
1034int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
1035{
22905f77
AM
1036 int ret;
1037
1da177e4
LT
1038 if (wbc->nr_to_write <= 0)
1039 return 0;
1040 if (mapping->a_ops->writepages)
d08b3851 1041 ret = mapping->a_ops->writepages(mapping, wbc);
22905f77
AM
1042 else
1043 ret = generic_writepages(mapping, wbc);
22905f77 1044 return ret;
1da177e4
LT
1045}
1046
1047/**
1048 * write_one_page - write out a single page and optionally wait on I/O
67be2dd1
MW
1049 * @page: the page to write
1050 * @wait: if true, wait on writeout
1da177e4
LT
1051 *
1052 * The page must be locked by the caller and will be unlocked upon return.
1053 *
1054 * write_one_page() returns a negative error code if I/O failed.
1055 */
1056int write_one_page(struct page *page, int wait)
1057{
1058 struct address_space *mapping = page->mapping;
1059 int ret = 0;
1060 struct writeback_control wbc = {
1061 .sync_mode = WB_SYNC_ALL,
1062 .nr_to_write = 1,
1063 };
1064
1065 BUG_ON(!PageLocked(page));
1066
1067 if (wait)
1068 wait_on_page_writeback(page);
1069
1070 if (clear_page_dirty_for_io(page)) {
1071 page_cache_get(page);
1072 ret = mapping->a_ops->writepage(page, &wbc);
1073 if (ret == 0 && wait) {
1074 wait_on_page_writeback(page);
1075 if (PageError(page))
1076 ret = -EIO;
1077 }
1078 page_cache_release(page);
1079 } else {
1080 unlock_page(page);
1081 }
1082 return ret;
1083}
1084EXPORT_SYMBOL(write_one_page);
1085
76719325
KC
1086/*
1087 * For address_spaces which do not use buffers nor write back.
1088 */
1089int __set_page_dirty_no_writeback(struct page *page)
1090{
1091 if (!PageDirty(page))
1092 SetPageDirty(page);
1093 return 0;
1094}
1095
e3a7cca1
ES
1096/*
1097 * Helper function for set_page_dirty family.
1098 * NOTE: This relies on being atomic wrt interrupts.
1099 */
1100void account_page_dirtied(struct page *page, struct address_space *mapping)
1101{
1102 if (mapping_cap_account_dirty(mapping)) {
1103 __inc_zone_page_state(page, NR_FILE_DIRTY);
1104 __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
1105 task_dirty_inc(current);
1106 task_io_account_write(PAGE_CACHE_SIZE);
1107 }
1108}
1109
1da177e4
LT
1110/*
1111 * For address_spaces which do not use buffers. Just tag the page as dirty in
1112 * its radix tree.
1113 *
1114 * This is also used when a single buffer is being dirtied: we want to set the
1115 * page dirty in that case, but not all the buffers. This is a "bottom-up"
1116 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
1117 *
1118 * Most callers have locked the page, which pins the address_space in memory.
1119 * But zap_pte_range() does not lock the page, however in that case the
1120 * mapping is pinned by the vma's ->vm_file reference.
1121 *
1122 * We take care to handle the case where the page was truncated from the
183ff22b 1123 * mapping by re-checking page_mapping() inside tree_lock.
1da177e4
LT
1124 */
1125int __set_page_dirty_nobuffers(struct page *page)
1126{
1da177e4
LT
1127 if (!TestSetPageDirty(page)) {
1128 struct address_space *mapping = page_mapping(page);
1129 struct address_space *mapping2;
1130
8c08540f
AM
1131 if (!mapping)
1132 return 1;
1133
19fd6231 1134 spin_lock_irq(&mapping->tree_lock);
8c08540f
AM
1135 mapping2 = page_mapping(page);
1136 if (mapping2) { /* Race with truncate? */
1137 BUG_ON(mapping2 != mapping);
787d2214 1138 WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
e3a7cca1 1139 account_page_dirtied(page, mapping);
8c08540f
AM
1140 radix_tree_tag_set(&mapping->page_tree,
1141 page_index(page), PAGECACHE_TAG_DIRTY);
1142 }
19fd6231 1143 spin_unlock_irq(&mapping->tree_lock);
8c08540f
AM
1144 if (mapping->host) {
1145 /* !PageAnon && !swapper_space */
1146 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1da177e4 1147 }
4741c9fd 1148 return 1;
1da177e4 1149 }
4741c9fd 1150 return 0;
1da177e4
LT
1151}
1152EXPORT_SYMBOL(__set_page_dirty_nobuffers);
1153
1154/*
1155 * When a writepage implementation decides that it doesn't want to write this
1156 * page for some reason, it should redirty the locked page via
1157 * redirty_page_for_writepage() and it should then unlock the page and return 0
1158 */
1159int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
1160{
1161 wbc->pages_skipped++;
1162 return __set_page_dirty_nobuffers(page);
1163}
1164EXPORT_SYMBOL(redirty_page_for_writepage);
1165
1166/*
6746aff7
WF
1167 * Dirty a page.
1168 *
1169 * For pages with a mapping this should be done under the page lock
1170 * for the benefit of asynchronous memory-error handling, which prefers a
1171 * consistent dirty state. This rule can be broken in some special cases,
1172 * but it is better not to.
1173 *
1da177e4
LT
1174 * If the mapping doesn't provide a set_page_dirty a_op, then
1175 * just fall through and assume that it wants buffer_heads.
1176 */
1cf6e7d8 1177int set_page_dirty(struct page *page)
1da177e4
LT
1178{
1179 struct address_space *mapping = page_mapping(page);
1180
1181 if (likely(mapping)) {
1182 int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
9361401e
DH
1183#ifdef CONFIG_BLOCK
1184 if (!spd)
1185 spd = __set_page_dirty_buffers;
1186#endif
1187 return (*spd)(page);
1da177e4 1188 }
4741c9fd
AM
1189 if (!PageDirty(page)) {
1190 if (!TestSetPageDirty(page))
1191 return 1;
1192 }
1da177e4
LT
1193 return 0;
1194}
1195EXPORT_SYMBOL(set_page_dirty);
1196
1197/*
1198 * set_page_dirty() is racy if the caller has no reference against
1199 * page->mapping->host, and if the page is unlocked. This is because another
1200 * CPU could truncate the page off the mapping and then free the mapping.
1201 *
1202 * Usually, the page _is_ locked, or the caller is a user-space process which
1203 * holds a reference on the inode by having an open file.
1204 *
1205 * In other cases, the page should be locked before running set_page_dirty().
1206 */
1207int set_page_dirty_lock(struct page *page)
1208{
1209 int ret;
1210
db37648c 1211 lock_page_nosync(page);
1da177e4
LT
1212 ret = set_page_dirty(page);
1213 unlock_page(page);
1214 return ret;
1215}
1216EXPORT_SYMBOL(set_page_dirty_lock);
1217
1da177e4
LT
1218/*
1219 * Clear a page's dirty flag, while caring for dirty memory accounting.
1220 * Returns true if the page was previously dirty.
1221 *
1222 * This is for preparing to put the page under writeout. We leave the page
1223 * tagged as dirty in the radix tree so that a concurrent write-for-sync
1224 * can discover it via a PAGECACHE_TAG_DIRTY walk. The ->writepage
1225 * implementation will run either set_page_writeback() or set_page_dirty(),
1226 * at which stage we bring the page's dirty flag and radix-tree dirty tag
1227 * back into sync.
1228 *
1229 * This incoherency between the page's dirty flag and radix-tree tag is
1230 * unfortunate, but it only exists while the page is locked.
1231 */
1232int clear_page_dirty_for_io(struct page *page)
1233{
1234 struct address_space *mapping = page_mapping(page);
1235
79352894
NP
1236 BUG_ON(!PageLocked(page));
1237
fe3cba17 1238 ClearPageReclaim(page);
7658cc28
LT
1239 if (mapping && mapping_cap_account_dirty(mapping)) {
1240 /*
1241 * Yes, Virginia, this is indeed insane.
1242 *
1243 * We use this sequence to make sure that
1244 * (a) we account for dirty stats properly
1245 * (b) we tell the low-level filesystem to
1246 * mark the whole page dirty if it was
1247 * dirty in a pagetable. Only to then
1248 * (c) clean the page again and return 1 to
1249 * cause the writeback.
1250 *
1251 * This way we avoid all nasty races with the
1252 * dirty bit in multiple places and clearing
1253 * them concurrently from different threads.
1254 *
1255 * Note! Normally the "set_page_dirty(page)"
1256 * has no effect on the actual dirty bit - since
1257 * that will already usually be set. But we
1258 * need the side effects, and it can help us
1259 * avoid races.
1260 *
1261 * We basically use the page "master dirty bit"
1262 * as a serialization point for all the different
1263 * threads doing their things.
7658cc28
LT
1264 */
1265 if (page_mkclean(page))
1266 set_page_dirty(page);
79352894
NP
1267 /*
1268 * We carefully synchronise fault handlers against
1269 * installing a dirty pte and marking the page dirty
1270 * at this point. We do this by having them hold the
1271 * page lock at some point after installing their
1272 * pte, but before marking the page dirty.
1273 * Pages are always locked coming in here, so we get
1274 * the desired exclusion. See mm/memory.c:do_wp_page()
1275 * for more comments.
1276 */
7658cc28 1277 if (TestClearPageDirty(page)) {
8c08540f 1278 dec_zone_page_state(page, NR_FILE_DIRTY);
c9e51e41
PZ
1279 dec_bdi_stat(mapping->backing_dev_info,
1280 BDI_RECLAIMABLE);
7658cc28 1281 return 1;
1da177e4 1282 }
7658cc28 1283 return 0;
1da177e4 1284 }
7658cc28 1285 return TestClearPageDirty(page);
1da177e4 1286}
58bb01a9 1287EXPORT_SYMBOL(clear_page_dirty_for_io);
1da177e4
LT
1288
1289int test_clear_page_writeback(struct page *page)
1290{
1291 struct address_space *mapping = page_mapping(page);
1292 int ret;
1293
1294 if (mapping) {
69cb51d1 1295 struct backing_dev_info *bdi = mapping->backing_dev_info;
1da177e4
LT
1296 unsigned long flags;
1297
19fd6231 1298 spin_lock_irqsave(&mapping->tree_lock, flags);
1da177e4 1299 ret = TestClearPageWriteback(page);
69cb51d1 1300 if (ret) {
1da177e4
LT
1301 radix_tree_tag_clear(&mapping->page_tree,
1302 page_index(page),
1303 PAGECACHE_TAG_WRITEBACK);
e4ad08fe 1304 if (bdi_cap_account_writeback(bdi)) {
69cb51d1 1305 __dec_bdi_stat(bdi, BDI_WRITEBACK);
04fbfdc1
PZ
1306 __bdi_writeout_inc(bdi);
1307 }
69cb51d1 1308 }
19fd6231 1309 spin_unlock_irqrestore(&mapping->tree_lock, flags);
1da177e4
LT
1310 } else {
1311 ret = TestClearPageWriteback(page);
1312 }
d688abf5
AM
1313 if (ret)
1314 dec_zone_page_state(page, NR_WRITEBACK);
1da177e4
LT
1315 return ret;
1316}
1317
1318int test_set_page_writeback(struct page *page)
1319{
1320 struct address_space *mapping = page_mapping(page);
1321 int ret;
1322
1323 if (mapping) {
69cb51d1 1324 struct backing_dev_info *bdi = mapping->backing_dev_info;
1da177e4
LT
1325 unsigned long flags;
1326
19fd6231 1327 spin_lock_irqsave(&mapping->tree_lock, flags);
1da177e4 1328 ret = TestSetPageWriteback(page);
69cb51d1 1329 if (!ret) {
1da177e4
LT
1330 radix_tree_tag_set(&mapping->page_tree,
1331 page_index(page),
1332 PAGECACHE_TAG_WRITEBACK);
e4ad08fe 1333 if (bdi_cap_account_writeback(bdi))
69cb51d1
PZ
1334 __inc_bdi_stat(bdi, BDI_WRITEBACK);
1335 }
1da177e4
LT
1336 if (!PageDirty(page))
1337 radix_tree_tag_clear(&mapping->page_tree,
1338 page_index(page),
1339 PAGECACHE_TAG_DIRTY);
f446daae
JK
1340 radix_tree_tag_clear(&mapping->page_tree,
1341 page_index(page),
1342 PAGECACHE_TAG_TOWRITE);
19fd6231 1343 spin_unlock_irqrestore(&mapping->tree_lock, flags);
1da177e4
LT
1344 } else {
1345 ret = TestSetPageWriteback(page);
1346 }
d688abf5
AM
1347 if (!ret)
1348 inc_zone_page_state(page, NR_WRITEBACK);
1da177e4
LT
1349 return ret;
1350
1351}
1352EXPORT_SYMBOL(test_set_page_writeback);
1353
1354/*
00128188 1355 * Return true if any of the pages in the mapping are marked with the
1da177e4
LT
1356 * passed tag.
1357 */
1358int mapping_tagged(struct address_space *mapping, int tag)
1359{
1da177e4 1360 int ret;
00128188 1361 rcu_read_lock();
1da177e4 1362 ret = radix_tree_tagged(&mapping->page_tree, tag);
00128188 1363 rcu_read_unlock();
1da177e4
LT
1364 return ret;
1365}
1366EXPORT_SYMBOL(mapping_tagged);