git.proxmox.com Git - mirror_ubuntu-artful-kernel.git / blame - fs/fs-writeback.c
commit: "writeback: implement backing_dev_info->tot_write_bandwidth"

/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes. ie: data writeback. Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include <linux/device.h>
#include "internal.h"

/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))

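/*
 * Worked example (annotation, not in the original file): with 4KB
 * pages PAGE_CACHE_SHIFT is 12, so MIN_WRITEBACK_PAGES evaluates to
 * 4096UL >> (12 - 10) = 1024 pages, and 1024 pages * 4KB = 4MB.
 */
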
/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};

/*
 * If an inode is constantly having its pages dirtied, but then the
 * updates stop dirtytime_expire_interval seconds in the past, it's
 * possible for the worst case time between when an inode has its
 * timestamps updated and when they finally get written out to be two
 * dirtytime_expire_intervals. We set the default to 12 hours (in
 * seconds), which means most of the time inodes will have their
 * timestamps written to disk after 12 hours, but in the worst case a
 * few inodes might not have their timestamps updated for 24 hours.
 */
unsigned int dirtytime_expire_interval = 12 * 60 * 60;

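/*
 * Worked timeline for the bound above (annotation, not in the original
 * file): an expiry pass runs at t=0 and an inode's timestamps are
 * dirtied just after it. The inode only counts as expired once it is
 * dirtytime_expire_interval old, so the pass at t=12h skips it, and it
 * is finally picked up by the pass at t=24h, two intervals after the
 * update.
 */
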
/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(WB_writeback_running, &bdi->wb.state);
}
EXPORT_SYMBOL(writeback_in_progress);

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);

static bool wb_io_lists_populated(struct bdi_writeback *wb)
{
	if (wb_has_dirty_io(wb)) {
		return false;
	} else {
		set_bit(WB_has_dirty_io, &wb->state);
		atomic_long_add(wb->avg_write_bandwidth,
				&wb->bdi->tot_write_bandwidth);
		return true;
	}
}

static void wb_io_lists_depopulated(struct bdi_writeback *wb)
{
	if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
	    list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
		clear_bit(WB_has_dirty_io, &wb->state);
		atomic_long_sub(wb->avg_write_bandwidth,
				&wb->bdi->tot_write_bandwidth);
	}
}

/**
 * inode_wb_list_move_locked - move an inode onto a bdi_writeback IO list
 * @inode: inode to be moved
 * @wb: target bdi_writeback
 * @head: one of @wb->b_{dirty|io|more_io}
 *
 * Move @inode->i_wb_list to @head of @wb and set %WB_has_dirty_io.
 * Returns %true if @inode is the first occupant of the !dirty_time IO
 * lists; otherwise, %false.
 */
static bool inode_wb_list_move_locked(struct inode *inode,
				      struct bdi_writeback *wb,
				      struct list_head *head)
{
	assert_spin_locked(&wb->list_lock);

	list_move(&inode->i_wb_list, head);

	/* dirty_time doesn't count as dirty_io until expiration */
	if (head != &wb->b_dirty_time)
		return wb_io_lists_populated(wb);

	wb_io_lists_depopulated(wb);
	return false;
}

/**
 * inode_wb_list_del_locked - remove an inode from its bdi_writeback IO list
 * @inode: inode to be removed
 * @wb: bdi_writeback @inode is being removed from
 *
 * Remove @inode which may be on one of @wb->b_{dirty|io|more_io} lists and
 * clear %WB_has_dirty_io if all are empty afterwards.
 */
static void inode_wb_list_del_locked(struct inode *inode,
				     struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);

	list_del_init(&inode->i_wb_list);
	wb_io_lists_depopulated(wb);
}

static void wb_wakeup(struct bdi_writeback *wb)
{
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		mod_delayed_work(bdi_wq, &wb->dwork, 0);
	spin_unlock_bh(&wb->work_lock);
}

static void wb_queue_work(struct bdi_writeback *wb,
			  struct wb_writeback_work *work)
{
	trace_writeback_queue(wb->bdi, work);

	spin_lock_bh(&wb->work_lock);
	if (!test_bit(WB_registered, &wb->state)) {
		if (work->done)
			complete(work->done);
		goto out_unlock;
	}
	list_add_tail(&work->list, &wb->work_list);
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
out_unlock:
	spin_unlock_bh(&wb->work_lock);
}

static void __wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
				 bool range_cyclic, enum wb_reason reason)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		trace_writeback_nowork(wb->bdi);
		wb_wakeup(wb);
		return;
	}

	work->sync_mode = WB_SYNC_NONE;
	work->nr_pages = nr_pages;
	work->range_cyclic = range_cyclic;
	work->reason = reason;

	wb_queue_work(wb, work);
}

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * inode_congested - test whether an inode is congested
 * @inode: inode to test for congestion
 * @cong_bits: mask of WB_[a]sync_congested bits to test
 *
 * Tests whether @inode is congested. @cong_bits is the mask of congestion
 * bits to test and the return value is the mask of set bits.
 *
 * If cgroup writeback is enabled for @inode, the congestion state is
 * determined by whether the cgwb (cgroup bdi_writeback) for the blkcg
 * associated with @inode is congested; otherwise, the root wb's congestion
 * state is used.
 */
int inode_congested(struct inode *inode, int cong_bits)
{
	if (inode) {
		struct bdi_writeback *wb = inode_to_wb(inode);
		if (wb)
			return wb_congested(wb, cong_bits);
	}

	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}
EXPORT_SYMBOL_GPL(inode_congested);

#endif	/* CONFIG_CGROUP_WRITEBACK */

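#if 0
/*
 * Illustrative caller (a sketch, not part of the original file): a
 * WB_SYNC_NONE path could back off from an inode whose wb is congested
 * for writes. Assumes the WB_async_congested bit from
 * linux/backing-dev-defs.h.
 */
static bool example_inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}
#endif
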
/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns; we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			 enum wb_reason reason)
{
	__wb_start_writeback(&bdi->wb, nr_pages, true, reason);
}

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for the given BDI
 *   some IO is happening if we are over the background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	wb_wakeup(&bdi->wb);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	struct bdi_writeback *wb = inode_to_wb(inode);

	spin_lock(&wb->list_lock);
	inode_wb_list_del_locked(inode, wb);
	spin_unlock(&wb->list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list. If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	inode_wb_list_move_locked(inode, wb, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	inode_wb_list_move_locked(inode, wb, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	inode->i_state &= ~I_SYNC;
	/* If inode is clean and unused, put it into LRU now... */
	inode_add_lru(inode);
	/* Waiters must see I_SYNC cleared before being woken up */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}

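/*
 * Worked example of the wraparound guard (annotation, not in the
 * original file): on 32-bit with HZ=100, jiffies wraps about every 497
 * days. A dirtied_when stuck just before the wrap appears to be in the
 * future once jiffies passes it, so time_after(dirtied_when, t) stays
 * true against any recent t; without the time_before_eq() check the
 * inode would never be treated as expired again.
 */
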
#define EXPIRE_DIRTY_ATIME 0x0001

/*
 * Move expired (dirtied before work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       int flags,
			       struct wb_writeback_work *work)
{
	unsigned long *older_than_this = NULL;
	unsigned long expire_time;
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	if ((flags & EXPIRE_DIRTY_ATIME) == 0)
		older_than_this = work->older_than_this;
	else if (!work->for_sync) {
		expire_time = jiffies - (dirtytime_expire_interval * HZ);
		older_than_this = &expire_time;
	}
	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
		if (flags & EXPIRE_DIRTY_ATIME)
			set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state);
		if (sb_is_blkdev_sb(inode->i_sb))
			continue;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;

	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
	moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
				     EXPIRE_DIRTY_ATIME, work);
	if (moved)
		wb_io_lists_populated(wb);
	trace_writeback_queue_io(wb, work, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
		trace_writeback_write_inode_start(inode, wbc);
		ret = inode->i_sb->s_op->write_inode(inode, wbc);
		trace_writeback_write_inode(inode, wbc);
		return ret;
	}
	return 0;
}

/*
 * Wait for writeback on an inode to complete. Called with i_lock held.
 * Caller must make sure inode cannot go away when we drop i_lock.
 */
static void __inode_wait_for_writeback(struct inode *inode)
	__releases(inode->i_lock)
	__acquires(inode->i_lock)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		__wait_on_bit(wqh, &wq, bit_wait,
			      TASK_UNINTERRUPTIBLE);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Wait for writeback on an inode to complete. Caller must have inode pinned.
 */
void inode_wait_for_writeback(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	__inode_wait_for_writeback(inode);
	spin_unlock(&inode->i_lock);
}

/*
 * Sleep until I_SYNC is cleared. This function must be called with i_lock
 * held and drops it. It is aimed for callers not holding any inode reference
 * so once i_lock is dropped, inode can go away.
 */
static void inode_sleep_on_writeback(struct inode *inode)
	__releases(inode->i_lock)
{
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	int sleep;

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	sleep = inode->i_state & I_SYNC;
	spin_unlock(&inode->i_lock);
	if (sleep)
		schedule();
	finish_wait(wqh, &wait);
}

/*
 * Find proper writeback list for the inode depending on its current state and
 * possibly also on changes of its state while we were doing writeback. Here we
 * handle things such as livelock prevention or fairness of writeback among
 * inodes. This function can be called only by the flusher thread - no one else
 * processes all inodes in writeback lists and requeueing inodes behind the
 * flusher thread's back can have unexpected consequences.
 */
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
			  struct writeback_control *wbc)
{
	if (inode->i_state & I_FREEING)
		return;

	/*
	 * Sync livelock prevention. Each inode is tagged and synced in one
	 * shot. If still dirty, it will be redirty_tail()'ed below. Update
	 * the dirty time to prevent enqueue and sync it again.
	 */
	if ((inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
		inode->dirtied_when = jiffies;

	if (wbc->pages_skipped) {
		/*
		 * writeback is not making progress due to locked
		 * buffers. Skip this inode for now.
		 */
		redirty_tail(inode, wb);
		return;
	}

	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		/*
		 * We didn't write back all the pages. nfs_writepages()
		 * sometimes bales out without doing anything.
		 */
		if (wbc->nr_to_write <= 0) {
			/* Slice used up. Queue for next turn. */
			requeue_io(inode, wb);
		} else {
			/*
			 * Writeback blocked by something other than
			 * congestion. Delay the inode for some time to
			 * avoid spinning on the CPU (100% iowait)
			 * retrying writeback of the dirty page/inode
			 * that cannot be performed immediately.
			 */
			redirty_tail(inode, wb);
		}
	} else if (inode->i_state & I_DIRTY) {
		/*
		 * Filesystems can dirty the inode during writeback operations,
		 * such as delayed allocation during submission or metadata
		 * updates after data IO completion.
		 */
		redirty_tail(inode, wb);
	} else if (inode->i_state & I_DIRTY_TIME) {
		inode->dirtied_when = jiffies;
		inode_wb_list_move_locked(inode, wb, &wb->b_dirty_time);
	} else {
		/* The inode is clean. Remove from writeback lists. */
		inode_wb_list_del_locked(inode, wb);
	}
}

/*
 * Write out an inode and its dirty pages. Do not update the writeback list
 * linkage. That is left to the caller. The caller is also responsible for
 * setting I_SYNC flag and calling inode_sync_complete() to clear it.
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	WARN_ON(!(inode->i_state & I_SYNC));

	trace_writeback_single_inode_start(inode, wbc, nr_to_write);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion. We don't do it for sync(2) writeback because it has a
	 * separate, external IO completion path and ->sync_fs for guaranteeing
	 * inode metadata is written back correctly.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);

	dirty = inode->i_state & I_DIRTY;
	if (inode->i_state & I_DIRTY_TIME) {
		if ((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) ||
		    unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
		    unlikely(time_after(jiffies,
					(inode->dirtied_time_when +
					 dirtytime_expire_interval * HZ)))) {
			dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
			trace_writeback_lazytime(inode);
		}
	} else
		inode->i_state &= ~I_DIRTY_TIME_EXPIRED;
	inode->i_state &= ~dirty;

	/*
	 * Paired with smp_mb() in __mark_inode_dirty(). This allows
	 * __mark_inode_dirty() to test i_state without grabbing i_lock -
	 * either they see the I_DIRTY bits cleared or we see the dirtied
	 * inode.
	 *
	 * I_DIRTY_PAGES is always cleared together above even if @mapping
	 * still has dirty pages. The flag is reinstated after smp_mb() if
	 * necessary. This guarantees that either __mark_inode_dirty()
	 * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
	 */
	smp_mb();

	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		inode->i_state |= I_DIRTY_PAGES;

	spin_unlock(&inode->i_lock);

	if (dirty & I_DIRTY_TIME)
		mark_inode_dirty_sync(inode);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & ~I_DIRTY_PAGES) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}

/*
 * Write out an inode's dirty pages. Either the caller has an active reference
 * on the inode or the inode has I_WILL_FREE set.
 *
 * This function is designed to be called for writing back one inode, e.g.
 * directly from filesystem code. The flusher thread uses
 * __writeback_single_inode() instead and does more profound writeback list
 * handling in writeback_sb_inodes().
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	int ret = 0;

	spin_lock(&inode->i_lock);
	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out;
		/*
		 * It's a data-integrity sync. We must wait. Since callers hold
		 * inode reference or inode has I_WILL_FREE set, it cannot go
		 * away under us.
		 */
		__inode_wait_for_writeback(inode);
	}
	WARN_ON(inode->i_state & I_SYNC);
	/*
	 * Skip inode if it is clean and we have no outstanding writeback in
	 * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
	 * function since flusher thread may be doing for example sync in
	 * parallel and if we move the inode, it could get skipped. So here we
	 * make sure inode is on some writeback list and leave it there unless
	 * we have completely cleaned the inode.
	 */
	if (!(inode->i_state & I_DIRTY_ALL) &&
	    (wbc->sync_mode != WB_SYNC_ALL ||
	     !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
		goto out;
	inode->i_state |= I_SYNC;
	spin_unlock(&inode->i_lock);

	ret = __writeback_single_inode(inode, wbc);

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	/*
	 * If inode is clean, remove it from writeback lists. Otherwise don't
	 * touch it. See comment above for explanation.
	 */
	if (!(inode->i_state & I_DIRTY_ALL))
		inode_wb_list_del_locked(inode, wb);
	spin_unlock(&wb->list_lock);
	inode_sync_complete(inode);
out:
	spin_unlock(&inode->i_lock);
	return ret;
}

static long writeback_chunk_size(struct bdi_writeback *wb,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                  (quickly) tag currently dirty pages
	 *                  (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(wb->avg_write_bandwidth / 2,
			    global_dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}

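/*
 * Worked example (annotation, not in the original file): a device
 * averaging 100MB/s writes 25600 4KB pages/s, so avg_write_bandwidth/2
 * is 12800 pages. If that value survives the two min() clamps, then
 * round_down(12800 + 1024, 1024) = 13312 pages (~52MB). The
 * +MIN_WRITEBACK_PAGES/round_down pair snaps the clamped value to a
 * multiple of MIN_WRITEBACK_PAGES, so small positive values still get
 * one full 1024-page chunk.
 */
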
/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode = work->sync_mode,
		.tagged_writepages = work->tagged_writepages,
		.for_kupdate = work->for_kupdate,
		.for_background = work->for_background,
		.for_sync = work->for_sync,
		.range_cyclic = work->range_cyclic,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;	/* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed; the
		 * first kind does not need periodic writeout yet, and for the
		 * latter kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
			/*
			 * If this inode is locked for writeback and we are not
			 * doing writeback-for-data-integrity, move it to
			 * b_more_io so that writeback can proceed with the
			 * other inodes on s_io.
			 *
			 * We'll have another go at writing back this inode
			 * when we completed a full scan of b_io.
			 */
			spin_unlock(&inode->i_lock);
			requeue_io(inode, wb);
			trace_writeback_sb_inodes_requeue(inode);
			continue;
		}
		spin_unlock(&wb->list_lock);

		/*
		 * We already requeued the inode if it had I_SYNC set and we
		 * are doing WB_SYNC_NONE writeback. So this catches only the
		 * WB_SYNC_ALL case.
		 */
		if (inode->i_state & I_SYNC) {
			/* Wait for I_SYNC. This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			/* Inode may be gone, start again */
			spin_lock(&wb->list_lock);
			continue;
		}
		inode->i_state |= I_SYNC;
		spin_unlock(&inode->i_lock);

		write_chunk = writeback_chunk_size(wb, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		/*
		 * We use I_SYNC to pin the inode in memory. While it is set
		 * evict_inode() will wait so the inode cannot be freed.
		 */
		__writeback_single_inode(inode, &wbc);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_DIRTY_ALL))
			wrote++;
		requeue_inode(inode, wb, &wbc);
		inode_sync_complete(inode);
		spin_unlock(&inode->i_lock);
		cond_resched_lock(&wb->list_lock);
		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}

static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!trylock_super(sb)) {
			/*
			 * trylock_super() may fail consistently due to
			 * s_umount being grabbed by someone else. Don't use
			 * requeue_io() to avoid busy retrying the inode/sb.
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		up_read(&sb->s_umount);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}

static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages = nr_pages,
		.sync_mode = WB_SYNC_NONE,
		.range_cyclic = 1,
		.reason = reason,
	};

	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);

	return nr_pages - work.nr_pages;
}

static bool over_bground_thresh(struct bdi_writeback *wb)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	if (global_page_state(NR_FILE_DIRTY) +
	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
		return true;

	if (wb_stat(wb, WB_RECLAIMABLE) > wb_dirty_limit(wb, background_thresh))
		return true;

	return false;
}

/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */
static void wb_update_bandwidth(struct bdi_writeback *wb,
				unsigned long start_time)
{
	__wb_update_bandwidth(wb, 0, 0, 0, 0, 0, start_time);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space. So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval. But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write. So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh(wb))
			break;

		/*
		 * Kupdate and background works are special and we want to
		 * include all inodes that need writing. Livelock avoidance is
		 * handled by these works yielding to any other work so we are
		 * safe.
		 */
		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
		} else if (work->for_background)
			oldest_jif = jiffies;

		trace_writeback_start(wb->bdi, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb->bdi, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as we made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io)) {
			trace_writeback_wait(wb->bdi, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			spin_unlock(&wb->list_lock);
			/* This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			spin_lock(&wb->list_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return nr_pages - work->nr_pages;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&wb->work_lock);
	if (!list_empty(&wb->work_list)) {
		work = list_entry(wb->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&wb->work_lock);
	return work;
}

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh(wb)) {
		struct wb_writeback_work work = {
			.nr_pages = LONG_MAX,
			.sync_mode = WB_SYNC_NONE,
			.for_background = 1,
			.range_cyclic = 1,
			.reason = WB_REASON_BACKGROUND,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages = nr_pages,
			.sync_mode = WB_SYNC_NONE,
			.for_kupdate = 1,
			.range_cyclic = 1,
			.reason = WB_REASON_PERIODIC,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
static long wb_do_writeback(struct bdi_writeback *wb)
{
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(WB_writeback_running, &wb->state);
	while ((work = get_next_work_item(wb)) != NULL) {

		trace_writeback_exec(wb->bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(WB_writeback_running, &wb->state);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * reschedules periodically and does kupdated style flushing.
 */
void wb_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(to_delayed_work(work),
						struct bdi_writeback, dwork);
	long pages_written;

	set_worker_desc("flush-%s", dev_name(wb->bdi->dev));
	current->flags |= PF_SWAPWRITE;

	if (likely(!current_is_workqueue_rescuer() ||
		   !test_bit(WB_registered, &wb->state))) {
		/*
		 * The normal path. Keep writing back @wb until its
		 * work_list is empty. Note that this path is also taken
		 * if @wb is shutting down even when we're running off the
		 * rescuer as work_list needs to be drained.
		 */
		do {
			pages_written = wb_do_writeback(wb);
			trace_writeback_pages_written(pages_written);
		} while (!list_empty(&wb->work_list));
	} else {
		/*
		 * bdi_wq can't get enough workers and we're running off
		 * the emergency worker. Don't hog it. Hopefully, 1024 is
		 * enough for efficient IO.
		 */
		pages_written = writeback_inodes_wb(wb, 1024,
						    WB_REASON_FORKER_THREAD);
		trace_writeback_pages_written(pages_written);
	}

	if (!list_empty(&wb->work_list))
		mod_delayed_work(bdi_wq, &wb->dwork, 0);
	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
		wb_wakeup_delayed(wb);

	current->flags &= ~PF_SWAPWRITE;
}

/*
 * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	if (!nr_pages)
		nr_pages = get_nr_dirty_pages();

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__wb_start_writeback(&bdi->wb, nr_pages, false, reason);
	}
	rcu_read_unlock();
}

/*
 * Wake up bdi's periodically to make sure dirtytime inodes get
 * written back periodically. We deliberately do *not* check the
 * b_dirtytime list in wb_has_dirty_io(), since this would cause the
 * kernel to be constantly waking up once there are any dirtytime
 * inodes on the system. So instead we define a separate delayed work
 * function which gets called much more rarely. (By default, only
 * once every 12 hours.)
 *
 * If there is any other write activity going on in the file system,
 * this function won't be necessary. But if the only thing that has
 * happened on the file system is a dirtytime inode caused by an atime
 * update, we need this infrastructure below to make sure that inode
 * eventually gets pushed out to disk.
 */
static void wakeup_dirtytime_writeback(struct work_struct *w);
static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback);

static void wakeup_dirtytime_writeback(struct work_struct *w)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (list_empty(&bdi->wb.b_dirty_time))
			continue;
		wb_wakeup(&bdi->wb);
	}
	rcu_read_unlock();
	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
}

static int __init start_dirtytime_writeback(void)
{
	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
	return 0;
}
__initcall(start_dirtytime_writeback);

int dirtytime_interval_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		mod_delayed_work(system_wq, &dirtytime_work, 0);
	return ret;
}

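#if 0
/*
 * Sketch of the matching sysctl hookup (not part of this file; the real
 * table entry lives elsewhere, e.g. kernel/sysctl.c, and the procname
 * here is an assumption):
 */
static struct ctl_table example_dirtytime_table[] = {
	{
		.procname	= "dirtytime_expire_seconds",
		.data		= &dirtytime_expire_interval,
		.maxlen		= sizeof(dirtytime_expire_interval),
		.mode		= 0644,
		.proc_handler	= dirtytime_interval_handler,
	},
	{ }
};
#endif
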
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (e.g. I_DIRTY_SYNC)
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages. This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;
	int dirtytime;

	trace_writeback_mark_inode_dirty(inode, flags);

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_TIME)) {
		trace_writeback_dirty_inode_start(inode, flags);

		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);

		trace_writeback_dirty_inode(inode, flags);
	}
	if (flags & I_DIRTY_INODE)
		flags &= ~I_DIRTY_TIME;
	dirtytime = flags & I_DIRTY_TIME;

	/*
	 * Paired with smp_mb() in __writeback_single_inode() for the
	 * following lockless i_state test. See there for details.
	 */
	smp_mb();

	if (((inode->i_state & flags) == flags) ||
	    (dirtytime && (inode->i_state & I_DIRTY_INODE)))
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if (dirtytime && (inode->i_state & I_DIRTY_INODE))
		goto out_unlock_inode;
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode_attach_wb(inode, NULL);

		if (flags & I_DIRTY_INODE)
			inode->i_state &= ~I_DIRTY_TIME;
		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list. Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			struct list_head *dirty_list;
			bool wakeup_bdi = false;

			bdi = inode_to_bdi(inode);

			spin_unlock(&inode->i_lock);
			spin_lock(&bdi->wb.list_lock);

			WARN(bdi_cap_writeback_dirty(bdi) &&
			     !test_bit(WB_registered, &bdi->wb.state),
			     "bdi-%s not registered\n", bdi->name);

			inode->dirtied_when = jiffies;
			if (dirtytime)
				inode->dirtied_time_when = jiffies;

			if (inode->i_state & (I_DIRTY_INODE | I_DIRTY_PAGES))
				dirty_list = &bdi->wb.b_dirty;
			else
				dirty_list = &bdi->wb.b_dirty_time;

			wakeup_bdi = inode_wb_list_move_locked(inode, &bdi->wb,
							       dirty_list);

			spin_unlock(&bdi->wb.list_lock);
			trace_writeback_dirty_inode_enqueue(inode);

			/*
			 * If this is the first dirty inode for this bdi,
			 * we have to wake-up the corresponding bdi thread
			 * to make sure background write-back happens
			 * later.
			 */
			if (bdi_cap_writeback_dirty(bdi) && wakeup_bdi)
				wb_wakeup_delayed(&bdi->wb);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);

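/*
 * For reference (a sketch of the wrappers mentioned in the kernel-doc
 * above; in this era they are static inlines in include/linux/fs.h):
 *
 *	static inline void mark_inode_dirty(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY);
 *	}
 *
 *	static inline void mark_inode_dirty_sync(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY_SYNC);
 *	}
 */
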
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_sb_list_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write them out.
	 * In that case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock. We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb,
			    unsigned long nr,
			    enum wb_reason reason)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb = sb,
		.sync_mode = WB_SYNC_NONE,
		.tagged_writepages = 1,
		.done = &done,
		.nr_pages = nr,
		.reason = reason,
	};

	if (sb->s_bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));
	wb_queue_work(&sb->s_bdi->wb, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);

/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: the reason of writeback
 *
 * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int try_to_writeback_inodes_sb_nr(struct super_block *sb,
				  unsigned long nr,
				  enum wb_reason reason)
{
	if (writeback_in_progress(sb->s_bdi))
		return 1;

	if (!down_read_trylock(&sb->s_umount))
		return 0;

	writeback_inodes_sb_nr(sb, nr, reason);
	up_read(&sb->s_umount);
	return 1;
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);

/**
 * try_to_writeback_inodes_sb - try to start writeback if none underway
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Implemented by try_to_writeback_inodes_sb_nr().
 * Returns 1 if writeback was started, 0 if not.
 */
int try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb);

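#if 0
/*
 * Illustrative caller (a sketch, not part of the original file): a
 * filesystem that wants dirty data flushed before retrying a failed
 * space allocation might kick opportunistic writeback like this; the
 * reason value is an assumption.
 */
static void example_flush_before_retry(struct super_block *sb)
{
	/* starts WB_SYNC_NONE writeback unless some is already underway */
	try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
}
#endif
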
/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb = sb,
		.sync_mode = WB_SYNC_ALL,
		.nr_pages = LONG_MAX,
		.range_cyclic = 0,
		.done = &done,
		.reason = WB_REASON_SYNC,
		.for_sync = 1,
	};

	/* Nothing to do? */
	if (sb->s_bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	wb_queue_work(&sb->s_bdi->wb, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	return writeback_single_inode(inode, wb, &wbc);
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk. It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
}
EXPORT_SYMBOL(sync_inode);

/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
	struct writeback_control wbc = {
		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.nr_to_write = 0, /* metadata-only */
	};

	return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);
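#if 0
/*
 * Illustrative sketch (not part of the original file): a minimal
 * ->fsync() built on sync_inode_metadata(), flushing data first and
 * then the inode itself unless this is a datasync.
 */
static int example_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;

	/* write and wait on the data pages in the requested range */
	err = filemap_write_and_wait_range(file->f_mapping, start, end);
	if (!err && !datasync)
		err = sync_inode_metadata(inode, 1);
	return err;
}
#endif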