/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include "internal.h"

/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_writeback_running, &bdi->state);
}
EXPORT_SYMBOL(writeback_in_progress);
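
/*
 * Return the backing_dev_info that an inode's dirty pages will be written
 * against. For a block device inode this is the bdi of the underlying
 * device, taken from the inode's mapping, not the blockdev superblock's
 * s_bdi.
 */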
static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (sb_is_blkdev_sb(sb))
		return inode->i_mapping->backing_dev_info;

	return sb->s_bdi;
}
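
/* Map a writeback list head (i_wb_list) back to its containing inode. */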
static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>
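
/*
 * Queue a work item on the bdi's pending list and kick the flusher
 * workqueue so it gets processed without delay.
 */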
static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	list_add_tail(&work->list, &bdi->work_list);
	spin_unlock_bh(&bdi->wb_lock);

	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic, enum wb_reason reason)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		trace_writeback_nowork(bdi);
		mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->reason	= reason;

	bdi_queue_work(bdi, work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns; we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason)
{
	__bdi_start_writeback(bdi, nr_pages, true, reason);
}

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for the given BDI
 *   some IO is happening if we are over the background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	spin_lock(&bdi->wb.list_lock);
	list_del_init(&inode->i_wb_list);
	spin_unlock(&bdi->wb.list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	list_move(&inode->i_wb_list, &wb->b_more_io);
}
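
/*
 * Mark writeback on the inode as finished: clear I_SYNC and wake up any
 * waiters sleeping on it.
 */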
static void inode_sync_complete(struct inode *inode)
{
	inode->i_state &= ~I_SYNC;
	/* If inode is clean and unused, put it into LRU now... */
	inode_add_lru(inode);
	/* Waiters must see I_SYNC cleared before being woken up */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}
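
/*
 * Test whether @inode was dirtied after time @t, guarding against jiffies
 * wraparound on 32-bit systems (see the comment below).
 */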
static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}

/*
 * Move expired (dirtied before work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       struct wb_writeback_work *work)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (work->older_than_this &&
		    inode_dirtied_after(inode, *work->older_than_this))
			break;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
		if (sb_is_blkdev_sb(inode->i_sb))
			continue;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;
	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
	trace_writeback_queue_io(wb, work, moved);
}
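
/*
 * Write out the inode's metadata via the filesystem's ->write_inode()
 * method, if it has one, bracketed by tracepoints.
 */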
static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
		trace_writeback_write_inode_start(inode, wbc);
		ret = inode->i_sb->s_op->write_inode(inode, wbc);
		trace_writeback_write_inode(inode, wbc);
		return ret;
	}
	return 0;
}

/*
 * Wait for writeback on an inode to complete. Called with i_lock held.
 * Caller must make sure inode cannot go away when we drop i_lock.
 */
static void __inode_wait_for_writeback(struct inode *inode)
	__releases(inode->i_lock)
	__acquires(inode->i_lock)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Wait for writeback on an inode to complete. Caller must have inode pinned.
 */
void inode_wait_for_writeback(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	__inode_wait_for_writeback(inode);
	spin_unlock(&inode->i_lock);
}

/*
 * Sleep until I_SYNC is cleared. This function must be called with i_lock
 * held and drops it. It is aimed for callers not holding any inode reference
 * so once i_lock is dropped, inode can go away.
 */
static void inode_sleep_on_writeback(struct inode *inode)
	__releases(inode->i_lock)
{
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	int sleep;

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	sleep = inode->i_state & I_SYNC;
	spin_unlock(&inode->i_lock);
	if (sleep)
		schedule();
	finish_wait(wqh, &wait);
}

/*
 * Find proper writeback list for the inode depending on its current state and
 * possibly also change of its state while we were doing writeback.  Here we
 * handle things such as livelock prevention or fairness of writeback among
 * inodes. This function can only be called by the flusher thread - no one
 * else processes all inodes in writeback lists and requeueing inodes behind
 * the flusher thread's back can have unexpected consequences.
 */
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
			  struct writeback_control *wbc)
{
	if (inode->i_state & I_FREEING)
		return;

	/*
	 * Sync livelock prevention. Each inode is tagged and synced in one
	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
	 * the dirty time to prevent enqueue and sync it again.
	 */
	if ((inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
		inode->dirtied_when = jiffies;

	if (wbc->pages_skipped) {
		/*
		 * writeback is not making progress due to locked
		 * buffers. Skip this inode for now.
		 */
		redirty_tail(inode, wb);
		return;
	}

	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		/*
		 * We didn't write back all the pages.  nfs_writepages()
		 * sometimes bales out without doing anything.
		 */
		if (wbc->nr_to_write <= 0) {
			/* Slice used up. Queue for next turn. */
			requeue_io(inode, wb);
		} else {
			/*
			 * Writeback blocked by something other than
			 * congestion. Delay the inode for some time to
			 * avoid spinning on the CPU (100% iowait)
			 * retrying writeback of the dirty page/inode
			 * that cannot be performed immediately.
			 */
			redirty_tail(inode, wb);
		}
	} else if (inode->i_state & I_DIRTY) {
		/*
		 * Filesystems can dirty the inode during writeback operations,
		 * such as delayed allocation during submission or metadata
		 * updates after data IO completion.
		 */
		redirty_tail(inode, wb);
	} else {
		/* The inode is clean. Remove from writeback lists. */
		list_del_init(&inode->i_wb_list);
	}
}

/*
 * Write out an inode and its dirty pages. Do not update the writeback list
 * linkage. That is left to the caller. The caller is also responsible for
 * setting the I_SYNC flag and calling inode_sync_complete() to clear it.
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	WARN_ON(!(inode->i_state & I_SYNC));

	trace_writeback_single_inode_start(inode, wbc, nr_to_write);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);
	/* Clear I_DIRTY_PAGES if we've written out all dirty pages */
	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		inode->i_state &= ~I_DIRTY_PAGES;
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode->i_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}

/*
 * Write out an inode's dirty pages. Either the caller has an active reference
 * on the inode or the inode has I_WILL_FREE set.
 *
 * This function is designed to be called for writing back one inode at a
 * time, e.g. from within a filesystem. The flusher thread instead uses
 * __writeback_single_inode() and does more profound writeback list handling
 * in writeback_sb_inodes().
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	int ret = 0;

	spin_lock(&inode->i_lock);
	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out;
		/*
		 * It's a data-integrity sync. We must wait. Since callers hold
		 * inode reference or inode has I_WILL_FREE set, it cannot go
		 * away under us.
		 */
		__inode_wait_for_writeback(inode);
	}
	WARN_ON(inode->i_state & I_SYNC);
	/*
	 * Skip inode if it is clean. We don't want to mess with writeback
	 * lists in this function since flusher thread may be doing for example
	 * sync in parallel and if we move the inode, it could get skipped. So
	 * here we make sure inode is on some writeback list and leave it there
	 * unless we have completely cleaned the inode.
	 */
	if (!(inode->i_state & I_DIRTY))
		goto out;
	inode->i_state |= I_SYNC;
	spin_unlock(&inode->i_lock);

	ret = __writeback_single_inode(inode, wbc);

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	/*
	 * If inode is clean, remove it from writeback lists. Otherwise don't
	 * touch it. See comment above for explanation.
	 */
	if (!(inode->i_state & I_DIRTY))
		list_del_init(&inode->i_wb_list);
	spin_unlock(&wb->list_lock);
	inode_sync_complete(inode);
out:
	spin_unlock(&inode->i_lock);
	return ret;
}
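
/*
 * Compute how many pages one inode may write in a single slice, i.e. the
 * wbc.nr_to_write budget handed out by writeback_sb_inodes().
 */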
static long writeback_chunk_size(struct backing_dev_info *bdi,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                  (quickly) tag currently dirty pages
	 *                  (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(bdi->avg_write_bandwidth / 2,
			    global_dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}

/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;  /* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed, first
		 * kind does not need periodic writeout yet, and for the latter
		 * kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
			/*
			 * If this inode is locked for writeback and we are not
			 * doing writeback-for-data-integrity, move it to
			 * b_more_io so that writeback can proceed with the
			 * other inodes on s_io.
			 *
			 * We'll have another go at writing back this inode
			 * when we completed a full scan of b_io.
			 */
			spin_unlock(&inode->i_lock);
			requeue_io(inode, wb);
			trace_writeback_sb_inodes_requeue(inode);
			continue;
		}
		spin_unlock(&wb->list_lock);

		/*
		 * We already requeued the inode if it had I_SYNC set and we
		 * are doing WB_SYNC_NONE writeback. So this catches only the
		 * WB_SYNC_ALL case.
		 */
		if (inode->i_state & I_SYNC) {
			/* Wait for I_SYNC. This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			/* Inode may be gone, start again */
			spin_lock(&wb->list_lock);
			continue;
		}
		inode->i_state |= I_SYNC;
		spin_unlock(&inode->i_lock);

		write_chunk = writeback_chunk_size(wb->bdi, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		/*
		 * We use I_SYNC to pin the inode in memory. While it is set
		 * evict_inode() will wait so the inode cannot be freed.
		 */
		__writeback_single_inode(inode, &wbc);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_DIRTY))
			wrote++;
		requeue_inode(inode, wb, &wbc);
		inode_sync_complete(inode);
		spin_unlock(&inode->i_lock);
		cond_resched_lock(&wb->list_lock);
		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}

static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!grab_super_passive(sb)) {
			/*
			 * grab_super_passive() may fail consistently due to
			 * s_umount being grabbed by someone else. Don't use
			 * requeue_io() to avoid busy retrying the inode/sb.
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		drop_super(sb);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}

long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
		.reason		= reason,
	};

	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);

	return nr_pages - work.nr_pages;
}
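
/*
 * Check whether the global dirty counters or this bdi's reclaimable pages
 * exceed the background writeback threshold.
 */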
static bool over_bground_thresh(struct backing_dev_info *bdi)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	if (global_page_state(NR_FILE_DIRTY) +
	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
		return true;

	if (bdi_stat(bdi, BDI_RECLAIMABLE) >
				bdi_dirty_limit(bdi, background_thresh))
		return true;

	return false;
}

/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */
static void wb_update_bandwidth(struct bdi_writeback *wb,
				unsigned long start_time)
{
	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space. So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval. But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write. So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh(wb->bdi))
			break;

		/*
		 * Kupdate and background works are special and we want to
		 * include all inodes that need writing. Livelock avoidance is
		 * handled by these works yielding to any other work so we are
		 * safe.
		 */
		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
		} else if (work->for_background)
			oldest_jif = jiffies;

		trace_writeback_start(wb->bdi, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb->bdi, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as we made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io)) {
			trace_writeback_wait(wb->bdi, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			spin_unlock(&wb->list_lock);
			/* This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			spin_lock(&wb->list_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return nr_pages - work->nr_pages;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}
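
/*
 * Start unbounded background writeback if we are over the background dirty
 * threshold; wb_writeback() stops on its own once we drop back below it.
 */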
static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh(wb->bdi)) {
		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_BACKGROUND,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_PERIODIC,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(BDI_writeback_running, &wb->bdi->state);
	while ((work = get_next_work_item(bdi)) != NULL) {
		/*
		 * Override sync mode, in case we must wait for completion
		 * because this thread is exiting now.
		 */
		if (force_wait)
			work->sync_mode = WB_SYNC_ALL;

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(BDI_writeback_running, &wb->bdi->state);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * reschedules periodically and does kupdated style flushing.
 */
void bdi_writeback_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(to_delayed_work(work),
						struct bdi_writeback, dwork);
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	set_worker_desc("flush-%s", dev_name(bdi->dev));
	current->flags |= PF_SWAPWRITE;

	if (likely(!current_is_workqueue_rescuer() ||
		   list_empty(&bdi->bdi_list))) {
		/*
		 * The normal path.  Keep writing back @bdi until its
		 * work_list is empty.  Note that this path is also taken
		 * if @bdi is shutting down even when we're running off the
		 * rescuer as work_list needs to be drained.
		 */
		do {
			pages_written = wb_do_writeback(wb, 0);
			trace_writeback_pages_written(pages_written);
		} while (!list_empty(&bdi->work_list));
	} else {
		/*
		 * bdi_wq can't get enough workers and we're running off
		 * the emergency worker.  Don't hog it.  Hopefully, 1024 is
		 * enough for efficient IO.
		 */
		pages_written = writeback_inodes_wb(&bdi->wb, 1024,
						    WB_REASON_FORKER_THREAD);
		trace_writeback_pages_written(pages_written);
	}

	if (!list_empty(&bdi->work_list) ||
	    (wb_has_dirty_io(wb) && dirty_writeback_interval))
		queue_delayed_work(bdi_wq, &wb->dwork,
			msecs_to_jiffies(dirty_writeback_interval * 10));

	current->flags &= ~PF_SWAPWRITE;
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	if (!nr_pages) {
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false, reason);
	}
	rcu_read_unlock();
}
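
/*
 * Debugging helper for the block_dump sysctl: log which task dirtied which
 * inode to the kernel log.
 */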
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 * __mark_inode_dirty -	internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (e.g. I_DIRTY_SYNC)
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		trace_writeback_dirty_inode_start(inode, flags);

		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);

		trace_writeback_dirty_inode(inode, flags);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bool wakeup_bdi = false;
			bdi = inode_to_bdi(inode);

			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			spin_unlock(&inode->i_lock);
			spin_lock(&bdi->wb.list_lock);
			inode->dirtied_when = jiffies;
			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
			spin_unlock(&bdi->wb.list_lock);

			if (wakeup_bdi)
				bdi_wakeup_thread_delayed(bdi);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);
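
/*
 * Wait for writeback on all inodes of a superblock to complete; this is
 * the waiting half of sync_inodes_sb() data-integrity sync.
 */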
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_sb_list_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb,
			    unsigned long nr,
			    enum wb_reason reason)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb			= sb,
		.sync_mode		= WB_SYNC_NONE,
		.tagged_writepages	= 1,
		.done			= &done,
		.nr_pages		= nr,
		.reason			= reason,
	};

	if (sb->s_bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));
	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);

/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: the reason for writeback
 *
 * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int try_to_writeback_inodes_sb_nr(struct super_block *sb,
				  unsigned long nr,
				  enum wb_reason reason)
{
	if (writeback_in_progress(sb->s_bdi))
		return 1;

	if (!down_read_trylock(&sb->s_umount))
		return 0;

	writeback_inodes_sb_nr(sb, nr, reason);
	up_read(&sb->s_umount);
	return 1;
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);

/**
 * try_to_writeback_inodes_sb - try to start writeback if none underway
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Implemented via try_to_writeback_inodes_sb_nr().
 * Returns 1 if writeback was started, 0 if not.
 */
int try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb);

/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
		.reason		= WB_REASON_SYNC,
	};

	/* Nothing to do? */
	if (sb->s_bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	return writeback_single_inode(inode, wb, &wbc);
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
}
EXPORT_SYMBOL(sync_inode);

/**
 * sync_inode_metadata - write an inode to disk
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
	struct writeback_control wbc = {
		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
		.nr_to_write = 0, /* metadata-only */
	};

	return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);