/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			f2fs_release_crypto_ctx(bio->bi_private);
		} else {
			f2fs_end_io_crypto_work(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

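/*
 * Write-side completion: on error, the page is re-dirtied, AS_EIO is set
 * on its mapping, and checkpointing is stopped, since a lost write would
 * make the current checkpoint inconsistent. When the last in-flight
 * writeback page completes, the checkpoint procedure waiting on
 * sbi->cp_wait is woken up.
 */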
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		f2fs_restore_and_release_control_page(&page);

		if (unlikely(bio->bi_error)) {
			set_page_dirty(page);
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}

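/*
 * Flush whatever bio is currently being merged for the given page type.
 * Writeback paths call, for example, f2fs_submit_merged_bio(sbi, DATA,
 * WRITE) to push any partially filled data bio to disk. For META_FLUSH,
 * the request is upgraded to a flush (plus FUA unless the nobarrier
 * mount option is set) so the checkpoint reaches stable storage.
 */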
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}

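/*
 * Merged submission: rather than issuing one bio per page, pages headed
 * to consecutive block addresses with the same rw flags are accumulated
 * into the per-type io->bio and submitted as a single request. Any
 * discontiguity, or a change of rw flags, forces the pending bio to be
 * submitted first.
 */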
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, fio->blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->blk_addr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	dn->data_blkaddr = NEW_ADDR;
	set_data_blkaddr(dn);
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

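/*
 * Resolve the block address for a file offset, consulting the extent
 * cache first: a hit avoids walking the node tree entirely, while a
 * miss falls back to f2fs_reserve_block(), which reserves NEW_ADDR if
 * no block has been allocated yet.
 */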
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei;
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = rw,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr remains NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

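/*
 * Opportunistic lookup: return the cached page if it is already up to
 * date; otherwise issue a synchronous read and wait for the page lock
 * to clear before declaring the page valid.
 */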
struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) need to know whether this page exists.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op()
 * and f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage is released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		page = get_read_data_page(inode, index, READ_SYNC);
		if (IS_ERR(page))
			goto repeat;

		/* wait for read completion */
		lock_page(page);
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;
}

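/*
 * Allocate an on-disk block for the dnode's current offset (used by the
 * direct IO write path). A block already reserved as NEW_ADDR was
 * accounted for at reservation time, so the valid-block count is not
 * bumped again. Direct IO bypasses the extent cache, so the largest
 * cached extent covering this offset is dropped.
 */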
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));

	/* direct IO doesn't use extent cache to maximize the performance */
	f2fs_drop_largest_extent(dn->inode, fofs);

	return 0;
}

static void __allocate_data_blocks(struct inode *inode, loff_t offset,
							size_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	u64 start = F2FS_BYTES_TO_BLK(offset);
	u64 len = F2FS_BYTES_TO_BLK(count);
	bool allocated;
	u64 end_offset;

	while (len) {
		f2fs_balance_fs(sbi);
		f2fs_lock_op(sbi);

		/* When reading holes, we need its node page */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		if (get_dnode_of_data(&dn, start, ALLOC_NODE))
			goto out;

		allocated = false;
		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		while (dn.ofs_in_node < end_offset && len) {
			block_t blkaddr;

			if (unlikely(f2fs_cp_error(sbi)))
				goto sync_out;

			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
			if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
				if (__allocate_data_block(&dn))
					goto sync_out;
				allocated = true;
			}
			len--;
			start++;
			dn.ofs_in_node++;
		}

		if (allocated)
			sync_inode_page(&dn);

		f2fs_put_dnode(&dn);
		f2fs_unlock_op(sbi);
	}
	return;

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);
	return;
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	struct extent_info ei;
	bool allocated = false;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;

	if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

	if (create)
		f2fs_lock_op(F2FS_I_SB(inode));

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}

	if (dn.data_blkaddr == NEW_ADDR || dn.data_blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto put_out;
			}
			err = __allocate_data_block(&dn);
			if (err)
				goto put_out;
			allocated = true;
			map->m_flags = F2FS_MAP_NEW;
		} else {
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
					dn.data_blkaddr != NEW_ADDR) {
				if (flag == F2FS_GET_BLOCK_BMAP)
					err = -ENOENT;
				goto put_out;
			}

			/*
			 * preallocated unwritten block should be mapped
			 * for fiemap.
			 */
			if (dn.data_blkaddr == NEW_ADDR)
				map->m_flags = F2FS_MAP_UNWRITTEN;
		}
	}

	map->m_flags |= F2FS_MAP_MAPPED;
	map->m_pblk = dn.data_blkaddr;
	map->m_len = 1;

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > map->m_len) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

		if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
			if (create) {
				if (unlikely(f2fs_cp_error(sbi))) {
					err = -EIO;
					goto sync_out;
				}
				err = __allocate_data_block(&dn);
				if (err)
					goto sync_out;
				allocated = true;
				map->m_flags |= F2FS_MAP_NEW;
				blkaddr = dn.data_blkaddr;
			} else {
				/*
				 * we only merge preallocated unwritten blocks
				 * for fiemap.
				 */
				if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR)
					goto sync_out;
			}
		}

		/* Give more consecutive addresses for the readahead */
		if ((map->m_pblk != NEW_ADDR &&
				blkaddr == (map->m_pblk + ofs)) ||
				(map->m_pblk == NEW_ADDR &&
				blkaddr == NEW_ADDR)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			map->m_len++;
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(F2FS_I_SB(inode));
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

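/*
 * Bridge between the buffer_head interface expected by the generic
 * block helpers and f2fs_map_blocks(): the bh size is converted into a
 * block count on the way in, and the resulting extent is written back
 * into the bh state, block number, and size on the way out.
 */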
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = f2fs_map_blocks(inode, &map, create, flag);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag)
{
	return __get_data_block(inode, iblock, bh_result, create, flag);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	loff_t isize = i_size_read(inode);
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	bool past_eof = false, whole_file = false;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	if (len >= isize) {
		whole_file = true;
		len = isize;
	}

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);
next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk++;

		if (!past_eof && blk_to_logical(inode, start_blk) >= isize)
			past_eof = 1;

		if (past_eof && size) {
			flags |= FIEMAP_EXTENT_LAST;
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
		} else if (size) {
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
			size = 0;
		}

		/* if we have holes up to/past EOF then we're done */
		if (start_blk > last_blk || past_eof || ret)
			goto out;
	} else {
		if (start_blk > last_blk && !whole_file) {
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
			goto out;
		}

		/*
		 * if size != 0 then we know we already have an extent
		 * to add, so add it.
		 */
		if (size) {
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
			if (ret)
				goto out;
		}

		logical = blk_to_logical(inode, start_blk);
		phys = blk_to_logical(inode, map_bh.b_blocknr);
		size = map_bh.b_size;
		flags = 0;
		if (buffer_unwritten(&map_bh))
			flags = FIEMAP_EXTENT_UNWRITTEN;

		start_blk += logical_to_blk(inode, size);

		/*
		 * If we are past the EOF, then we need to make sure as
		 * soon as we find a hole that the last extent we found
		 * is marked with FIEMAP_EXTENT_LAST
		 */
		if (!past_eof && logical + size >= isize)
			past_eof = true;
	}
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}

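/*
 * Per-page flow of the readpages path below: reuse the previous
 * f2fs_map_blocks() result while it still covers the page, otherwise
 * map again; unmapped pages are zero-filled and unlocked right away,
 * while mapped pages are merged into one bio as long as their blocks
 * are physically contiguous, and the bio is resubmitted whenever
 * contiguity breaks.
 */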
/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change is that block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						page->index, GFP_KERNEL))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_CACHE_SIZE);
			SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO. Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			submit_bio(READ, bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct f2fs_crypto_ctx *ctx = NULL;

			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {
				struct page *cpage;

				ctx = f2fs_get_crypto_ctx(inode);
				if (IS_ERR(ctx))
					goto set_error_page;

				/* wait for the page to be moved by cleaning */
				cpage = find_lock_page(
						META_MAPPING(F2FS_I_SB(inode)),
						block_nr);
				if (cpage) {
					f2fs_wait_on_page_writeback(cpage,
									DATA);
					f2fs_put_page(cpage, 1);
				}
			}

			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					f2fs_release_crypto_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
			bio->bi_end_io = f2fs_read_end_io;
			bio->bi_private = ctx;
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			submit_bio(READ, bio);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			page_cache_release(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(READ, bio);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

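/*
 * Write one data page: look up its block address and choose between an
 * in-place update (IPU, rewriting the existing block when SSR allocation
 * makes that preferable and the data is not cold) and an out-of-place
 * write (OPU, allocating a new block and updating the node page and
 * extent cache). Encrypted regular files are encrypted into a bounce
 * page before either path.
 */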
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->blk_addr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		fio->encrypted_page = f2fs_encrypt(inode, fio->page);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
	 */
	if (unlikely(fio->blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		set_data_blkaddr(&dn);
		f2fs_update_extent_cache(&dn);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(&fio);
		goto done;
	}

	/* we should bypass data pages so that kworker jobs can proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/*
 * This function was copied from write_cache_pages from mm/page-writeback.c.
 * The major change is that it writes cold data pages in a separate step
 * from warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
			struct writeback_control *wbc, writepage_t writepage,
			void *data)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;
	int step = 0;

	pagevec_init(&pvec, 0);
next:
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (step == is_cold_data(page))
				goto continue_unlock;

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page, DATA);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					done_index = page->index + 1;
					done = 1;
					break;
				}
			}

			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (step < 1) {
		step++;
		goto next;
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	if (locked)
		mutex_unlock(&sbi->writepages);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		truncate_blocks(inode, inode->i_size, true);
	}
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	struct page *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			goto put_next;
		}
		err = f2fs_convert_inline_page(&dn, page);
		if (err)
			goto put_fail;
	}

	err = f2fs_get_block(&dn, index);
	if (err)
		goto put_fail;
put_next:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_wait_on_page_writeback(page, DATA);

	if (len == PAGE_CACHE_SIZE)
		goto out_update;
	if (PageUptodate(page))
		goto out_clear;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out_update;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
			.page = page,
			.encrypted_page = NULL,
		};
		err = f2fs_submit_page_bio(&fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}

		/* avoid symlink page */
		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			err = f2fs_decrypt_one(inode, page);
			if (err)
				goto fail;
		}
	}
out_update:
	SetPageUptodate(page);
out_clear:
	clear_cold_data(page);
	return 0;

put_fail:
	f2fs_put_dnode(&dn);
unlock_fail:
	f2fs_unlock_op(sbi);
fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

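/*
 * Direct IO requires both the file offset and the memory buffer to be
 * aligned to the filesystem block size; a misaligned request is
 * rejected with -EINVAL before any bio is built.
 */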
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			      loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

	if (iov_iter_rw(iter) == WRITE) {
		__allocate_data_blocks(inode, offset, count);
		if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
			err = -EIO;
			goto out;
		}
	}

	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
out:
	if (err < 0 && iov_iter_rw(iter) == WRITE)
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
			  unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}

	/* This is an atomic written page; keep it Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return;

	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomic written page; keep it Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	ClearPagePrivate(page);
	return 1;
}

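/*
 * Dirtying a data page: pages of an inode under an atomic write are
 * registered in an in-memory list instead of the normal dirty-page
 * accounting, so that they can later be committed or dropped as a
 * single unit.
 */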
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * This page has already been registered, so we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};