/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>
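
/*
 * Return true if writeback of this page is guaranteed by checkpoint
 * (meta/node inodes, directory data, and cold data pages).
 */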
static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode) ||
			is_cold_data(page))
		return true;
	return false;
}
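
/*
 * Read completion callback: let fscrypt decrypt encrypted pages, then mark
 * each page up-to-date (or clear it on error) and unlock it.
 */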
static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
		f2fs_show_injection_info(FAULT_IO);
		bio->bi_error = -EIO;
	}
#endif

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			if (!PageUptodate(page))
				SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (IS_DUMMY_WRITTEN_PAGE(page)) {
			set_page_private(page, (unsigned long)NULL);
			ClearPagePrivate(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_error))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_error)) {
			mapping_set_error(page->mapping, -EIO);
			f2fs_stop_checkpoint(sbi, true);
		}
		dec_page_count(sbi, type);
		clear_cold_data(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}
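
/*
 * For a multi-device mount, map a global block address to the device that
 * owns it and rebase the address; if a bio is given, also point it at the
 * resolved bdev and sector.
 */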
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (FDEV(i).start_blk <= blk_addr &&
					FDEV(i).end_blk >= blk_addr) {
			blk_addr -= FDEV(i).start_blk;
			bdev = FDEV(i).bdev;
			break;
		}
	}
	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}
/* Return true if the bio's bdev is the same as blk_addr's target device. */
static bool __same_bdev(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	return f2fs_target_device(sbi, blk_addr, NULL) == bio->bi_bdev;
}
/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	f2fs_target_device(sbi, blk_addr, bio);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}
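
/*
 * Submit a bio, with two write-path quirks: on blkzoned mounts the current
 * plug is flushed so DATA/NODE writes stay strictly sequential, and when a
 * larger F2FS_IO_SIZE is configured a partially filled write bio is padded
 * with dummy pages up to the next IO-size boundary before submission.
 */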
static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
			current->plug && (type == DATA || type == NODE))
			blk_finish_plug(current->plug);

		if (type != DATA && type != NODE)
			goto submit_io;

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					GFP_NOIO | __GFP_ZERO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			SetPagePrivate(page);
			set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
			lock_page(page);
			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose the next block address chain.
		 * So, we need to do a checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
	submit_bio(bio);
}
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}
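
/*
 * Check whether the pending bio in @io already carries a page of @inode (or
 * of node @ino) at index @idx, to decide if a conditional flush must submit
 * the merged bio.
 */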
static bool __has_merged_page(struct f2fs_bio_info *io,
				struct inode *inode, nid_t ino, pgoff_t idx)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (idx != target->index)
			continue;

		if (inode && inode == target->mapping->host)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}
static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
				nid_t ino, pgoff_t idx, enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	bool ret;

	down_read(&io->io_rwsem);
	ret = __has_merged_page(io, inode, ino, idx);
	up_read(&io->io_rwsem);
	return ret;
}
static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				struct inode *inode, nid_t ino, pgoff_t idx,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	if (!__has_merged_page(io, inode, ino, idx))
		goto out;

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_META | REQ_PRIO;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
out:
	up_write(&io->io_rwsem);
}
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
								int rw)
{
	__f2fs_submit_merged_bio(sbi, NULL, 0, 0, type, rw);
}
void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, nid_t ino, pgoff_t idx,
				enum page_type type, int rw)
{
	if (has_merged_page(sbi, inode, ino, idx, type))
		__f2fs_submit_merged_bio(sbi, inode, ino, idx, type, rw);
}
void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);
}
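
/*
 * Example (illustrative only): a synchronous one-block read is issued by
 * filling an f2fs_io_info and calling f2fs_submit_page_bio(), as
 * get_read_data_page() does below:
 *
 *	struct f2fs_io_info fio = {
 *		.sbi = F2FS_I_SB(inode),
 *		.type = DATA,
 *		.op = REQ_OP_READ,
 *		.op_flags = 0,
 *		.encrypted_page = NULL,
 *	};
 *	fio.new_blkaddr = fio.old_blkaddr = blkaddr;
 *	fio.page = page;
 *	err = f2fs_submit_page_bio(&fio);
 */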
/*
 * Fill the locked page with data located at the block address.
 * The page is unlocked by the IO completion handler.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op));

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	__submit_bio(fio->sbi, bio, fio->type);
	return 0;
}
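
/*
 * Merged-bio write path: pages that are physically contiguous and share the
 * same op/op_flags are appended to the per-type pending bio under io_rwsem;
 * the pending bio is flushed only when merging is impossible.
 */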
int f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->op);
	struct page *bio_page;
	int err = 0;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	/* set submitted = 1 as a return value */
	fio->submitted = 1;

	if (!is_read)
		inc_page_count(sbi, WB_DATA_TYPE(bio_page));

	down_write(&io->io_rwsem);

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		if ((fio->type == DATA || fio->type == NODE) &&
				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
			err = -EAGAIN;
			if (!is_read)
				dec_page_count(sbi, WB_DATA_TYPE(bio_page));
			goto out_fail;
		}
		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
						BIO_MAX_PAGES, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
							PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);
out_fail:
	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
	return err;
}
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}
/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}
/* dn->ofs_in_node will be returned with an up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr =
			datablock_addr(dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}
/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei = {0,0,0};
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int op_flags, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0,0,0};
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR; see
	 * f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, 0, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}
/*
 * If it tries to access a hole, return an error.
 * The callers (functions in dir.c and GC) need to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, 0, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}
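
/*
 * Allocate one new data block for @dn through the segment allocator, record
 * it in the node page, and push i_size forward if the block extends the file.
 */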
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	pgoff_t fofs;
	blkcnt_t count = 1;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
						&sum, CURSEG_WARM_DATA);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
		f2fs_i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_SHIFT));
	return 0;
}
static inline bool __force_buffered_io(struct inode *inode, int rw)
{
	return ((f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) ||
			(rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
			F2FS_I_SB(inode)->s_ndevs);
}
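
/*
 * Preallocate all blocks covered by an in-flight write so the copy loop does
 * not need to allocate per page; buffered and direct writes use the PRE_AIO
 * and PRE_DIO mapping modes respectively.
 */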
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	int err = 0;

	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;

	map.m_next_pgofs = NULL;

	if (iocb->ki_flags & IOCB_DIRECT) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
		return f2fs_map_blocks(inode, &map, 1,
			__force_buffered_io(inode, WRITE) ?
				F2FS_GET_BLOCK_PRE_AIO :
				F2FS_GET_BLOCK_PRE_DIO);
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	return err;
}
/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to the blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to the blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei = {0,0,0};
	block_t blkaddr;

	if (!maxblocks)
		return 0;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

next_dnode:
	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	prealloc = 0;
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				err = __allocate_data_block(&dn);
				if (!err)
					set_inode_flag(inode, FI_APPEND_WRITE);
			}
			if (err)
				goto sync_out;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR)
				goto sync_out;
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	f2fs_put_dnode(&dn);

	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
	goto next_dnode;

sync_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}
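
/*
 * Bridge to the VFS get_block_t interface: run f2fs_map_blocks() and
 * translate the result into buffer_head state for bmap, fiemap and
 * blockdev_direct_IO callers.
 */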
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs)
{
	struct f2fs_map_blocks map;
	int err;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;

	err = f2fs_map_blocks(inode, &map, create, flag);
	if (!err) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = (u64)map.m_len << inode->i_blkbits;
	}
	return err;
}
static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs);
}
static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO, NULL);
}
static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL);
}
static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}
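
/* Report the file's extent mapping through the fiemap interface. */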
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	inode_lock(inode);

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;

		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
					F2FS_I_SB(inode)->max_file_blocks))
			goto prep_next;

		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}
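
/*
 * Allocate a read bio targeting @blkaddr; for encrypted regular files an
 * fscrypt context is attached as bi_private so f2fs_read_end_io() can
 * decrypt the data.
 */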
static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
							unsigned nr_pages)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct fscrypt_ctx *ctx = NULL;
	struct bio *bio;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		ctx = fscrypt_get_ctx(inode, GFP_NOFS);
		if (IS_ERR(ctx))
			return ERR_CAST(ctx);

		/* wait for the page to be moved by cleaning */
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
	}

	bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
	if (!bio) {
		if (ctx)
			fscrypt_release_ctx(ctx);
		return ERR_PTR(-ENOMEM);
	}
	f2fs_target_device(sbi, blkaddr, bio);
	bio->bi_end_io = f2fs_read_end_io;
	bio->bi_private = ctx;

	return bio;
}
/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change is that block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_last_entry(pages, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index,
						  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_SIZE);
			if (!PageUptodate(page))
				SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO. Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1 ||
			!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		if (bio == NULL) {
			bio = f2fs_grab_bio(inode, block_nr, nr_pages);
			if (IS_ERR(bio)) {
				bio = NULL;
				goto set_error_page;
			}
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}
static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}
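
/*
 * Write one data page: encrypt it first if needed, then either rewrite the
 * block in place (IPU) when SSR allocation favors it, or write to a newly
 * allocated block (OPU) and update the block address.
 */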
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		gfp_t gfp_flags = GFP_NOFS;

		/* wait for GCed encrypted page writeback */
		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
							fio->old_blkaddr);
retry_encrypt:
		fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
							PAGE_SIZE, 0,
							fio->page->index,
							gfp_flags);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			if (err == -ENOMEM) {
				/* flush pending ios and wait for a while */
				f2fs_flush_merged_bios(F2FS_I_SB(inode));
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				gfp_flags |= __GFP_NOFAIL;
				err = 0;
				goto retry_encrypt;
			}
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for the updated data.
	 */
	if (unlikely(fio->old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			!IS_ATOMIC_WRITTEN_PAGE(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(inode, FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(inode, FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}
static int __write_data_page(struct page *page, bool *submitted,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of the file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write the 0'th page, which holds the journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		goto out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		err = do_write_data_page(&fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0, 0))
		goto redirty_out;

	err = -EAGAIN;
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_write_inline_data(inode, page);
		if (!err)
			goto out;
	}
	f2fs_lock_op(sbi);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	if (F2FS_I(inode)->last_disk_size < psize)
		F2FS_I(inode)->last_disk_size = psize;
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_bio_cond(sbi, inode, 0, page->index,
						DATA, WRITE);
		remove_dirty_inode(inode);
		submitted = NULL;
	}

	unlock_page(page);
	f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
		submitted = NULL;
	}

	if (submitted)
		*submitted = fio.submitted;

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	if (!err)
		return AOP_WRITEPAGE_ACTIVATE;
	unlock_page(page);
	return err;
}

static int f2fs_write_data_page(struct page *page,
				struct writeback_control *wbc)
{
	return __write_data_page(page, NULL, wbc);
}
/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is making the write step of cold data pages separate from
 * warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	pgoff_t last_idx = ULONG_MAX;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec, 0);

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_data_page(page, &submitted, wbc);
			if (unlikely(ret)) {
				/*
				 * keep nr_to_write, since vfs uses this to
				 * get # of written pages.
				 */
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
					continue;
				}
				done_index = page->index + 1;
				done = 1;
				break;
			} else if (submitted) {
				last_idx = page->index;
			}

			if (--wbc->nr_to_write <= 0 &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (last_idx != ULONG_MAX)
		f2fs_submit_merged_bio_cond(F2FS_M_SB(mapping), mapping->host,
						0, last_idx, DATA, WRITE);

	return ret;
}
static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc);
	blk_finish_plug(&plug);
	/*
	 * if some pages were truncated, we cannot rely on its mapping->host
	 * to detect pending bios.
	 */

	remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}
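
/*
 * A failed write may have instantiated pagecache and blocks beyond i_size;
 * truncate both back so no stale allocation is left behind.
 */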
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
	}
}
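
/*
 * Resolve the on-disk block backing the page being written: handle inline
 * data, consult the extent cache, and fall back to a dnode lookup, taking
 * f2fs_lock_op() when a hole may need allocation.
 */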
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei = {0,0,0};
	int err = 0;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
			!is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		f2fs_lock_op(sbi);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				f2fs_lock_op(sbi);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		f2fs_unlock_op(sbi);
	return err;
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	/*
	 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
	 * wait_for_stable_page. We will wait for that below with our IO
	 * control.
	 */
	page = pagecache_get_page(mapping, index,
				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
		zero_user_segment(page, len, PAGE_SIZE);
		return 0;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		struct bio *bio;

		bio = f2fs_grab_bio(inode, blkaddr, 1);
		if (IS_ERR(bio)) {
			err = PTR_ERR(bio);
			goto fail;
		}
		bio->bi_opf = REQ_OP_READ;
		if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
			bio_put(bio);
			err = -EFAULT;
			goto fail;
		}

		__submit_bio(sbi, bio, DATA);

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should come from len == PAGE_SIZE, and we expect copied
	 * to be PAGE_SIZE. Otherwise, we treat it as zero copied and
	 * let generic_perform_write() try to copy data again through copied=0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != len))
			copied = 0;
		else
			SetPageUptodate(page);
	}
	if (!copied)
		goto unlock_out;

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
							loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}
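
/*
 * Direct IO entry point. Returning 0 makes the VFS fall back to buffered IO,
 * which is forced for encrypted files, LFS-mode writes and multi-device
 * mounts (see __force_buffered_io()).
 */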
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int rw = iov_iter_rw(iter);
	int err;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (__force_buffered_io(inode, rw))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	down_read(&F2FS_I(inode)->dio_rwsem[rw]);
	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
	up_read(&F2FS_I(inode)->dio_rwsem[rw]);

	if (rw == WRITE) {
		if (err > 0)
			set_inode_flag(inode, FI_UPDATE_WRITE);
		else if (err < 0)
			f2fs_write_failed(mapping, offset + count);
	}

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}
void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_META);
		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		} else {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}
	}

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return;

	set_page_private(page, 0);
	ClearPagePrivate(page);
}
int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	set_page_private(page, 0);
	ClearPagePrivate(page);
	return 1;
}
/*
 * This was copied from __set_page_dirty_buffers, which gives higher
 * performance on very high speed storage (e.g., pmem).
 */
void f2fs_set_page_dirty_nobuffers(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (unlikely(!mapping))
		return;

	spin_lock(&mapping->private_lock);
	lock_page_memcg(page);
	SetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	spin_lock_irqsave(&mapping->tree_lock, flags);
	WARN_ON_ONCE(!PageUptodate(page));
	account_page_dirtied(page, mapping);
	radix_tree_tag_set(&mapping->page_tree,
			page_index(page), PAGECACHE_TAG_DIRTY);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	unlock_page_memcg(page);

	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return;
}
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * Previously, this page has been registered, so we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		f2fs_set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}
#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic written page is safe with the inmem_lock held */
	if (atomic_written && !mutex_trylock(&fi->inmem_lock))
		return -EAGAIN;

	/*
	 * A reference is expected if PagePrivate is set when moving the
	 * mapping; however, F2FS breaks this for maintaining dirty page
	 * counts when truncating pages. So adjusting 'extra_count' here
	 * makes it work.
	 */
	extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
	rc = migrate_page_move_mapping(mapping, newpage,
				page, NULL, mode, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;
		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	if (PagePrivate(page))
		SetPagePrivate(newpage);
	set_page_private(newpage, page_private(page));

	migrate_page_copy(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};