// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>
#include <linux/pagevec.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>
static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;

static void *page_array_alloc(struct inode *inode, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (likely(size <= sbi->page_array_slab_size))
		return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
	return f2fs_kzalloc(sbi, size, GFP_NOFS);
}

static void page_array_free(struct inode *inode, void *pages, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (!pages)
		return;

	if (likely(size <= sbi->page_array_slab_size))
		kmem_cache_free(sbi->page_array_slab, pages);
	else
		kvfree(pages);
}
struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
};
static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}
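
/*
 * Worked example (illustrative only, assuming the default 4-page cluster,
 * i.e. log_cluster_size == 2 and cluster_size == 4): a page with index 5
 * has offset_in_cluster() == 1, belongs to cluster_idx() == 1, and that
 * cluster's start_idx_of_cluster() is page index 4.
 */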
bool f2fs_is_compressed_page(struct page *page)
{
	if (!PagePrivate(page))
		return false;
	if (!page_private(page))
		return false;
	if (page_private_nonpointer(page))
		return false;

	f2fs_bug_on(F2FS_M_SB(page->mapping),
		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}

static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data)
{
	attach_page_private(page, (void *)data);

	/* i_crypto_info and iv index */
	page->index = index;
	page->mapping = inode->i_mapping;
}
static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}

struct page *f2fs_compress_control_page(struct page *page)
{
	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}
int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
	if (cc->rpages)
		return 0;

	cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
{
	page_array_free(cc->inode, cc->rpages, cc->cluster_size);
	cc->rpages = NULL;
	cc->nr_rpages = 0;
	cc->nr_cpages = 0;
	if (!reuse)
		cc->cluster_idx = NULL_CLUSTER;
}

void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{
	unsigned int cluster_ofs;

	if (!f2fs_cluster_can_merge_page(cc, page->index))
		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

	cluster_ofs = offset_in_cluster(cc, page->index);
	cc->rpages[cluster_ofs] = page;
	cc->nr_rpages++;
	cc->cluster_idx = cluster_idx(cc, page->index);
}
#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZO1X_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
						dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzo_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif
#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	unsigned int size = LZ4_MEM_COMPRESS;

#ifdef CONFIG_F2FS_FS_LZ4HC
	if (F2FS_I(cc->inode)->i_compress_flag >> COMPRESS_LEVEL_OFFSET)
		size = LZ4HC_MEM_COMPRESS;
#endif

	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	/*
	 * we do not change cc->clen to LZ4_compressBound(inputsize) to
	 * adapt worst compress case, because lz4 compressor can handle
	 * output budget properly.
	 */
	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}
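
/*
 * Illustrative note (numbers assume 4 KiB pages and the default 4-page
 * cluster): rlen is 16384 bytes, so clen is capped at
 * 16384 - 4096 - COMPRESS_HEADER_SIZE. A cluster is only stored compressed
 * when at least one full page is saved after the header is accounted for;
 * otherwise the compressor reports failure and the cluster falls back to
 * the raw write path.
 */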
static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

#ifdef CONFIG_F2FS_FS_LZ4HC
static int lz4hc_compress_pages(struct compress_ctx *cc)
{
	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
						COMPRESS_LEVEL_OFFSET;
	int len;

	if (level)
		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
					cc->clen, level, cc->private);
	else
		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}
#endif

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len;

#ifdef CONFIG_F2FS_FS_LZ4HC
	return lz4hc_compress_pages(cc);
#endif
	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
						dic->clen, dic->rlen);
	if (ret < 0) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id, ret,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx	= lz4_init_compress_ctx,
	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
	.compress_pages		= lz4_compress_pages,
	.decompress_pages	= lz4_decompress_pages,
};
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
#define F2FS_ZSTD_DEFAULT_CLEVEL	1

static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
	ZSTD_parameters params;
	ZSTD_CStream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
						COMPRESS_LEVEL_OFFSET;

	if (!level)
		level = F2FS_ZSTD_DEFAULT_CLEVEL;

	params = ZSTD_getParams(level, cc->rlen, 0);
	workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);

	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	cc->private = workspace;
	cc->private2 = stream;

	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
	cc->private2 = NULL;
}

static int zstd_compress_pages(struct compress_ctx *cc)
{
	ZSTD_CStream *stream = cc->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int src_size = cc->rlen;
	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	int ret;

	inbuf.pos = 0;
	inbuf.src = cc->rbuf;
	inbuf.size = src_size;

	outbuf.pos = 0;
	outbuf.dst = cc->cbuf->cdata;
	outbuf.size = dst_size;

	ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	ret = ZSTD_endStream(stream, &outbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	/*
	 * there is compressed data remained in intermediate buffer due to
	 * no more space in cbuf.cdata
	 */
	if (ret)
		return -EAGAIN;

	cc->clen = outbuf.pos;
	return 0;
}
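
/*
 * Illustrative note: the whole cluster is fed to the zstd streaming API in
 * a single ZSTD_compressStream() call followed by ZSTD_endStream(), so a
 * non-zero return from ZSTD_endStream() can only mean the output did not
 * fit into the clen budget computed above; the caller then treats the
 * cluster as incompressible (-EAGAIN) rather than as an I/O error.
 */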
static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned int max_window_size =
			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);

	workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);

	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	dic->private = workspace;
	dic->private2 = stream;

	return 0;
}

static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
	kvfree(dic->private);
	dic->private = NULL;
	dic->private2 = NULL;
}

static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream = dic->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int ret;

	inbuf.pos = 0;
	inbuf.src = dic->cbuf->cdata;
	inbuf.size = dic->clen;

	outbuf.pos = 0;
	outbuf.dst = dic->rbuf;
	outbuf.size = dic->rlen;

	ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	if (dic->rlen != outbuf.pos) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
				"expected:%lu\n", KERN_ERR,
				F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, dic->rlen,
				PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_zstd_ops = {
	.init_compress_ctx	= zstd_init_compress_ctx,
	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
	.compress_pages		= zstd_compress_pages,
	.init_decompress_ctx	= zstd_init_decompress_ctx,
	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
	.decompress_pages	= zstd_decompress_pages,
};
#endif
#ifdef CONFIG_F2FS_FS_LZO
#ifdef CONFIG_F2FS_FS_LZORLE
static int lzorle_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzorle_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzorle_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif
#endif

static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
	&f2fs_zstd_ops,
#else
	NULL,
#endif
#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
	&f2fs_lzorle_ops,
#else
	NULL,
#endif
};
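
/*
 * Illustrative note: the slots above are assumed to follow the on-disk
 * algorithm numbering (COMPRESS_LZO, COMPRESS_LZ4, COMPRESS_ZSTD,
 * COMPRESS_LZORLE), so f2fs_cops[F2FS_I(inode)->i_compress_algorithm]
 * either yields the backend for that inode or NULL when the algorithm
 * was not built into the kernel.
 */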
bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

static mempool_t *compress_page_pool;
static int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
		"Number of intermediate compress pages to preallocate");
int f2fs_init_compress_mempool(void)
{
	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
	if (!compress_page_pool)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_compress_mempool(void)
{
	mempool_destroy(compress_page_pool);
}

static struct page *f2fs_compress_alloc_page(void)
{
	struct page *page;

	page = mempool_alloc(compress_page_pool, GFP_NOFS);
	lock_page(page);

	return page;
}

static void f2fs_compress_free_page(struct page *page)
{
	if (!page)
		return;
	detach_page_private(page);
	page->mapping = NULL;
	unlock_page(page);
	mempool_free(page, compress_page_pool);
}

#define MAX_VMAP_RETRIES	3

static void *f2fs_vmap(struct page **pages, unsigned int count)
{
	int i;
	void *buf = NULL;

	for (i = 0; i < MAX_VMAP_RETRIES; i++) {
		buf = vm_map_ram(pages, count, -1);
		if (buf)
			break;
		vm_unmap_aliases();
	}
	return buf;
}
static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, new_nr_cpages;
	struct page **new_cpages;
	u32 chksum = 0;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	if (cops->init_compress_ctx) {
		ret = cops->init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		cc->cpages[i] = f2fs_compress_alloc_page();
		if (!cc->cpages[i]) {
			ret = -ENOMEM;
			goto out_free_cpages;
		}
	}

	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);

	if (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)
		chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
					cc->cbuf->cdata, cc->clen);
	cc->cbuf->chksum = cpu_to_le32(chksum);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* Now we're going to cut unnecessary tail pages */
	new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
	if (!new_cpages) {
		ret = -ENOMEM;
		goto out_vunmap_cbuf;
	}

	/* zero out any unused part of the last page */
	memset(&cc->cbuf->cdata[cc->clen], 0,
			(new_nr_cpages * PAGE_SIZE) -
			(cc->clen + COMPRESS_HEADER_SIZE));

	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
	vm_unmap_ram(cc->rbuf, cc->cluster_size);

	for (i = 0; i < cc->nr_cpages; i++) {
		if (i < new_nr_cpages) {
			new_cpages[i] = cc->cpages[i];
			continue;
		}
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);

	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = new_cpages;
	cc->nr_cpages = new_nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(cc->rbuf, cc->cluster_size);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_compress_free_page(cc->cpages[i]);
	}
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}
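
/*
 * Illustrative summary of the layout produced above (field names as used in
 * this file): the first compressed page starts with the cluster header
 * (clen, chksum, reserved[]) and the compressed payload follows in cdata.
 * Assuming 4 KiB pages and a 4-page cluster, a cluster that compresses to
 * clen == 5000 bytes keeps new_nr_cpages ==
 * DIV_ROUND_UP(5000 + COMPRESS_HEADER_SIZE, 4096) == 2 pages, and the
 * remaining tail pages are returned to the mempool.
 */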
void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int ret;
	int i;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	if (dic->failed) {
		ret = -EIO;
		goto out_end_io;
	}

	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
	if (!dic->tpages) {
		ret = -ENOMEM;
		goto out_end_io;
	}

	for (i = 0; i < dic->cluster_size; i++) {
		if (dic->rpages[i]) {
			dic->tpages[i] = dic->rpages[i];
			continue;
		}

		dic->tpages[i] = f2fs_compress_alloc_page();
		if (!dic->tpages[i]) {
			ret = -ENOMEM;
			goto out_end_io;
		}
	}

	if (cops->init_decompress_ctx) {
		ret = cops->init_decompress_ctx(dic);
		if (ret)
			goto out_end_io;
	}

	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
	if (!dic->rbuf) {
		ret = -ENOMEM;
		goto out_destroy_decompress_ctx;
	}

	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
	if (!dic->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;
		goto out_vunmap_cbuf;
	}

	ret = cops->decompress_pages(dic);

	if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
		u32 provided = le32_to_cpu(dic->cbuf->chksum);
		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);

		if (provided != calculated) {
			if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
				set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
				printk_ratelimited(
					"%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
					KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
					provided, calculated);
			}
			set_sbi_flag(sbi, SBI_NEED_FSCK);
		}
	}

out_vunmap_cbuf:
	vm_unmap_ram(dic->cbuf, dic->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(dic->rbuf, dic->cluster_size);
out_destroy_decompress_ctx:
	if (cops->destroy_decompress_ctx)
		cops->destroy_decompress_ctx(dic);
out_end_io:
	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
							dic->clen, ret);
	f2fs_decompress_end_io(dic, ret);
}
/*
 * This is called when a page of a compressed cluster has been read from disk
 * (or failed to be read from disk).  It checks whether this page was the last
 * page being waited on in the cluster, and if so, it decompresses the cluster
 * (or in the case of a failure, cleans up without actually decompressing).
 */
void f2fs_end_read_compressed_page(struct page *page, bool failed,
						block_t blkaddr)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);

	dec_page_count(sbi, F2FS_RD_DATA);

	if (failed)
		WRITE_ONCE(dic->failed, true);
	else if (blkaddr)
		f2fs_cache_compressed_page(sbi, page,
					dic->inode->i_ino, blkaddr);

	if (atomic_dec_and_test(&dic->remaining_pages))
		f2fs_decompress_cluster(dic);
}
static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}
static bool cluster_has_invalid_data(struct compress_ctx *cc)
{
	loff_t i_size = i_size_read(cc->inode);
	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(F2FS_I_SB(cc->inode), !page);

		/* beyond EOF */
		if (page->index >= nr_pages)
			return true;
	}
	return false;
}
static int __f2fs_cluster_blocks(struct inode *inode,
				unsigned int cluster_idx, bool compr)
{
	struct dnode_of_data dn;
	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
	unsigned int start_idx = cluster_idx <<
				F2FS_I(inode)->i_log_cluster_size;
	int ret;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		int i;

		ret = 1;
		for (i = 1; i < cluster_size; i++) {
			block_t blkaddr;

			blkaddr = data_blkaddr(dn.inode,
					dn.node_page, dn.ofs_in_node + i);
			if (compr) {
				if (__is_valid_data_blkaddr(blkaddr))
					ret++;
			} else {
				if (blkaddr != NULL_ADDR)
					ret++;
			}
		}

		f2fs_bug_on(F2FS_I_SB(inode),
			!compr && ret != cluster_size &&
			!is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
	}
fail:
	f2fs_put_dnode(&dn);
	return ret;
}
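
/*
 * Illustrative note on the return value: 0 means the cluster is not stored
 * in compressed form (or does not exist), while a positive count is the
 * number of blocks found behind the COMPRESS_ADDR header: compressed
 * blocks only when compr is true, any non-NULL block otherwise. The two
 * wrappers below expose exactly these two views.
 */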
/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
}

/* return # of valid blocks in compressed cluster */
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	return __f2fs_cluster_blocks(inode,
		index >> F2FS_I(inode)->i_log_cluster_size,
		false);
}

static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_need_compress_data(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
		return false;
	return !cluster_has_invalid_data(cc);
}
static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i])
			set_page_dirty(cc->rpages[i]);
}
static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	sector_t last_block_in_bio;
	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;

retry:
	ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
	if (ret <= 0)
		return ret;

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep page reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		page = f2fs_pagecache_get_page(mapping, start_idx + i,
							fgp_flag, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			goto unlock_pages;
		}

		if (PageUptodate(page))
			f2fs_put_page(page, 1);
		else
			f2fs_compress_ctx_add_page(cc, page);
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
					&last_block_in_bio, false, true);
		f2fs_put_rpages(cc);
		f2fs_destroy_compress_ctx(cc, true);
		if (ret)
			goto out;
		if (bio)
			f2fs_submit_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		page = find_lock_page(mapping, start_idx + i);
		if (!page) {
			/* page can be truncated */
			goto release_and_retry;
		}

		f2fs_wait_on_page_writeback(page, DATA, true, true);
		f2fs_compress_ctx_add_page(cc, page);

		if (!PageUptodate(page)) {
release_and_retry:
			f2fs_put_rpages(cc);
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_destroy_compress_ctx(cc, true);
			goto retry;
		}
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
		return cc->cluster_size;
	}

unlock_pages:
	f2fs_put_rpages(cc);
	f2fs_unlock_rpages(cc, i);
	f2fs_destroy_compress_ctx(cc, true);
out:
	return ret;
}
int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
					pgoff_t index, unsigned copied)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	bool first_index = (index == cc.rpages[0]->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
	f2fs_destroy_compress_ctx(&cc, false);

	return first_index;
}
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
	void *fsdata = NULL;
	struct page *pagep;
	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
							log_cluster_size;
	int err;

	err = f2fs_is_compressed_cluster(inode, start_idx);
	if (err < 0)
		return err;

	/* truncate normal cluster */
	if (!err)
		return f2fs_do_truncate_blocks(inode, from, lock);

	/* truncate compressed cluster */
	err = f2fs_prepare_compress_overwrite(inode, &pagep,
						start_idx, &fsdata);

	/* should not be a normal cluster */
	f2fs_bug_on(F2FS_I_SB(inode), err == 0);

	if (err <= 0)
		return err;

	if (err > 0) {
		struct page **rpages = fsdata;
		int cluster_size = F2FS_I(inode)->i_cluster_size;
		int i;

		for (i = cluster_size - 1; i >= 0; i--) {
			loff_t start = rpages[i]->index << PAGE_SHIFT;

			if (from <= start) {
				zero_user_segment(rpages[i], 0, PAGE_SIZE);
			} else {
				zero_user_segment(rpages[i], from - start,
						PAGE_SIZE);
				break;
			}
		}

		f2fs_compress_write_end(inode, fsdata, start_idx, true);
	}
	return 0;
}
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
	};
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(cc->rpages[0]->mapping, -EIO);
		goto out_free;
	}

	if (IS_NOQUOTA(inode)) {
		/*
		 * We need to wait for node_write to avoid block allocation during
		 * checkpoint. This can only happen to quota writes which can cause
		 * the below discard race condition.
		 */
		down_read(&sbi->node_write);
	} else if (!f2fs_trylock_op(sbi)) {
		goto out_free;
	}

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = kmem_cache_zalloc(cic_entry_slab, GFP_NOFS);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	atomic_set(&cic->pending_pages, cc->nr_cpages);
	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
					cc->rpages[i + 1]->index, cic);
		fio.compressed_page = cc->cpages[i];

		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
						dn.ofs_in_node + i + 1);

		/* wait for GCed page writeback via META_MAPPING */
		f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);

		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = f2fs_data_blkaddr(&dn);
		fio.page = cc->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted)
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		f2fs_outplace_write_data(&dn, &fio);
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		unlock_page(fio.page);
	}

	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
	add_compr_block_stat(inode, cc->nr_cpages);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);
	if (cc->cluster_idx == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	f2fs_put_dnode(&dn);
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);

	spin_lock(&fi->i_size_lock);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	spin_unlock(&fi->i_size_lock);

	f2fs_put_rpages(cc);
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	f2fs_destroy_compress_ctx(cc, false);
	return 0;

out_destroy_crypt:
	page_array_free(cc->inode, cic->rpages, cc->cluster_size);

	for (--i; i >= 0; i--)
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
	for (i = 0; i < cc->nr_cpages; i++) {
		if (!cc->cpages[i])
			continue;
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}
out_put_cic:
	kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);
out_free:
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	return -EAGAIN;
}
void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic =
			(struct compress_io_ctx *)page_private(page);
	int i;

	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_compress_free_page(page);

	dec_page_count(sbi, F2FS_WB_DATA);

	if (atomic_dec_return(&cic->pending_pages))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_page_private_gcing(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
	kmem_cache_free(cic_entry_slab, cic);
}
static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	int _submitted, compr_blocks, ret;
	int i = -1, err = 0;

	compr_blocks = f2fs_compressed_blocks(cc);
	if (compr_blocks < 0) {
		err = compr_blocks;
		goto out_err;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
retry_write:
		if (cc->rpages[i]->mapping != mapping) {
			unlock_page(cc->rpages[i]);
			continue;
		}

		BUG_ON(!PageLocked(cc->rpages[i]));

		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks, false);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(cc->rpages[i]);
				ret = 0;
			} else if (ret == -EAGAIN) {
				/*
				 * for quota file, just redirty left pages to
				 * avoid deadlock caused by cluster update race
				 * from foreground operation.
				 */
				if (IS_NOQUOTA(cc->inode)) {
					err = 0;
					goto out_err;
				}
				ret = 0;
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				lock_page(cc->rpages[i]);

				if (!PageDirty(cc->rpages[i])) {
					unlock_page(cc->rpages[i]);
					continue;
				}

				clear_page_dirty_for_io(cc->rpages[i]);
				goto retry_write;
			}
			err = ret;
			goto out_err;
		}

		*submitted += _submitted;
	}

	f2fs_balance_fs(F2FS_M_SB(mapping), true);

	return 0;
out_err:
	for (++i; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}
	return err;
}
int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, 1);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
	f2fs_destroy_compress_ctx(cc, false);
	return err;
}
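
/*
 * Illustrative summary of the write path above: a full, non-atomic, dirty
 * cluster is compressed first; if the compressor reports -EAGAIN (the data
 * did not shrink by at least one page) or the compressed write itself asks
 * to retry, the same pages are written uncompressed through
 * f2fs_write_raw_pages(), so writeback always makes progress either way.
 */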
static void f2fs_free_dic(struct decompress_io_ctx *dic);

struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i;

	dic = kmem_cache_zalloc(dic_entry_slab, GFP_NOFS);
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!dic->rpages) {
		kmem_cache_free(dic_entry_slab, dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	atomic_set(&dic->remaining_pages, cc->nr_cpages);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	refcount_set(&dic->refcnt, 1);
	dic->failed = false;
	dic->need_verity = f2fs_need_verity(cc->inode, start_idx);

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
	if (!dic->cpages)
		goto out_free;

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_compress_alloc_page();
		if (!page)
			goto out_free;

		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1, dic);
		dic->cpages[i] = page;
	}

	return dic;

out_free:
	f2fs_free_dic(dic);
	return ERR_PTR(-ENOMEM);
}
static void f2fs_free_dic(struct decompress_io_ctx *dic)
{
	int i;

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
				continue;
			if (!dic->tpages[i])
				continue;
			f2fs_compress_free_page(dic->tpages[i]);
		}
		page_array_free(dic->inode, dic->tpages, dic->cluster_size);
	}

	if (dic->cpages) {
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_compress_free_page(dic->cpages[i]);
		}
		page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
	}

	page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
	kmem_cache_free(dic_entry_slab, dic);
}
static void f2fs_put_dic(struct decompress_io_ctx *dic)
{
	if (refcount_dec_and_test(&dic->refcnt))
		f2fs_free_dic(dic);
}

/*
 * Update and unlock the cluster's pagecache pages, and release the reference
 * to the decompress_io_ctx that was being held for I/O completion.
 */
static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
{
	int i;

	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (!rpage)
			continue;

		/* PG_error was set if verity failed. */
		if (failed || PageError(rpage)) {
			ClearPageUptodate(rpage);
			/* will re-read again later */
			ClearPageError(rpage);
		} else {
			SetPageUptodate(rpage);
		}

		unlock_page(rpage);
	}

	f2fs_put_dic(dic);
}
static void f2fs_verify_cluster(struct work_struct *work)
{
	struct decompress_io_ctx *dic =
		container_of(work, struct decompress_io_ctx, verity_work);
	int i;

	/* Verify the cluster's decompressed pages with fs-verity. */
	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (rpage && !fsverity_verify_page(rpage))
			SetPageError(rpage);
	}

	__f2fs_decompress_end_io(dic, false);
}

/*
 * This is called when a compressed cluster has been decompressed
 * (or failed to be read and/or decompressed).
 */
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
{
	if (!failed && dic->need_verity) {
		/*
		 * Note that to avoid deadlocks, the verity work can't be done
		 * on the decompression workqueue.  This is because verifying
		 * the data pages can involve reading metadata pages from the
		 * file, and these metadata pages may be compressed.
		 */
		INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
		fsverity_enqueue_verify_work(&dic->verity_work);
	} else {
		__f2fs_decompress_end_io(dic, failed);
	}
}
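
/*
 * Illustrative note on the two completion paths: when fs-verity covers the
 * range, verification is bounced to the fsverity workqueue and failing
 * pages are marked with PG_error there; __f2fs_decompress_end_io() then
 * clears Uptodate on those pages so a later read retries them, while
 * everything else is marked Uptodate and unlocked.
 */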
/*
 * Put a reference to a compressed page's decompress_io_ctx.
 *
 * This is called when the page is no longer needed and can be freed.
 */
void f2fs_put_page_dic(struct page *page)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);

	f2fs_put_dic(dic);
}

const struct address_space_operations f2fs_compress_aops = {
	.releasepage = f2fs_release_page,
	.invalidatepage = f2fs_invalidate_page,
};
struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->compress_inode->i_mapping;
}

void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	if (!sbi->compress_inode)
		return;
	invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
}

void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
						nid_t ino, block_t blkaddr)
{
	struct page *cpage;
	int ret;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return;

	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
		return;

	if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
		return;

	cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
	if (cpage) {
		f2fs_put_page(cpage, 0);
		return;
	}

	cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
	if (!cpage)
		return;

	ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
						blkaddr, GFP_NOFS);
	if (ret) {
		f2fs_put_page(cpage, 0);
		return;
	}

	set_page_private_data(cpage, ino);

	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
		goto out;

	memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
	SetPageUptodate(cpage);
out:
	f2fs_put_page(cpage, 1);
}
bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
								block_t blkaddr)
{
	struct page *cpage;
	bool hitted = false;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return false;

	cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
				blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
	if (cpage) {
		if (PageUptodate(cpage)) {
			atomic_inc(&sbi->compress_page_hit);
			memcpy(page_address(page),
				page_address(cpage), PAGE_SIZE);
			hitted = true;
		}
		f2fs_put_page(cpage, 1);
	}

	return hitted;
}
void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct address_space *mapping = sbi->compress_inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = 0;
	pgoff_t end = MAX_BLKADDR(sbi);

	if (!mapping->nrpages)
		return;

	pagevec_init(&pvec);

	do {
		unsigned int nr_pages;
		int i;

		nr_pages = pagevec_lookup_range(&pvec, mapping,
						&index, end - 1);
		if (!nr_pages)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}

			if (ino != get_page_private_data(page)) {
				unlock_page(page);
				continue;
			}

			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (index < end);
}
int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
{
	struct inode *inode;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return 0;

	inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	sbi->compress_inode = inode;

	sbi->compress_percent = COMPRESS_PERCENT;
	sbi->compress_watermark = COMPRESS_WATERMARK;

	atomic_set(&sbi->compress_page_hit, 0);

	return 0;
}

void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
{
	if (!sbi->compress_inode)
		return;
	iput(sbi->compress_inode);
	sbi->compress_inode = NULL;
}
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	char slab_name[32];

	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));

	sbi->page_array_slab_size = sizeof(struct page *) <<
					F2FS_OPTION(sbi).compress_log_size;

	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
					sbi->page_array_slab_size);
	if (!sbi->page_array_slab)
		return -ENOMEM;
	return 0;
}
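
/*
 * Illustrative sizing note: with the default compress_log_size of 2 the
 * per-filesystem slab holds 4 page pointers per object (32 bytes on a
 * 64-bit kernel), and page_array_alloc() only falls back to f2fs_kzalloc()
 * when a caller asks for more pointers than fit in one slab object.
 */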
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
{
	kmem_cache_destroy(sbi->page_array_slab);
}

static int __init f2fs_init_cic_cache(void)
{
	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
					sizeof(struct compress_io_ctx));
	if (!cic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_cic_cache(void)
{
	kmem_cache_destroy(cic_entry_slab);
}

static int __init f2fs_init_dic_cache(void)
{
	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
					sizeof(struct decompress_io_ctx));
	if (!dic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_dic_cache(void)
{
	kmem_cache_destroy(dic_entry_slab);
}

int __init f2fs_init_compress_cache(void)
{
	int err;

	err = f2fs_init_cic_cache();
	if (err)
		goto out;
	err = f2fs_init_dic_cache();
	if (err)
		goto free_cic;
	return 0;
free_cic:
	f2fs_destroy_cic_cache();
out:
	return -ENOMEM;
}

void f2fs_destroy_compress_cache(void)
{
	f2fs_destroy_dic_cache();
	f2fs_destroy_cic_cache();
}