/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/log2.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
        switch (type) {
        case BTRFS_COMPRESS_ZLIB:
        case BTRFS_COMPRESS_LZO:
        case BTRFS_COMPRESS_ZSTD:
        case BTRFS_COMPRESS_NONE:
                return btrfs_compress_types[type];
        }

        return NULL;
}
bool btrfs_compress_is_valid_type(const char *str, size_t len)
{
        int i;

        for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
                size_t comp_len = strlen(btrfs_compress_types[i]);

                if (len < comp_len)
                        continue;

                if (!strncmp(btrfs_compress_types[i], str, comp_len))
                        return true;
        }
        return false;
}
static int btrfs_decompress_bio(struct compressed_bio *cb);
static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
                                      unsigned long disk_size)
{
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

        return sizeof(struct compressed_bio) +
                (DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}
static int check_compressed_csum(struct btrfs_inode *inode,
                                 struct compressed_bio *cb,
                                 u64 disk_start)
{
        int ret;
        struct page *page;
        unsigned long i;
        char *kaddr;
        u32 csum;
        u32 *cb_sum = &cb->sums;

        if (inode->flags & BTRFS_INODE_NODATASUM)
                return 0;

        for (i = 0; i < cb->nr_pages; i++) {
                page = cb->compressed_pages[i];
                csum = ~(u32)0;

                kaddr = kmap_atomic(page);
                csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
                btrfs_csum_final(csum, (u8 *)&csum);
                kunmap_atomic(kaddr);

                if (csum != *cb_sum) {
                        btrfs_print_data_csum_error(inode, disk_start, csum,
                                                    *cb_sum, cb->mirror_num);
                        ret = -EIO;
                        goto fail;
                }
                cb_sum++;
                disk_start += PAGE_SIZE;
        }
        ret = 0;
fail:
        return ret;
}
/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio)
{
        struct compressed_bio *cb = bio->bi_private;
        struct inode *inode;
        struct page *page;
        unsigned long index;
        unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
        int ret = 0;

        if (bio->bi_status)
                cb->errors = 1;

        /* if there are more bios still pending for this compressed
         * extent, just exit
         */
        if (!refcount_dec_and_test(&cb->pending_bios))
                goto out;

        /*
         * Record the correct mirror_num in cb->orig_bio so that
         * read-repair can work properly.
         */
        ASSERT(btrfs_io_bio(cb->orig_bio));
        btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
        cb->mirror_num = mirror;

        /*
         * Some IO in this cb have failed, just skip checksum as there
         * is no way it could be correct.
         */
        if (cb->errors == 1)
                goto csum_failed;

        inode = cb->inode;
        ret = check_compressed_csum(BTRFS_I(inode), cb,
                                    (u64)bio->bi_iter.bi_sector << 9);
        if (ret)
                goto csum_failed;

        /* ok, we're the last bio for this extent, lets start
         * the decompression.
         */
        ret = btrfs_decompress_bio(cb);

csum_failed:
        if (ret)
                cb->errors = 1;

        /* release the compressed pages */
        for (index = 0; index < cb->nr_pages; index++) {
                page = cb->compressed_pages[index];
                page->mapping = NULL;
                put_page(page);
        }

        /* do io completion on the original bio */
        if (cb->errors) {
                bio_io_error(cb->orig_bio);
        } else {
                int i;
                struct bio_vec *bvec;

                /*
                 * we have verified the checksum already, set page
                 * checked so the end_io handlers know about it
                 */
                ASSERT(!bio_flagged(bio, BIO_CLONED));
                bio_for_each_segment_all(bvec, cb->orig_bio, i)
                        SetPageChecked(bvec->bv_page);

                bio_endio(cb->orig_bio);
        }

        /* finally free the cb struct */
        kfree(cb->compressed_pages);
        kfree(cb);
out:
        bio_put(bio);
}
/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
                                              const struct compressed_bio *cb)
{
        unsigned long index = cb->start >> PAGE_SHIFT;
        unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
        struct page *pages[16];
        unsigned long nr_pages = end_index - index + 1;
        int i;
        int ret;

        if (cb->errors)
                mapping_set_error(inode->i_mapping, -EIO);

        while (nr_pages > 0) {
                ret = find_get_pages_contig(inode->i_mapping, index,
                                     min_t(unsigned long,
                                     nr_pages, ARRAY_SIZE(pages)), pages);
                if (ret == 0) {
                        nr_pages -= 1;
                        index += 1;
                        continue;
                }
                for (i = 0; i < ret; i++) {
                        if (cb->errors)
                                SetPageError(pages[i]);
                        end_page_writeback(pages[i]);
                        put_page(pages[i]);
                }
                nr_pages -= ret;
                index += ret;
        }
        /* the inode may be gone now */
}
/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
        struct extent_io_tree *tree;
        struct compressed_bio *cb = bio->bi_private;
        struct inode *inode;
        struct page *page;
        unsigned long index;

        if (bio->bi_status)
                cb->errors = 1;

        /* if there are more bios still pending for this compressed
         * extent, just exit
         */
        if (!refcount_dec_and_test(&cb->pending_bios))
                goto out;

        /* ok, we're the last bio for this extent, step one is to
         * call back into the FS and do all the end_io operations
         */
        inode = cb->inode;
        tree = &BTRFS_I(inode)->io_tree;
        cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
        tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
                                         cb->start,
                                         cb->start + cb->len - 1,
                                         NULL,
                                         bio->bi_status ?
                                         BLK_STS_OK : BLK_STS_NOTSUPP);
        cb->compressed_pages[0]->mapping = NULL;

        end_compressed_writeback(inode, cb);
        /* note, our inode could be gone now */

        /*
         * release the compressed pages, these came from alloc_page and
         * are not attached to the inode at all
         */
        for (index = 0; index < cb->nr_pages; index++) {
                page = cb->compressed_pages[index];
                page->mapping = NULL;
                put_page(page);
        }

        /* finally free the cb struct */
        kfree(cb->compressed_pages);
        kfree(cb);
out:
        bio_put(bio);
}
/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
                                 unsigned long len, u64 disk_start,
                                 unsigned long compressed_len,
                                 struct page **compressed_pages,
                                 unsigned long nr_pages,
                                 unsigned int write_flags)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct bio *bio = NULL;
        struct compressed_bio *cb;
        unsigned long bytes_left;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        int pg_index = 0;
        struct page *page;
        u64 first_byte = disk_start;
        struct block_device *bdev;
        blk_status_t ret;
        int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

        WARN_ON(start & ((u64)PAGE_SIZE - 1));
        cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
        if (!cb)
                return BLK_STS_RESOURCE;
        refcount_set(&cb->pending_bios, 0);
        cb->errors = 0;
        cb->inode = inode;
        cb->start = start;
        cb->len = len;
        cb->mirror_num = 0;
        cb->compressed_pages = compressed_pages;
        cb->compressed_len = compressed_len;
        cb->orig_bio = NULL;
        cb->nr_pages = nr_pages;

        bdev = fs_info->fs_devices->latest_bdev;

        bio = btrfs_bio_alloc(bdev, first_byte);
        bio->bi_opf = REQ_OP_WRITE | write_flags;
        bio->bi_private = cb;
        bio->bi_end_io = end_compressed_bio_write;
        refcount_set(&cb->pending_bios, 1);

        /* create and submit bios for the compressed pages */
        bytes_left = compressed_len;
        for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
                int submit = 0;

                page = compressed_pages[pg_index];
                page->mapping = inode->i_mapping;
                if (bio->bi_iter.bi_size)
                        submit = io_tree->ops->merge_bio_hook(page, 0,
                                                              PAGE_SIZE,
                                                              bio, 0);

                page->mapping = NULL;
                if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
                    PAGE_SIZE) {
                        /*
                         * inc the count before we submit the bio so
                         * we know the end IO handler won't happen before
                         * we inc the count.  Otherwise, the cb might get
                         * freed before we're done setting it up
                         */
                        refcount_inc(&cb->pending_bios);
                        ret = btrfs_bio_wq_end_io(fs_info, bio,
                                                  BTRFS_WQ_ENDIO_DATA);
                        BUG_ON(ret); /* -ENOMEM */

                        if (!skip_sum) {
                                ret = btrfs_csum_one_bio(inode, bio, start, 1);
                                BUG_ON(ret); /* -ENOMEM */
                        }

                        ret = btrfs_map_bio(fs_info, bio, 0, 1);
                        if (ret) {
                                bio->bi_status = ret;
                                bio_endio(bio);
                        }

                        bio = btrfs_bio_alloc(bdev, first_byte);
                        bio->bi_opf = REQ_OP_WRITE | write_flags;
                        bio->bi_private = cb;
                        bio->bi_end_io = end_compressed_bio_write;
                        bio_add_page(bio, page, PAGE_SIZE, 0);
                }
                if (bytes_left < PAGE_SIZE) {
                        btrfs_info(fs_info,
                                   "bytes left %lu compress len %lu nr %lu",
                                   bytes_left, cb->compressed_len, cb->nr_pages);
                }
                bytes_left -= PAGE_SIZE;
                first_byte += PAGE_SIZE;
                cond_resched();
        }

        ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
        BUG_ON(ret); /* -ENOMEM */

        if (!skip_sum) {
                ret = btrfs_csum_one_bio(inode, bio, start, 1);
                BUG_ON(ret); /* -ENOMEM */
        }

        ret = btrfs_map_bio(fs_info, bio, 0, 1);
        if (ret) {
                bio->bi_status = ret;
                bio_endio(bio);
        }

        return 0;
}
static u64 bio_end_offset(struct bio *bio)
{
        struct bio_vec *last = &bio->bi_io_vec[bio->bi_vcnt - 1];

        return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}
static noinline int add_ra_bio_pages(struct inode *inode,
                                     u64 compressed_end,
                                     struct compressed_bio *cb)
{
        unsigned long end_index;
        unsigned long pg_index;
        u64 last_offset;
        u64 isize = i_size_read(inode);
        int ret;
        struct page *page;
        unsigned long nr_pages = 0;
        struct extent_map *em;
        struct address_space *mapping = inode->i_mapping;
        struct extent_map_tree *em_tree;
        struct extent_io_tree *tree;
        u64 end;
        int misses = 0;

        last_offset = bio_end_offset(cb->orig_bio);
        em_tree = &BTRFS_I(inode)->extent_tree;
        tree = &BTRFS_I(inode)->io_tree;

        if (isize == 0)
                return 0;

        end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

        while (last_offset < compressed_end) {
                pg_index = last_offset >> PAGE_SHIFT;

                if (pg_index > end_index)
                        break;

                rcu_read_lock();
                page = radix_tree_lookup(&mapping->page_tree, pg_index);
                rcu_read_unlock();
                if (page && !radix_tree_exceptional_entry(page)) {
                        misses++;
                        if (misses > 4)
                                break;
                        goto next;
                }

                page = __page_cache_alloc(mapping_gfp_constraint(mapping,
                                                                 ~__GFP_FS));
                if (!page)
                        break;

                if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
                        put_page(page);
                        goto next;
                }

                end = last_offset + PAGE_SIZE - 1;
                /*
                 * at this point, we have a locked page in the page cache
                 * for these bytes in the file.  But, we have to make
                 * sure they map to this compressed extent on disk.
                 */
                set_page_extent_mapped(page);
                lock_extent(tree, last_offset, end);
                read_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, last_offset,
                                           PAGE_SIZE);
                read_unlock(&em_tree->lock);

                if (!em || last_offset < em->start ||
                    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
                    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
                        free_extent_map(em);
                        unlock_extent(tree, last_offset, end);
                        unlock_page(page);
                        put_page(page);
                        break;
                }
                free_extent_map(em);

                if (page->index == end_index) {
                        char *userpage;
                        size_t zero_offset = isize & (PAGE_SIZE - 1);

                        if (zero_offset) {
                                int zeros;
                                zeros = PAGE_SIZE - zero_offset;
                                userpage = kmap_atomic(page);
                                memset(userpage + zero_offset, 0, zeros);
                                flush_dcache_page(page);
                                kunmap_atomic(userpage);
                        }
                }

                ret = bio_add_page(cb->orig_bio, page,
                                   PAGE_SIZE, 0);

                if (ret == PAGE_SIZE) {
                        nr_pages++;
                        put_page(page);
                } else {
                        unlock_extent(tree, last_offset, end);
                        unlock_page(page);
                        put_page(page);
                        break;
                }
next:
                last_offset += PAGE_SIZE;
        }
        return 0;
}
/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct extent_io_tree *tree;
        struct extent_map_tree *em_tree;
        struct compressed_bio *cb;
        unsigned long compressed_len;
        unsigned long nr_pages;
        unsigned long pg_index;
        struct page *page;
        struct block_device *bdev;
        struct bio *comp_bio;
        u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
        u64 em_len;
        u64 em_start;
        struct extent_map *em;
        blk_status_t ret = BLK_STS_RESOURCE;
        int faili = 0;
        u32 *sums;

        tree = &BTRFS_I(inode)->io_tree;
        em_tree = &BTRFS_I(inode)->extent_tree;

        /* we need the actual starting offset of this extent in the file */
        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree,
                                   page_offset(bio->bi_io_vec->bv_page),
                                   PAGE_SIZE);
        read_unlock(&em_tree->lock);
        if (!em)
                return BLK_STS_IOERR;

        compressed_len = em->block_len;
        cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
        if (!cb)
                goto out;

        refcount_set(&cb->pending_bios, 0);
        cb->errors = 0;
        cb->inode = inode;
        cb->mirror_num = mirror_num;
        sums = &cb->sums;

        cb->start = em->orig_start;
        em_len = em->len;
        em_start = em->start;

        free_extent_map(em);
        em = NULL;

        cb->len = bio->bi_iter.bi_size;
        cb->compressed_len = compressed_len;
        cb->compress_type = extent_compress_type(bio_flags);
        cb->orig_bio = bio;

        nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
        cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
                                       GFP_NOFS);
        if (!cb->compressed_pages)
                goto fail1;

        bdev = fs_info->fs_devices->latest_bdev;

        for (pg_index = 0; pg_index < nr_pages; pg_index++) {
                cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
                                                            __GFP_HIGHMEM);
                if (!cb->compressed_pages[pg_index]) {
                        faili = pg_index - 1;
                        ret = BLK_STS_RESOURCE;
                        goto fail2;
                }
        }
        faili = nr_pages - 1;
        cb->nr_pages = nr_pages;

        add_ra_bio_pages(inode, em_start + em_len, cb);

        /* include any pages we added in add_ra-bio_pages */
        cb->len = bio->bi_iter.bi_size;

        comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
        bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
        comp_bio->bi_private = cb;
        comp_bio->bi_end_io = end_compressed_bio_read;
        refcount_set(&cb->pending_bios, 1);

        for (pg_index = 0; pg_index < nr_pages; pg_index++) {
                int submit = 0;

                page = cb->compressed_pages[pg_index];
                page->mapping = inode->i_mapping;
                page->index = em_start >> PAGE_SHIFT;

                if (comp_bio->bi_iter.bi_size)
                        submit = tree->ops->merge_bio_hook(page, 0,
                                                           PAGE_SIZE,
                                                           comp_bio, 0);

                page->mapping = NULL;
                if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
                    PAGE_SIZE) {
                        ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
                                                  BTRFS_WQ_ENDIO_DATA);
                        BUG_ON(ret); /* -ENOMEM */

                        /*
                         * inc the count before we submit the bio so
                         * we know the end IO handler won't happen before
                         * we inc the count.  Otherwise, the cb might get
                         * freed before we're done setting it up
                         */
                        refcount_inc(&cb->pending_bios);

                        if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
                                ret = btrfs_lookup_bio_sums(inode, comp_bio,
                                                            sums);
                                BUG_ON(ret); /* -ENOMEM */
                        }
                        sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
                                             fs_info->sectorsize);

                        ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
                        if (ret) {
                                comp_bio->bi_status = ret;
                                bio_endio(comp_bio);
                        }

                        comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
                        bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
                        comp_bio->bi_private = cb;
                        comp_bio->bi_end_io = end_compressed_bio_read;

                        bio_add_page(comp_bio, page, PAGE_SIZE, 0);
                }
                cur_disk_byte += PAGE_SIZE;
        }

        ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
        BUG_ON(ret); /* -ENOMEM */

        if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
                ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
                BUG_ON(ret); /* -ENOMEM */
        }

        ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
        if (ret) {
                comp_bio->bi_status = ret;
                bio_endio(comp_bio);
        }

        return 0;

fail2:
        while (faili >= 0) {
                __free_page(cb->compressed_pages[faili]);
                faili--;
        }

        kfree(cb->compressed_pages);
fail1:
        kfree(cb);
out:
        free_extent_map(em);
        return ret;
}
/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)
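/*
 * In effect (derived from the two constants above): 16 bytes are read at
 * the start of each 256 byte stride, so at most 1/16th of the input range
 * is ever copied into the sample buffer.
 */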
/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED * \
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
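/*
 * Worked out from the definitions above: BTRFS_MAX_UNCOMPRESSED is 128KiB,
 * so MAX_SAMPLE_SIZE = 131072 * 16 / 256 = 8192 bytes, i.e. 512 samples of
 * 16 bytes each, matching the 8KB bound described in the comment.
 */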
struct bucket_item {
        u32 count;
};

struct heuristic_ws {
        /* Partial copy of input data */
        u8 *sample;
        u32 sample_size;
        /* Buckets store counters for each byte value */
        struct bucket_item *bucket;
        struct list_head list;
};
static void free_heuristic_ws(struct list_head *ws)
{
        struct heuristic_ws *workspace;

        workspace = list_entry(ws, struct heuristic_ws, list);

        kvfree(workspace->sample);
        kfree(workspace->bucket);
        kfree(workspace);
}
static struct list_head *alloc_heuristic_ws(void)
{
        struct heuristic_ws *ws;

        ws = kzalloc(sizeof(*ws), GFP_KERNEL);
        if (!ws)
                return ERR_PTR(-ENOMEM);

        ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
        if (!ws->sample)
                goto fail;

        ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
        if (!ws->bucket)
                goto fail;

        INIT_LIST_HEAD(&ws->list);
        return &ws->list;
fail:
        free_heuristic_ws(&ws->list);
        return ERR_PTR(-ENOMEM);
}
struct workspaces_list {
        struct list_head idle_ws;
        spinlock_t ws_lock;
        /* Number of free workspaces */
        int free_ws;
        /* Total number of allocated workspaces */
        atomic_t total_ws;
        /* Waiters for a free workspace */
        wait_queue_head_t ws_wait;
};

static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];

static struct workspaces_list btrfs_heuristic_ws;
static const struct btrfs_compress_op * const btrfs_compress_op[] = {
        &btrfs_zlib_compress,
        &btrfs_lzo_compress,
        &btrfs_zstd_compress,
};
void __init btrfs_init_compress(void)
{
        struct list_head *workspace;
        int i;

        INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
        spin_lock_init(&btrfs_heuristic_ws.ws_lock);
        atomic_set(&btrfs_heuristic_ws.total_ws, 0);
        init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);

        workspace = alloc_heuristic_ws();
        if (IS_ERR(workspace)) {
                pr_warn(
        "BTRFS: cannot preallocate heuristic workspace, will try later\n");
        } else {
                atomic_set(&btrfs_heuristic_ws.total_ws, 1);
                btrfs_heuristic_ws.free_ws = 1;
                list_add(workspace, &btrfs_heuristic_ws.idle_ws);
        }

        for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
                INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
                spin_lock_init(&btrfs_comp_ws[i].ws_lock);
                atomic_set(&btrfs_comp_ws[i].total_ws, 0);
                init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);

                /*
                 * Preallocate one workspace for each compression type so
                 * we can guarantee forward progress in the worst case
                 */
                workspace = btrfs_compress_op[i]->alloc_workspace();
                if (IS_ERR(workspace)) {
                        pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
                } else {
                        atomic_set(&btrfs_comp_ws[i].total_ws, 1);
                        btrfs_comp_ws[i].free_ws = 1;
                        list_add(workspace, &btrfs_comp_ws[i].idle_ws);
                }
        }
}
/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation makes a forward progress guarantee and we do not return
 * errors.
 */
static struct list_head *__find_workspace(int type, bool heuristic)
{
        struct list_head *workspace;
        int cpus = num_online_cpus();
        int idx = type - 1;
        unsigned nofs_flag;
        struct list_head *idle_ws;
        spinlock_t *ws_lock;
        atomic_t *total_ws;
        wait_queue_head_t *ws_wait;
        int *free_ws;

        if (heuristic) {
                idle_ws  = &btrfs_heuristic_ws.idle_ws;
                ws_lock  = &btrfs_heuristic_ws.ws_lock;
                total_ws = &btrfs_heuristic_ws.total_ws;
                ws_wait  = &btrfs_heuristic_ws.ws_wait;
                free_ws  = &btrfs_heuristic_ws.free_ws;
        } else {
                idle_ws  = &btrfs_comp_ws[idx].idle_ws;
                ws_lock  = &btrfs_comp_ws[idx].ws_lock;
                total_ws = &btrfs_comp_ws[idx].total_ws;
                ws_wait  = &btrfs_comp_ws[idx].ws_wait;
                free_ws  = &btrfs_comp_ws[idx].free_ws;
        }

again:
        spin_lock(ws_lock);
        if (!list_empty(idle_ws)) {
                workspace = idle_ws->next;
                list_del(workspace);
                (*free_ws)--;
                spin_unlock(ws_lock);
                return workspace;
        }
        if (atomic_read(total_ws) > cpus) {
                DEFINE_WAIT(wait);

                spin_unlock(ws_lock);
                prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
                if (atomic_read(total_ws) > cpus && !*free_ws)
                        schedule();
                finish_wait(ws_wait, &wait);
                goto again;
        }
        atomic_inc(total_ws);
        spin_unlock(ws_lock);

        /*
         * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
         * to turn it off here because we might get called from the restricted
         * context of btrfs_compress_bio/btrfs_compress_pages
         */
        nofs_flag = memalloc_nofs_save();
        if (heuristic)
                workspace = alloc_heuristic_ws();
        else
                workspace = btrfs_compress_op[idx]->alloc_workspace();
        memalloc_nofs_restore(nofs_flag);

        if (IS_ERR(workspace)) {
                atomic_dec(total_ws);
                wake_up(ws_wait);

                /*
                 * Do not return the error but go back to waiting. There's a
                 * workspace preallocated for each type and the compression
                 * time is bounded so we get to a workspace eventually. This
                 * makes our caller's life easier.
                 *
                 * To prevent silent and low-probability deadlocks (when the
                 * initial preallocation fails), check if there are any
                 * workspaces at all.
                 */
                if (atomic_read(total_ws) == 0) {
                        static DEFINE_RATELIMIT_STATE(_rs,
                                        /* once per minute */ 60 * HZ,
                                        /* no burst */ 1);

                        if (__ratelimit(&_rs)) {
                                pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
                        }
                }
                goto again;
        }
        return workspace;
}

static struct list_head *find_workspace(int type)
{
        return __find_workspace(type, false);
}
/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void __free_workspace(int type, struct list_head *workspace,
                             bool heuristic)
{
        int idx = type - 1;
        struct list_head *idle_ws;
        spinlock_t *ws_lock;
        atomic_t *total_ws;
        wait_queue_head_t *ws_wait;
        int *free_ws;

        if (heuristic) {
                idle_ws  = &btrfs_heuristic_ws.idle_ws;
                ws_lock  = &btrfs_heuristic_ws.ws_lock;
                total_ws = &btrfs_heuristic_ws.total_ws;
                ws_wait  = &btrfs_heuristic_ws.ws_wait;
                free_ws  = &btrfs_heuristic_ws.free_ws;
        } else {
                idle_ws  = &btrfs_comp_ws[idx].idle_ws;
                ws_lock  = &btrfs_comp_ws[idx].ws_lock;
                total_ws = &btrfs_comp_ws[idx].total_ws;
                ws_wait  = &btrfs_comp_ws[idx].ws_wait;
                free_ws  = &btrfs_comp_ws[idx].free_ws;
        }

        spin_lock(ws_lock);
        if (*free_ws <= num_online_cpus()) {
                list_add(workspace, idle_ws);
                (*free_ws)++;
                spin_unlock(ws_lock);
                goto wake;
        }
        spin_unlock(ws_lock);

        if (heuristic)
                free_heuristic_ws(workspace);
        else
                btrfs_compress_op[idx]->free_workspace(workspace);
        atomic_dec(total_ws);
wake:
        /*
         * Make sure counter is updated before we wake up waiters.
         */
        smp_mb();
        if (waitqueue_active(ws_wait))
                wake_up(ws_wait);
}

static void free_workspace(int type, struct list_head *ws)
{
        return __free_workspace(type, ws, false);
}
/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
        struct list_head *workspace;
        int i;

        while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
                workspace = btrfs_heuristic_ws.idle_ws.next;
                list_del(workspace);
                free_heuristic_ws(workspace);
                atomic_dec(&btrfs_heuristic_ws.total_ws);
        }

        for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
                while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
                        workspace = btrfs_comp_ws[i].idle_ws.next;
                        list_del(workspace);
                        btrfs_compress_op[i]->free_workspace(workspace);
                        atomic_dec(&btrfs_comp_ws[i].total_ws);
                }
        }
}
/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - compression algo are 0-3
 * - the level are bits 4-7
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we cross the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 *
 * @max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
                         u64 start, struct page **pages,
                         unsigned long *out_pages,
                         unsigned long *total_in,
                         unsigned long *total_out)
{
        struct list_head *workspace;
        int ret;
        int type = type_level & 0xF;

        workspace = find_workspace(type);

        btrfs_compress_op[type - 1]->set_level(workspace, type_level);
        ret = btrfs_compress_op[type - 1]->compress_pages(workspace, mapping,
                                                          start, pages,
                                                          out_pages,
                                                          total_in, total_out);
        free_workspace(type, workspace);
        return ret;
}
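/*
 * Illustration of the encoding described above (a reading of this file's
 * convention, not a stable API guarantee): zlib is compression type 1, so
 * zlib at level 9 would arrive as type_level = (9 << 4) | 1; "& 0xF"
 * recovers the type and bits 4-7 carry the level for set_level().
 */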
/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * orig_bio contains the pages from the file that we want to decompress into
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
        struct list_head *workspace;
        int ret;
        int type = cb->compress_type;

        workspace = find_workspace(type);
        ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
        free_workspace(type, workspace);

        return ret;
}
/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
                     unsigned long start_byte, size_t srclen, size_t destlen)
{
        struct list_head *workspace;
        int ret;

        workspace = find_workspace(type);

        ret = btrfs_compress_op[type - 1]->decompress(workspace, data_in,
                                                      dest_page, start_byte,
                                                      srclen, destlen);

        free_workspace(type, workspace);
        return ret;
}
void btrfs_exit_compress(void)
{
        free_workspaces();
}
/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset of the start of our workspace buffer within
 * the uncompressed stream.
 *
 * total_out is the last byte of the buffer
 */
int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
                              unsigned long total_out, u64 disk_start,
                              struct bio *bio)
{
        unsigned long buf_offset;
        unsigned long current_buf_start;
        unsigned long start_byte;
        unsigned long prev_start_byte;
        unsigned long working_bytes = total_out - buf_start;
        unsigned long bytes;
        char *kaddr;
        struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

        /*
         * start byte is the first byte of the page we're currently
         * copying into relative to the start of the compressed data.
         */
        start_byte = page_offset(bvec.bv_page) - disk_start;

        /* we haven't yet hit data corresponding to this page */
        if (total_out <= start_byte)
                return 1;

        /*
         * the start of the data we care about is offset into
         * the middle of our working buffer
         */
        if (total_out > start_byte && buf_start < start_byte) {
                buf_offset = start_byte - buf_start;
                working_bytes -= buf_offset;
        } else {
                buf_offset = 0;
        }
        current_buf_start = buf_start;

        /* copy bytes from the working buffer into the pages */
        while (working_bytes > 0) {
                bytes = min_t(unsigned long, bvec.bv_len,
                              PAGE_SIZE - buf_offset);
                bytes = min(bytes, working_bytes);

                kaddr = kmap_atomic(bvec.bv_page);
                memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
                kunmap_atomic(kaddr);
                flush_dcache_page(bvec.bv_page);

                buf_offset += bytes;
                working_bytes -= bytes;
                current_buf_start += bytes;

                /* check if we need to pick another page */
                bio_advance(bio, bytes);
                if (!bio->bi_iter.bi_size)
                        return 0;
                bvec = bio_iter_iovec(bio, bio->bi_iter);
                prev_start_byte = start_byte;
                start_byte = page_offset(bvec.bv_page) - disk_start;

                /*
                 * We need to make sure we're only adjusting
                 * our offset into compression working buffer when
                 * we're switching pages.  Otherwise we can incorrectly
                 * keep copying when we were actually done.
                 */
                if (start_byte != prev_start_byte) {
                        /*
                         * make sure our new page is covered by this
                         * working buffer
                         */
                        if (total_out <= start_byte)
                                return 1;

                        /*
                         * the next page in the biovec might not be adjacent
                         * to the last page, but it might still be found
                         * inside this working buffer. bump our offset pointer
                         */
                        if (total_out > start_byte &&
                            current_buf_start < start_byte) {
                                buf_offset = start_byte - buf_start;
                                working_bytes = total_out - start_byte;
                                current_buf_start = buf_start + buf_offset;
                        }
                }
        }

        return 1;
}
/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
 *			    and can be compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)

/*
 * For increased precision in shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after comma:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
 * - 13 * 4 = 52 < 64		-> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
        return ilog2(n * n * n * n);
}
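/*
 * Worked example (follows directly from the definition): ilog2_w(n) =
 * ilog2(n^4), i.e. 4 * log2(n) rounded down, so ilog2_w(2) = 4 and
 * ilog2_w(8192) = ilog2(2^52) = 52. The factor of 4 keeps two extra
 * fractional bits of log2(n) that a plain ilog2() would truncate.
 */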
static u32 shannon_entropy(struct heuristic_ws *ws)
{
        const u32 entropy_max = 8 * ilog2_w(2);
        u32 entropy_sum = 0;
        u32 p, p_base, sz_base;
        u32 i;

        sz_base = ilog2_w(ws->sample_size);
        for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
                p = ws->bucket[i].count;
                p_base = ilog2_w(p);
                entropy_sum += p * (sz_base - p_base);
        }

        entropy_sum /= ws->sample_size;
        return entropy_sum * 100 / entropy_max;
}
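/*
 * Scaling check, worked by hand from the code above: for a uniform random
 * sample of 8192 bytes every bucket counts 32, so sz_base = 52, p_base = 20
 * and entropy_sum = 256 * 32 * (52 - 20) / 8192 = 32, which equals
 * entropy_max (8 * ilog2_w(2) = 32) and therefore reports 100%.
 */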
/* Compare buckets by count so that sort() orders them in descending order */
static int bucket_comp_rev(const void *lv, const void *rv)
{
        const struct bucket_item *l = (const struct bucket_item *)lv;
        const struct bucket_item *r = (const struct bucket_item *)rv;

        return r->count - l->count;
}
/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values. The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data). Unlikely to be compressible.
 *
 * Other possibility is normal (Gaussian) distribution, where the data could
 * be potentially compressible, but we have to take a few more steps to decide
 * how much.
 *
 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 *			 compression algorithms can easily fix that
 * @BYTE_CORE_SET_HIGH - data have a uniform distribution and with high
 *			 probability are not compressible
 */
#define BYTE_CORE_SET_LOW	(64)
#define BYTE_CORE_SET_HIGH	(200)
static int byte_core_set_size(struct heuristic_ws *ws)
{
        u32 i;
        u32 coreset_sum = 0;
        const u32 core_set_threshold = ws->sample_size * 90 / 100;
        struct bucket_item *bucket = ws->bucket;

        /* Sort in reverse order */
        sort(bucket, BUCKET_SIZE, sizeof(*bucket), &bucket_comp_rev, NULL);

        for (i = 0; i < BYTE_CORE_SET_LOW; i++)
                coreset_sum += bucket[i].count;

        if (coreset_sum > core_set_threshold)
                return i;

        for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
                coreset_sum += bucket[i].count;
                if (coreset_sum > core_set_threshold)
                        break;
        }

        return i;
}
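/*
 * Example with the constants above: for a full 8192 byte sample the 90%
 * threshold is 7372 bytes. If the 64 most frequent byte values already
 * cover more than that, the function returns right after the first loop
 * with i == BYTE_CORE_SET_LOW.
 */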
/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data the byte set is restricted to a limited
 * number of possible characters, and that restriction in most cases makes
 * data easy to compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD	(64)

static u32 byte_set_size(const struct heuristic_ws *ws)
{
        u32 i;
        u32 byte_set_size = 0;

        for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
                if (ws->bucket[i].count > 0)
                        byte_set_size++;
        }

        /*
         * Continue collecting count of byte values in buckets.  If the byte
         * set size is bigger than the threshold, it's pointless to continue,
         * the detection technique would fail for this type of data.
         */
        for (; i < BUCKET_SIZE; i++) {
                if (ws->bucket[i].count > 0) {
                        byte_set_size++;
                        if (byte_set_size > BYTE_SET_THRESHOLD)
                                return byte_set_size;
                }
        }

        return byte_set_size;
}
static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
        const u32 half_of_sample = ws->sample_size / 2;
        const u8 *data = ws->sample;

        return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}
static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
                                     struct heuristic_ws *ws)
{
        struct page *page;
        u64 index, index_end;
        u32 i, curr_sample_pos;
        u8 *in_data;

        /*
         * Compression handles the input data by chunks of 128KiB
         * (defined by BTRFS_MAX_UNCOMPRESSED)
         *
         * We do the same for the heuristic and loop over the whole range.
         *
         * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
         * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
         */
        if (end - start > BTRFS_MAX_UNCOMPRESSED)
                end = start + BTRFS_MAX_UNCOMPRESSED;

        index = start >> PAGE_SHIFT;
        index_end = end >> PAGE_SHIFT;

        /* Don't miss unaligned end */
        if (!IS_ALIGNED(end, PAGE_SIZE))
                index_end++;

        curr_sample_pos = 0;
        while (index < index_end) {
                page = find_get_page(inode->i_mapping, index);
                in_data = kmap(page);
                /* Handle case where the start is not aligned to PAGE_SIZE */
                i = start % PAGE_SIZE;
                while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
                        /* Don't sample any garbage from the last page */
                        if (start > end - SAMPLING_READ_SIZE)
                                break;
                        memcpy(&ws->sample[curr_sample_pos], &in_data[i],
                               SAMPLING_READ_SIZE);
                        i += SAMPLING_INTERVAL;
                        start += SAMPLING_INTERVAL;
                        curr_sample_pos += SAMPLING_READ_SIZE;
                }
                kunmap(page);
                put_page(page);

                index++;
        }

        ws->sample_size = curr_sample_pos;
}
/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic to
 * quickly (compared to direct compression) detect data characteristics
 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
        struct list_head *ws_list = __find_workspace(0, true);
        struct heuristic_ws *ws;
        u32 i;
        u8 byte;
        int ret = 0;

        ws = list_entry(ws_list, struct heuristic_ws, list);

        heuristic_collect_sample(inode, start, end, ws);

        if (sample_repeated_patterns(ws)) {
                ret = 1;
                goto out;
        }

        memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);

        for (i = 0; i < ws->sample_size; i++) {
                byte = ws->sample[i];
                ws->bucket[byte].count++;
        }

        i = byte_set_size(ws);
        if (i < BYTE_SET_THRESHOLD) {
                ret = 2;
                goto out;
        }

        i = byte_core_set_size(ws);
        if (i <= BYTE_CORE_SET_LOW) {
                ret = 3;
                goto out;
        }

        if (i >= BYTE_CORE_SET_HIGH) {
                ret = 0;
                goto out;
        }

        i = shannon_entropy(ws);
        if (i <= ENTROPY_LVL_ACEPTABLE) {
                ret = 4;
                goto out;
        }

        /*
         * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
         * needed to give green light to compression.
         *
         * For now just assume that compression at that level is not worth the
         * resources because:
         *
         * 1. it is possible to defrag the data later
         *
         * 2. the data would turn out to be hardly compressible, eg. 150 byte
         * values, every bucket has counter at level ~54. The heuristic would
         * be confused. This can happen when data have some internal repeated
         * patterns like "abbacbbc...". This can be detected by analyzing
         * pairs of bytes, which is too costly.
         */
        if (i < ENTROPY_LVL_HIGH) {
                ret = 5;
                goto out;
        } else {
                ret = 0;
                goto out;
        }

out:
        __free_workspace(0, ws_list, true);
        return ret;
}
unsigned int btrfs_compress_str2level(const char *str)
{
        if (strncmp(str, "zlib", 4) != 0)
                return 0;

        /* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */
        if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
                return str[5] - '0';

        return BTRFS_ZLIB_DEFAULT_LEVEL;
}
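/*
 * Examples of how the rules above resolve: "zlib:3" yields 3, a bare
 * "zlib" (or a malformed suffix such as "zlib:0") falls through to
 * BTRFS_ZLIB_DEFAULT_LEVEL, and any string not starting with "zlib"
 * yields 0.
 */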