// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
	case BTRFS_COMPRESS_LZO:
	case BTRFS_COMPRESS_ZSTD:
	case BTRFS_COMPRESS_NONE:
		return btrfs_compress_types[type];
	}

	return NULL;
}
static int btrfs_decompress_bio(struct compressed_bio *cb);
static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}
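
/*
 * Verify the checksums of the compressed pages: cb->sums holds one expected
 * checksum per compressed page; each page is mapped, checksummed and compared,
 * and a mismatch is reported via btrfs_print_data_csum_error() and makes the
 * read fail with -EIO.  Inodes flagged NODATASUM are skipped entirely.
 */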
static int check_compressed_csum(struct btrfs_inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (inode->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
		btrfs_csum_final(csum, (u8 *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			btrfs_print_data_csum_error(inode, disk_start, csum,
					*cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;
		disk_start += PAGE_SIZE;
	}
	ret = 0;
fail:
	return ret;
}
/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
	int ret = 0;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that
	 * read-repair can work properly.
	 */
	ASSERT(btrfs_io_bio(cb->orig_bio));
	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
	cb->mirror_num = mirror;

	/*
	 * Some IO in this cb has failed, just skip checksum as there
	 * is no way it could be correct.
	 */
	if (cb->errors == 1)
		goto csum_failed;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, lets start
	 * the decompression.
	 */
	ret = btrfs_decompress_bio(cb);

csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		struct bio_vec *bvec;
		struct bvec_iter_all iter_all;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		ASSERT(!bio_flagged(bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}
/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}
/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0],
			cb->start, cb->start + cb->len - 1,
			bio->bi_status == BLK_STS_OK);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}
/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
				 unsigned long len, u64 disk_start,
				 unsigned long compressed_len,
				 struct page **compressed_pages,
				 unsigned long nr_pages,
				 unsigned int write_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio *bio = NULL;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	blk_status_t ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(!PAGE_ALIGNED(start));
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		return BLK_STS_RESOURCE;
	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = fs_info->fs_devices->latest_bdev;

	bio = btrfs_bio_alloc(bdev, first_byte);
	bio->bi_opf = REQ_OP_WRITE | write_flags;
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	refcount_set(&cb->pending_bios, 1);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		int submit = 0;

		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
							  0);

		page->mapping = NULL;
		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count. Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(inode, bio, start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(fs_info, bio, 0, 1);
			if (ret) {
				bio->bi_status = ret;
				bio_endio(bio);
			}

			bio = btrfs_bio_alloc(bdev, first_byte);
			bio->bi_opf = REQ_OP_WRITE | write_flags;
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(fs_info,
					"bytes left %lu compress len %lu nr %lu",
			       bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, bio, 0, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}

	return 0;
}
static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = bio_last_bvec_all(bio);

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}
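
/*
 * Readahead helper: keep adding pages from the inode's address space to
 * cb->orig_bio as long as they are still covered by the same compressed
 * extent on disk, so that one decompression pass can fill them as well.
 * Pages that are already cached, or offsets that stop mapping to this
 * extent, end the loop.
 */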
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	last_offset = bio_end_offset(cb->orig_bio);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		page = xa_load(&mapping->i_pages, pg_index);
		if (page && !xa_is_value(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = offset_in_page(isize);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}
/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	blk_status_t ret = BLK_STS_RESOURCE;
	int faili = 0;
	u32 *sums;

	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio_first_page_all(bio)),
				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return BLK_STS_IOERR;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							      __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = BLK_STS_RESOURCE;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	cb->len = bio->bi_iter.bi_size;

	comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
	comp_bio->bi_opf = REQ_OP_READ;
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	refcount_set(&cb->pending_bios, 1);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		int submit = 0;

		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE,
							  comp_bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(inode, comp_bio,
							    sums);
				BUG_ON(ret); /* -ENOMEM */
			}
			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
					     fs_info->sectorsize);

			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
			if (ret) {
				comp_bio->bi_status = ret;
				bio_endio(comp_bio);
			}

			comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
			comp_bio->bi_opf = REQ_OP_READ;
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
		cur_disk_byte += PAGE_SIZE;
	}

	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
	if (ret) {
		comp_bio->bi_status = ret;
		bio_endio(comp_bio);
	}

	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}
/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied from for each sample
 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED * \
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	/* Sorting buffer */
	struct bucket_item *bucket_b;
	struct list_head list;
};
static struct workspace_manager heuristic_wsm;

static void heuristic_init_workspace_manager(void)
{
	btrfs_init_workspace_manager(&heuristic_wsm, &btrfs_heuristic_compress);
}

static void heuristic_cleanup_workspace_manager(void)
{
	btrfs_cleanup_workspace_manager(&heuristic_wsm);
}

static struct list_head *heuristic_get_workspace(unsigned int level)
{
	return btrfs_get_workspace(&heuristic_wsm, level);
}

static void heuristic_put_workspace(struct list_head *ws)
{
	btrfs_put_workspace(&heuristic_wsm, ws);
}
static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace->bucket_b);
	kfree(workspace);
}
static struct list_head *alloc_heuristic_ws(unsigned int level)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
	if (!ws->bucket_b)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}
const struct btrfs_compress_op btrfs_heuristic_compress = {
	.init_workspace_manager = heuristic_init_workspace_manager,
	.cleanup_workspace_manager = heuristic_cleanup_workspace_manager,
	.get_workspace = heuristic_get_workspace,
	.put_workspace = heuristic_put_workspace,
	.alloc_workspace = alloc_heuristic_ws,
	.free_workspace = free_heuristic_ws,
};
static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	/* The heuristic is represented as compression type 0 */
	&btrfs_heuristic_compress,
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};
void btrfs_init_workspace_manager(struct workspace_manager *wsm,
				  const struct btrfs_compress_op *ops)
{
	struct list_head *workspace;

	wsm->ops = ops;

	INIT_LIST_HEAD(&wsm->idle_ws);
	spin_lock_init(&wsm->ws_lock);
	atomic_set(&wsm->total_ws, 0);
	init_waitqueue_head(&wsm->ws_wait);

	/*
	 * Preallocate one workspace for each compression type so we can
	 * guarantee forward progress in the worst case
	 */
	workspace = wsm->ops->alloc_workspace(0);
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate compression workspace, will try later\n");
	} else {
		atomic_set(&wsm->total_ws, 1);
		wsm->free_ws = 1;
		list_add(workspace, &wsm->idle_ws);
	}
}
void btrfs_cleanup_workspace_manager(struct workspace_manager *wsman)
{
	struct list_head *ws;

	while (!list_empty(&wsman->idle_ws)) {
		ws = wsman->idle_ws.next;
		list_del(ws);
		wsman->ops->free_workspace(ws);
		atomic_dec(&wsman->total_ws);
	}
}
/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation makes a forward progress guarantees and we do not return
 * errors.
 */
struct list_head *btrfs_get_workspace(struct workspace_manager *wsm,
				      unsigned int level)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	idle_ws	 = &wsm->idle_ws;
	ws_lock	 = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait	 = &wsm->ws_wait;
	free_ws	 = &wsm->free_ws;

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	workspace = wsm->ops->alloc_workspace(level);
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}
static struct list_head *get_workspace(int type, int level)
{
	return btrfs_compress_op[type]->get_workspace(level);
}
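
/*
 * Typical pairing, as used by btrfs_compress_pages() and
 * btrfs_decompress_bio() below:
 *
 *	workspace = get_workspace(type, level);
 *	ret = btrfs_compress_op[type]->compress_pages(workspace, ...);
 *	put_workspace(type, workspace);
 *
 * A workspace must always be given back so that waiters can make progress.
 */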
/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws)
{
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	idle_ws	 = &wsm->idle_ws;
	ws_lock	 = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait	 = &wsm->ws_wait;
	free_ws	 = &wsm->free_ws;

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(ws, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	wsm->ops->free_workspace(ws);
	atomic_dec(total_ws);
wake:
	cond_wake_up(ws_wait);
}
static void put_workspace(int type, struct list_head *ws)
{
	return btrfs_compress_op[type]->put_workspace(ws);
}
/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - compression algo are 0-3
 * - the level are bits 4-7
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we cross the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 *
 * @max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	int type = btrfs_compress_type(type_level);
	int level = btrfs_compress_level(type_level);
	struct list_head *workspace;
	int ret;

	level = btrfs_compress_op[type]->set_level(level);
	workspace = get_workspace(type, level);
	ret = btrfs_compress_op[type]->compress_pages(workspace, mapping,
						      start, pages,
						      out_pages,
						      total_in, total_out);
	put_workspace(type, workspace);
	return ret;
}
/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * orig_bio contains the pages from the file that we want to decompress into
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = get_workspace(type, 0);
	ret = btrfs_compress_op[type]->decompress_bio(workspace, cb);
	put_workspace(type, workspace);

	return ret;
}
/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = get_workspace(type, 0);
	ret = btrfs_compress_op[type]->decompress(workspace, data_in,
						  dest_page, start_byte,
						  srclen, destlen);
	put_workspace(type, workspace);

	return ret;
}
void __init btrfs_init_compress(void)
{
	int i;

	for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
		btrfs_compress_op[i]->init_workspace_manager();
}

void __cold btrfs_exit_compress(void)
{
	int i;

	for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
		btrfs_compress_op[i]->cleanup_workspace_manager();
}
/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset (in the uncompressed data) of the start of our
 * workspace buffer.
 *
 * total_out is the last byte of the buffer
 */
int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio *bio)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long prev_start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

	/*
	 * start byte is the first byte of the page we're currently
	 * copying into relative to the start of the compressed data.
	 */
	start_byte = page_offset(bvec.bv_page) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min_t(unsigned long, bvec.bv_len,
				PAGE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);

		kaddr = kmap_atomic(bvec.bv_page);
		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(bvec.bv_page);

		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		bio_advance(bio, bytes);
		if (!bio->bi_iter.bi_size)
			return 0;
		bvec = bio_iter_iovec(bio, bio->bi_iter);
		prev_start_byte = start_byte;
		start_byte = page_offset(bvec.bv_page) - disk_start;

		/*
		 * We need to make sure we're only adjusting
		 * our offset into compression working buffer when
		 * we're switching pages.  Otherwise we can incorrectly
		 * keep copying when we were actually done.
		 */
		if (start_byte != prev_start_byte) {
			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer. bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}
/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
 *			    and can be compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)

/*
 * For increased precision in shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after comma:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
 * - 13 * 4 = 52 < 64		-> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}
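
/*
 * ilog2(n * n * n * n) == ilog2(n^4) ~= 4 * log2(n), so every logarithm used
 * by shannon_entropy() is scaled by 4 and keeps two extra fractional bits of
 * precision.  Consequently entropy_max below is 8 * ilog2_w(2) == 32, not 8.
 */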
static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}
#define RADIX_BASE		4U
#define COUNTERS_SIZE		(1U << RADIX_BASE)

static u8 get4bits(u64 num, int shift) {
	u8 low4bits;

	num >>= shift;
	/* Reverse order */
	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
	return low4bits;
}
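
/*
 * get4bits() returns the 4-bit digit of @num at @shift, complemented against
 * COUNTERS_SIZE - 1.  Feeding the complemented digits to the counting passes
 * below makes radix_sort() order the buckets by count in descending order,
 * which is what byte_core_set_size() relies on ("Sort in reverse order").
 */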
/*
 * Use 4 bits as radix base
 * Use 16 u32 counters for calculating new position in buf array
 *
 * @array     - array that will be sorted
 * @array_buf - buffer array to store sorting results
 *              must be equal in size to @array
 * @num       - array size
 */
static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
		       int num)
{
	u64 max_num;
	u64 buf_num;
	u32 counters[COUNTERS_SIZE];
	u32 new_addr;
	u32 addr;
	int bitlen;
	int shift;
	int i;

	/*
	 * Try avoid useless loop iterations for small numbers stored in big
	 * counters.  Example: 48 33 4 ... in 64bit array
	 */
	max_num = array[0].count;
	for (i = 1; i < num; i++) {
		buf_num = array[i].count;
		if (buf_num > max_num)
			max_num = buf_num;
	}

	buf_num = ilog2(max_num);
	bitlen = ALIGN(buf_num, RADIX_BASE * 2);

	shift = 0;
	while (shift < bitlen) {
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array_buf[new_addr] = array[i];
		}

		shift += RADIX_BASE;

		/*
		 * Normal radix expects to move data from a temporary array, to
		 * the main one.  But that requires some CPU time. Avoid that
		 * by doing another sort iteration to original array instead of
		 * memcpy()
		 */
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array[new_addr] = array_buf[i];
		}

		shift += RADIX_BASE;
	}
}
/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values. The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data). Unlikely to be compressible.
 *
 * Other possibility is normal (Gaussian) distribution, where the data could
 * be potentially compressible, but we have to take a few more steps to decide
 * how much.
 *
 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 *                       compression algo can easy fix that
 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
 *                       probability is not compressible
 */
#define BYTE_CORE_SET_LOW		(64)
#define BYTE_CORE_SET_HIGH		(200)

static int byte_core_set_size(struct heuristic_ws *ws)
{
	u32 i;
	u32 coreset_sum = 0;
	const u32 core_set_threshold = ws->sample_size * 90 / 100;
	struct bucket_item *bucket = ws->bucket;

	/* Sort in reverse order */
	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);

	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
		coreset_sum += bucket[i].count;

	if (coreset_sum > core_set_threshold)
		return i;

	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
		coreset_sum += bucket[i].count;
		if (coreset_sum > core_set_threshold)
			break;
	}

	return i;
}
/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data byte set is restricted to limited number of
 * possible characters, and that restriction in most cases makes data easy to
 * compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD		(64)

static u32 byte_set_size(const struct heuristic_ws *ws)
{
	u32 i;
	u32 byte_set_size = 0;

	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
		if (ws->bucket[i].count > 0)
			byte_set_size++;
	}

	/*
	 * Continue collecting count of byte values in buckets.  If the byte
	 * set size is bigger than the threshold, it's pointless to continue,
	 * the detection technique would fail for this type of data.
	 */
	for (; i < BUCKET_SIZE; i++) {
		if (ws->bucket[i].count > 0) {
			byte_set_size++;
			if (byte_set_size > BYTE_SET_THRESHOLD)
				return byte_set_size;
		}
	}

	return byte_set_size;
}
static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
	const u32 half_of_sample = ws->sample_size / 2;
	const u8 *data = ws->sample;

	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}
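
/*
 * Comparing the first and second halves of the sample is a cheap way to catch
 * trivially repetitive input, eg. a long run of identical bytes or a buffer
 * filled with the same repeating block, before doing any bucket statistics.
 */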
static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data by chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED)
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss unaligned end */
	if (!IS_ALIGNED(end, PAGE_SIZE))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap(page);
		/* Handle case where the start is not aligned to PAGE_SIZE */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
					SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap(page);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}
/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic to
 * quickly (compared to direct compression) detect data characteristics
 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	struct list_head *ws_list = get_workspace(0, 0);
	struct heuristic_ws *ws;
	u32 i;
	u8 byte;
	int ret = 0;

	ws = list_entry(ws_list, struct heuristic_ws, list);

	heuristic_collect_sample(inode, start, end, ws);

	if (sample_repeated_patterns(ws)) {
		ret = 1;
		goto out;
	}

	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);

	for (i = 0; i < ws->sample_size; i++) {
		byte = ws->sample[i];
		ws->bucket[byte].count++;
	}

	i = byte_set_size(ws);
	if (i < BYTE_SET_THRESHOLD) {
		ret = 2;
		goto out;
	}

	i = byte_core_set_size(ws);
	if (i <= BYTE_CORE_SET_LOW) {
		ret = 3;
		goto out;
	}

	if (i >= BYTE_CORE_SET_HIGH) {
		ret = 0;
		goto out;
	}

	i = shannon_entropy(ws);
	if (i <= ENTROPY_LVL_ACEPTABLE) {
		ret = 4;
		goto out;
	}

	/*
	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
	 * needed to give green light to compression.
	 *
	 * For now just assume that compression at that level is not worth the
	 * resources because:
	 *
	 * 1. it is possible to defrag the data later
	 *
	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
	 *    values, every bucket has counter at level ~54. The heuristic would
	 *    be confused. This can happen when data have some internal repeated
	 *    patterns like "abbacbbc...". This can be detected by analyzing
	 *    pairs of bytes, which is too costly.
	 */
	if (i < ENTROPY_LVL_HIGH) {
		ret = 5;
		goto out;
	} else {
		ret = 0;
		goto out;
	}

out:
	put_workspace(0, ws_list);
	return ret;
}
/*
 * Convert the compression suffix (eg. after "zlib" starting with ":") to
 * level, unrecognized string will set the default level
 */
unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
{
	unsigned int level = 0;
	int ret;

	if (!type)
		return 0;

	if (str[0] == ':') {
		ret = kstrtouint(str + 1, 10, &level);
		if (ret)
			level = 0;
	}

	level = btrfs_compress_op[type]->set_level(level);

	return level;
}