/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/log2.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
static int btrfs_decompress_bio(struct compressed_bio *cb);

static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}
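/*
 * Worked example (illustrative): with 4KiB sectors and the 4-byte crc32c
 * checksums btrfs uses, a 128KiB compressed extent reserves room for
 * DIV_ROUND_UP(131072, 4096) = 32 per-sector checksums, i.e.
 * sizeof(struct compressed_bio) + 32 * 4 bytes.
 */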
static int check_compressed_csum(struct btrfs_inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (inode->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];

		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
		btrfs_csum_final(csum, (u8 *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			btrfs_print_data_csum_error(inode, disk_start, csum,
						    *cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;
		disk_start += PAGE_SIZE;
	}
	ret = 0;
fail:
	return ret;
}
/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
	int ret = 0;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that
	 * read-repair can work properly.
	 */
	ASSERT(btrfs_io_bio(cb->orig_bio));
	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
	cb->mirror_num = mirror;

	/*
	 * Some IO in this cb have failed, just skip checksum as there
	 * is no way it could be correct.
	 */
	if (cb->errors == 1)
		goto csum_failed;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	ret = btrfs_decompress_bio(cb);

csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int i;
		struct bio_vec *bvec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		ASSERT(!bio_flagged(bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, cb->orig_bio, i)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}
/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}
/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL,
					 bio->bi_status ?
					 BLK_STS_OK : BLK_STS_NOTSUPP);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}
/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
				 unsigned long len, u64 disk_start,
				 unsigned long compressed_len,
				 struct page **compressed_pages,
				 unsigned long nr_pages,
				 unsigned int write_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio *bio = NULL;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	blk_status_t ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(start & ((u64)PAGE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		return BLK_STS_RESOURCE;
	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = fs_info->fs_devices->latest_bdev;

	bio = btrfs_bio_alloc(bdev, first_byte);
	bio->bi_opf = REQ_OP_WRITE | write_flags;
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	refcount_set(&cb->pending_bios, 1);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		int submit = 0;

		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			submit = io_tree->ops->merge_bio_hook(page, 0,
							      PAGE_SIZE,
							      bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(inode, bio, start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(fs_info, bio, 0, 1);
			if (ret) {
				bio->bi_status = ret;
				bio_endio(bio);
			}

			bio = btrfs_bio_alloc(bdev, first_byte);
			bio->bi_opf = REQ_OP_WRITE | write_flags;
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(fs_info,
				   "bytes left %lu compress len %lu nr %lu",
				   bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
	}

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, bio, 0, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}

	return 0;
}
static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = &bio->bi_io_vec[bio->bi_vcnt - 1];

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;

	last_offset = bio_end_offset(cb->orig_bio);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		page = radix_tree_lookup(&mapping->page_tree, pg_index);
		if (page && !radix_tree_exceptional_entry(page)) {
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}
/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	blk_status_t ret = BLK_STS_RESOURCE;
	int faili = 0;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio->bi_io_vec->bv_page),
				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return BLK_STS_IOERR;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							    __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = BLK_STS_RESOURCE;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	cb->len = bio->bi_iter.bi_size;

	comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
	bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	refcount_set(&cb->pending_bios, 1);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		int submit = 0;

		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			submit = tree->ops->merge_bio_hook(page, 0,
							   PAGE_SIZE,
							   comp_bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(inode, comp_bio,
							    sums);
				BUG_ON(ret); /* -ENOMEM */
			}
			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
					     fs_info->sectorsize);

			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
			if (ret) {
				comp_bio->bi_status = ret;
				bio_endio(comp_bio);
			}

			comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
		cur_disk_byte += PAGE_SIZE;
	}

	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
	if (ret) {
		comp_bio->bi_status = ret;
		bio_endio(comp_bio);
	}

	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}
/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied from the input for each
 *			 sample
 * @SAMPLING_INTERVAL  - distance between the starts of two consecutive samples
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
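/*
 * Worked example (illustrative): with BTRFS_MAX_UNCOMPRESSED = 128KiB the
 * sampler visits at most 131072 / 256 = 512 locations and copies 16 bytes
 * from each, so MAX_SAMPLE_SIZE = 512 * 16 = 8192 bytes (8KiB), matching
 * the bound described above.
 */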
struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	struct list_head list;
};

static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(void)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}
struct workspaces_list {
	struct list_head idle_ws;
	spinlock_t ws_lock;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
};

static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];

static struct workspaces_list btrfs_heuristic_ws;

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};

void __init btrfs_init_compress(void)
{
	struct list_head *workspace;
	int i;

	INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
	spin_lock_init(&btrfs_heuristic_ws.ws_lock);
	atomic_set(&btrfs_heuristic_ws.total_ws, 0);
	init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);

	workspace = alloc_heuristic_ws();
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate heuristic workspace, will try later\n");
	} else {
		atomic_set(&btrfs_heuristic_ws.total_ws, 1);
		btrfs_heuristic_ws.free_ws = 1;
		list_add(workspace, &btrfs_heuristic_ws.idle_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);

		/*
		 * Preallocate one workspace for each compression type so
		 * we can guarantee forward progress in the worst case
		 */
		workspace = btrfs_compress_op[i]->alloc_workspace();
		if (IS_ERR(workspace)) {
			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
		} else {
			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
			btrfs_comp_ws[i].free_ws = 1;
			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
		}
	}
}
/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation provides a forward progress guarantee and we do not return
 * errors.
 */
static struct list_head *__find_workspace(int type, bool heuristic)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws	 = &btrfs_heuristic_ws.idle_ws;
		ws_lock	 = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait	 = &btrfs_heuristic_ws.ws_wait;
		free_ws	 = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws	 = &btrfs_comp_ws[idx].idle_ws;
		ws_lock	 = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait	 = &btrfs_comp_ws[idx].ws_wait;
		free_ws	 = &btrfs_comp_ws[idx].free_ws;
	}

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	if (heuristic)
		workspace = alloc_heuristic_ws();
	else
		workspace = btrfs_compress_op[idx]->alloc_workspace();
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces left.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}
static struct list_head *find_workspace(int type)
{
	return __find_workspace(type, false);
}
/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void __free_workspace(int type, struct list_head *workspace,
			     bool heuristic)
{
	int idx = type - 1;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws	 = &btrfs_heuristic_ws.idle_ws;
		ws_lock	 = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait	 = &btrfs_heuristic_ws.ws_wait;
		free_ws	 = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws	 = &btrfs_comp_ws[idx].idle_ws;
		ws_lock	 = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait	 = &btrfs_comp_ws[idx].ws_wait;
		free_ws	 = &btrfs_comp_ws[idx].free_ws;
	}

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(workspace, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	if (heuristic)
		free_heuristic_ws(workspace);
	else
		btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(total_ws);
wake:
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(ws_wait))
		wake_up(ws_wait);
}

static void free_workspace(int type, struct list_head *ws)
{
	return __free_workspace(type, ws, false);
}
/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
		workspace = btrfs_heuristic_ws.idle_ws.next;
		list_del(workspace);
		free_heuristic_ws(workspace);
		atomic_dec(&btrfs_heuristic_ws.total_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
			workspace = btrfs_comp_ws[i].idle_ws.next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&btrfs_comp_ws[i].total_ws);
		}
	}
}
/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - compression algo are 0-3
 * - the level are bits 4-7
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we cross the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 *
 * @max_out tells us the max number of bytes that we're allowed to
 * stuff into the pages
 */
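/*
 * Encoding example (illustrative, following the bit layout described above
 * and the mask used below): zlib is compression type 1, so requesting zlib
 * at level 9 is passed down as type_level 0x91 -- type = 0x91 & 0xF = 1 and
 * level = 0x91 >> 4 = 9.
 */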
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	struct list_head *workspace;
	int ret;
	int type = type_level & 0xF;

	workspace = find_workspace(type);

	btrfs_compress_op[type - 1]->set_level(workspace, type_level);
	ret = btrfs_compress_op[type - 1]->compress_pages(workspace, mapping,
							  start, pages,
							  out_pages,
							  total_in, total_out);
	free_workspace(type, workspace);
	return ret;
}
/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * orig_bio contains the pages from the file that we want to decompress into
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = find_workspace(type);
	ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
	free_workspace(type, workspace);

	return ret;
}
/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type - 1]->decompress(workspace, data_in,
						      dest_page, start_byte,
						      srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}
void btrfs_exit_compress(void)
{
	free_workspaces();
}
/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset, in the decompressed data, of the start of our
 * workspace buffer.
 *
 * total_out is the last byte of the buffer
 */
int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio *bio)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long prev_start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

	/*
	 * start byte is the first byte of the page we're currently
	 * copying into relative to the start of the compressed data.
	 */
	start_byte = page_offset(bvec.bv_page) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min_t(unsigned long, bvec.bv_len,
				PAGE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);

		kaddr = kmap_atomic(bvec.bv_page);
		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(bvec.bv_page);

		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		bio_advance(bio, bytes);
		if (!bio->bi_iter.bi_size)
			break;
		bvec = bio_iter_iovec(bio, bio->bi_iter);
		prev_start_byte = start_byte;
		start_byte = page_offset(bvec.bv_page) - disk_start;

		/*
		 * We need to make sure we're only adjusting
		 * our offset into compression working buffer when
		 * we're switching pages.  Otherwise we can incorrectly
		 * keep copying when we were actually done.
		 */
		if (start_byte != prev_start_byte) {
			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer. bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}
/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
 *			    and can be compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)
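/*
 * For reference (illustrative): the classic Shannon entropy of the sample is
 *
 *	H = -sum over all byte values b of p(b) * log2(p(b)),  0 <= H <= 8
 *
 * where p(b) = bucket[b].count / sample_size.  shannon_entropy() below
 * reports H / 8 as a percentage, using ilog2_w() as an integer approximation
 * of log2(), so a return value of 65 means roughly 5.2 bits are needed per
 * byte on average.
 */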
/*
 * For increased precision in shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after the decimal point:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
 * - 13 * 4 = 52 < 64		-> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}

static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}
/* Compare buckets by usage count, descending (ie. sort in reverse order) */
static int bucket_comp_rev(const void *lv, const void *rv)
{
	const struct bucket_item *l = (const struct bucket_item *)lv;
	const struct bucket_item *r = (const struct bucket_item *)rv;

	return r->count - l->count;
}
/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values. The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data). Unlikely to be compressible.
 *
 * Other possibility is normal (Gaussian) distribution, where the data could
 * be potentially compressible, but we have to take a few more steps to decide
 * how much.
 *
 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 *			 compression algo can easily fix that
 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
 *			 probability is not compressible
 */
#define BYTE_CORE_SET_LOW		(64)
#define BYTE_CORE_SET_HIGH		(200)
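/*
 * Worked example (illustrative): the buckets are sorted by count in
 * descending order and the counts are summed until the running total exceeds
 * 90% of the sample.  If the 64 most frequent byte values already cover 90%
 * of an 8KiB sample the data is treated as compressible; if more than 200
 * distinct values are needed the distribution is close to uniform and the
 * data is treated as not compressible.
 */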
static int byte_core_set_size(struct heuristic_ws *ws)
{
	u32 i;
	u32 coreset_sum = 0;
	const u32 core_set_threshold = ws->sample_size * 90 / 100;
	struct bucket_item *bucket = ws->bucket;

	/* Sort in reverse order */
	sort(bucket, BUCKET_SIZE, sizeof(*bucket), &bucket_comp_rev, NULL);

	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
		coreset_sum += bucket[i].count;

	if (coreset_sum > core_set_threshold)
		return i;

	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
		coreset_sum += bucket[i].count;
		if (coreset_sum > core_set_threshold)
			break;
	}

	return i;
}
/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data byte set is restricted to limited number of
 * possible characters, and that restriction in most cases makes data easy to
 * compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD		(64)
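/*
 * Example (illustrative): a plain-text config file made of lowercase
 * letters, digits, whitespace and a handful of punctuation characters uses
 * only a few dozen distinct byte values, so it stays under
 * BYTE_SET_THRESHOLD and is reported as compressible without further
 * analysis.
 */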
static u32 byte_set_size(const struct heuristic_ws *ws)
{
	u32 i;
	u32 byte_set_size = 0;

	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
		if (ws->bucket[i].count > 0)
			byte_set_size++;
	}

	/*
	 * Continue collecting count of byte values in buckets.  If the byte
	 * set size is bigger than the threshold, it's pointless to continue,
	 * the detection technique would fail for this type of data.
	 */
	for (; i < BUCKET_SIZE; i++) {
		if (ws->bucket[i].count > 0) {
			byte_set_size++;
			if (byte_set_size > BYTE_SET_THRESHOLD)
				return byte_set_size;
		}
	}

	return byte_set_size;
}
static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
	const u32 half_of_sample = ws->sample_size / 2;
	const u8 *data = ws->sample;

	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}
static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data by chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED)
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss unaligned end */
	if (!IS_ALIGNED(end, PAGE_SIZE))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap(page);
		/* Handle case where the start is not aligned to PAGE_SIZE */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
			       SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap(page);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}
/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic to
 * quickly (compared to direct compression) detect data characteristics
 * (compressible/incompressible) to avoid wasting CPU time on incompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	struct list_head *ws_list = __find_workspace(0, true);
	struct heuristic_ws *ws;
	u32 i;
	u8 byte;
	int ret = 0;

	ws = list_entry(ws_list, struct heuristic_ws, list);

	heuristic_collect_sample(inode, start, end, ws);

	if (sample_repeated_patterns(ws)) {
		ret = 1;
		goto out;
	}

	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);

	for (i = 0; i < ws->sample_size; i++) {
		byte = ws->sample[i];
		ws->bucket[byte].count++;
	}

	i = byte_set_size(ws);
	if (i < BYTE_SET_THRESHOLD) {
		ret = 2;
		goto out;
	}

	i = byte_core_set_size(ws);
	if (i <= BYTE_CORE_SET_LOW) {
		ret = 3;
		goto out;
	}

	if (i >= BYTE_CORE_SET_HIGH) {
		ret = 0;
		goto out;
	}

	i = shannon_entropy(ws);
	if (i <= ENTROPY_LVL_ACEPTABLE) {
		ret = 4;
		goto out;
	}

	/*
	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
	 * needed to give green light to compression.
	 *
	 * For now just assume that compression at that level is not worth the
	 * resources because:
	 *
	 * 1. it is possible to defrag the data later
	 *
	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
	 *    values, every bucket has counter at level ~54. The heuristic would
	 *    be confused. This can happen when data have some internal repeated
	 *    patterns like "abbacbbc...". This can be detected by analyzing
	 *    pairs of bytes, which is too costly.
	 */
	if (i < ENTROPY_LVL_HIGH) {
		ret = 5;
		goto out;
	} else {
		ret = 0;
		goto out;
	}

out:
	__free_workspace(0, ws_list, true);
	return ret;
}
unsigned int btrfs_compress_str2level(const char *str)
{
	if (strncmp(str, "zlib", 4) != 0)
		return 0;

	/* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */
	if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
		return str[5] - '0';

	return BTRFS_ZLIB_DEFAULT_LEVEL;
}