/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/log2.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
static int btrfs_decompress_bio(struct compressed_bio *cb);
static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}
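/*
 * For a rough sense of scale (assuming the default crc32c checksums of 4
 * bytes each and a 4KiB sectorsize, not anything mandated by the code
 * above): a 16KiB compressed extent needs sizeof(struct compressed_bio)
 * plus 4 * 4 = 16 bytes of checksum space, one checksum per compressed
 * sector.
 */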
static int check_compressed_csum(struct btrfs_inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (inode->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
		btrfs_csum_final(csum, (u8 *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			btrfs_print_data_csum_error(inode, disk_start, csum,
						    *cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;
	}

	ret = 0;
fail:
	return ret;
}
/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally.
 *
 * The compressed pages are freed here, and it must be run
 * in process context.
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
	int ret = 0;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that
	 * read-repair can work properly.
	 */
	ASSERT(btrfs_io_bio(cb->orig_bio));
	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
	cb->mirror_num = mirror;

	/*
	 * Some IO in this cb have failed, just skip checksum as there
	 * is no way it could be correct.
	 */
	if (cb->errors == 1)
		goto csum_failed;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, lets start
	 * the decompression.
	 */
	ret = btrfs_decompress_bio(cb);

csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int i;
		struct bio_vec *bvec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		ASSERT(!bio_flagged(bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, cb->orig_bio, i)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}
/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}
/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL,
					 bio->bi_status ?
					 BLK_STS_OK : BLK_STS_NOTSUPP);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}
/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
				 unsigned long len, u64 disk_start,
				 unsigned long compressed_len,
				 struct page **compressed_pages,
				 unsigned long nr_pages)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio *bio = NULL;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	blk_status_t ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(start & ((u64)PAGE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		return BLK_STS_RESOURCE;
	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = fs_info->fs_devices->latest_bdev;

	bio = btrfs_bio_alloc(bdev, first_byte);
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	refcount_set(&cb->pending_bios, 1);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		int submit = 0;

		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			submit = io_tree->ops->merge_bio_hook(page, 0,
							   PAGE_SIZE,
							   bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(bio);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(inode, bio, start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(fs_info, bio, 0, 1);
			if (ret) {
				bio->bi_status = ret;
				bio_endio(bio);
			}

			bio_put(bio);

			bio = btrfs_bio_alloc(bdev, first_byte);
			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(fs_info,
				   "bytes left %lu compress len %lu nr %lu",
				   bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}
	bio_get(bio);

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, bio, 0, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}

	bio_put(bio);
	return 0;
}
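/*
 * Return the file offset just past the last byte currently added to @bio,
 * i.e. where readahead for this compressed extent should continue.
 */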
static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = &bio->bi_io_vec[bio->bi_vcnt - 1];

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}
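/*
 * Readahead for compressed extents: try to add the pages that follow
 * cb->orig_bio and still map to the same compressed extent on disk, either
 * straight from the page cache or freshly allocated, so that neighbouring
 * reads do not have to fetch and decompress the whole extent again.
 */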
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	last_offset = bio_end_offset(cb->orig_bio);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, pg_index);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}
/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	blk_status_t ret = BLK_STS_RESOURCE;
	int faili = 0;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio->bi_io_vec->bv_page),
				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return BLK_STS_IOERR;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							    __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = BLK_STS_RESOURCE;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra-bio_pages */
	cb->len = bio->bi_iter.bi_size;

	comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
	bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	refcount_set(&cb->pending_bios, 1);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		int submit = 0;

		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			submit = tree->ops->merge_bio_hook(page, 0,
							PAGE_SIZE,
							comp_bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(comp_bio);

			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(inode, comp_bio,
							    sums);
				BUG_ON(ret); /* -ENOMEM */
			}
			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
					     fs_info->sectorsize);

			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
			if (ret) {
				comp_bio->bi_status = ret;
				bio_endio(comp_bio);
			}

			bio_put(comp_bio);

			comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
		cur_disk_byte += PAGE_SIZE;
	}
	bio_get(comp_bio);

	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
	if (ret) {
		comp_bio->bi_status = ret;
		bio_endio(comp_bio);
	}

	bio_put(comp_bio);
	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}
/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied from for each sample
 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED * \
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
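/*
 * Worked numbers for the defaults above: BTRFS_MAX_UNCOMPRESSED is 128KiB,
 * so the heuristic reads from at most 128KiB / SAMPLING_INTERVAL = 512
 * locations, SAMPLING_READ_SIZE = 16 bytes each, which bounds
 * MAX_SAMPLE_SIZE at 8192 bytes as described above.
 */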
struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	struct list_head list;
};
static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace);
}
static struct list_head *alloc_heuristic_ws(void)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}
struct workspaces_list {
	struct list_head idle_ws;
	spinlock_t ws_lock;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
};

static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];

static struct workspaces_list btrfs_heuristic_ws;
static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};
void __init btrfs_init_compress(void)
{
	struct list_head *workspace;
	int i;

	INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
	spin_lock_init(&btrfs_heuristic_ws.ws_lock);
	atomic_set(&btrfs_heuristic_ws.total_ws, 0);
	init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);

	workspace = alloc_heuristic_ws();
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate heuristic workspace, will try later\n");
	} else {
		atomic_set(&btrfs_heuristic_ws.total_ws, 1);
		btrfs_heuristic_ws.free_ws = 1;
		list_add(workspace, &btrfs_heuristic_ws.idle_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);

		/*
		 * Preallocate one workspace for each compression type so
		 * we can guarantee forward progress in the worst case
		 */
		workspace = btrfs_compress_op[i]->alloc_workspace();
		if (IS_ERR(workspace)) {
			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
		} else {
			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
			btrfs_comp_ws[i].free_ws = 1;
			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
		}
	}
}
/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation makes a forward progress guarantee and we do not return
 * errors.
 */
static struct list_head *__find_workspace(int type, bool heuristic)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws	 = &btrfs_heuristic_ws.idle_ws;
		ws_lock	 = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait	 = &btrfs_heuristic_ws.ws_wait;
		free_ws	 = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws	 = &btrfs_comp_ws[idx].idle_ws;
		ws_lock	 = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait	 = &btrfs_comp_ws[idx].ws_wait;
		free_ws	 = &btrfs_comp_ws[idx].free_ws;
	}

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	if (heuristic)
		workspace = alloc_heuristic_ws();
	else
		workspace = btrfs_compress_op[idx]->alloc_workspace();
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

static struct list_head *find_workspace(int type)
{
	return __find_workspace(type, false);
}
/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void __free_workspace(int type, struct list_head *workspace,
			     bool heuristic)
{
	int idx = type - 1;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws	 = &btrfs_heuristic_ws.idle_ws;
		ws_lock	 = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait	 = &btrfs_heuristic_ws.ws_wait;
		free_ws	 = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws	 = &btrfs_comp_ws[idx].idle_ws;
		ws_lock	 = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait	 = &btrfs_comp_ws[idx].ws_wait;
		free_ws	 = &btrfs_comp_ws[idx].free_ws;
	}

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(workspace, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	if (heuristic)
		free_heuristic_ws(workspace);
	else
		btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(total_ws);
wake:
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(ws_wait))
		wake_up(ws_wait);
}

static void free_workspace(int type, struct list_head *ws)
{
	return __free_workspace(type, ws, false);
}
/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
		workspace = btrfs_heuristic_ws.idle_ws.next;
		list_del(workspace);
		free_heuristic_ws(workspace);
		atomic_dec(&btrfs_heuristic_ws.total_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
			workspace = btrfs_comp_ws[i].idle_ws.next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&btrfs_comp_ws[i].total_ws);
		}
	}
}
/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - compression algo are 0-3
 * - the level are bits 4-7
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we cross the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 *
 * @max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
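/*
 * For example, zlib at level 3 is passed in as (3 << 4) | BTRFS_COMPRESS_ZLIB:
 * the low nibble selects the compression operations and the upper bits carry
 * the requested level, which is handed to the algorithm via set_level().
 */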
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	struct list_head *workspace;
	int ret;
	int type = type_level & 0xF;

	workspace = find_workspace(type);

	btrfs_compress_op[type - 1]->set_level(workspace, type_level);
	ret = btrfs_compress_op[type - 1]->compress_pages(workspace, mapping,
						      start, pages,
						      out_pages,
						      total_in, total_out);
	free_workspace(type, workspace);
	return ret;
}
/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * orig_bio contains the pages from the file that we want to decompress into
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = find_workspace(type);
	ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
	free_workspace(type, workspace);

	return ret;
}
/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type - 1]->decompress(workspace, data_in,
						  dest_page, start_byte,
						  srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}
void btrfs_exit_compress(void)
{
	free_workspaces();
}
/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset into the decompressed data of the start of our
 * working buffer.
 *
 * total_out is the offset just past the last byte of the working buffer.
 */
int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio *bio)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long prev_start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

	/*
	 * start byte is the first byte of the page we're currently
	 * copying into relative to the start of the compressed data.
	 */
	start_byte = page_offset(bvec.bv_page) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min_t(unsigned long, bvec.bv_len,
				PAGE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);

		kaddr = kmap_atomic(bvec.bv_page);
		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(bvec.bv_page);

		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		bio_advance(bio, bytes);
		if (!bio->bi_iter.bi_size)
			return 0;
		bvec = bio_iter_iovec(bio, bio->bi_iter);
		prev_start_byte = start_byte;
		start_byte = page_offset(bvec.bv_page) - disk_start;

		/*
		 * We need to make sure we're only adjusting
		 * our offset into compression working buffer when
		 * we're switching pages.  Otherwise we can incorrectly
		 * keep copying when we were actually done.
		 */
		if (start_byte != prev_start_byte) {
			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer. bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}
/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
 *			    and can be compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)
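/*
 * As a reference point: a sample of uniformly random bytes needs close to
 * 8 bits per byte, i.e. ~100%, while a sample dominated by a handful of
 * byte values needs far fewer bits, so it lands at or below
 * ENTROPY_LVL_ACEPTABLE and is treated as compressible.
 */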
/*
 * For increased precision in shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after comma:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
 * - 13 * 4 = 52 < 64		-> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}
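/*
 * In other words ilog2_w(n) = ilog2(n^4) ~= 4 * log2(n), e.g.
 * ilog2_w(1024) = ilog2(2^40) = 40. shannon_entropy() below works on this
 * 4x scale throughout: its entropy_max of 8 * ilog2_w(2) = 32 corresponds
 * to the usual 8 bits per byte.
 */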
static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}
/* Compare buckets by count, for sorting in descending order */
static int bucket_comp_rev(const void *lv, const void *rv)
{
	const struct bucket_item *l = (const struct bucket_item *)lv;
	const struct bucket_item *r = (const struct bucket_item *)rv;

	return r->count - l->count;
}
/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values. The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data). Unlikely to be compressible.
 *
 * Other possibility is normal (Gaussian) distribution, where the data could
 * be potentially compressible, but we have to take a few more steps to decide
 * how much.
 *
 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 *			 compression algo can easy fix that
 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
 *			 probability is not compressible
 */
#define BYTE_CORE_SET_LOW		(64)
#define BYTE_CORE_SET_HIGH		(200)
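/*
 * Concretely: if the 64 most frequent byte values (a quarter of all 256)
 * already cover 90% of the sample, the distribution is skewed enough to
 * compress well; if more than 200 distinct values are needed to reach 90%,
 * the distribution is close to uniform and compression is unlikely to pay off.
 */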
static int byte_core_set_size(struct heuristic_ws *ws)
{
	u32 i;
	u32 coreset_sum = 0;
	const u32 core_set_threshold = ws->sample_size * 90 / 100;
	struct bucket_item *bucket = ws->bucket;

	/* Sort in reverse order */
	sort(bucket, BUCKET_SIZE, sizeof(*bucket), &bucket_comp_rev, NULL);

	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
		coreset_sum += bucket[i].count;

	if (coreset_sum > core_set_threshold)
		return i;

	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
		coreset_sum += bucket[i].count;
		if (coreset_sum > core_set_threshold)
			break;
	}

	return i;
}
/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data byte set is restricted to limited number of
 * possible characters, and that restriction in most cases makes data easy to
 * compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD		(64)
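/*
 * For instance, plain ASCII text rarely uses more than a few dozen distinct
 * byte values, so it stays within the 64-value threshold and is flagged as
 * compressible without any further analysis.
 */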
static u32 byte_set_size(const struct heuristic_ws *ws)
{
	u32 i;
	u32 byte_set_size = 0;

	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
		if (ws->bucket[i].count > 0)
			byte_set_size++;
	}

	/*
	 * Continue collecting count of byte values in buckets.  If the byte
	 * set size is bigger than the threshold, it's pointless to continue,
	 * the detection technique would fail for this type of data.
	 */
	for (; i < BUCKET_SIZE; i++) {
		if (ws->bucket[i].count > 0) {
			byte_set_size++;
			if (byte_set_size > BYTE_SET_THRESHOLD)
				return byte_set_size;
		}
	}

	return byte_set_size;
}
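/*
 * Detect trivially repeated data: the sample is split in half and the two
 * halves are compared byte for byte, so long runs of zeros or a short
 * repeating pattern make the check succeed.
 */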
static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
	const u32 half_of_sample = ws->sample_size / 2;
	const u8 *data = ws->sample;

	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}
static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data by chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED)
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss unaligned end */
	if (!IS_ALIGNED(end, PAGE_SIZE))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap(page);
		/* Handle case where the start is not aligned to PAGE_SIZE */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
			       SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap(page);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}
/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic to
 * quickly (compared to direct compression) detect data characteristics
 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	struct list_head *ws_list = __find_workspace(0, true);
	struct heuristic_ws *ws;
	u32 i;
	u8 byte;
	int ret = 0;

	ws = list_entry(ws_list, struct heuristic_ws, list);

	heuristic_collect_sample(inode, start, end, ws);

	if (sample_repeated_patterns(ws)) {
		ret = 1;
		goto out;
	}

	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);

	for (i = 0; i < ws->sample_size; i++) {
		byte = ws->sample[i];
		ws->bucket[byte].count++;
	}

	i = byte_set_size(ws);
	if (i < BYTE_SET_THRESHOLD) {
		ret = 2;
		goto out;
	}

	i = byte_core_set_size(ws);
	if (i <= BYTE_CORE_SET_LOW) {
		ret = 3;
		goto out;
	}

	if (i >= BYTE_CORE_SET_HIGH) {
		ret = 0;
		goto out;
	}

	i = shannon_entropy(ws);
	if (i <= ENTROPY_LVL_ACEPTABLE) {
		ret = 4;
		goto out;
	}

	/*
	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
	 * needed to give green light to compression.
	 *
	 * For now just assume that compression at that level is not worth the
	 * resources because:
	 *
	 * 1. it is possible to defrag the data later
	 *
	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
	 *    values, every bucket has counter at level ~54. The heuristic would
	 *    be confused. This can happen when data have some internal repeated
	 *    patterns like "abbacbbc...". This can be detected by analyzing
	 *    pairs of bytes, which is too costly.
	 */
	if (i < ENTROPY_LVL_HIGH)
		ret = 5;
	else
		ret = 0;

out:
	__free_workspace(0, ws_list, true);
	return ret;
}
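/*
 * Parse the compression level from a mount option string: e.g. "zlib:7"
 * (as passed by compress=zlib:7) yields level 7. Only zlib carries a level
 * at this point, so anything that does not start with "zlib" maps to 0.
 */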
unsigned int btrfs_compress_str2level(const char *str)
{
	if (strncmp(str, "zlib", 4) != 0)
		return 0;

	/* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */
	if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
		return str[5] - '0';

	return BTRFS_ZLIB_DEFAULT_LEVEL;
}