// SPDX-License-Identifier: GPL-2.0
/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/printk.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>
#include "blk.h"

#define POOL_SIZE	64
#define ISA_POOL_SIZE	16

static struct bio_set bounce_bio_set, bounce_bio_split;
static mempool_t page_pool, isa_page_pool;
#if defined(CONFIG_HIGHMEM)
static __init int init_emergency_pool(void)
{
	int ret;

#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
	if (max_pfn <= max_low_pfn)
		return 0;
#endif

	ret = mempool_init_page_pool(&page_pool, POOL_SIZE, 0);
	BUG_ON(ret);
	pr_info("pool size: %d pages\n", POOL_SIZE);

	ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	BUG_ON(ret);
	if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE))
		BUG_ON(1);

	ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0);
	BUG_ON(ret);

	return 0;
}

__initcall(init_emergency_pool);
#endif
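
/*
 * Illustrative sketch, not part of the original file: the emergency pool
 * above exists so bounce allocations can make forward progress under
 * memory pressure.  The hypothetical helpers below show the alloc/free
 * pairing the pool relies on: mempool_alloc() with a blocking mask like
 * GFP_NOIO may sleep, but it is guaranteed to eventually succeed as long
 * as previously allocated pages are returned with mempool_free().
 */
static inline struct page *example_bounce_page_get(void)
{
	/* draws one page from the emergency pool; cannot fail with GFP_NOIO */
	return mempool_alloc(&page_pool, GFP_NOIO);
}

static inline void example_bounce_page_put(struct page *page)
{
	/* returning pages is what keeps mempool_alloc() above livelock-free */
	mempool_free(page, &page_pool);
}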
#ifdef CONFIG_HIGHMEM
/*
 * highmem version, map in to vec
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
	unsigned char *vto;

	vto = kmap_atomic(to->bv_page);
	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
	kunmap_atomic(vto);
}

#else /* CONFIG_HIGHMEM */

#define bounce_copy_vec(to, vfrom)	\
	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */
82 * allocate pages in the DMA region for the ISA pool
84 static void *mempool_alloc_pages_isa(gfp_t gfp_mask
, void *data
)
86 return mempool_alloc_pages(gfp_mask
| GFP_DMA
, data
);
90 * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
91 * as the max address, so check if the pool has already been created.
93 int init_emergency_isa_pool(void)
97 if (mempool_initialized(&isa_page_pool
))
100 ret
= mempool_init(&isa_page_pool
, ISA_POOL_SIZE
, mempool_alloc_pages_isa
,
101 mempool_free_pages
, (void *) 0);
104 pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE
);
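
/*
 * Illustrative sketch, not part of the original file: the way a driver
 * ends up here is via blk_queue_bounce_limit(), which sets GFP_DMA in
 * q->bounce_gfp and calls init_emergency_isa_pool() for limits below the
 * 16MB ISA boundary.  "example_isa_probe" is a hypothetical driver hook.
 */
static inline void example_isa_probe(struct request_queue *q)
{
	/* device can only DMA into the ISA range; forces ISA bouncing */
	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
}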
/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page. kmap it
 * always, it will do the Right Thing
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
	unsigned char *vfrom;
	struct bio_vec tovec, fromvec;
	struct bvec_iter iter;
	/*
	 * The bio of @from is created by bounce, so we can iterate
	 * its bvec from start to end, but the @from->bi_iter can't be
	 * trusted because it might be changed by splitting.
	 */
	struct bvec_iter from_iter = BVEC_ITER_ALL_INIT;

	bio_for_each_segment(tovec, to, iter) {
		fromvec = bio_iter_iovec(from, from_iter);
		if (tovec.bv_page != fromvec.bv_page) {
			/*
			 * fromvec->bv_offset and fromvec->bv_len might have
			 * been modified by the block layer, so use the original
			 * copy, bounce_copy_vec already uses tovec->bv_len
			 */
			vfrom = page_address(fromvec.bv_page) +
				tovec.bv_offset;

			bounce_copy_vec(&tovec, vfrom);
			flush_dcache_page(tovec.bv_page);
		}
		bio_advance_iter(from, &from_iter, tovec.bv_len);
	}
}
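
/*
 * Illustrative sketch, not part of the original file: BVEC_ITER_ALL_INIT
 * above starts a fresh iterator at segment zero, so the walk covers every
 * bvec the bounce bio owns even after bio->bi_iter was advanced by
 * splitting.  "example_bounce_bytes" is a hypothetical helper showing the
 * same bio_iter_iovec()/bio_advance_iter() pattern in isolation.
 */
static inline unsigned int example_bounce_bytes(struct bio *bio)
{
	struct bvec_iter iter = BVEC_ITER_ALL_INIT;
	struct bio_vec bv;
	unsigned int bytes = 0;

	/* bound the walk by bi_vcnt; bi_size in the iterator is unbounded */
	while (iter.bi_idx < bio->bi_vcnt) {
		bv = bio_iter_iovec(bio, iter);
		bytes += bv.bv_len;
		bio_advance_iter(bio, &iter, bv.bv_len);
	}
	return bytes;
}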
static void bounce_end_io(struct bio *bio, mempool_t *pool)
{
	struct bio *bio_orig = bio->bi_private;
	struct bio_vec *bvec, orig_vec;
	int i;
	struct bvec_iter orig_iter = bio_orig->bi_iter;

	/*
	 * free up bounce indirect pages used
	 */
	bio_for_each_segment_all(bvec, bio, i) {
		orig_vec = bio_iter_iovec(bio_orig, orig_iter);
		if (bvec->bv_page != orig_vec.bv_page) {
			dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
			mempool_free(bvec->bv_page, pool);
		}
		bio_advance_iter(bio_orig, &orig_iter, orig_vec.bv_len);
	}

	bio_orig->bi_status = bio->bi_status;
	bio_endio(bio_orig);
	bio_put(bio);
}
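
/*
 * Illustrative sketch, not part of the original file: bounce_end_io()
 * compares each bounce segment with the original to find pages that were
 * substituted from the pool.  The hypothetical predicate below names
 * that test.
 */
static inline bool example_was_bounced(struct bio_vec *bounce_bv,
				       struct bio_vec *orig_bv)
{
	/* only substituted pages came from the pool and must go back */
	return bounce_bv->bv_page != orig_bv->bv_page;
}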
static void bounce_end_io_write(struct bio *bio)
{
	bounce_end_io(bio, &page_pool);
}

static void bounce_end_io_write_isa(struct bio *bio)
{
	bounce_end_io(bio, &isa_page_pool);
}

static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
{
	struct bio *bio_orig = bio->bi_private;

	if (!bio->bi_status)
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio, pool);
}

static void bounce_end_io_read(struct bio *bio)
{
	__bounce_end_io_read(bio, &page_pool);
}

static void bounce_end_io_read_isa(struct bio *bio)
{
	__bounce_end_io_read(bio, &isa_page_pool);
}
static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
		struct bio_set *bs)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	/*
	 * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
	 * bio_src->bi_io_vec to bio->bi_io_vec.
	 *
	 * We can't do that anymore, because:
	 *
	 *  - The point of cloning the biovec is to produce a bio with a biovec
	 *    the caller can modify: bi_idx and bi_bvec_done should be 0.
	 *
	 *  - The original bio could've had more than BIO_MAX_PAGES biovecs; if
	 *    we tried to clone the whole thing bio_alloc_bioset() would fail.
	 *    But the clone should succeed as long as the number of biovecs we
	 *    actually need to allocate is fewer than BIO_MAX_PAGES.
	 *
	 *  - Lastly, bi_vcnt should not be looked at or relied upon by code
	 *    that does not own the bio - reason being drivers don't use it for
	 *    iterating over the biovec anymore, so expecting it to be kept up
	 *    to date (i.e. for clones that share the parent biovec) is just
	 *    asking for trouble and would force extra work on
	 *    __bio_clone_fast() anyways.
	 */
	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
	if (!bio)
		return NULL;
	bio->bi_disk		= bio_src->bi_disk;
	bio->bi_opf		= bio_src->bi_opf;
	bio->bi_write_hint	= bio_src->bi_write_hint;
	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		break;
	case REQ_OP_WRITE_SAME:
		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
		break;
	default:
		bio_for_each_segment(bv, bio_src, iter)
			bio->bi_io_vec[bio->bi_vcnt++] = bv;
		break;
	}

	if (bio_integrity(bio_src)) {
		int ret;

		ret = bio_integrity_clone(bio, bio_src, gfp_mask);
		if (ret < 0) {
			bio_put(bio);
			return NULL;
		}
	}

	bio_clone_blkcg_association(bio, bio_src);

	return bio;
}
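
/*
 * Illustrative sketch, not part of the original file: the comment in
 * bounce_clone_bio() says a clone must present a biovec the caller can
 * modify from the start.  "example_clone_iter_is_fresh" is a hypothetical
 * check that makes the invariant explicit.
 */
static inline bool example_clone_iter_is_fresh(struct bio *clone)
{
	/* a segment-wise clone starts at bvec 0 with nothing consumed */
	return clone->bi_iter.bi_idx == 0 &&
	       clone->bi_iter.bi_bvec_done == 0;
}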
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
		mempool_t *pool)
{
	struct bio *bio;
	int rw = bio_data_dir(*bio_orig);
	struct bio_vec *to, from;
	struct bvec_iter iter;
	unsigned i = 0;
	bool bounce = false;
	int sectors = 0;
	bool passthrough = bio_is_passthrough(*bio_orig);

	bio_for_each_segment(from, *bio_orig, iter) {
		if (i++ < BIO_MAX_PAGES)
			sectors += from.bv_len >> 9;
		if (page_to_pfn(from.bv_page) > q->limits.bounce_pfn)
			bounce = true;
	}
	if (!bounce)
		return;

	if (!passthrough && sectors < bio_sectors(*bio_orig)) {
		bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
		bio_chain(bio, *bio_orig);
		generic_make_request(*bio_orig);
		*bio_orig = bio;
	}
	bio = bounce_clone_bio(*bio_orig, GFP_NOIO, passthrough ? NULL :
			&bounce_bio_set);

	bio_for_each_segment_all(to, bio, i) {
		struct page *page = to->bv_page;

		if (page_to_pfn(page) <= q->limits.bounce_pfn)
			continue;

		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
		inc_zone_page_state(to->bv_page, NR_BOUNCE);

		if (rw == WRITE) {
			char *vto, *vfrom;

			flush_dcache_page(page);

			vto = page_address(to->bv_page) + to->bv_offset;
			vfrom = kmap_atomic(page) + to->bv_offset;
			memcpy(vto, vfrom, to->bv_len);
			kunmap_atomic(vfrom);
		}
	}

	trace_block_bio_bounce(q, *bio_orig);

	bio->bi_flags |= (1 << BIO_BOUNCED);

	if (pool == &page_pool) {
		bio->bi_end_io = bounce_end_io_write;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read;
	} else {
		bio->bi_end_io = bounce_end_io_write_isa;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read_isa;
	}

	bio->bi_private = *bio_orig;
	*bio_orig = bio;
}
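
/*
 * Illustrative sketch, not part of the original file: the per-segment
 * test applied in __blk_queue_bounce() above, pulled out as a
 * hypothetical predicate.  A page whose pfn lies above the queue's
 * bounce_pfn is not addressable by the device and must be replaced with
 * a pool page below the limit.
 */
static inline bool example_segment_needs_bounce(struct request_queue *q,
						struct bio_vec *bv)
{
	return page_to_pfn(bv->bv_page) > q->limits.bounce_pfn;
}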
void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
	mempool_t *pool;

	/*
	 * Data-less bio, nothing to bounce
	 */
	if (!bio_has_data(*bio_orig))
		return;

	/*
	 * for non-isa bounce case, just check if the bounce pfn is equal
	 * to or bigger than the highest pfn in the system -- in that case,
	 * don't waste time iterating over bio segments
	 */
	if (!(q->bounce_gfp & GFP_DMA)) {
		if (q->limits.bounce_pfn >= blk_max_pfn)
			return;
		pool = &page_pool;
	} else {
		BUG_ON(!mempool_initialized(&isa_page_pool));
		pool = &isa_page_pool;
	}

	/*
	 * slow path
	 */
	__blk_queue_bounce(q, bio_orig, pool);
}
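
/*
 * Illustrative sketch, not part of the original file: callers in the
 * submission path invoke blk_queue_bounce() before mapping the bio; when
 * bouncing is needed, *bio is replaced by the bounce clone and the
 * original is completed from the clone's ->bi_end_io.  "example_submit"
 * is a hypothetical caller.
 */
static inline void example_submit(struct request_queue *q, struct bio *bio)
{
	blk_queue_bounce(q, &bio);	/* may substitute a bounce clone */
	generic_make_request(bio);
}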