/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/printk.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>
#include "blk.h"

#define POOL_SIZE       64
#define ISA_POOL_SIZE   16

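/*
 * Emergency pools, reserved at init time so bouncing can make forward
 * progress under memory pressure: page_pool backs bouncing of pages
 * above the queue's bounce limit (e.g. highmem), isa_page_pool backs
 * bouncing into the ISA DMA zone (GFP_DMA). The two bio_sets are used
 * to clone and, when needed, split the bios being bounced.
 */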
static struct bio_set *bounce_bio_set, *bounce_bio_split;
static mempool_t *page_pool, *isa_page_pool;

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
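/*
 * Set up the emergency page pool and the bio_sets used for bouncing.
 * Without memory hotplug the pool is skipped when there is no highmem;
 * with hotplug, highmem may appear after boot, so the pool is always
 * created.
 */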
static __init int init_emergency_pool(void)
{
#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
        if (max_pfn <= max_low_pfn)
                return 0;
#endif

        page_pool = mempool_create_page_pool(POOL_SIZE, 0);
        BUG_ON(!page_pool);
        pr_info("pool size: %d pages\n", POOL_SIZE);

        bounce_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
        BUG_ON(!bounce_bio_set);
        if (bioset_integrity_create(bounce_bio_set, BIO_POOL_SIZE))
                BUG_ON(1);

        bounce_bio_split = bioset_create(BIO_POOL_SIZE, 0, 0);
        BUG_ON(!bounce_bio_split);

        return 0;
}

__initcall(init_emergency_pool);
#endif

#ifdef CONFIG_HIGHMEM
/*
 * highmem version, map the destination page and copy into the vec
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
        unsigned long flags;
        unsigned char *vto;

        local_irq_save(flags);
        vto = kmap_atomic(to->bv_page);
        memcpy(vto + to->bv_offset, vfrom, to->bv_len);
        kunmap_atomic(vto);
        local_irq_restore(flags);
}

#else /* CONFIG_HIGHMEM */

#define bounce_copy_vec(to, vfrom)      \
        memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */

/*
 * allocate pages in the DMA region for the ISA pool
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
        return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

/*
 * Gets called every time someone inits a queue with BLK_BOUNCE_ISA
 * as the max address, so check whether the pool has already been created.
 */
int init_emergency_isa_pool(void)
{
        if (isa_page_pool)
                return 0;

        isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
                                       mempool_free_pages, (void *) 0);
        BUG_ON(!isa_page_pool);

        pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);
        return 0;
}

/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page. Always
 * kmap it; it will do the Right Thing.
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
        unsigned char *vfrom;
        struct bio_vec tovec, *fromvec = from->bi_io_vec;
        struct bvec_iter iter;

        bio_for_each_segment(tovec, to, iter) {
                if (tovec.bv_page != fromvec->bv_page) {
                        /*
                         * fromvec->bv_offset and fromvec->bv_len might have
                         * been modified by the block layer, so use the original
                         * copy; bounce_copy_vec already uses tovec->bv_len
                         */
                        vfrom = page_address(fromvec->bv_page) +
                                tovec.bv_offset;

                        bounce_copy_vec(&tovec, vfrom);
                        flush_dcache_page(tovec.bv_page);
                }

                fromvec++;
        }
}

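/*
 * Common completion handler for bounced bios: return every bounce page
 * to its pool, propagate the completion status to the original bio and
 * end it, then drop the bounce clone itself.
 */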
static void bounce_end_io(struct bio *bio, mempool_t *pool)
{
        struct bio *bio_orig = bio->bi_private;
        struct bio_vec *bvec, *org_vec;
        int i;
        int start = bio_orig->bi_iter.bi_idx;

        /*
         * free up bounce indirect pages used
         */
        bio_for_each_segment_all(bvec, bio, i) {
                org_vec = bio_orig->bi_io_vec + i + start;

                if (bvec->bv_page == org_vec->bv_page)
                        continue;

                dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
                mempool_free(bvec->bv_page, pool);
        }

        bio_orig->bi_status = bio->bi_status;
        bio_endio(bio_orig);
        bio_put(bio);
}

static void bounce_end_io_write(struct bio *bio)
{
        bounce_end_io(bio, page_pool);
}

static void bounce_end_io_write_isa(struct bio *bio)
{
        bounce_end_io(bio, isa_page_pool);
}

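/*
 * Read completion: the device has filled the bounce pages, so copy the
 * data back into the original (possibly highmem) pages before the
 * common teardown in bounce_end_io().
 */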
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
{
        struct bio *bio_orig = bio->bi_private;

        if (!bio->bi_status)
                copy_to_high_bio_irq(bio_orig, bio);

        bounce_end_io(bio, pool);
}

static void bounce_end_io_read(struct bio *bio)
{
        __bounce_end_io_read(bio, page_pool);
}

static void bounce_end_io_read_isa(struct bio *bio)
{
        __bounce_end_io_read(bio, isa_page_pool);
}

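/*
 * Bounce *bio_orig if needed: split it when it is larger than
 * BIO_MAX_PAGES segments, clone it, replace every segment that lies
 * above the queue's bounce_pfn with a page from the mempool, and copy
 * the data into the bounce pages for writes. The clone then replaces
 * *bio_orig; the end_io handlers copy data back for reads and release
 * the bounce pages.
 */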
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
                               mempool_t *pool)
{
        struct bio *bio;
        int rw = bio_data_dir(*bio_orig);
        struct bio_vec *to, from;
        struct bvec_iter iter;
        unsigned i = 0;
        bool bounce = false;
        int sectors = 0;

        bio_for_each_segment(from, *bio_orig, iter) {
                if (i++ < BIO_MAX_PAGES)
                        sectors += from.bv_len >> 9;
                if (page_to_pfn(from.bv_page) > q->limits.bounce_pfn)
                        bounce = true;
        }
        if (!bounce)
                return;

        if (sectors < bio_sectors(*bio_orig)) {
                bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split);
                bio_chain(bio, *bio_orig);
                generic_make_request(*bio_orig);
                *bio_orig = bio;
        }
        bio = bio_clone_bioset(*bio_orig, GFP_NOIO, bounce_bio_set);

        bio_for_each_segment_all(to, bio, i) {
                struct page *page = to->bv_page;

                if (page_to_pfn(page) <= q->limits.bounce_pfn)
                        continue;

                to->bv_page = mempool_alloc(pool, q->bounce_gfp);
                inc_zone_page_state(to->bv_page, NR_BOUNCE);

                if (rw == WRITE) {
                        char *vto, *vfrom;

                        flush_dcache_page(page);

                        vto = page_address(to->bv_page) + to->bv_offset;
                        vfrom = kmap_atomic(page) + to->bv_offset;
                        memcpy(vto, vfrom, to->bv_len);
                        kunmap_atomic(vfrom);
                }
        }

        trace_block_bio_bounce(q, *bio_orig);

        bio->bi_flags |= (1 << BIO_BOUNCED);

        if (pool == page_pool) {
                bio->bi_end_io = bounce_end_io_write;
                if (rw == READ)
                        bio->bi_end_io = bounce_end_io_read;
        } else {
                bio->bi_end_io = bounce_end_io_write_isa;
                if (rw == READ)
                        bio->bi_end_io = bounce_end_io_read_isa;
        }

        bio->bi_private = *bio_orig;
        *bio_orig = bio;
}

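/*
 * Main entry point. A minimal sketch of how a queue ends up being
 * bounced (illustrative only; error handling omitted and the limit
 * below is just an example):
 *
 *      q = blk_alloc_queue(GFP_KERNEL);
 *      blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 *
 * The block layer then calls blk_queue_bounce() on the submission path
 * and may replace *bio_orig with the bounced clone.
 */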
void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
        mempool_t *pool;

        /*
         * Data-less bio, nothing to bounce
         */
        if (!bio_has_data(*bio_orig))
                return;

        /*
         * For the non-ISA bounce case, just check if the bounce pfn is
         * equal to or bigger than the highest pfn in the system -- in
         * that case, don't waste time iterating over bio segments.
         */
        if (!(q->bounce_gfp & GFP_DMA)) {
                if (q->limits.bounce_pfn >= blk_max_pfn)
                        return;
                pool = page_pool;
        } else {
                BUG_ON(!isa_page_pool);
                pool = isa_page_pool;
        }

        /*
         * slow path
         */
        __blk_queue_bounce(q, bio_orig, pool);
}