/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>

#define POOL_SIZE	64
#define ISA_POOL_SIZE	16

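/*
 * Two emergency pools: page_pool backs bouncing of highmem pages into
 * lowmem, isa_page_pool backs bouncing into GFP_DMA memory for devices
 * limited to ISA-style (24-bit) DMA addressing.
 */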
static mempool_t *page_pool, *isa_page_pool;

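/*
 * Reserve a small pool of lowmem pages at boot so that bouncing can make
 * forward progress even when the page allocator is under pressure. With
 * highmem but no memory hotplug, the pool is skipped entirely when no
 * highmem pages actually exist (max_pfn <= max_low_pfn).
 */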
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
static __init int init_emergency_pool(void)
{
#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
	if (max_pfn <= max_low_pfn)
		return 0;
#endif

	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
	BUG_ON(!page_pool);
	printk("bounce pool size: %d pages\n", POOL_SIZE);

	return 0;
}

__initcall(init_emergency_pool);
#endif

#ifdef CONFIG_HIGHMEM
/*
 * highmem version: kmap the destination vec and copy into it
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
	unsigned long flags;
	unsigned char *vto;

	local_irq_save(flags);
	vto = kmap_atomic(to->bv_page);
	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
	kunmap_atomic(vto);
	local_irq_restore(flags);
}

#else /* CONFIG_HIGHMEM */

#define bounce_copy_vec(to, vfrom)	\
	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */

/*
 * allocate pages in the DMA region for the ISA pool
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

/*
 * Gets called "every" time someone initializes a queue with BLK_BOUNCE_ISA
 * as the max address, so check whether the pool has already been created.
 */
int init_emergency_isa_pool(void)
{
	if (isa_page_pool)
		return 0;

	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
				       mempool_free_pages, (void *) 0);
	BUG_ON(!isa_page_pool);

	printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
	return 0;
}

/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page; always
 * kmap it and it will do the Right Thing.
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
	unsigned char *vfrom;
	struct bio_vec *tovec, *fromvec;
	int i;

	bio_for_each_segment(tovec, to, i) {
		fromvec = from->bi_io_vec + i;

		/*
		 * not bounced
		 */
		if (tovec->bv_page == fromvec->bv_page)
			continue;

		/*
		 * fromvec->bv_offset and fromvec->bv_len might have been
		 * modified by the block layer, so use the original copy,
		 * bounce_copy_vec already uses tovec->bv_len
		 */
		vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;

		bounce_copy_vec(tovec, vfrom);
		flush_dcache_page(tovec->bv_page);
	}
}

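/*
 * Common completion path for bounced bios: return every substituted page
 * to the mempool, complete the original bio stashed in bi_private, then
 * drop the bounce clone itself.
 */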
static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;
	struct bio_vec *bvec, *org_vec;
	int i;

	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
		set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);

	/*
	 * free up bounce indirect pages used
	 */
	bio_for_each_segment_all(bvec, bio, i) {
		org_vec = bio_orig->bi_io_vec + i;
		if (bvec->bv_page == org_vec->bv_page)
			continue;

		dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
		mempool_free(bvec->bv_page, pool);
	}

	bio_endio(bio_orig, err);
	bio_put(bio);
}

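/*
 * Write completions need no copy-back; just release the bounce pages.
 */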
static void bounce_end_io_write(struct bio *bio, int err)
{
	bounce_end_io(bio, page_pool, err);
}

static void bounce_end_io_write_isa(struct bio *bio, int err)
{
	bounce_end_io(bio, isa_page_pool, err);
}

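/*
 * Read completions: the device DMA'd into the bounce pages, so on success
 * the data must be copied back into the original (possibly highmem) pages
 * before the bounce pages are released.
 */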
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;

	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio, pool, err);
}

static void bounce_end_io_read(struct bio *bio, int err)
{
	__bounce_end_io_read(bio, page_pool, err);
}

static void bounce_end_io_read_isa(struct bio *bio, int err)
{
	__bounce_end_io_read(bio, isa_page_pool, err);
}

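/*
 * Stable-page snapshotting: if this is a write to a backing device that
 * requires stable pages and the filesystem has asked for snapshots
 * (MS_SNAP_STABLE), the payload must be bounced so it cannot change
 * underneath the device while the I/O is in flight.
 */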
#ifdef CONFIG_NEED_BOUNCE_POOL
static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
{
	struct page *page;
	struct address_space *mapping;
	struct bio_vec *from;
	int i;

	if (bio_data_dir(bio) != WRITE)
		return 0;

	if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
		return 0;

	/*
	 * Based on the first page that has a valid mapping, decide whether or
	 * not we have to employ bounce buffering to guarantee stable pages.
	 */
	bio_for_each_segment(from, bio, i) {
		page = from->bv_page;
		mapping = page_mapping(page);
		if (!mapping)
			continue;
		return mapping->host->i_sb->s_flags & MS_SNAP_STABLE;
	}

	return 0;
}
#else
static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
{
	return 0;
}
#endif /* CONFIG_NEED_BOUNCE_POOL */

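/*
 * Clone the bio and replace each segment that lies above the queue's
 * bounce limit (or, when @force is set, every segment) with a page from
 * @pool. For writes the payload is copied into the bounce page here; for
 * reads it is copied back in the completion handler.
 */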
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
			       mempool_t *pool, int force)
{
	struct bio *bio;
	int rw = bio_data_dir(*bio_orig);
	struct bio_vec *to, *from;
	unsigned i;

	if (force)
		goto bounce;
	bio_for_each_segment(from, *bio_orig, i)
		if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
			goto bounce;

	return;
bounce:
	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);

	bio_for_each_segment(to, bio, i) {
		struct page *page = to->bv_page;

		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
			continue;

		inc_zone_page_state(to->bv_page, NR_BOUNCE);
		to->bv_page = mempool_alloc(pool, q->bounce_gfp);

		if (rw == WRITE) {
			char *vto, *vfrom;

			flush_dcache_page(page);

			vto = page_address(to->bv_page) + to->bv_offset;
			vfrom = kmap_atomic(page) + to->bv_offset;
			memcpy(vto, vfrom, to->bv_len);
			kunmap_atomic(vfrom);
		}
	}

	trace_block_bio_bounce(q, *bio_orig);

	bio->bi_flags |= (1 << BIO_BOUNCED);

	if (pool == page_pool) {
		bio->bi_end_io = bounce_end_io_write;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read;
	} else {
		bio->bi_end_io = bounce_end_io_write_isa;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read_isa;
	}

	bio->bi_private = *bio_orig;
	*bio_orig = bio;
}

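/*
 * Entry point: decide whether *bio_orig needs bouncing at all, pick the
 * regular or ISA pool based on the queue's bounce_gfp mask, and hand off
 * to __blk_queue_bounce(), which replaces *bio_orig with the bounced
 * clone.
 */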
void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
	int must_bounce;
	mempool_t *pool;

	/*
	 * Data-less bio, nothing to bounce
	 */
	if (!bio_has_data(*bio_orig))
		return;

	must_bounce = must_snapshot_stable_pages(q, *bio_orig);

	/*
	 * For the non-ISA case, just check whether the bounce pfn is equal
	 * to or bigger than the highest pfn in the system -- in that case,
	 * don't waste time iterating over bio segments.
	 */
	if (!(q->bounce_gfp & GFP_DMA)) {
		if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce)
			return;
		pool = page_pool;
	} else {
		BUG_ON(!isa_page_pool);
		pool = isa_page_pool;
	}

	/*
	 * slow path
	 */
	__blk_queue_bounce(q, bio_orig, pool, must_bounce);
}

EXPORT_SYMBOL(blk_queue_bounce);