/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

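/*
 * Split an oversized DISCARD bio so that it respects the queue's discard
 * granularity and max_discard_sectors limits. Returns the split-off front
 * part, or NULL if no split is needed or the limits are unusable.
 */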
static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

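/*
 * Split a WRITE SAME bio that exceeds the queue's max_write_same_sectors
 * limit. Returns the split-off front part, or NULL if the bio already fits
 * or the queue does not support WRITE SAME.
 */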
static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

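/*
 * Return the maximum number of sectors that can be issued starting at this
 * bio's offset, rounded down so the split boundary stays aligned to the
 * queue's logical block size.
 */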
static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
	unsigned mask = queue_logical_block_size(q) - 1;

	/* aligned to logical block size */
	sectors &= ~(mask >> 9);

	return sectors;
}

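/*
 * Walk the bio's bvecs and split it as soon as a queue limit would be
 * exceeded: too many bvecs, an SG gap, too many sectors, or too many
 * physical segments. Returns the split-off front part (to be submitted
 * first), or NULL if the whole bio fits; *segs is set to the physical
 * segment count of the front portion.
 */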
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned seg_size = 0, nsegs = 0, sectors = 0;
	unsigned front_seg_size = bio->bi_seg_front_size;
	bool do_split = true;
	struct bio *new = NULL;
	const unsigned max_sectors = get_max_io_size(q, bio);
	unsigned bvecs = 0;

	bio_for_each_segment(bv, bio, iter) {
		/*
		 * With arbitrary bio size, the incoming bio may be very
		 * big. We have to split the bio into small bios so that
		 * each holds at most BIO_MAX_PAGES bvecs because
		 * bio_clone() can fail to allocate big bvecs.
		 *
		 * It would be better to apply the limit per request queue
		 * in which bio_clone() is involved, instead of globally.
		 * The biggest blocker is the bio_clone() in bio bounce.
		 *
		 * If the bio is split for this reason, we should be
		 * allowed to continue merging bios, but don't do that
		 * now, to keep the change simple.
		 *
		 * TODO: deal with bio bounce's bio_clone() gracefully
		 * and convert the global limit into a per-queue limit.
		 */
		if (bvecs++ >= BIO_MAX_PAGES)
			goto split;

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (sectors + (bv.bv_len >> 9) > max_sectors) {
			/*
			 * Consider this a new segment if we're splitting in
			 * the middle of this vector.
			 */
			if (nsegs < queue_max_segments(q) &&
			    sectors < max_sectors) {
				nsegs++;
				sectors = max_sectors;
			}
			if (sectors)
				goto split;
			/* Make this single bvec the first segment */
		}

		if (bvprvp && blk_queue_cluster(q)) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
				goto new_segment;

			seg_size += bv.bv_len;
			bvprv = bv;
			bvprvp = &bvprv;
			sectors += bv.bv_len >> 9;

			if (nsegs == 1 && seg_size > front_seg_size)
				front_seg_size = seg_size;
			continue;
		}
new_segment:
		if (nsegs == queue_max_segments(q))
			goto split;

		nsegs++;
		bvprv = bv;
		bvprvp = &bvprv;
		seg_size = bv.bv_len;
		sectors += bv.bv_len >> 9;

		if (nsegs == 1 && seg_size > front_seg_size)
			front_seg_size = seg_size;
	}

	do_split = false;
split:
	*segs = nsegs;

	if (do_split) {
		new = bio_split(bio, sectors, GFP_NOIO, bs);
		if (new)
			bio = new;
	}

	bio->bi_seg_front_size = front_seg_size;
	if (seg_size > bio->bi_seg_back_size)
		bio->bi_seg_back_size = seg_size;

	return do_split ? new : NULL;
}

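/*
 * Split *bio if it exceeds the queue's limits. On a split, the remainder is
 * re-submitted via generic_make_request() and *bio is updated to point at
 * the front part that the caller should continue processing. The resulting
 * bio's physical segment count is cached as a side effect.
 */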
void blk_queue_split(struct request_queue *q, struct bio **bio,
		     struct bio_set *bs)
{
	struct bio *split, *res;
	unsigned nsegs;

	if ((*bio)->bi_rw & REQ_DISCARD)
		split = blk_bio_discard_split(q, *bio, bs, &nsegs);
	else if ((*bio)->bi_rw & REQ_WRITE_SAME)
		split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
	else
		split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);

	/* physical segments can be figured out during splitting */
	res = split ? split : *bio;
	res->bi_phys_segments = nsegs;
	bio_set_flag(res, BIO_SEG_VALID);

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_rw |= REQ_NOMERGE;

		bio_chain(split, *bio);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);

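/*
 * Count the physical segments in a chain of bios, honouring the queue's
 * segment size, physical mergeability, and segment boundary limits (or
 * treating every bvec as its own segment when no_sg_merge is set). Also
 * records the front/back segment sizes used for cross-request merging.
 */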
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, prev = 0;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	/*
	 * This should probably be returning 0, but blk_add_request_payload()
	 * (Christoph!!!!)
	 */
	if (bio->bi_rw & REQ_DISCARD)
		return 1;

	if (bio->bi_rw & REQ_WRITE_SAME)
		return 1;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			if (prev && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			prev = 1;
			seg_size = bv.bv_len;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}

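/*
 * Recompute and cache bio->bi_phys_segments. When SG merging is disabled
 * and the estimated segment count fits within the queue's segment limit,
 * the estimate (bi_vcnt, or bio_segments() for cloned bios) is used
 * directly; otherwise a full walk via __blk_recalc_rq_segments() is done.
 */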
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	unsigned short seg_cnt;

	/* estimate segment number by bi_vcnt for non-cloned bio */
	if (bio_flagged(bio, BIO_CLONED))
		seg_cnt = bio_segments(bio);
	else
		seg_cnt = bio->bi_vcnt;

	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
			(seg_cnt < queue_max_segments(q)))
		bio->bi_phys_segments = seg_cnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
		bio->bi_next = nxt;
	}

	bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

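/*
 * Return 1 if the last segment of @bio and the first segment of @nxt can be
 * merged into a single physical segment without violating the queue's
 * segment size, physical mergeability, or segment boundary limits.
 */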
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;
	struct bvec_iter iter;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_for_each_segment(end_bv, bio, iter)
		if (end_bv.bv_len == iter.bi_size)
			break;

	nxt_bv = bio_iovec(nxt);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}

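/*
 * Add one bio_vec to the scatterlist being built, either by extending the
 * current sg entry (when clustering allows it) or by starting a new one.
 */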
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{

	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}

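/*
 * Map a chain of bios to a scatterlist. Discard and WRITE SAME bios are
 * special-cased to at most a single payload segment; everything else is
 * walked bvec by bvec through __blk_segment_map_sg().
 */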
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	if (bio->bi_rw & REQ_DISCARD) {
		/*
		 * This is a hack - drivers should be neither modifying the
		 * biovec, nor relying on bi_vcnt - but because of
		 * blk_add_request_payload(), a discard bio may or may not have
		 * a payload we need to set up here (thank you Christoph) and
		 * bi_vcnt is really the only way of telling if we need to.
		 */

		if (bio->bi_vcnt)
			goto single_segment;

		return 0;
	}

	if (bio->bi_rw & REQ_WRITE_SAME) {
single_segment:
		*sg = sglist;
		bvec = bio_iovec(bio);
		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
		return 1;
	}

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries set up. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have gone wrong if the computed number of segments
	 * is bigger than the number of the request's physical segments.
	 */
	WARN_ON(nsegs > rq->nr_phys_segments);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

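/*
 * Common tail for back/front bio merges: check the combined physical
 * segment count and integrity constraints, then update the request's
 * segment accounting. Returns 1 on success, 0 if the merge is refused.
 */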
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment. Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}

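/*
 * Can @bio be appended to the back of @req? Check SG/integrity gap rules
 * and the resulting request size before handing off to ll_new_hw_segment().
 * ll_front_merge_fn() below is the mirror image for front merges.
 */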
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{

	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload, so
 * it does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

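/*
 * Decide whether two adjacent requests may be combined: neither may be a
 * re-queued/special request, and the merged request must respect the gap,
 * size, segment count, and integrity limits. Updates segment accounting
 * and returns 1 if the merge is allowed.
 */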
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued
	 * request. Can't merge them if they are.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

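/*
 * Account for a request that is disappearing because it was merged into
 * another one: close out its per-partition in-flight statistics.
 */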
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			 struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return 0;

	if (req->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts; update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding. This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}

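/*
 * Quick checks for whether @bio is even a candidate for merging into @rq:
 * mergeable flags, matching data direction, same device, no special
 * payload, compatible integrity profile, and (for WRITE SAME) same buffer.
 */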
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (rq->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	return true;
}

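/*
 * Classify how @bio lines up with @rq: ELEVATOR_BACK_MERGE if it starts
 * right after the request, ELEVATOR_FRONT_MERGE if it ends right before it,
 * ELEVATOR_NO_MERGE otherwise.
 */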
int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}