/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

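/*
 * A discard bio may cover more sectors than the device accepts in a single
 * request. Split off a front piece capped at max_discard_sectors, rounded
 * down to the discard granularity, so that the remainder starts on an
 * aligned sector.
 */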
static struct bio *blk_bio_discard_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs)
{
        unsigned int max_discard_sectors, granularity;
        int alignment;
        sector_t tmp;
        unsigned split_sectors;

        /* Zero-sector (unknown) and one-sector granularities are the same. */
        granularity = max(q->limits.discard_granularity >> 9, 1U);

        max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
        max_discard_sectors -= max_discard_sectors % granularity;

        if (unlikely(!max_discard_sectors)) {
                /* XXX: warn */
                return NULL;
        }

        if (bio_sectors(bio) <= max_discard_sectors)
                return NULL;

        split_sectors = max_discard_sectors;

        /*
         * If the next starting sector would be misaligned, stop the discard at
         * the previous aligned sector.
         */
        alignment = (q->limits.discard_alignment >> 9) % granularity;

        tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
        tmp = sector_div(tmp, granularity);

        if (split_sectors > tmp)
                split_sectors -= tmp;

        return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

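/*
 * WRITE SAME is limited only by max_write_same_sectors; split off the front
 * of the bio if it exceeds that limit.
 */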
static struct bio *blk_bio_write_same_split(struct request_queue *q,
                                            struct bio *bio,
                                            struct bio_set *bs)
{
        if (!q->limits.max_write_same_sectors)
                return NULL;

        if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
                return NULL;

        return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

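/*
 * Split a regular read/write bio at the first point where it would exceed
 * the queue's size, segment count or SG gap limits, or where a stacked
 * driver's merge_bvec_fn refuses to take another bvec.
 */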
static struct bio *blk_bio_segment_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs)
{
        struct bio *split;
        struct bio_vec bv, bvprv;
        struct bvec_iter iter;
        unsigned seg_size = 0, nsegs = 0;
        int prev = 0;

        struct bvec_merge_data bvm = {
                .bi_bdev        = bio->bi_bdev,
                .bi_sector      = bio->bi_iter.bi_sector,
                .bi_size        = 0,
                .bi_rw          = bio->bi_rw,
        };

        bio_for_each_segment(bv, bio, iter) {
                if (q->merge_bvec_fn &&
                    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
                        goto split;

                bvm.bi_size += bv.bv_len;

                if (bvm.bi_size >> 9 > queue_max_sectors(q))
                        goto split;

                /*
                 * If the queue doesn't support SG gaps and adding this
                 * offset would create a gap, disallow it.
                 */
                if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) &&
                    prev && bvec_gap_to_prev(&bvprv, bv.bv_offset))
                        goto split;

                if (prev && blk_queue_cluster(q)) {
                        if (seg_size + bv.bv_len > queue_max_segment_size(q))
                                goto new_segment;
                        if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
                                goto new_segment;
                        if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
                                goto new_segment;

                        seg_size += bv.bv_len;
                        bvprv = bv;
                        prev = 1;
                        continue;
                }
new_segment:
                if (nsegs == queue_max_segments(q))
                        goto split;

                nsegs++;
                bvprv = bv;
                prev = 1;
                seg_size = bv.bv_len;
        }

        return NULL;
split:
        split = bio_clone_bioset(bio, GFP_NOIO, bs);

        split->bi_iter.bi_size -= iter.bi_size;
        bio->bi_iter = iter;

        if (bio_integrity(bio)) {
                bio_integrity_advance(bio, split->bi_iter.bi_size);
                bio_integrity_trim(split, 0, bio_sectors(split));
        }

        return split;
}

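/*
 * Entry point for honouring queue limits on submission: if *bio is too big,
 * split off a front piece that fits, chain the two halves so completion is
 * reported only once both are done, resubmit the remainder with
 * generic_make_request(), and hand the front piece back in *bio.
 */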
void blk_queue_split(struct request_queue *q, struct bio **bio,
                     struct bio_set *bs)
{
        struct bio *split;

        if ((*bio)->bi_rw & REQ_DISCARD)
                split = blk_bio_discard_split(q, *bio, bs);
        else if ((*bio)->bi_rw & REQ_WRITE_SAME)
                split = blk_bio_write_same_split(q, *bio, bs);
        else
                split = blk_bio_segment_split(q, *bio, q->bio_split);

        if (split) {
                bio_chain(split, *bio);
                generic_make_request(*bio);
                *bio = split;
        }
}
EXPORT_SYMBOL(blk_queue_split);

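/*
 * Walk the bio list and count the physical segments that result once the
 * queue's merge rules (clustering, segment size, boundary masks) are
 * applied. The front/back segment sizes recorded here feed
 * blk_phys_contig_segment() when requests are merged later.
 */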
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio,
                                             bool no_sg_merge)
{
        struct bio_vec bv, bvprv = { NULL };
        int cluster, prev = 0;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;
        struct bvec_iter iter;

        if (!bio)
                return 0;

        /*
         * This should probably be returning 0, but blk_add_request_payload()
         * (Christoph!!!!)
         */
        if (bio->bi_rw & REQ_DISCARD)
                return 1;

        if (bio->bi_rw & REQ_WRITE_SAME)
                return 1;

        fbio = bio;
        cluster = blk_queue_cluster(q);
        seg_size = 0;
        nr_phys_segs = 0;
        for_each_bio(bio) {
                bio_for_each_segment(bv, bio, iter) {
                        /*
                         * If SG merging is disabled, each bio vector is
                         * a segment
                         */
                        if (no_sg_merge)
                                goto new_segment;

                        if (prev && cluster) {
                                if (seg_size + bv.bv_len
                                    > queue_max_segment_size(q))
                                        goto new_segment;
                                if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
                                        goto new_segment;
                                if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
                                        goto new_segment;

                                seg_size += bv.bv_len;
                                bvprv = bv;
                                continue;
                        }
new_segment:
                        if (nr_phys_segs == 1 && seg_size >
                            fbio->bi_seg_front_size)
                                fbio->bi_seg_front_size = seg_size;

                        nr_phys_segs++;
                        bvprv = bv;
                        prev = 1;
                        seg_size = bv.bv_len;
                }
                bbio = bio;
        }

        if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
                fbio->bi_seg_front_size = seg_size;
        if (seg_size > bbio->bi_seg_back_size)
                bbio->bi_seg_back_size = seg_size;

        return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
        bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
                        &rq->q->queue_flags);

        rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
                        no_sg_merge);
}

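/*
 * Recompute a single bio's segment count and cache it in bi_phys_segments.
 * When NO_SG_MERGE is set and the vector count is already within the
 * queue's segment limit, the vector count itself is good enough and the
 * full recalculation is skipped.
 */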
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
        unsigned short seg_cnt;

        /* estimate segment number by bi_vcnt for non-cloned bio */
        if (bio_flagged(bio, BIO_CLONED))
                seg_cnt = bio_segments(bio);
        else
                seg_cnt = bio->bi_vcnt;

        if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
            (seg_cnt < queue_max_segments(q)))
                bio->bi_phys_segments = seg_cnt;
        else {
                struct bio *nxt = bio->bi_next;

                bio->bi_next = NULL;
                bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
                bio->bi_next = nxt;
        }

        bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

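/*
 * Check whether the last segment of @bio and the first segment of @nxt are
 * physically contiguous and small enough to be carried in one segment,
 * subject to the queue's clustering and boundary rules.
 */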
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
{
        struct bio_vec end_bv = { NULL }, nxt_bv;
        struct bvec_iter iter;

        if (!blk_queue_cluster(q))
                return 0;

        if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
            queue_max_segment_size(q))
                return 0;

        if (!bio_has_data(bio))
                return 1;

        bio_for_each_segment(end_bv, bio, iter)
                if (end_bv.bv_len == iter.bi_size)
                        break;

        nxt_bv = bio_iovec(nxt);

        if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
                return 0;

        /*
         * bio and nxt are contiguous in memory; check if the queue allows
         * these two to be merged into one
         */
        if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
                return 1;

        return 0;
}

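/*
 * Add one bvec to the scatterlist being built: extend the current sg entry
 * when clustering allows it, otherwise start a new entry.
 */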
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
                     struct scatterlist *sglist, struct bio_vec *bvprv,
                     struct scatterlist **sg, int *nsegs, int *cluster)
{
        int nbytes = bvec->bv_len;

        if (*sg && *cluster) {
                if ((*sg)->length + nbytes > queue_max_segment_size(q))
                        goto new_segment;

                if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
                        goto new_segment;
                if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
                        goto new_segment;

                (*sg)->length += nbytes;
        } else {
new_segment:
                if (!*sg)
                        *sg = sglist;
                else {
                        /*
                         * If the driver previously mapped a shorter
                         * list, we could see a termination bit
                         * prematurely unless it fully inits the sg
                         * table on each mapping. We KNOW that there
                         * must be more entries here or the driver
                         * would be buggy, so force clear the
                         * termination bit to avoid doing a full
                         * sg_init_table() in drivers for each command.
                         */
                        sg_unmark_end(*sg);
                        *sg = sg_next(*sg);
                }

                sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
                (*nsegs)++;
        }
        *bvprv = *bvec;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
                             struct scatterlist *sglist,
                             struct scatterlist **sg)
{
        struct bio_vec bvec, bvprv = { NULL };
        struct bvec_iter iter;
        int nsegs, cluster;

        nsegs = 0;
        cluster = blk_queue_cluster(q);

        if (bio->bi_rw & REQ_DISCARD) {
                /*
                 * This is a hack - drivers should be neither modifying the
                 * biovec, nor relying on bi_vcnt - but because of
                 * blk_add_request_payload(), a discard bio may or may not have
                 * a payload we need to set up here (thank you Christoph) and
                 * bi_vcnt is really the only way of telling if we need to.
                 */

                if (bio->bi_vcnt)
                        goto single_segment;

                return 0;
        }

        if (bio->bi_rw & REQ_WRITE_SAME) {
single_segment:
                *sg = sglist;
                bvec = bio_iovec(bio);
                sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
                return 1;
        }

        for_each_bio(bio)
                bio_for_each_segment(bvec, bio, iter)
                        __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
                                             &nsegs, &cluster);

        return nsegs;
}

/*
 * Map a request to a scatterlist; returns the number of sg entries set up.
 * The caller must make sure sg can hold rq->nr_phys_segments entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                  struct scatterlist *sglist)
{
        struct scatterlist *sg = NULL;
        int nsegs = 0;

        if (rq->bio)
                nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

        if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
            (blk_rq_bytes(rq) & q->dma_pad_mask)) {
                unsigned int pad_len =
                        (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

                sg->length += pad_len;
                rq->extra_len += pad_len;
        }

        if (q->dma_drain_size && q->dma_drain_needed(rq)) {
                if (rq->cmd_flags & REQ_WRITE)
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);

                sg->page_link &= ~0x02;
                sg = sg_next(sg);
                sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
                            q->dma_drain_size,
                            ((unsigned long)q->dma_drain_buffer) &
                            (PAGE_SIZE - 1));
                nsegs++;
                rq->extra_len += q->dma_drain_size;
        }

        if (sg)
                sg_mark_end(sg);

        return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

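/*
 * Account for @bio's physical segments when it is merged into @req, or mark
 * the request unmergeable if the combined segment count would exceed the
 * queue limit or the integrity payloads cannot be merged.
 */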
static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
{
        int nr_phys_segs = bio_phys_segments(q, bio);

        if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
                goto no_merge;

        if (blk_integrity_merge_bio(q, req, bio) == false)
                goto no_merge;

        /*
         * This will form the start of a new hw segment.  Bump both
         * counters.
         */
        req->nr_phys_segments += nr_phys_segs;
        return 1;

no_merge:
        req->cmd_flags |= REQ_NOMERGE;
        if (req == q->last_merge)
                q->last_merge = NULL;
        return 0;
}

int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio)
{
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
        }
        if (!bio_flagged(req->biotail, BIO_SEG_VALID))
                blk_recount_segments(q, req->biotail);
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);

        return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
{
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
        }
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
        if (!bio_flagged(req->bio, BIO_SEG_VALID))
                blk_recount_segments(q, req->bio);

        return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload, it
 * does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
        struct request_queue *q = req->q;

        return !q->mq_ops && req->special;
}

static int req_gap_to_prev(struct request *req, struct request *next)
{
        struct bio *prev = req->biotail;

        return bvec_gap_to_prev(&prev->bi_io_vec[prev->bi_vcnt - 1],
                                next->bio->bi_io_vec[0].bv_offset);
}

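/*
 * Decide whether @req and @next can be combined. The caller has already
 * verified that they are positionally contiguous; here we check the size,
 * segment count, SG gap and integrity constraints and fix up the segment
 * accounting on success.
 */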
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
{
        int total_phys_segments;
        unsigned int seg_size =
                req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

        /*
         * First check if either of the requests is a re-queued request.
         * Can't merge them if they are.
         */
        if (req_no_special_merge(req) || req_no_special_merge(next))
                return 0;

        if (test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags) &&
            req_gap_to_prev(req, next))
                return 0;

        /*
         * Will it become too large?
         */
        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
            blk_rq_get_max_sectors(req))
                return 0;

        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
        if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
                if (req->nr_phys_segments == 1)
                        req->bio->bi_seg_front_size = seg_size;
                if (next->nr_phys_segments == 1)
                        next->biotail->bi_seg_back_size = seg_size;
                total_phys_segments--;
        }

        if (total_phys_segments > queue_max_segments(q))
                return 0;

        if (blk_integrity_merge_rq(q, req, next) == false)
                return 0;

        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
        return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
        struct bio *bio;

        if (rq->cmd_flags & REQ_MIXED_MERGE)
                return;

        /*
         * @rq will no longer represent mixable attributes for all the
         * contained bios.  It will just track those of the first one.
         * Distribute the attributes to each bio.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
                             (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
                bio->bi_rw |= ff;
        }
        rq->cmd_flags |= REQ_MIXED_MERGE;
}

static void blk_account_io_merge(struct request *req)
{
        if (blk_do_io_stat(req)) {
                struct hd_struct *part;
                int cpu;

                cpu = part_stat_lock();
                part = req->part;

                part_round_stats(cpu, part);
                part_dec_in_flight(part, rq_data_dir(req));

                hd_struct_put(part);
                part_stat_unlock();
        }
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
                         struct request *next)
{
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return 0;

        if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
                return 0;

        /*
         * not contiguous
         */
        if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
                return 0;

        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk
            || req_no_special_merge(next))
                return 0;

        if (req->cmd_flags & REQ_WRITE_SAME &&
            !blk_write_same_mergeable(req->bio, next->bio))
                return 0;

        /*
         * If we are allowed to merge, then append bio list
         * from next to rq and release next. merge_requests_fn
         * will have updated segment counts, update sector
         * counts here.
         */
        if (!ll_merge_requests_fn(q, req, next))
                return 0;

        /*
         * If failfast settings disagree or any of the two is already
         * a mixed merge, mark both as mixed before proceeding.  This
         * makes sure that all involved bios have mixable attributes
         * set properly.
         */
        if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
            (req->cmd_flags & REQ_FAILFAST_MASK) !=
            (next->cmd_flags & REQ_FAILFAST_MASK)) {
                blk_rq_set_mixed_merge(req);
                blk_rq_set_mixed_merge(next);
        }

        /*
         * At this point we have either done a back merge
         * or front merge. We need the smaller start_time of
         * the merged requests to be the current request
         * for accounting purposes.
         */
        if (time_after(req->start_time, next->start_time))
                req->start_time = next->start_time;

        req->biotail->bi_next = next->bio;
        req->biotail = next->biotail;

        req->__data_len += blk_rq_bytes(next);

        elv_merge_requests(q, req, next);

        /*
         * 'next' is going away, so update stats accordingly
         */
        blk_account_io_merge(next);

        req->ioprio = ioprio_best(req->ioprio, next->ioprio);
        if (blk_rq_cpu_valid(next))
                req->cpu = next->cpu;

        /* ownership of bio passed from next to req */
        next->bio = NULL;
        __blk_put_request(q, next);
        return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
        struct request *next = elv_latter_request(q, rq);

        if (next)
                return attempt_merge(q, rq, next);

        return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
        struct request *prev = elv_former_request(q, rq);

        if (prev)
                return attempt_merge(q, prev, rq);

        return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                          struct request *next)
{
        return attempt_merge(q, rq, next);
}

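/*
 * Fast mergeability check between a request and a bio: flags, data
 * direction, device, integrity profile and WRITE SAME buffer must all be
 * compatible before an elevator merge is attempted at all.
 */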
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
        struct request_queue *q = rq->q;

        if (!rq_mergeable(rq) || !bio_mergeable(bio))
                return false;

        if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
                return false;

        /* different data direction or already started, don't merge */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return false;

        /* must be same device and not a special request */
        if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
                return false;

        /* only merge integrity protected bio into ditto rq */
        if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
                return false;

        /* must be using the same buffer */
        if (rq->cmd_flags & REQ_WRITE_SAME &&
            !blk_write_same_mergeable(rq->bio, bio))
                return false;

        /* Only check gaps if the bio carries data */
        if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) && bio_has_data(bio)) {
                struct bio_vec *bprev;

                bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
                if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
                        return false;
        }

        return true;
}

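/*
 * Report whether @bio lines up with the end (back merge) or the start
 * (front merge) of @rq, so the elevator can pick a merge direction.
 */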
int blk_try_merge(struct request *rq, struct bio *bio)
{
        if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                return ELEVATOR_BACK_MERGE;
        else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
                return ELEVATOR_FRONT_MERGE;
        return ELEVATOR_NO_MERGE;
}