// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"

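/*
 * The blk_bio_*_split() helpers below split a bio that exceeds the queue
 * limits for its operation type.  Each returns the front portion that
 * still fits (allocated from @bs) and leaves the remainder in the
 * original bio, or returns NULL if no split is needed.  *nsegs is set to
 * the number of physical segments the caller should account for.
 */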
static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors,
			bio_allowed_max_sectors(q));
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_zeroes_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
	unsigned mask = queue_logical_block_size(q) - 1;

	/* aligned to logical block size */
	sectors &= ~(mask >> 9);

	return sectors;
}

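/*
 * Walk the bio vector by vector, accumulating segment and sector counts,
 * and split once a queue limit (max sectors, max segments, max segment
 * size, segment boundary or SG gap) would be exceeded.  Physically
 * contiguous vectors are merged into a single segment when the queue
 * supports clustering.
 */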
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned seg_size = 0, nsegs = 0, sectors = 0;
	unsigned front_seg_size = bio->bi_seg_front_size;
	bool do_split = true;
	struct bio *new = NULL;
	const unsigned max_sectors = get_max_io_size(q, bio);

	bio_for_each_segment(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (sectors + (bv.bv_len >> 9) > max_sectors) {
			/*
			 * Consider this a new segment if we're splitting in
			 * the middle of this vector.
			 */
			if (nsegs < queue_max_segments(q) &&
			    sectors < max_sectors) {
				nsegs++;
				sectors = max_sectors;
			}
			if (sectors)
				goto split;
			/* Make this single bvec the first segment */
		}

		if (bvprvp && blk_queue_cluster(q)) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
				goto new_segment;

			seg_size += bv.bv_len;
			bvprv = bv;
			bvprvp = &bvprv;
			sectors += bv.bv_len >> 9;

			if (nsegs == 1 && seg_size > front_seg_size)
				front_seg_size = seg_size;
			continue;
		}
new_segment:
		if (nsegs == queue_max_segments(q))
			goto split;

		nsegs++;
		bvprv = bv;
		bvprvp = &bvprv;
		seg_size = bv.bv_len;
		sectors += bv.bv_len >> 9;

		if (nsegs == 1 && seg_size > front_seg_size)
			front_seg_size = seg_size;
	}

	do_split = false;
split:
	*segs = nsegs;

	if (do_split) {
		new = bio_split(bio, sectors, GFP_NOIO, bs);
		if (new)
			bio = new;
	}

	bio->bi_seg_front_size = front_seg_size;
	if (seg_size > bio->bi_seg_back_size)
		bio->bi_seg_back_size = seg_size;

	return do_split ? new : NULL;
}

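/*
 * Split *bio as needed so that the piece handed back to the caller obeys
 * the queue limits.  The split-off front piece is chained to the
 * remainder, the remainder is resubmitted via generic_make_request(),
 * and the front piece (marked REQ_NOMERGE) is returned through *bio.
 */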
void blk_queue_split(struct request_queue *q, struct bio **bio)
{
	struct bio *split, *res;
	unsigned nsegs;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, q->bio_split, &nsegs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, q->bio_split, &nsegs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, q->bio_split, &nsegs);
		break;
	default:
		split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
		break;
	}

	/* physical segments can be figured out during splitting */
	res = split ? split : *bio;
	res->bi_phys_segments = nsegs;
	bio_set_flag(res, BIO_SEG_VALID);

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);

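/*
 * Count the physical segments of a bio chain, applying the same
 * clustering rules as blk_bio_segment_split(), and record the front and
 * back segment sizes used later by blk_phys_contig_segment().
 */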
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, prev = 0;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			if (prev && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			prev = 1;
			seg_size = bv.bv_len;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	unsigned short seg_cnt;

	/* estimate segment number by bi_vcnt for non-cloned bio */
	if (bio_flagged(bio, BIO_CLONED))
		seg_cnt = bio_segments(bio);
	else
		seg_cnt = bio->bi_vcnt;

	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
			(seg_cnt < queue_max_segments(q)))
		bio->bi_phys_segments = seg_cnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
		bio->bi_next = nxt;
	}

	bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

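/*
 * Return 1 if the last segment of @bio and the first segment of @nxt are
 * physically contiguous and may be merged into one segment without
 * violating the queue's segment size or boundary limits (bios without
 * data trivially merge).
 */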
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_get_last_bvec(bio, &end_bv);
	bio_get_first_bvec(nxt, &nxt_bv);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}

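/*
 * Add one bio_vec to the scatterlist being built: either extend the
 * current scatterlist entry when the vector clusters with the previous
 * one, or start a new entry and bump *nsegs.
 */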
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}

static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = sglist;
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int cluster = blk_queue_cluster(q), nsegs = 0;

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}

/*
 * Map a request to a scatterlist and return the number of sg entries set
 * up.  The caller must make sure sg can hold rq->nr_phys_segments entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (op_is_write(req_op(rq)))
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something is wrong if the computed number of segments is greater
	 * than the request's number of physical segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

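/*
 * The ll_*_merge_fn() helpers below decide whether a bio may be appended
 * (back merge) or prepended (front merge) to an existing request without
 * exceeding the queue's segment count, gap and size limits.
 */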
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(q, req);
	return 0;
}

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload; it
 * does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

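/*
 * Merge two discard requests on a queue that supports multi-range
 * discards: each bio is one range, so the merge only has to respect the
 * maximum number of discard segments and the maximum request size.
 */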
static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

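/*
 * Decide whether two whole requests may be combined, accounting for the
 * case where the last segment of @req and the first segment of @next are
 * physically contiguous and collapse into one.
 */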
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check whether either of the requests is a re-queued
	 * request.  We can't merge them if so.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(req->q, cpu, part);
		part_dec_in_flight(req->q, part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Two cases of handling DISCARD merge:
 * If max_discard_segments > 1, the driver treats every bio as a range
 * and sends them to the controller together.  The ranges don't need to
 * be contiguous.
 * Otherwise, the bios/requests are handled like any others, which must
 * be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

enum elv_merge blk_try_req_merge(struct request *req, struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!q->mq_ops)
		lockdep_assert_held(q->queue_lock);

	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return NULL;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (req->write_hint != next->write_hint)
		return NULL;

	/*
	 * If we are allowed to merge, then append the bio list
	 * from next to rq and release next.  merge_requests_fn
	 * will have updated segment counts; update sector
	 * counts here.  Handle DISCARDs separately, as they
	 * have separate settings.
	 */

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge.  We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/*
	 * ownership of bio passed from next to req, return 'next' for
	 * the caller to free
	 */
	next->bio = NULL;
	return next;
}

struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct elevator_queue *e = q->elevator;
	struct request *free;

	if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
		if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
			return 0;

	free = attempt_merge(q, rq, next);
	if (free) {
		__blk_put_request(q, free);
		return 1;
	}

	return 0;
}

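/*
 * Return true if @bio is a candidate for merging into @rq: same operation
 * and data direction, same device, compatible integrity and write-same
 * payloads, and matching write hints.
 */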
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (rq->write_hint != bio->bi_write_hint)
		return false;

	return true;
}

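/*
 * Classify how @bio could merge with @rq: a discard merge when the queue
 * supports multi-range discards, a back merge when the bio starts right
 * after @rq ends, or a front merge when it ends right where @rq begins.
 */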
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}