block/blk-merge.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"

/*
 * Check if the two bvecs from two bios can be merged to one segment. If yes,
 * no need to check gap between the two bios since the 1st bio and the 1st bvec
 * in the 2nd bio can be handled in one segment.
 */
static inline bool bios_segs_mergeable(struct request_queue *q,
		struct bio *prev, struct bio_vec *prev_last_bv,
		struct bio_vec *next_first_bv)
{
	if (!biovec_phys_mergeable(q, prev_last_bv, next_first_bv))
		return false;
	if (prev->bi_seg_back_size + next_first_bv->bv_len >
			queue_max_segment_size(q))
		return false;
	return true;
}

static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;

	/*
	 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
	 * is quite difficult to respect the sg gap limit. We work hard to
	 * merge a huge number of small single bios in case of mkfs.
	 */
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;

	/*
	 * We don't need to worry about the situation that the merged segment
	 * ends in unaligned virt boundary:
	 *
	 * - if 'pb' ends aligned, the merged segment ends aligned
	 * - if 'pb' ends unaligned, the next bio must include
	 *   one single bvec of 'nb', otherwise the 'nb' can't
	 *   merge with 'pb'
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);
	if (bios_segs_mergeable(q, prev, &pb, &nb))
		return false;
	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}

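/*
 * Illustrative note (added, not from the original file): the virt
 * boundary describes an addressing restriction such as NVMe's PRP
 * lists, where every data segment after the first must start on a
 * boundary-aligned offset.  Roughly, a "gap" exists when the later
 * bvec does not start at offset 0 within the boundary, or the earlier
 * bvec does not end on the boundary, so the two cannot share one
 * device-visible segment and the bios must not be merged.
 */
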
static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}

static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors,
			bio_allowed_max_sectors(q));
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

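/*
 * Worked example for the alignment math above (added for illustration,
 * the values are assumptions): with a 4 KiB discard_granularity
 * (granularity = 8 sectors), discard_alignment = 0, a bio starting at
 * sector 3 and split_sectors = 1024, tmp = (3 + 1024 - 0) % 8 = 3, so
 * split_sectors becomes 1021 and the split ends at sector 1024, leaving
 * the remainder to start on a granularity-aligned sector.
 */
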
static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_zeroes_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
	unsigned mask = queue_logical_block_size(q) - 1;

	/* aligned to logical block size */
	sectors &= ~(mask >> 9);

	return sectors;
}

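/*
 * Worked example for get_max_io_size() (added for illustration): with a
 * 4096-byte logical block size, mask = 4095 and mask >> 9 = 7, so the
 * sector count is rounded down to a multiple of 8 sectors, i.e. a whole
 * number of logical blocks.
 */
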
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned seg_size = 0, nsegs = 0, sectors = 0;
	unsigned front_seg_size = bio->bi_seg_front_size;
	bool do_split = true;
	struct bio *new = NULL;
	const unsigned max_sectors = get_max_io_size(q, bio);

	bio_for_each_segment(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (sectors + (bv.bv_len >> 9) > max_sectors) {
			/*
			 * Consider this a new segment if we're splitting in
			 * the middle of this vector.
			 */
			if (nsegs < queue_max_segments(q) &&
			    sectors < max_sectors) {
				nsegs++;
				sectors = max_sectors;
			}
			goto split;
		}

		if (bvprvp && blk_queue_cluster(q)) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
			if (!biovec_phys_mergeable(q, bvprvp, &bv))
				goto new_segment;

			seg_size += bv.bv_len;
			bvprv = bv;
			bvprvp = &bvprv;
			sectors += bv.bv_len >> 9;

			continue;
		}
new_segment:
		if (nsegs == queue_max_segments(q))
			goto split;

		if (nsegs == 1 && seg_size > front_seg_size)
			front_seg_size = seg_size;

		nsegs++;
		bvprv = bv;
		bvprvp = &bvprv;
		seg_size = bv.bv_len;
		sectors += bv.bv_len >> 9;
	}

	do_split = false;
split:
	*segs = nsegs;

	if (do_split) {
		new = bio_split(bio, sectors, GFP_NOIO, bs);
		if (new)
			bio = new;
	}

	if (nsegs == 1 && seg_size > front_seg_size)
		front_seg_size = seg_size;
	bio->bi_seg_front_size = front_seg_size;
	if (seg_size > bio->bi_seg_back_size)
		bio->bi_seg_back_size = seg_size;

	return do_split ? new : NULL;
}

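/*
 * Note (added for illustration): when a split is needed,
 * blk_bio_segment_split() returns the front portion that fits the queue
 * limits (bio_split() leaves the remainder in the original bio); when no
 * split is needed it returns NULL.  *segs is set to the physical segment
 * count up to the split point, or of the whole bio if it was not split.
 */
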
void blk_queue_split(struct request_queue *q, struct bio **bio)
{
	struct bio *split, *res;
	unsigned nsegs;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, &q->bio_split, &nsegs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split, &nsegs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, &q->bio_split, &nsegs);
		break;
	default:
		split = blk_bio_segment_split(q, *bio, &q->bio_split, &nsegs);
		break;
	}

	/* physical segments can be figured out during splitting */
	res = split ? split : *bio;
	res->bi_phys_segments = nsegs;
	bio_set_flag(res, BIO_SEG_VALID);

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		/*
		 * Since we're recursing into make_request here, ensure
		 * that we mark this bio as already having entered the queue.
		 * If not, and the queue is going away, we can get stuck
		 * forever on waiting for the queue reference to drop. But
		 * that will never happen, as we're already holding a
		 * reference to it.
		 */
		bio_set_flag(*bio, BIO_QUEUE_ENTERED);

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);

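/*
 * Illustrative usage (not part of this file): a bio-based driver's
 * make_request function would normally split an incoming bio against
 * the queue limits before doing anything else with it, roughly:
 *
 *	static blk_qc_t my_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		blk_queue_split(q, &bio);
 *		...
 *		return BLK_QC_T_NONE;
 *	}
 *
 * The driver-side names above are hypothetical; only blk_queue_split()
 * comes from this file.
 */
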
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, prev = 0;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			if (prev && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!biovec_phys_mergeable(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			prev = 1;
			seg_size = bv.bv_len;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	unsigned short seg_cnt;

	/* estimate segment number by bi_vcnt for non-cloned bio */
	if (bio_flagged(bio, BIO_CLONED))
		seg_cnt = bio_segments(bio);
	else
		seg_cnt = bio->bi_vcnt;

	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
	    (seg_cnt < queue_max_segments(q)))
		bio->bi_phys_segments = seg_cnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
		bio->bi_next = nxt;
	}

	bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_get_last_bvec(bio, &end_bv);
	bio_get_first_bvec(nxt, &nxt_bv);

	return biovec_phys_mergeable(q, &end_bv, &nxt_bv);
}

static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{

	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;
		if (!biovec_phys_mergeable(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}

static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = sglist;
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int cluster = blk_queue_cluster(q), nsegs = 0;

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (op_is_write(req_op(rq)))
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have been wrong if the figured number of
	 * segment is bigger than number of req's physical segments
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

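/*
 * Illustrative usage (not part of this file): a request-based driver
 * typically sizes its scatterlist from blk_rq_nr_phys_segments() and
 * then lets blk_rq_map_sg() fill it, roughly:
 *
 *	sg_init_table(sgl, blk_rq_nr_phys_segments(rq));
 *	nents = blk_rq_map_sg(q, rq, sgl);
 *
 * 'sgl' and 'nents' are hypothetical driver-side names.
 */
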
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(q, req);
	return 0;
}

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{

	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(req->q, cpu, part);
		part_dec_in_flight(req->q, part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}
/*
 * Two cases of handling DISCARD merge:
 * If max_discard_segments > 1, the driver treats every bio
 * as a range and sends them to the controller together. The ranges
 * needn't be contiguous.
 * Otherwise, the bios/requests are handled the same as others,
 * which must be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}

d6d48196 727/*
b973cb7e
JA
728 * For non-mq, this has to be called with the request spinlock acquired.
729 * For mq with scheduling, the appropriate queue wide lock should be held.
d6d48196 730 */
b973cb7e
JA
731static struct request *attempt_merge(struct request_queue *q,
732 struct request *req, struct request *next)
d6d48196
JA
733{
734 if (!rq_mergeable(req) || !rq_mergeable(next))
b973cb7e 735 return NULL;
d6d48196 736
288dab8a 737 if (req_op(req) != req_op(next))
b973cb7e 738 return NULL;
f31dc1cd 739
d6d48196 740 if (rq_data_dir(req) != rq_data_dir(next)
2081a56b 741 || req->rq_disk != next->rq_disk)
b973cb7e 742 return NULL;
d6d48196 743
8fe0d473 744 if (req_op(req) == REQ_OP_WRITE_SAME &&
4363ac7c 745 !blk_write_same_mergeable(req->bio, next->bio))
b973cb7e 746 return NULL;
4363ac7c 747
cb6934f8
JA
748 /*
749 * Don't allow merge of different write hints, or for a hint with
750 * non-hint IO.
751 */
752 if (req->write_hint != next->write_hint)
753 return NULL;
754
668ffc03
DLM
755 if (req->ioprio != next->ioprio)
756 return NULL;
757
d6d48196
JA
758 /*
759 * If we are allowed to merge, then append bio list
760 * from next to rq and release next. merge_requests_fn
761 * will have updated segment counts, update sector
445251d0
JA
762 * counts here. Handle DISCARDs separately, as they
763 * have separate settings.
d6d48196 764 */
69840466
JW
765
766 switch (blk_try_req_merge(req, next)) {
767 case ELEVATOR_DISCARD_MERGE:
445251d0
JA
768 if (!req_attempt_discard_merge(q, req, next))
769 return NULL;
69840466
JW
770 break;
771 case ELEVATOR_BACK_MERGE:
772 if (!ll_merge_requests_fn(q, req, next))
773 return NULL;
774 break;
775 default:
b973cb7e 776 return NULL;
69840466 777 }
d6d48196 778
80a761fd
TH
779 /*
780 * If failfast settings disagree or any of the two is already
781 * a mixed merge, mark both as mixed before proceeding. This
782 * makes sure that all involved bios have mixable attributes
783 * set properly.
784 */
e8064021 785 if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
80a761fd
TH
786 (req->cmd_flags & REQ_FAILFAST_MASK) !=
787 (next->cmd_flags & REQ_FAILFAST_MASK)) {
788 blk_rq_set_mixed_merge(req);
789 blk_rq_set_mixed_merge(next);
790 }
791
d6d48196 792 /*
522a7775
OS
793 * At this point we have either done a back merge or front merge. We
794 * need the smaller start_time_ns of the merged requests to be the
795 * current request for accounting purposes.
d6d48196 796 */
522a7775
OS
797 if (next->start_time_ns < req->start_time_ns)
798 req->start_time_ns = next->start_time_ns;
d6d48196
JA
799
800 req->biotail->bi_next = next->bio;
801 req->biotail = next->biotail;
802
a2dec7b3 803 req->__data_len += blk_rq_bytes(next);
d6d48196 804
2a5cf35c 805 if (!blk_discard_mergable(req))
445251d0 806 elv_merge_requests(q, req, next);
d6d48196 807
42dad764
JM
808 /*
809 * 'next' is going away, so update stats accordingly
810 */
811 blk_account_io_merge(next);
d6d48196 812
e4d750c9
JA
813 /*
814 * ownership of bio passed from next to req, return 'next' for
815 * the caller to free
816 */
1cd96c24 817 next->bio = NULL;
b973cb7e 818 return next;
d6d48196
JA
819}
820
struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct request *free;

	free = attempt_merge(q, rq, next);
	if (free) {
		blk_put_request(free);
		return 1;
	}

	return 0;
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device */
	if (rq->rq_disk != bio->bi_disk)
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (rq->write_hint != bio->bi_write_hint)
		return false;

	if (rq->ioprio != bio_prio(bio))
		return false;

	return true;
}

enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}
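
/*
 * Worked example (added for illustration, the values are assumptions):
 * take a request with blk_rq_pos(rq) == 1000 and blk_rq_sectors(rq) == 8,
 * i.e. it covers sectors [1000, 1008).  A bio with bi_sector == 1008 is a
 * back merge candidate (it continues where the request ends); an 8-sector
 * bio with bi_sector == 992 is a front merge candidate (it ends exactly
 * where the request starts, 992 + 8 == 1000).  Anything else yields
 * ELEVATOR_NO_MERGE.
 */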