block/blk-merge.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-rq-qos.h"
static inline bool bio_will_gap(struct request_queue *q,
                struct request *prev_rq, struct bio *prev, struct bio *next)
{
        struct bio_vec pb, nb;

        if (!bio_has_data(prev) || !queue_virt_boundary(q))
                return false;

        /*
         * Don't merge if the 1st bio starts with non-zero offset, otherwise it
         * is quite difficult to respect the sg gap limit. We work hard to
         * merge a huge number of small single bios in case of mkfs.
         */
        if (prev_rq)
                bio_get_first_bvec(prev_rq->bio, &pb);
        else
                bio_get_first_bvec(prev, &pb);
        if (pb.bv_offset & queue_virt_boundary(q))
                return true;

        /*
         * We don't need to worry about the situation that the merged segment
         * ends in unaligned virt boundary:
         *
         * - if 'pb' ends aligned, the merged segment ends aligned
         * - if 'pb' ends unaligned, the next bio must include
         *   one single bvec of 'nb', otherwise the 'nb' can't
         *   merge with 'pb'
         */
        bio_get_last_bvec(prev, &pb);
        bio_get_first_bvec(next, &nb);
        if (biovec_phys_mergeable(q, &pb, &nb))
                return false;
        return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}
static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
        return bio_will_gap(req->q, req, req->biotail, bio);
}
static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
        return bio_will_gap(req->q, NULL, bio, req->bio);
}
static struct bio *blk_bio_discard_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
                                         unsigned *nsegs)
{
        unsigned int max_discard_sectors, granularity;
        int alignment;
        sector_t tmp;
        unsigned split_sectors;

        *nsegs = 1;

        /* Zero-sector (unknown) and one-sector granularities are the same. */
        granularity = max(q->limits.discard_granularity >> 9, 1U);

        max_discard_sectors = min(q->limits.max_discard_sectors,
                        bio_allowed_max_sectors(q));
        max_discard_sectors -= max_discard_sectors % granularity;

        if (unlikely(!max_discard_sectors)) {
                /* XXX: warn */
                return NULL;
        }

        if (bio_sectors(bio) <= max_discard_sectors)
                return NULL;

        split_sectors = max_discard_sectors;

        /*
         * If the next starting sector would be misaligned, stop the discard at
         * the previous aligned sector.
         */
        alignment = (q->limits.discard_alignment >> 9) % granularity;

        tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
        tmp = sector_div(tmp, granularity);

        if (split_sectors > tmp)
                split_sectors -= tmp;

        return bio_split(bio, split_sectors, GFP_NOIO, bs);
}
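
/*
 * Worked example for the discard-split arithmetic above (illustrative
 * numbers only, not taken from any particular device): with a discard
 * granularity of 4096 bytes (granularity = 8 sectors), discard_alignment = 0
 * and max_discard_sectors rounded down to 16, a long discard bio starting at
 * sector 10 first gets split_sectors = 16; tmp = (10 + 16 - 0) % 8 = 2, so
 * split_sectors becomes 14 and the first half ends at sector 24, which is
 * aligned to the granularity.
 */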
static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
                struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
        *nsegs = 0;

        if (!q->limits.max_write_zeroes_sectors)
                return NULL;

        if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
                return NULL;

        return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}
static struct bio *blk_bio_write_same_split(struct request_queue *q,
                                            struct bio *bio,
                                            struct bio_set *bs,
                                            unsigned *nsegs)
{
        *nsegs = 1;

        if (!q->limits.max_write_same_sectors)
                return NULL;

        if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
                return NULL;

        return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}
/*
 * Return the maximum number of sectors from the start of a bio that may be
 * submitted as a single request to a block device. If enough sectors remain,
 * align the end to the physical block size. Otherwise align the end to the
 * logical block size. This approach minimizes the number of non-aligned
 * requests that are submitted to a block device if the start of a bio is not
 * aligned to a physical block boundary.
 */
static inline unsigned get_max_io_size(struct request_queue *q,
                                       struct bio *bio)
{
        unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0);
        unsigned max_sectors = sectors;
        unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
        unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
        unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);

        max_sectors += start_offset;
        max_sectors &= ~(pbs - 1);
        if (max_sectors > start_offset)
                return max_sectors - start_offset;

        return sectors & ~(lbs - 1);
}
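
/*
 * Illustrative example for get_max_io_size() (hypothetical limits): with a
 * 4096-byte physical block size (pbs = 8 sectors), a 512-byte logical block
 * size (lbs = 1) and blk_max_size_offset() returning 255 sectors for a bio
 * starting at sector 5, start_offset = 5 and max_sectors = (255 + 5) & ~7 =
 * 256, so the function returns 256 - 5 = 251 and the request ends exactly on
 * a physical block boundary (sector 256).
 */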
static inline unsigned get_max_segment_size(const struct request_queue *q,
                                            struct page *start_page,
                                            unsigned long offset)
{
        unsigned long mask = queue_segment_boundary(q);

        offset = mask & (page_to_phys(start_page) + offset);

        /*
         * overflow may be triggered in case of zero page physical address
         * on 32bit arch, use queue's max segment size when that happens.
         */
        return min_not_zero(mask - offset + 1,
                        (unsigned long)queue_max_segment_size(q));
}
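
/*
 * Example of the boundary math above (made-up addresses): with a segment
 * boundary mask of 0xffff (64K) and a bvec whose physical start address is
 * 0x1f000, offset becomes 0xffff & 0x1f000 = 0xf000 and the function returns
 * min_not_zero(0x1000, queue_max_segment_size(q)), i.e. at most 4096 bytes
 * may be added before the segment would straddle the 64K boundary.
 */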
/**
 * bvec_split_segs - verify whether or not a bvec should be split in the middle
 * @q:        [in] request queue associated with the bio associated with @bv
 * @bv:       [in] bvec to examine
 * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
 *            by the number of segments from @bv that may be appended to that
 *            bio without exceeding @max_segs
 * @sectors:  [in,out] Number of sectors in the bio being built. Incremented
 *            by the number of sectors from @bv that may be appended to that
 *            bio without exceeding @max_sectors
 * @max_segs: [in] upper bound for *@nsegs
 * @max_sectors: [in] upper bound for *@sectors
 *
 * When splitting a bio, it can happen that a bvec is encountered that is too
 * big to fit in a single segment and hence that it has to be split in the
 * middle. This function verifies whether or not that should happen. The value
 * %true is returned if and only if appending the entire @bv to a bio with
 * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
 * the block driver.
 */
static bool bvec_split_segs(const struct request_queue *q,
                            const struct bio_vec *bv, unsigned *nsegs,
                            unsigned *sectors, unsigned max_segs,
                            unsigned max_sectors)
{
        unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
        unsigned len = min(bv->bv_len, max_len);
        unsigned total_len = 0;
        unsigned seg_size = 0;

        while (len && *nsegs < max_segs) {
                seg_size = get_max_segment_size(q, bv->bv_page,
                                                bv->bv_offset + total_len);
                seg_size = min(seg_size, len);

                (*nsegs)++;
                total_len += seg_size;
                len -= seg_size;

                if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
                        break;
        }

        *sectors += total_len >> 9;

        /* tell the caller to split the bvec if it is too big to fit */
        return len > 0 || bv->bv_len > max_len;
}
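
/*
 * Illustrative case (made-up limits, ignoring the segment-boundary mask): a
 * 128K bvec on a queue whose max_segment_size is 64K, with only one segment
 * left in the @max_segs budget, maps a single 64K piece in the loop above;
 * len stays non-zero, so the function returns true and the caller splits the
 * bio at that point.
 */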
/**
 * blk_bio_segment_split - split a bio in two bios
 * @q:    [in] request queue pointer
 * @bio:  [in] bio to be split
 * @bs:   [in] bio set to allocate the clone from
 * @segs: [out] number of segments in the bio with the first half of the sectors
 *
 * Clone @bio, update the bi_iter of the clone to represent the first sectors
 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
 * following is guaranteed for the cloned bio:
 * - That it has at most get_max_io_size(@q, @bio) sectors.
 * - That it has at most queue_max_segments(@q) segments.
 *
 * Except for discard requests the cloned bio will point at the bi_io_vec of
 * the original bio. It is the responsibility of the caller to ensure that the
 * original bio is not freed before the cloned bio. The caller is also
 * responsible for ensuring that @bs is only destroyed after processing of the
 * split bio has finished.
 */
static struct bio *blk_bio_segment_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
                                         unsigned *segs)
{
        struct bio_vec bv, bvprv, *bvprvp = NULL;
        struct bvec_iter iter;
        unsigned nsegs = 0, sectors = 0;
        const unsigned max_sectors = get_max_io_size(q, bio);
        const unsigned max_segs = queue_max_segments(q);

        bio_for_each_bvec(bv, bio, iter) {
                /*
                 * If the queue doesn't support SG gaps and adding this
                 * offset would create a gap, disallow it.
                 */
                if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
                        goto split;

                if (nsegs < max_segs &&
                    sectors + (bv.bv_len >> 9) <= max_sectors &&
                    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
                        nsegs++;
                        sectors += bv.bv_len >> 9;
                } else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
                                max_sectors)) {
                        goto split;
                }

                bvprv = bv;
                bvprvp = &bvprv;
        }

        *segs = nsegs;
        return NULL;
split:
        *segs = nsegs;

        /*
         * Bio splitting may cause subtle trouble such as hang when doing sync
         * iopoll in direct IO routine. Given the performance gain of iopoll
         * for big IO can be trivial, disable iopoll when a split is needed.
         */
        bio_clear_hipri(bio);

        return bio_split(bio, sectors, GFP_NOIO, bs);
}
/**
 * __blk_queue_split - split a bio and submit the second half
 * @bio:     [in, out] bio to be split
 * @nr_segs: [out] number of segments in the first bio
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio. If the second bio is still too
 * big it will be split by a recursive call to this function. Since this
 * function may allocate a new bio from q->bio_split, it is the responsibility
 * of the caller to ensure that q->bio_split is only released after processing
 * of the split bio has finished.
 */
void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
{
        struct request_queue *q = (*bio)->bi_bdev->bd_disk->queue;
        struct bio *split = NULL;

        switch (bio_op(*bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
                split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
                break;
        case REQ_OP_WRITE_ZEROES:
                split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
                                nr_segs);
                break;
        case REQ_OP_WRITE_SAME:
                split = blk_bio_write_same_split(q, *bio, &q->bio_split,
                                nr_segs);
                break;
        default:
                /*
                 * All drivers must accept single-segment bios that are <=
                 * PAGE_SIZE. This is a quick and dirty check that relies on
                 * the fact that bi_io_vec[0] is always valid if a bio has data.
                 * The check might lead to occasional false negatives when bios
                 * are cloned, but compared to the performance impact of cloned
                 * bios themselves the loop below doesn't matter anyway.
                 */
                if (!q->limits.chunk_sectors &&
                    (*bio)->bi_vcnt == 1 &&
                    ((*bio)->bi_io_vec[0].bv_len +
                     (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) {
                        *nr_segs = 1;
                        break;
                }
                split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
                break;
        }

        if (split) {
                /* there is no chance to merge the split bio */
                split->bi_opf |= REQ_NOMERGE;

                bio_chain(split, *bio);
                trace_block_split(split, (*bio)->bi_iter.bi_sector);
                submit_bio_noacct(*bio);
                *bio = split;

                blk_throtl_charge_bio_split(*bio);
        }
}
/**
 * blk_queue_split - split a bio and submit the second half
 * @bio: [in, out] bio to be split
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio. Since this function may allocate
 * a new bio from q->bio_split, it is the responsibility of the caller to ensure
 * that q->bio_split is only released after processing of the split bio has
 * finished.
 */
void blk_queue_split(struct bio **bio)
{
        unsigned int nr_segs;

        __blk_queue_split(bio, &nr_segs);
}
EXPORT_SYMBOL(blk_queue_split);
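
/*
 * Sketch of typical usage (not part of this file): a bio-based driver would
 * normally call blk_queue_split() at the top of its ->submit_bio() handler,
 * before inspecting the bio, e.g.
 *
 *      static blk_qc_t my_submit_bio(struct bio *bio)
 *      {
 *              blk_queue_split(&bio);
 *              // handle the (possibly shortened) first half here; the second
 *              // half has already been re-queued via submit_bio_noacct()
 *              ...
 *      }
 *
 * "my_submit_bio" is a placeholder name, not an existing driver.
 */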
unsigned int blk_recalc_rq_segments(struct request *rq)
{
        unsigned int nr_phys_segs = 0;
        unsigned int nr_sectors = 0;
        struct req_iterator iter;
        struct bio_vec bv;

        if (!rq->bio)
                return 0;

        switch (bio_op(rq->bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
                if (queue_max_discard_segments(rq->q) > 1) {
                        struct bio *bio = rq->bio;

                        for_each_bio(bio)
                                nr_phys_segs++;
                        return nr_phys_segs;
                }
                return 1;
        case REQ_OP_WRITE_ZEROES:
                return 0;
        case REQ_OP_WRITE_SAME:
                return 1;
        }

        rq_for_each_bvec(bv, rq, iter)
                bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
                                UINT_MAX, UINT_MAX);
        return nr_phys_segs;
}
static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
                struct scatterlist *sglist)
{
        if (!*sg)
                return sglist;

        /*
         * If the driver previously mapped a shorter list, we could see a
         * termination bit prematurely unless it fully inits the sg table
         * on each mapping. We KNOW that there must be more entries here
         * or the driver would be buggy, so force clear the termination bit
         * to avoid doing a full sg_init_table() in drivers for each command.
         */
        sg_unmark_end(*sg);
        return sg_next(*sg);
}
static unsigned blk_bvec_map_sg(struct request_queue *q,
                struct bio_vec *bvec, struct scatterlist *sglist,
                struct scatterlist **sg)
{
        unsigned nbytes = bvec->bv_len;
        unsigned nsegs = 0, total = 0;

        while (nbytes > 0) {
                unsigned offset = bvec->bv_offset + total;
                unsigned len = min(get_max_segment_size(q, bvec->bv_page,
                                        offset), nbytes);
                struct page *page = bvec->bv_page;

                /*
                 * Unfortunately a fair number of drivers barf on scatterlists
                 * that have an offset larger than PAGE_SIZE, despite other
                 * subsystems dealing with that invariant just fine. For now
                 * stick to the legacy format where we never present those from
                 * the block layer, but the code below should be removed once
                 * these offenders (mostly MMC/SD drivers) are fixed.
                 */
                page += (offset >> PAGE_SHIFT);
                offset &= ~PAGE_MASK;

                *sg = blk_next_sg(sg, sglist);
                sg_set_page(*sg, page, len, offset);

                total += len;
                nbytes -= len;
                nsegs++;
        }

        return nsegs;
}
static inline int __blk_bvec_map_sg(struct bio_vec bv,
                struct scatterlist *sglist, struct scatterlist **sg)
{
        *sg = blk_next_sg(sg, sglist);
        sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
        return 1;
}
/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
                           struct bio_vec *bvprv, struct scatterlist **sg)
{
        int nbytes = bvec->bv_len;

        if (!*sg)
                return false;

        if ((*sg)->length + nbytes > queue_max_segment_size(q))
                return false;

        if (!biovec_phys_mergeable(q, bvprv, bvec))
                return false;

        (*sg)->length += nbytes;

        return true;
}
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
                             struct scatterlist *sglist,
                             struct scatterlist **sg)
{
        struct bio_vec bvec, bvprv = { NULL };
        struct bvec_iter iter;
        int nsegs = 0;
        bool new_bio = false;

        for_each_bio(bio) {
                bio_for_each_bvec(bvec, bio, iter) {
                        /*
                         * Only try to merge bvecs from two bios given we
                         * have done bio internal merge when adding pages
                         * to bio
                         */
                        if (new_bio &&
                            __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
                                goto next_bvec;

                        if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
                                nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
                        else
                                nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 next_bvec:
                        new_bio = false;
                }
                if (likely(bio->bi_iter.bi_size)) {
                        bvprv = bvec;
                        new_bio = true;
                }
        }

        return nsegs;
}
/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
                struct scatterlist *sglist, struct scatterlist **last_sg)
{
        int nsegs = 0;

        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
        else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
                nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg);
        else if (rq->bio)
                nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);

        if (*last_sg)
                sg_mark_end(*last_sg);

        /*
         * Something must have been wrong if the calculated number of
         * segments is bigger than the number of req's physical segments
         */
        WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

        return nsegs;
}
EXPORT_SYMBOL(__blk_rq_map_sg);
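
/*
 * Sketch of typical usage (illustrative only): a blk-mq driver usually sizes
 * its scatterlist by the segment count computed above and maps the request in
 * its ->queue_rq() handler through the blk_rq_map_sg() wrapper, e.g.
 *
 *      sg_init_table(cmd->sgl, blk_rq_nr_phys_segments(rq));
 *      nents = blk_rq_map_sg(rq->q, rq, cmd->sgl);
 *
 * where "cmd->sgl" stands for a hypothetical per-command scatterlist
 * allocation, not an interface defined here.
 */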
static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
        if (req_op(rq) == REQ_OP_DISCARD)
                return queue_max_discard_segments(rq->q);
        return queue_max_segments(rq->q);
}
static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
                unsigned int nr_phys_segs)
{
        if (!blk_cgroup_mergeable(req, bio))
                goto no_merge;

        if (blk_integrity_merge_bio(req->q, req, bio) == false)
                goto no_merge;

        /* discard request merge won't add new segment */
        if (req_op(req) == REQ_OP_DISCARD)
                return 1;

        if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
                goto no_merge;

        /*
         * This will form the start of a new hw segment. Bump both
         * counters.
         */
        req->nr_phys_segments += nr_phys_segs;
        return 1;

no_merge:
        req_set_nomerge(req->q, req);
        return 0;
}
int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
        if (req_gap_back_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_back_merge(req, bio))
                return 0;
        if (!bio_crypt_ctx_back_mergeable(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
                req_set_nomerge(req->q, req);
                return 0;
        }

        return ll_new_hw_segment(req, bio, nr_segs);
}
static int ll_front_merge_fn(struct request *req, struct bio *bio,
                unsigned int nr_segs)
{
        if (req_gap_front_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_front_merge(req, bio))
                return 0;
        if (!bio_crypt_ctx_front_mergeable(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
                req_set_nomerge(req->q, req);
                return 0;
        }

        return ll_new_hw_segment(req, bio, nr_segs);
}
static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
                struct request *next)
{
        unsigned short segments = blk_rq_nr_discard_segments(req);

        if (segments >= queue_max_discard_segments(q))
                goto no_merge;
        if (blk_rq_sectors(req) + bio_sectors(next->bio) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
                goto no_merge;

        req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
        return true;
no_merge:
        req_set_nomerge(q, req);
        return false;
}
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
{
        int total_phys_segments;

        if (req_gap_back_merge(req, next->bio))
                return 0;

        /*
         * Will it become too large?
         */
        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
                return 0;

        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
        if (total_phys_segments > blk_rq_get_max_segments(req))
                return 0;

        if (!blk_cgroup_mergeable(req, next->bio))
                return 0;

        if (blk_integrity_merge_rq(q, req, next) == false)
                return 0;

        if (!bio_crypt_ctx_merge_rq(req, next))
                return 0;

        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
        return 1;
}
/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged. Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
        struct bio *bio;

        if (rq->rq_flags & RQF_MIXED_MERGE)
                return;

        /*
         * @rq will no longer represent mixable attributes for all the
         * contained bios. It will just track those of the first one.
         * Distribute the attributes to each bio.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
                             (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
                bio->bi_opf |= ff;
        }
        rq->rq_flags |= RQF_MIXED_MERGE;
}
static void blk_account_io_merge_request(struct request *req)
{
        if (blk_do_io_stat(req)) {
                part_stat_lock();
                part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
                part_stat_unlock();
        }
}
static enum elv_merge blk_try_req_merge(struct request *req,
                                        struct request *next)
{
        if (blk_discard_mergable(req))
                return ELEVATOR_DISCARD_MERGE;
        else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
                return ELEVATOR_BACK_MERGE;

        return ELEVATOR_NO_MERGE;
}
/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
                                     struct request *req, struct request *next)
{
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return NULL;

        if (req_op(req) != req_op(next))
                return NULL;

        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk)
                return NULL;

        if (req_op(req) == REQ_OP_WRITE_SAME &&
            !blk_write_same_mergeable(req->bio, next->bio))
                return NULL;

        /*
         * Don't allow merge of different write hints, or for a hint with
         * non-hint IO.
         */
        if (req->write_hint != next->write_hint)
                return NULL;

        if (req->ioprio != next->ioprio)
                return NULL;

        /*
         * If we are allowed to merge, then append bio list
         * from next to rq and release next. merge_requests_fn
         * will have updated segment counts, update sector
         * counts here. Handle DISCARDs separately, as they
         * have separate settings.
         */

        switch (blk_try_req_merge(req, next)) {
        case ELEVATOR_DISCARD_MERGE:
                if (!req_attempt_discard_merge(q, req, next))
                        return NULL;
                break;
        case ELEVATOR_BACK_MERGE:
                if (!ll_merge_requests_fn(q, req, next))
                        return NULL;
                break;
        default:
                return NULL;
        }

        /*
         * If failfast settings disagree or any of the two is already
         * a mixed merge, mark both as mixed before proceeding. This
         * makes sure that all involved bios have mixable attributes
         * set properly.
         */
        if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
            (req->cmd_flags & REQ_FAILFAST_MASK) !=
            (next->cmd_flags & REQ_FAILFAST_MASK)) {
                blk_rq_set_mixed_merge(req);
                blk_rq_set_mixed_merge(next);
        }

        /*
         * At this point we have either done a back merge or front merge. We
         * need the smaller start_time_ns of the merged requests to be the
         * current request for accounting purposes.
         */
        if (next->start_time_ns < req->start_time_ns)
                req->start_time_ns = next->start_time_ns;

        req->biotail->bi_next = next->bio;
        req->biotail = next->biotail;

        req->__data_len += blk_rq_bytes(next);

        if (!blk_discard_mergable(req))
                elv_merge_requests(q, req, next);

        /*
         * 'next' is going away, so update stats accordingly
         */
        blk_account_io_merge_request(next);

        trace_block_rq_merge(next);

        /*
         * ownership of bio passed from next to req, return 'next' for
         * the caller to free
         */
        next->bio = NULL;
        return next;
}
static struct request *attempt_back_merge(struct request_queue *q,
                struct request *rq)
{
        struct request *next = elv_latter_request(q, rq);

        if (next)
                return attempt_merge(q, rq, next);

        return NULL;
}
static struct request *attempt_front_merge(struct request_queue *q,
                struct request *rq)
{
        struct request *prev = elv_former_request(q, rq);

        if (prev)
                return attempt_merge(q, prev, rq);

        return NULL;
}
/*
 * Try to merge 'next' into 'rq'. Return true if the merge happened, false
 * otherwise. The caller is responsible for freeing 'next' if the merge
 * happened.
 */
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                           struct request *next)
{
        return attempt_merge(q, rq, next);
}
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq) || !bio_mergeable(bio))
                return false;

        if (req_op(rq) != bio_op(bio))
                return false;

        /* different data direction or already started, don't merge */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return false;

        /* must be same device */
        if (rq->rq_disk != bio->bi_bdev->bd_disk)
                return false;

        /* don't merge across cgroup boundaries */
        if (!blk_cgroup_mergeable(rq, bio))
                return false;

        /* only merge integrity protected bio into ditto rq */
        if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
                return false;

        /* Only merge if the crypt contexts are compatible */
        if (!bio_crypt_rq_ctx_compatible(rq, bio))
                return false;

        /* must be using the same buffer */
        if (req_op(rq) == REQ_OP_WRITE_SAME &&
            !blk_write_same_mergeable(rq->bio, bio))
                return false;

        /*
         * Don't allow merge of different write hints, or for a hint with
         * non-hint IO.
         */
        if (rq->write_hint != bio->bi_write_hint)
                return false;

        if (rq->ioprio != bio_prio(bio))
                return false;

        return true;
}
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
        if (blk_discard_mergable(rq))
                return ELEVATOR_DISCARD_MERGE;
        else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                return ELEVATOR_BACK_MERGE;
        else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
                return ELEVATOR_FRONT_MERGE;
        return ELEVATOR_NO_MERGE;
}
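
/*
 * Position check example (arbitrary numbers): for a request covering sectors
 * 100..199 (blk_rq_pos() = 100, blk_rq_sectors() = 100), a bio starting at
 * sector 200 is a back merge candidate because 100 + 100 == 200, while a
 * 50-sector bio starting at sector 50 is a front merge candidate because
 * 100 - 50 == 50.
 */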
static void blk_account_io_merge_bio(struct request *req)
{
        if (!blk_do_io_stat(req))
                return;

        part_stat_lock();
        part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
        part_stat_unlock();
}
enum bio_merge_status {
        BIO_MERGE_OK,
        BIO_MERGE_NONE,
        BIO_MERGE_FAILED,
};
static enum bio_merge_status bio_attempt_back_merge(struct request *req,
                struct bio *bio, unsigned int nr_segs)
{
        const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

        if (!ll_back_merge_fn(req, bio, nr_segs))
                return BIO_MERGE_FAILED;

        trace_block_bio_backmerge(bio);
        rq_qos_merge(req->q, req, bio);

        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
                blk_rq_set_mixed_merge(req);

        req->biotail->bi_next = bio;
        req->biotail = bio;
        req->__data_len += bio->bi_iter.bi_size;

        bio_crypt_free_ctx(bio);

        blk_account_io_merge_bio(req);
        return BIO_MERGE_OK;
}
static enum bio_merge_status bio_attempt_front_merge(struct request *req,
                struct bio *bio, unsigned int nr_segs)
{
        const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

        if (!ll_front_merge_fn(req, bio, nr_segs))
                return BIO_MERGE_FAILED;

        trace_block_bio_frontmerge(bio);
        rq_qos_merge(req->q, req, bio);

        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
                blk_rq_set_mixed_merge(req);

        bio->bi_next = req->bio;
        req->bio = bio;

        req->__sector = bio->bi_iter.bi_sector;
        req->__data_len += bio->bi_iter.bi_size;

        bio_crypt_do_front_merge(req, bio);

        blk_account_io_merge_bio(req);
        return BIO_MERGE_OK;
}
static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
                struct request *req, struct bio *bio)
{
        unsigned short segments = blk_rq_nr_discard_segments(req);

        if (segments >= queue_max_discard_segments(q))
                goto no_merge;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
                goto no_merge;

        rq_qos_merge(q, req, bio);

        req->biotail->bi_next = bio;
        req->biotail = bio;
        req->__data_len += bio->bi_iter.bi_size;
        req->nr_phys_segments = segments + 1;

        blk_account_io_merge_bio(req);
        return BIO_MERGE_OK;
no_merge:
        req_set_nomerge(q, req);
        return BIO_MERGE_FAILED;
}
static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
                                                   struct request *rq,
                                                   struct bio *bio,
                                                   unsigned int nr_segs,
                                                   bool sched_allow_merge)
{
        if (!blk_rq_merge_ok(rq, bio))
                return BIO_MERGE_NONE;

        switch (blk_try_merge(rq, bio)) {
        case ELEVATOR_BACK_MERGE:
                if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
                        return bio_attempt_back_merge(rq, bio, nr_segs);
                break;
        case ELEVATOR_FRONT_MERGE:
                if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
                        return bio_attempt_front_merge(rq, bio, nr_segs);
                break;
        case ELEVATOR_DISCARD_MERGE:
                return bio_attempt_discard_merge(q, rq, bio);
        default:
                return BIO_MERGE_NONE;
        }

        return BIO_MERGE_FAILED;
}
/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @nr_segs: number of segments in @bio
 * @same_queue_rq: pointer to &struct request that gets filled in when
 * another request associated with @q is found on the plug list
 * (optional, may be %NULL)
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list. Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock. As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added to the elevator at this point. In addition, we don't have
 * reliable access to the elevator outside queue lock. Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs, struct request **same_queue_rq)
{
        struct blk_plug *plug;
        struct request *rq;
        struct list_head *plug_list;

        plug = blk_mq_plug(q, bio);
        if (!plug)
                return false;

        plug_list = &plug->mq_list;

        list_for_each_entry_reverse(rq, plug_list, queuelist) {
                if (rq->q == q && same_queue_rq) {
                        /*
                         * Only blk-mq multiple hardware queues case checks the
                         * rq in the same queue, there should be only one such
                         * rq in a queue
                         */
                        *same_queue_rq = rq;
                }

                if (rq->q != q)
                        continue;

                if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
                    BIO_MERGE_OK)
                        return true;
        }

        return false;
}
/*
 * Iterate list of requests and see if we can merge this bio with any
 * of them.
 */
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
                        struct bio *bio, unsigned int nr_segs)
{
        struct request *rq;
        int checked = 8;

        list_for_each_entry_reverse(rq, list, queuelist) {
                if (!checked--)
                        break;

                switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
                case BIO_MERGE_NONE:
                        continue;
                case BIO_MERGE_OK:
                        return true;
                case BIO_MERGE_FAILED:
                        return false;
                }
        }

        return false;
}
EXPORT_SYMBOL_GPL(blk_bio_list_merge);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs, struct request **merged_request)
{
        struct request *rq;

        switch (elv_merge(q, &rq, bio)) {
        case ELEVATOR_BACK_MERGE:
                if (!blk_mq_sched_allow_merge(q, rq, bio))
                        return false;
                if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
                        return false;
                *merged_request = attempt_back_merge(q, rq);
                if (!*merged_request)
                        elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
                return true;
        case ELEVATOR_FRONT_MERGE:
                if (!blk_mq_sched_allow_merge(q, rq, bio))
                        return false;
                if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
                        return false;
                *merged_request = attempt_front_merge(q, rq);
                if (!*merged_request)
                        elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
                return true;
        case ELEVATOR_DISCARD_MERGE:
                return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
        default:
                return false;
        }
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);