// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-wbt.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
        lim->max_segments = BLK_MAX_SEGMENTS;
        lim->max_discard_segments = 1;
        lim->max_integrity_segments = 0;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->virt_boundary_mask = 0;
        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
        lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
        lim->max_dev_sectors = 0;
        lim->chunk_sectors = 0;
        lim->max_write_same_sectors = 0;
        lim->max_write_zeroes_sectors = 0;
        lim->max_zone_append_sectors = 0;
        lim->max_discard_sectors = 0;
        lim->max_hw_discard_sectors = 0;
        lim->discard_granularity = 0;
        lim->discard_alignment = 0;
        lim->discard_misaligned = 0;
        lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
        lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
        lim->alignment_offset = 0;
        lim->io_opt = 0;
        lim->misaligned = 0;
        lim->zoned = BLK_ZONED_NONE;
        lim->zone_write_granularity = 0;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
        blk_set_default_limits(lim);

        /* Inherit limits from component devices */
        lim->max_segments = USHRT_MAX;
        lim->max_discard_segments = USHRT_MAX;
        lim->max_hw_sectors = UINT_MAX;
        lim->max_segment_size = UINT_MAX;
        lim->max_sectors = UINT_MAX;
        lim->max_dev_sectors = UINT_MAX;
        lim->max_write_same_sectors = UINT_MAX;
        lim->max_write_zeroes_sectors = UINT_MAX;
        lim->max_zone_append_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @max_addr: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @max_addr.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
        unsigned long b_pfn = max_addr >> PAGE_SHIFT;
        int dma = 0;

        q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
        /*
         * Assume anything <= 4GB can be handled by IOMMU. Actually
         * some IOMMUs can handle everything, but I don't know of a
         * way to test this here.
         */
        if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
        q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
        if (b_pfn < blk_max_low_pfn)
                dma = 1;
        q->limits.bounce_pfn = b_pfn;
#endif
        if (dma) {
                init_emergency_isa_pool();
                q->bounce_gfp = GFP_NOIO | GFP_DMA;
                q->limits.bounce_pfn = b_pfn;
        }
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
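
/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * driver whose device can only DMA to the low 32 bits of the address space
 * could request bouncing of any page above 4GB:
 *
 *	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
 */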

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the capabilities of the I/O
 *    controller.
 *
 *    max_dev_sectors is a hard limit imposed by the storage device for
 *    READ/WRITE requests. It is set by the disk driver.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
        struct queue_limits *limits = &q->limits;
        unsigned int max_sectors;

        if ((max_hw_sectors << 9) < PAGE_SIZE) {
                max_hw_sectors = 1 << (PAGE_SHIFT - 9);
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_hw_sectors);
        }

        max_hw_sectors = round_down(max_hw_sectors,
                                    limits->logical_block_size >> SECTOR_SHIFT);
        limits->max_hw_sectors = max_hw_sectors;

        max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
        max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
        max_sectors = round_down(max_sectors,
                                 limits->logical_block_size >> SECTOR_SHIFT);
        limits->max_sectors = max_sectors;

        q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
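
/*
 * Usage sketch (illustrative only): a hypothetical controller that can
 * transfer at most 1 MiB per request would advertise 2048 512-byte sectors;
 * the block layer then derives the softer max_sectors limit from it:
 *
 *	blk_queue_max_hw_sectors(q, 2048);
 */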

/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks. Note that the block layer
 *    must accept a page worth of data at any offset. So if the crossing of
 *    chunks is a hard limitation in the driver, it must still be prepared
 *    to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
        q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors)
{
        q->limits.max_hw_discard_sectors = max_discard_sectors;
        q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q:  the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
                unsigned int max_write_same_sectors)
{
        q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);

/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes
 * @q:  the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
                unsigned int max_write_zeroes_sectors)
{
        q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);

/**
 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
 * @q:  the request queue for the device
 * @max_zone_append_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_zone_append_sectors(struct request_queue *q,
                unsigned int max_zone_append_sectors)
{
        unsigned int max_sectors;

        if (WARN_ON(!blk_queue_is_zoned(q)))
                return;

        max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
        max_sectors = min(q->limits.chunk_sectors, max_sectors);

        /*
         * Signal eventual driver bugs resulting in the max_zone_append sectors limit
         * being 0 due to a 0 argument, the chunk_sectors limit (zone size) not set,
         * or the max_hw_sectors limit not set.
         */
        WARN_ON(!max_sectors);

        q->limits.max_zone_append_sectors = max_sectors;
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
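
/*
 * Usage sketch (illustrative only): a hypothetical zoned driver would
 * typically set the zone size (chunk_sectors) first and then cap zone append
 * writes, e.g. to one full zone, where zone_sectors is a driver-side value:
 *
 *	blk_queue_chunk_sectors(q, zone_sectors);
 *	blk_queue_max_zone_append_sectors(q, zone_sectors);
 */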

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_segments);
        }

        q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    segments in a discard request.
 **/
void blk_queue_max_discard_segments(struct request_queue *q,
                unsigned short max_segments)
{
        q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment.
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
        if (max_size < PAGE_SIZE) {
                max_size = PAGE_SIZE;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_size);
        }

        /* see blk_queue_virt_boundary() for the explanation */
        WARN_ON_ONCE(q->limits.virt_boundary_mask);

        q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
        struct queue_limits *limits = &q->limits;

        limits->logical_block_size = size;

        if (limits->physical_block_size < size)
                limits->physical_block_size = size;

        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;

        limits->max_hw_sectors =
                round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
        limits->max_sectors =
                round_down(limits->max_sectors, size >> SECTOR_SHIFT);
}
EXPORT_SYMBOL(blk_queue_logical_block_size);
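
/*
 * Usage sketch (illustrative only): a hypothetical 512-byte emulation drive
 * with 4 KiB physical sectors would report:
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 *
 * while a 4Kn device would pass 4096 for both.
 */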

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
        q->limits.physical_block_size = size;

        if (q->limits.physical_block_size < q->limits.logical_block_size)
                q->limits.physical_block_size = q->limits.logical_block_size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_zone_write_granularity - set zone write granularity for the queue
 * @q:  the request queue for the zoned device
 * @size:  the zone write granularity size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible size allowing to write in
 *   sequential zones of a zoned block device.
 */
void blk_queue_zone_write_granularity(struct request_queue *q,
                                      unsigned int size)
{
        if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
                return;

        q->limits.zone_write_granularity = size;

        if (q->limits.zone_write_granularity < q->limits.logical_block_size)
                q->limits.zone_write_granularity =
                        q->limits.logical_block_size;
}
EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:  the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
        q->limits.alignment_offset =
                offset & (q->limits.physical_block_size - 1);
        q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

void blk_queue_update_readahead(struct request_queue *q)
{
        /*
         * For read-ahead of large files to be effective, we need to read ahead
         * at least twice the optimal I/O size.
         */
        q->backing_dev_info->ra_pages =
                max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
        q->backing_dev_info->io_pages =
                queue_max_sectors(q) >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL_GPL(blk_queue_update_readahead);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
        limits->io_min = min;

        if (limits->io_min < limits->logical_block_size)
                limits->io_min = limits->logical_block_size;

        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:  the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
        blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal I/O size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
        limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:  the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
        blk_limits_io_opt(&q->limits, opt);
        q->backing_dev_info->ra_pages =
                max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
}
EXPORT_SYMBOL(blk_queue_io_opt);
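
/*
 * Usage sketch (illustrative only): a hypothetical RAID5 set with a 64 KiB
 * chunk and four data disks could advertise the chunk as the minimum and the
 * full stripe as the optimal I/O size:
 *
 *	blk_queue_io_min(q, 64 * 1024);
 *	blk_queue_io_opt(q, 4 * 64 * 1024);
 */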

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
        sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
        if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
                sectors = PAGE_SIZE >> SECTOR_SHIFT;
        return sectors;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t start)
{
        unsigned int top, bottom, alignment, ret = 0;

        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
        t->max_write_same_sectors = min(t->max_write_same_sectors,
                                        b->max_write_same_sectors);
        t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
                                          b->max_write_zeroes_sectors);
        t->max_zone_append_sectors = min(t->max_zone_append_sectors,
                                         b->max_zone_append_sectors);
        t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);
        t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
                                             b->virt_boundary_mask);

        t->max_segments = min_not_zero(t->max_segments, b->max_segments);
        t->max_discard_segments = min_not_zero(t->max_discard_segments,
                                               b->max_discard_segments);
        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
                                                 b->max_integrity_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);

        t->misaligned |= b->misaligned;

        alignment = queue_limit_alignment_offset(b, start);

        /* Bottom device has different alignment.  Check that it is
         * compatible with the current top alignment.
         */
        if (t->alignment_offset != alignment) {

                top = max(t->physical_block_size, t->io_min)
                        + t->alignment_offset;
                bottom = max(b->physical_block_size, b->io_min) + alignment;

                /* Verify that top and bottom intervals line up */
                if (max(top, bottom) % min(top, bottom)) {
                        t->misaligned = 1;
                        ret = -1;
                }
        }

        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);

        t->physical_block_size = max(t->physical_block_size,
                                     b->physical_block_size);

        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);

        /* Set non-power-of-2 compatible chunk_sectors boundary */
        if (b->chunk_sectors)
                t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->misaligned = 1;
                ret = -1;
        }

        /* chunk_sectors a multiple of the physical block size? */
        if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
                t->chunk_sectors = 0;
                t->misaligned = 1;
                ret = -1;
        }

        t->raid_partial_stripes_expensive =
                max(t->raid_partial_stripes_expensive,
                    b->raid_partial_stripes_expensive);

        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
                % max(t->physical_block_size, t->io_min);

        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->misaligned = 1;
                ret = -1;
        }

        t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
        t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
        t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

        /* Discard alignment and granularity */
        if (b->discard_granularity) {
                alignment = queue_limit_discard_alignment(b, start);

                if (t->discard_granularity != 0 &&
                    t->discard_alignment != alignment) {
                        top = t->discard_granularity + t->discard_alignment;
                        bottom = b->discard_granularity + alignment;

                        /* Verify that top and bottom intervals line up */
                        if ((max(top, bottom) % min(top, bottom)) != 0)
                                t->discard_misaligned = 1;
                }

                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                                      b->max_discard_sectors);
                t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
                                                         b->max_hw_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
                t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
                        t->discard_granularity;
        }

        t->zone_write_granularity = max(t->zone_write_granularity,
                                        b->zone_write_granularity);
        t->zoned = max(t->zoned, b->zoned);
        return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
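
/*
 * Usage sketch (illustrative only): a hypothetical stacking driver starts
 * from the permissive stacking defaults and then folds in each component
 * device, checking whether any of them ended up misaligned:
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	if (blk_stack_limits(&lim, &bdev_get_queue(bdev)->limits,
 *			     get_start_sect(bdev)) < 0)
 *		pr_warn("component device is misaligned\n");
 */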

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                       sector_t offset)
{
        struct request_queue *t = disk->queue;

        if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
                        get_start_sect(bdev) + (offset >> 9)) < 0) {
                char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

                disk_name(disk, 0, top);
                bdevname(bdev, bottom);

                printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
                       top, bottom);
        }

        blk_queue_update_readahead(disk->queue);
}
EXPORT_SYMBOL(disk_stack_limits);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
        if (mask > q->dma_pad_mask)
                q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
        if (mask < PAGE_SIZE - 1) {
                mask = PAGE_SIZE - 1;
                printk(KERN_INFO "%s: set to minimum %lx\n",
                       __func__, mask);
        }

        q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
        q->limits.virt_boundary_mask = mask;

        /*
         * Devices that require a virtual boundary do not support scatter/gather
         * I/O natively, but instead require a descriptor list entry for each
         * page (which might not be identical to the Linux PAGE_SIZE).  Because
         * of that they are not limited by our notion of "segment size".
         */
        if (mask)
                q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
        q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
        BUG_ON(mask > PAGE_SIZE);

        if (mask > q->dma_alignment)
                q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
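
/*
 * Usage sketch (illustrative only): several layers may ask for DMA alignment
 * without clobbering each other; the largest mask wins.  E.g. a hypothetical
 * host driver needing 4-byte alignment followed by a transport needing
 * 512-byte alignment:
 *
 *	blk_queue_update_dma_alignment(q, 3);
 *	blk_queue_update_dma_alignment(q, 511);	(the queue keeps 511)
 */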

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:		the request queue for the device
 * @depth:	queue depth
 *
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
        q->queue_depth = depth;
        rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

/**
 * blk_queue_write_cache - configure queue's write cache
 * @q:		the request queue for the device
 * @wc:		write back cache on or off
 * @fua:	device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
        if (wc)
                blk_queue_flag_set(QUEUE_FLAG_WC, q);
        else
                blk_queue_flag_clear(QUEUE_FLAG_WC, q);
        if (fua)
                blk_queue_flag_set(QUEUE_FLAG_FUA, q);
        else
                blk_queue_flag_clear(QUEUE_FLAG_FUA, q);

        wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
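
/*
 * Usage sketch (illustrative only): a hypothetical driver that detects a
 * volatile write back cache and FUA support would call:
 *
 *	blk_queue_write_cache(q, true, true);
 *
 * whereas a write-through device would pass false for both.
 */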

/**
 * blk_queue_required_elevator_features - Set a queue required elevator features
 * @q:		the request queue for the target device
 * @features:	Required elevator features OR'ed together
 *
 * Tell the block layer that for the device controlled through @q, the only
 * elevators that can be used are those that implement at least the set of
 * features specified by @features.
 */
void blk_queue_required_elevator_features(struct request_queue *q,
                                          unsigned int features)
{
        q->required_elevator_features = features;
}
EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);

/**
 * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
 * @q:		the request queue for the device
 * @dev:	the device pointer for dma
 *
 * Tell the block layer about merging the segments by dma map of @q.
 */
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
                                       struct device *dev)
{
        unsigned long boundary = dma_get_merge_boundary(dev);

        if (!boundary)
                return false;

        /* No need to update max_segment_size. see blk_queue_virt_boundary() */
        blk_queue_virt_boundary(q, boundary);

        return true;
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
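
/*
 * Usage sketch (illustrative only): a hypothetical driver whose IOMMU can
 * merge segments would pass its DMA device and fall back to its own segment
 * size limit if merging is not possible (pdev and host_max_seg_size are
 * placeholder names):
 *
 *	if (!blk_queue_can_use_dma_map_merging(q, &pdev->dev))
 *		blk_queue_max_segment_size(q, host_max_seg_size);
 */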

/**
 * blk_queue_set_zoned - configure a disk queue zoned model.
 * @disk:	the gendisk of the queue to configure
 * @model:	the zoned model to set
 *
 * Set the zoned model of the request queue of @disk according to @model.
 * When @model is BLK_ZONED_HM (host managed), this should be called only
 * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
 * If @model specifies BLK_ZONED_HA (host aware), the effective model used
 * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
 * on the disk.
 */
void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
{
        struct request_queue *q = disk->queue;

        switch (model) {
        case BLK_ZONED_HM:
                /*
                 * Host managed devices are supported only if
                 * CONFIG_BLK_DEV_ZONED is enabled.
                 */
                WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
                break;
        case BLK_ZONED_HA:
                /*
                 * Host aware devices can be treated either as regular block
                 * devices (similar to drive managed devices) or as zoned block
                 * devices to take advantage of the zone command set, similarly
                 * to host managed devices. We try the latter if there are no
                 * partitions and zoned block device support is enabled, else
                 * we do nothing special as far as the block layer is concerned.
                 */
                if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
                    !xa_empty(&disk->part_tbl))
                        model = BLK_ZONED_NONE;
                break;
        case BLK_ZONED_NONE:
        default:
                if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
                        model = BLK_ZONED_NONE;
                break;
        }

        q->limits.zoned = model;
        if (model != BLK_ZONED_NONE) {
                /*
                 * Set the zone write granularity to the device logical block
                 * size by default. The driver can change this value if needed.
                 */
                blk_queue_zone_write_granularity(q,
                                queue_logical_block_size(q));
        } else {
                blk_queue_clear_zone_settings(q);
        }
}
EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
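
/*
 * Usage sketch (illustrative only): a hypothetical driver probing a
 * host-aware SMR disk would request the host-aware model and let the block
 * layer decide whether zoned support can actually be used:
 *
 *	blk_queue_set_zoned(disk, BLK_ZONED_HA);
 */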

static int __init blk_settings_init(void)
{
        blk_max_low_pfn = max_low_pfn - 1;
        blk_max_pfn = max_pfn - 1;
        return 0;
}
subsys_initcall(blk_settings_init);