// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-wbt.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim: the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->max_discard_segments = 1;
	lim->max_integrity_segments = 0;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->virt_boundary_mask = 0;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	lim->max_dev_sectors = 0;
	lim->chunk_sectors = 0;
	lim->max_write_same_sectors = 0;
	lim->max_write_zeroes_sectors = 0;
	lim->max_zone_append_sectors = 0;
	lim->max_discard_sectors = 0;
	lim->max_hw_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->zoned = BLK_ZONED_NONE;
	lim->zone_write_granularity = 0;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim: the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	blk_set_default_limits(lim);

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_same_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_zone_append_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
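
/*
 * Example (illustrative sketch, not part of the upstream file): a stacking
 * driver such as DM typically starts from the permissive stacking defaults
 * and then narrows them once per component device via blk_stack_limits().
 * The function name and the data_offset parameter are assumptions made for
 * this sketch.
 */
static inline int example_stack_one_component(struct queue_limits *top,
					      struct block_device *bdev,
					      sector_t data_offset)
{
	/* Start from limits that do not constrain the component devices. */
	blk_set_stacking_limits(top);

	/* Fold in the limits of one component; repeat for each device. */
	return blk_stack_limits(top, &bdev_get_queue(bdev)->limits,
				get_start_sect(bdev) + data_offset);
}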

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @max_addr: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @max_addr.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU. Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
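
/*
 * Example (illustrative sketch, not part of the upstream file): a driver
 * for hardware that can only address the low 4 GB of memory could request
 * bouncing of any payload above that limit.  The function name is an
 * assumption made for this sketch.
 */
static inline void example_limit_dma_to_32bit(struct request_queue *q)
{
	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
}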

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q: the request queue for the device
 * @max_hw_sectors: max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests. max_hw_sectors is set by
 *    the device driver based upon the capabilities of the I/O
 *    controller.
 *
 *    max_dev_sectors is a hard limit imposed by the storage device for
 *    READ/WRITE requests. It is set by the disk driver.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests. This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	struct queue_limits *limits = &q->limits;
	unsigned int max_sectors;

	if ((max_hw_sectors << 9) < PAGE_SIZE) {
		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	max_hw_sectors = round_down(max_hw_sectors,
				    limits->logical_block_size >> SECTOR_SHIFT);
	limits->max_hw_sectors = max_hw_sectors;

	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
	max_sectors = round_down(max_sectors,
				 limits->logical_block_size >> SECTOR_SHIFT);
	limits->max_sectors = max_sectors;

	q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
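
/*
 * Example (illustrative sketch, not part of the upstream file): a controller
 * limited to 1 MB per transfer would advertise 2048 sectors of 512 bytes;
 * the block layer then derives the softer max_sectors limit from it.  The
 * function name and the limit value are assumptions made for this sketch.
 */
static inline void example_set_transfer_limit(struct request_queue *q)
{
	blk_queue_max_hw_sectors(q, 2048);	/* 1 MB in 512-byte sectors */
}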

/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q: the request queue for the device
 * @chunk_sectors: chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks. Note that the block layer
 *    must accept a page worth of data at any offset. So if the crossing of
 *    chunks is a hard limitation in the driver, it must still be prepared
 *    to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
	q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);
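
/*
 * Example (illustrative sketch, not part of the upstream file): a device
 * that cannot merge I/O across 256 KB boundaries would set a 512-sector
 * chunk size, so the block layer splits bios at those boundaries.  The
 * function name is an assumption made for this sketch.
 */
static inline void example_set_chunk_boundary(struct request_queue *q)
{
	blk_queue_chunk_sectors(q, 512);	/* 256 KB in 512-byte sectors */
}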

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q: the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_hw_discard_sectors = max_discard_sectors;
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q: the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
				      unsigned int max_write_same_sectors)
{
	q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);

/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes
 * @q: the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors)
{
	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);

/**
 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
 * @q: the request queue for the device
 * @max_zone_append_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors)
{
	unsigned int max_sectors;

	if (WARN_ON(!blk_queue_is_zoned(q)))
		return;

	max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
	max_sectors = min(q->limits.chunk_sectors, max_sectors);

	/*
	 * Warn about possible driver bugs that would leave the
	 * max_zone_append_sectors limit at 0: a zero argument, the
	 * chunk_sectors limit (zone size) not set, or the max_hw_sectors
	 * limit not set.
	 */
	WARN_ON(!max_sectors);

	q->limits.max_zone_append_sectors = max_sectors;
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q: the request queue for the device
 * @max_segments: max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q: the request queue for the device
 * @max_segments: max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    segments in a discard request.
 **/
void blk_queue_max_discard_segments(struct request_queue *q,
		unsigned short max_segments)
{
	q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q: the request queue for the device
 * @max_size: max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_SIZE) {
		max_size = PAGE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	/* see blk_queue_virt_boundary() for the explanation */
	WARN_ON_ONCE(q->limits.virt_boundary_mask);

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q: the request queue for the device
 * @size: the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address. The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
	struct queue_limits *limits = &q->limits;

	limits->logical_block_size = size;

	if (limits->physical_block_size < size)
		limits->physical_block_size = size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;

	limits->max_hw_sectors =
		round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
	limits->max_sectors =
		round_down(limits->max_sectors, size >> SECTOR_SHIFT);
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q: the request queue for the device
 * @size: the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);
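
/*
 * Example (illustrative sketch, not part of the upstream file): a 512e drive
 * addresses 512-byte logical blocks but reads and writes 4096-byte physical
 * sectors internally, so a driver for it would report both values.  The
 * function name is an assumption made for this sketch.
 */
static inline void example_set_512e_block_sizes(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 512);
	blk_queue_physical_block_size(q, 4096);
}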

/**
 * blk_queue_zone_write_granularity - set zone write granularity for the queue
 * @q: the request queue for the zoned device
 * @size: the zone write granularity size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible size that allows writing in
 *   the sequential zones of a zoned block device.
 */
void blk_queue_zone_write_granularity(struct request_queue *q,
				      unsigned int size)
{
	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return;

	q->limits.zone_write_granularity = size;

	if (q->limits.zone_write_granularity < q->limits.logical_block_size)
		q->limits.zone_write_granularity = q->limits.logical_block_size;
}
EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q: the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset. Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

void blk_queue_update_readahead(struct request_queue *q)
{
	/*
	 * For read-ahead of large files to be effective, we need to read ahead
	 * at least twice the optimal I/O size.
	 */
	q->backing_dev_info->ra_pages =
		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
	q->backing_dev_info->io_pages =
		queue_max_sectors(q) >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL_GPL(blk_queue_update_readahead);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min: smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size. This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q: the request queue for the device
 * @min: smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty. For disk drives this is often the
 *   physical block size. For RAID arrays it is often the stripe chunk
 *   size. A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt: optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O. This is rarely reported
 *   for disk drives. For RAID arrays it is usually the stripe width or
 *   the internal track size. A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q: the request queue for the device
 * @opt: optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O. This is rarely reported
 *   for disk drives. For RAID arrays it is usually the stripe width or
 *   the internal track size. A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
	q->backing_dev_info->ra_pages =
		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
}
EXPORT_SYMBOL(blk_queue_io_opt);
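
/*
 * Example (illustrative sketch, not part of the upstream file): a RAID
 * array with a 64 KB chunk size and four data disks performs best when
 * requests are a multiple of the chunk (io_min) and, ideally, of the full
 * stripe (io_opt).  The function name and geometry are assumptions made
 * for this sketch.
 */
static inline void example_set_raid_io_hints(struct request_queue *q)
{
	blk_queue_io_min(q, 64 * 1024);		/* chunk size */
	blk_queue_io_opt(q, 4 * 64 * 1024);	/* full stripe width */
}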

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_same_sectors = min(t->max_write_same_sectors,
					b->max_write_same_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					b->max_write_zeroes_sectors);
	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
					b->max_zone_append_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					    b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
		t->chunk_sectors = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}

	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	t->zoned = max(t->zoned, b->zoned);
	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;

	if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
			get_start_sect(bdev) + (offset >> 9)) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}

	blk_queue_update_readahead(disk->queue);
}
EXPORT_SYMBOL(disk_stack_limits);
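
/*
 * Example (illustrative sketch, not part of the upstream file): after
 * creating its gendisk, a stacking driver would typically call
 * disk_stack_limits() once per component device it sits on top of.  The
 * function name and the zero byte offset are assumptions made for this
 * sketch.
 */
static inline void example_stack_component_disk(struct gendisk *disk,
						struct block_device *bdev)
{
	/* Component data starts at byte offset 0 within @bdev here. */
	disk_stack_limits(disk, bdev, 0);
}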

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_SIZE - 1) {
		mask = PAGE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);
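
/*
 * Example (illustrative sketch, not part of the upstream file): legacy DMA
 * engines that cannot have a single segment cross a 64 KB physical address
 * boundary would set a 64 KB - 1 mask.  The function name is an assumption
 * made for this sketch.
 */
static inline void example_set_64k_segment_boundary(struct request_queue *q)
{
	blk_queue_segment_boundary(q, 0xffffUL);	/* 64 KB - 1 */
}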

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
	q->limits.virt_boundary_mask = mask;

	/*
	 * Devices that require a virtual boundary do not support scatter/gather
	 * I/O natively, but instead require a descriptor list entry for each
	 * page (which might not be identical to the Linux PAGE_SIZE).  Because
	 * of that they are not limited by our notion of "segment size".
	 */
	if (mask)
		q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);
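
/*
 * Example (illustrative sketch, not part of the upstream file): NVMe-style
 * controllers that describe payloads as a list of page-sized entries need
 * every middle element of an I/O to be page aligned, which is expressed as
 * a virtual boundary mask.  The function name is an assumption made for
 * this sketch.
 */
static inline void example_require_page_aligned_sgl(struct request_queue *q)
{
	blk_queue_virt_boundary(q, PAGE_SIZE - 1);
}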

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:		the request queue for the device
 * @depth:	queue depth
 *
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

/**
 * blk_queue_write_cache - configure queue's write cache
 * @q:		the request queue for the device
 * @wc:		write back cache on or off
 * @fua:	device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
	if (wc)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);

	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
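
/*
 * Example (illustrative sketch, not part of the upstream file): a driver
 * for a device with a volatile write-back cache that also supports FUA
 * writes would declare both, so the block layer issues flushes and FUA
 * requests correctly.  The function name is an assumption made for this
 * sketch.
 */
static inline void example_declare_write_cache(struct request_queue *q)
{
	blk_queue_write_cache(q, true, true);
}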

/**
 * blk_queue_required_elevator_features - Set a queue's required elevator features
 * @q:		the request queue for the target device
 * @features:	Required elevator features OR'ed together
 *
 * Tell the block layer that for the device controlled through @q, the only
 * elevators that can be used are those that implement at least the set of
 * features specified by @features.
 */
void blk_queue_required_elevator_features(struct request_queue *q,
					  unsigned int features)
{
	q->required_elevator_features = features;
}
EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);

/**
 * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
 * @q:		the request queue for the device
 * @dev:	the device pointer for dma
 *
 * Tell the block layer that segments of @q may be merged by the DMA map
 * of @dev.
 */
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
				       struct device *dev)
{
	unsigned long boundary = dma_get_merge_boundary(dev);

	if (!boundary)
		return false;

	/* No need to update max_segment_size. see blk_queue_virt_boundary() */
	blk_queue_virt_boundary(q, boundary);

	return true;
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);

/**
 * blk_queue_set_zoned - configure a disk queue zoned model.
 * @disk:	the gendisk of the queue to configure
 * @model:	the zoned model to set
 *
 * Set the zoned model of the request queue of @disk according to @model.
 * When @model is BLK_ZONED_HM (host managed), this should be called only
 * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
 * If @model specifies BLK_ZONED_HA (host aware), the effective model used
 * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
 * on the disk.
 */
void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
{
	struct request_queue *q = disk->queue;

	switch (model) {
	case BLK_ZONED_HM:
		/*
		 * Host managed devices are supported only if
		 * CONFIG_BLK_DEV_ZONED is enabled.
		 */
		WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
		break;
	case BLK_ZONED_HA:
		/*
		 * Host aware devices can be treated either as regular block
		 * devices (similar to drive managed devices) or as zoned block
		 * devices to take advantage of the zone command set, similarly
		 * to host managed devices. We try the latter if there are no
		 * partitions and zoned block device support is enabled, else
		 * we do nothing special as far as the block layer is concerned.
		 */
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
		    !xa_empty(&disk->part_tbl))
			model = BLK_ZONED_NONE;
		break;
	case BLK_ZONED_NONE:
	default:
		if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
			model = BLK_ZONED_NONE;
		break;
	}

	q->limits.zoned = model;
	if (model != BLK_ZONED_NONE) {
		/*
		 * Set the zone write granularity to the device logical block
		 * size by default. The driver can change this value if needed.
		 */
		blk_queue_zone_write_granularity(q,
						queue_logical_block_size(q));
	} else {
		blk_queue_clear_zone_settings(q);
	}
}
EXPORT_SYMBOL_GPL(blk_queue_set_zoned);

static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);