block/blk-settings.c
block: Rename blk_queue_max_sectors to blk_queue_max_hw_sectors
1 /*
2 * Functions related to setting various queue properties from drivers
3 */
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/init.h>
7 #include <linux/bio.h>
8 #include <linux/blkdev.h>
9 #include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
10 #include <linux/gcd.h>
11 #include <linux/jiffies.h>
12
13 #include "blk.h"
14
15 unsigned long blk_max_low_pfn;
16 EXPORT_SYMBOL(blk_max_low_pfn);
17
18 unsigned long blk_max_pfn;
19
20 /**
21 * blk_queue_prep_rq - set a prepare_request function for queue
22 * @q: queue
23 * @pfn: prepare_request function
24 *
25 * It's possible for a queue to register a prepare_request callback which
26 * is invoked before the request is handed to the request_fn. The goal of
27 * the function is to prepare a request for I/O; it can be used, for
28 * instance, to build a CDB from the request data.
29 *
30 */
31 void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
32 {
33 q->prep_rq_fn = pfn;
34 }
35 EXPORT_SYMBOL(blk_queue_prep_rq);
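/*
 * Usage sketch (illustrative; my_prep_fn and my_build_cdb() are
 * hypothetical driver code, not APIs from this file): a SCSI-style
 * driver could install its prepare_request hook like this.
 *
 *	static int my_prep_fn(struct request_queue *q, struct request *rq)
 *	{
 *		if (!my_build_cdb(rq))
 *			return BLKPREP_KILL;
 *		rq->cmd_flags |= REQ_DONTPREP;
 *		return BLKPREP_OK;
 *	}
 *
 *	blk_queue_prep_rq(q, my_prep_fn);
 */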
36
37 /**
38 * blk_queue_merge_bvec - set a merge_bvec function for queue
39 * @q: queue
40 * @mbfn: merge_bvec_fn
41 *
42 * Usually queues have static limitations on the max sectors or segments that
43 * we can put in a request. Stacking drivers may have some settings that
44 * are dynamic, and thus we have to query the queue whether it is ok to
45 * add a new bio_vec to a bio at a given offset or not. If the block device
46 * has such limitations, it needs to register a merge_bvec_fn to control
47 * the size of bios sent to it. Note that a block device *must* allow a
48 * single page to be added to an empty bio. The block device driver may want
49 * to use the bio_split() function to deal with these bios. By default
50 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
51 * honored.
52 */
53 void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
54 {
55 q->merge_bvec_fn = mbfn;
56 }
57 EXPORT_SYMBOL(blk_queue_merge_bvec);
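/*
 * Usage sketch (illustrative; my_mergeable() and my_max_bytes_at() are
 * hypothetical driver code): a device with an offset-dependent limit
 * returns how many bytes of the new bio_vec it will accept, or 0 to
 * reject growing the bio at that offset.
 *
 *	static int my_mergeable(struct request_queue *q,
 *				struct bvec_merge_data *bvm,
 *				struct bio_vec *biovec)
 *	{
 *		unsigned int max = my_max_bytes_at(bvm->bi_sector);
 *
 *		if (bvm->bi_size + biovec->bv_len > max)
 *			return 0;
 *		return biovec->bv_len;
 *	}
 *
 *	blk_queue_merge_bvec(q, my_mergeable);
 */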
58
59 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
60 {
61 q->softirq_done_fn = fn;
62 }
63 EXPORT_SYMBOL(blk_queue_softirq_done);
64
65 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
66 {
67 q->rq_timeout = timeout;
68 }
69 EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
70
71 void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
72 {
73 q->rq_timed_out_fn = fn;
74 }
75 EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
76
77 void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
78 {
79 q->lld_busy_fn = fn;
80 }
81 EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
82
83 /**
84 * blk_set_default_limits - reset limits to default values
85 * @lim: the queue_limits structure to reset
86 *
87 * Description:
88 * Returns a queue_limits struct to its default state. Can be used by
89 * stacking drivers like DM that stage table swaps and reuse an
90 * existing device queue.
91 */
92 void blk_set_default_limits(struct queue_limits *lim)
93 {
94 lim->max_phys_segments = MAX_PHYS_SEGMENTS;
95 lim->max_hw_segments = MAX_HW_SEGMENTS;
96 lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
97 lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
98 lim->max_sectors = BLK_DEF_MAX_SECTORS;
99 lim->max_hw_sectors = INT_MAX;
100 lim->max_discard_sectors = 0;
101 lim->discard_granularity = 0;
102 lim->discard_alignment = 0;
103 lim->discard_misaligned = 0;
104 lim->discard_zeroes_data = -1;
105 lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
106 lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
107 lim->alignment_offset = 0;
108 lim->io_opt = 0;
109 lim->misaligned = 0;
110 lim->no_cluster = 0;
111 }
112 EXPORT_SYMBOL(blk_set_default_limits);
113
114 /**
115 * blk_queue_make_request - define an alternate make_request function for a device
116 * @q: the request queue for the device to be affected
117 * @mfn: the alternate make_request function
118 *
119 * Description:
120 * The normal way for &struct bios to be passed to a device
121 * driver is for them to be collected into requests on a request
122 * queue, and then to allow the device driver to select requests
123 * off that queue when it is ready. This works well for many block
124 * devices. However some block devices (typically virtual devices
125 * such as md or lvm) do not benefit from the processing on the
126 * request queue, and are served best by having the requests passed
127 * directly to them. This can be achieved by providing a function
128 * to blk_queue_make_request().
129 *
130 * Caveat:
131 * The driver that does this *must* be able to deal appropriately
132 * with buffers in "highmemory". This can be accomplished by either calling
133 * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
134 * blk_queue_bounce() to create a buffer in normal memory.
135 **/
136 void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
137 {
138 /*
139 * set defaults
140 */
141 q->nr_requests = BLKDEV_MAX_RQ;
142
143 q->make_request_fn = mfn;
144 blk_queue_dma_alignment(q, 511);
145 blk_queue_congestion_threshold(q);
146 q->nr_batching = BLK_BATCH_REQ;
147
148 q->unplug_thresh = 4; /* hmm */
149 q->unplug_delay = msecs_to_jiffies(3); /* 3 milliseconds */
150 if (q->unplug_delay == 0)
151 q->unplug_delay = 1;
152
153 q->unplug_timer.function = blk_unplug_timeout;
154 q->unplug_timer.data = (unsigned long)q;
155
156 blk_set_default_limits(&q->limits);
157 blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
158
159 /*
160 * If the caller didn't supply a lock, fall back to our embedded
161 * per-queue lock
162 */
163 if (!q->queue_lock)
164 q->queue_lock = &q->__queue_lock;
165
166 /*
167 * by default assume old behaviour and bounce for any highmem page
168 */
169 blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
170 }
171 EXPORT_SYMBOL(blk_queue_make_request);
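/*
 * Usage sketch (illustrative; my_make_request() and
 * my_remap_and_submit() are hypothetical driver code): a bio-based
 * remapping driver bypasses the request queue like this.
 *
 *	static int my_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		my_remap_and_submit(bio);
 *		return 0;
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	if (q)
 *		blk_queue_make_request(q, my_make_request);
 */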
172
173 /**
174 * blk_queue_bounce_limit - set bounce buffer limit for queue
175 * @q: the request queue for the device
176 * @dma_mask: the maximum address the device can handle
177 *
178 * Description:
179 * Different hardware can have different requirements as to what pages
180 * it can do I/O directly to. A low level driver can call
181 * blk_queue_bounce_limit to have lower memory pages allocated as bounce
182 * buffers for doing I/O to pages residing above @dma_mask.
183 **/
184 void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
185 {
186 unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
187 int dma = 0;
188
189 q->bounce_gfp = GFP_NOIO;
190 #if BITS_PER_LONG == 64
191 /*
192 * Assume anything <= 4GB can be handled by IOMMU. Actually
193 * some IOMMUs can handle everything, but I don't know of a
194 * way to test this here.
195 */
196 if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
197 dma = 1;
198 q->limits.bounce_pfn = max_low_pfn;
199 #else
200 if (b_pfn < blk_max_low_pfn)
201 dma = 1;
202 q->limits.bounce_pfn = b_pfn;
203 #endif
204 if (dma) {
205 init_emergency_isa_pool();
206 q->bounce_gfp = GFP_NOIO | GFP_DMA;
207 q->limits.bounce_pfn = b_pfn;
208 }
209 }
210 EXPORT_SYMBOL(blk_queue_bounce_limit);
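/*
 * Usage sketch (illustrative; "pdev" stands for the driver's
 * struct pci_dev): a controller that can only DMA below 4GB pairs its
 * DMA mask with a matching bounce limit so higher pages get bounced.
 *
 *	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0)
 *		blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
 */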
211
212 /**
213 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
214 * @q: the request queue for the device
215 * @max_hw_sectors: max hardware sectors in the usual 512b unit
216 *
217 * Description:
218 * Enables a low level driver to set a hard upper limit,
219 * max_hw_sectors, on the size of requests. max_hw_sectors is set by
220 * the device driver based upon the combined capabilities of I/O
221 * controller and storage device.
222 *
223 * max_sectors is a soft limit imposed by the block layer for
224 * filesystem type requests. This value can be overridden on a
225 * per-device basis in /sys/block/<device>/queue/max_sectors_kb.
226 * The soft limit cannot exceed max_hw_sectors.
227 **/
228 void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
229 {
230 if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
231 max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
232 printk(KERN_INFO "%s: set to minimum %d\n",
233 __func__, max_hw_sectors);
234 }
235
236 q->limits.max_hw_sectors = max_hw_sectors;
237 q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
238 BLK_DEF_MAX_SECTORS);
239 }
240 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
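/*
 * Usage sketch (illustrative; the value is hypothetical): a controller
 * limited to 1MB per command advertises 2048 512-byte sectors; the
 * block layer then clamps the soft max_sectors limit to
 * BLK_DEF_MAX_SECTORS for filesystem requests.
 *
 *	blk_queue_max_hw_sectors(q, 2048);
 */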
241
242 /**
243 * blk_queue_max_discard_sectors - set max sectors for a single discard
244 * @q: the request queue for the device
245 * @max_discard_sectors: maximum number of sectors to discard
246 **/
247 void blk_queue_max_discard_sectors(struct request_queue *q,
248 unsigned int max_discard_sectors)
249 {
250 q->limits.max_discard_sectors = max_discard_sectors;
251 }
252 EXPORT_SYMBOL(blk_queue_max_discard_sectors);
253
254 /**
255 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
256 * @q: the request queue for the device
257 * @max_segments: max number of segments
258 *
259 * Description:
260 * Enables a low level driver to set an upper limit on the number of
261 * physical data segments in a request. This would be the largest sized
262 * scatter list the driver could handle.
263 **/
264 void blk_queue_max_phys_segments(struct request_queue *q,
265 unsigned short max_segments)
266 {
267 if (!max_segments) {
268 max_segments = 1;
269 printk(KERN_INFO "%s: set to minimum %d\n",
270 __func__, max_segments);
271 }
272
273 q->limits.max_phys_segments = max_segments;
274 }
275 EXPORT_SYMBOL(blk_queue_max_phys_segments);
276
277 /**
278 * blk_queue_max_hw_segments - set max hw segments for a request for this queue
279 * @q: the request queue for the device
280 * @max_segments: max number of segments
281 *
282 * Description:
283 * Enables a low level driver to set an upper limit on the number of
284 * hw data segments in a request. This would be the largest number of
285 * address/length pairs the host adapter can actually give at once
286 * to the device.
287 **/
288 void blk_queue_max_hw_segments(struct request_queue *q,
289 unsigned short max_segments)
290 {
291 if (!max_segments) {
292 max_segments = 1;
293 printk(KERN_INFO "%s: set to minimum %d\n",
294 __func__, max_segments);
295 }
296
297 q->limits.max_hw_segments = max_segments;
298 }
299 EXPORT_SYMBOL(blk_queue_max_hw_segments);
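/*
 * Usage sketch (illustrative; the value is hypothetical): a host
 * adapter whose scatter-gather table holds 128 entries advertises that
 * limit for both the physical and the hardware segment counts.
 *
 *	blk_queue_max_phys_segments(q, 128);
 *	blk_queue_max_hw_segments(q, 128);
 */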
300
301 /**
302 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
303 * @q: the request queue for the device
304 * @max_size: max size of segment in bytes
305 *
306 * Description:
307 * Enables a low level driver to set an upper limit on the size of a
308 * coalesced segment.
309 **/
310 void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
311 {
312 if (max_size < PAGE_CACHE_SIZE) {
313 max_size = PAGE_CACHE_SIZE;
314 printk(KERN_INFO "%s: set to minimum %d\n",
315 __func__, max_size);
316 }
317
318 q->limits.max_segment_size = max_size;
319 }
320 EXPORT_SYMBOL(blk_queue_max_segment_size);
321
322 /**
323 * blk_queue_logical_block_size - set logical block size for the queue
324 * @q: the request queue for the device
325 * @size: the logical block size, in bytes
326 *
327 * Description:
328 * This should be set to the lowest possible block size that the
329 * storage device can address. The default of 512 covers most
330 * hardware.
331 **/
332 void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
333 {
334 q->limits.logical_block_size = size;
335
336 if (q->limits.physical_block_size < size)
337 q->limits.physical_block_size = size;
338
339 if (q->limits.io_min < q->limits.physical_block_size)
340 q->limits.io_min = q->limits.physical_block_size;
341 }
342 EXPORT_SYMBOL(blk_queue_logical_block_size);
343
344 /**
345 * blk_queue_physical_block_size - set physical block size for the queue
346 * @q: the request queue for the device
347 * @size: the physical block size, in bytes
348 *
349 * Description:
350 * This should be set to the lowest possible sector size that the
351 * hardware can operate on without reverting to read-modify-write
352 * operations.
353 */
354 void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
355 {
356 q->limits.physical_block_size = size;
357
358 if (q->limits.physical_block_size < q->limits.logical_block_size)
359 q->limits.physical_block_size = q->limits.logical_block_size;
360
361 if (q->limits.io_min < q->limits.physical_block_size)
362 q->limits.io_min = q->limits.physical_block_size;
363 }
364 EXPORT_SYMBOL(blk_queue_physical_block_size);
365
366 /**
367 * blk_queue_alignment_offset - set physical block alignment offset
368 * @q: the request queue for the device
369 * @offset: alignment offset in bytes
370 *
371 * Description:
372 * Some devices are naturally misaligned to compensate for things like
373 * the legacy DOS partition table 63-sector offset. Low-level drivers
374 * should call this function for devices whose first sector is not
375 * naturally aligned.
376 */
377 void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
378 {
379 q->limits.alignment_offset =
380 offset & (q->limits.physical_block_size - 1);
381 q->limits.misaligned = 0;
382 }
383 EXPORT_SYMBOL(blk_queue_alignment_offset);
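/*
 * Usage sketch (illustrative; the geometry is hypothetical): a
 * 512-byte-emulation drive with 4096-byte physical sectors whose first
 * logical block sits 3584 bytes into a physical block would describe
 * its topology as follows.
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_alignment_offset(q, 3584);
 */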
384
385 /**
386 * blk_limits_io_min - set minimum request size for a device
387 * @limits: the queue limits
388 * @min: smallest I/O size in bytes
389 *
390 * Description:
391 * Some devices have an internal block size bigger than the reported
392 * hardware sector size. This function can be used to signal the
393 * smallest I/O the device can perform without incurring a performance
394 * penalty.
395 */
396 void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
397 {
398 limits->io_min = min;
399
400 if (limits->io_min < limits->logical_block_size)
401 limits->io_min = limits->logical_block_size;
402
403 if (limits->io_min < limits->physical_block_size)
404 limits->io_min = limits->physical_block_size;
405 }
406 EXPORT_SYMBOL(blk_limits_io_min);
407
408 /**
409 * blk_queue_io_min - set minimum request size for the queue
410 * @q: the request queue for the device
411 * @min: smallest I/O size in bytes
412 *
413 * Description:
414 * Storage devices may report a granularity or preferred minimum I/O
415 * size which is the smallest request the device can perform without
416 * incurring a performance penalty. For disk drives this is often the
417 * physical block size. For RAID arrays it is often the stripe chunk
418 * size. A properly aligned multiple of minimum_io_size is the
419 * preferred request size for workloads where a high number of I/O
420 * operations is desired.
421 */
422 void blk_queue_io_min(struct request_queue *q, unsigned int min)
423 {
424 blk_limits_io_min(&q->limits, min);
425 }
426 EXPORT_SYMBOL(blk_queue_io_min);
427
428 /**
429 * blk_limits_io_opt - set optimal request size for a device
430 * @limits: the queue limits
431 * @opt: optimal request size in bytes
432 *
433 * Description:
434 * Storage devices may report an optimal I/O size, which is the
435 * device's preferred unit for sustained I/O. This is rarely reported
436 * for disk drives. For RAID arrays it is usually the stripe width or
437 * the internal track size. A properly aligned multiple of
438 * optimal_io_size is the preferred request size for workloads where
439 * sustained throughput is desired.
440 */
441 void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
442 {
443 limits->io_opt = opt;
444 }
445 EXPORT_SYMBOL(blk_limits_io_opt);
446
447 /**
448 * blk_queue_io_opt - set optimal request size for the queue
449 * @q: the request queue for the device
450 * @opt: optimal request size in bytes
451 *
452 * Description:
453 * Storage devices may report an optimal I/O size, which is the
454 * device's preferred unit for sustained I/O. This is rarely reported
455 * for disk drives. For RAID arrays it is usually the stripe width or
456 * the internal track size. A properly aligned multiple of
457 * optimal_io_size is the preferred request size for workloads where
458 * sustained throughput is desired.
459 */
460 void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
461 {
462 blk_limits_io_opt(&q->limits, opt);
463 }
464 EXPORT_SYMBOL(blk_queue_io_opt);
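/*
 * Usage sketch (illustrative; the geometry is hypothetical): a RAID5
 * array with a 64KB chunk and four data disks reports the chunk size
 * as its minimum I/O and the full stripe width as its optimal I/O.
 *
 *	blk_queue_io_min(q, 64 * 1024);
 *	blk_queue_io_opt(q, 4 * 64 * 1024);
 */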
465
466 /*
467 * Returns the minimum that is _not_ zero, unless both are zero.
468 */
469 #define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
470
471 /**
472 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
473 * @t: the stacking driver (top)
474 * @b: the underlying device (bottom)
475 **/
476 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
477 {
478 blk_stack_limits(&t->limits, &b->limits, 0);
479
480 if (!t->queue_lock)
481 WARN_ON_ONCE(1);
482 else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
483 unsigned long flags;
484 spin_lock_irqsave(t->queue_lock, flags);
485 queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
486 spin_unlock_irqrestore(t->queue_lock, flags);
487 }
488 }
489 EXPORT_SYMBOL(blk_queue_stack_limits);
490
491 static unsigned int lcm(unsigned int a, unsigned int b)
492 {
493 if (a && b)
494 return (a * b) / gcd(a, b);
495 else if (b)
496 return b;
497
498 return a;
499 }
500
501 /**
502 * blk_stack_limits - adjust queue_limits for stacked devices
503 * @t: the stacking driver limits (top device)
504 * @b: the underlying queue limits (bottom, component device)
505 * @start: first data sector within component device
506 *
507 * Description:
508 * This function is used by stacking drivers like MD and DM to ensure
509 * that all component devices have compatible block sizes and
510 * alignments. The stacking driver must provide a queue_limits
511 * struct (top) and then iteratively call the stacking function for
512 * all component (bottom) devices. The stacking function will
513 * attempt to combine the values and ensure proper alignment.
514 *
515 * Returns 0 if the top and bottom queue_limits are compatible. The
516 * top device's block sizes and alignment offsets may be adjusted to
517 * ensure alignment with the bottom device. If no compatible sizes
518 * and alignments exist, -1 is returned and the resulting top
519 * queue_limits will have the misaligned flag set to indicate that
520 * the alignment_offset is undefined.
521 */
522 int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
523 sector_t start)
524 {
525 unsigned int top, bottom, alignment, ret = 0;
526
527 t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
528 t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
529 t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
530
531 t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
532 b->seg_boundary_mask);
533
534 t->max_phys_segments = min_not_zero(t->max_phys_segments,
535 b->max_phys_segments);
536
537 t->max_hw_segments = min_not_zero(t->max_hw_segments,
538 b->max_hw_segments);
539
540 t->max_segment_size = min_not_zero(t->max_segment_size,
541 b->max_segment_size);
542
543 t->misaligned |= b->misaligned;
544
545 alignment = queue_limit_alignment_offset(b, start);
546
547 /* Bottom device has different alignment. Check that it is
548 * compatible with the current top alignment.
549 */
550 if (t->alignment_offset != alignment) {
551
552 top = max(t->physical_block_size, t->io_min)
553 + t->alignment_offset;
554 bottom = max(b->physical_block_size, b->io_min) + alignment;
555
556 /* Verify that top and bottom intervals line up */
557 if (max(top, bottom) & (min(top, bottom) - 1)) {
558 t->misaligned = 1;
559 ret = -1;
560 }
561 }
562
563 t->logical_block_size = max(t->logical_block_size,
564 b->logical_block_size);
565
566 t->physical_block_size = max(t->physical_block_size,
567 b->physical_block_size);
568
569 t->io_min = max(t->io_min, b->io_min);
570 t->io_opt = lcm(t->io_opt, b->io_opt);
571
572 t->no_cluster |= b->no_cluster;
573 t->discard_zeroes_data &= b->discard_zeroes_data;
574
575 /* Physical block size a multiple of the logical block size? */
576 if (t->physical_block_size & (t->logical_block_size - 1)) {
577 t->physical_block_size = t->logical_block_size;
578 t->misaligned = 1;
579 ret = -1;
580 }
581
582 /* Minimum I/O a multiple of the physical block size? */
583 if (t->io_min & (t->physical_block_size - 1)) {
584 t->io_min = t->physical_block_size;
585 t->misaligned = 1;
586 ret = -1;
587 }
588
589 /* Optimal I/O a multiple of the physical block size? */
590 if (t->io_opt & (t->physical_block_size - 1)) {
591 t->io_opt = 0;
592 t->misaligned = 1;
593 ret = -1;
594 }
595
596 /* Find lowest common alignment_offset */
597 t->alignment_offset = lcm(t->alignment_offset, alignment)
598 & (max(t->physical_block_size, t->io_min) - 1);
599
600 /* Verify that new alignment_offset is on a logical block boundary */
601 if (t->alignment_offset & (t->logical_block_size - 1)) {
602 t->misaligned = 1;
603 ret = -1;
604 }
605
606 /* Discard alignment and granularity */
607 if (b->discard_granularity) {
608 alignment = queue_limit_discard_alignment(b, start);
609
610 if (t->discard_granularity != 0 &&
611 t->discard_alignment != alignment) {
612 top = t->discard_granularity + t->discard_alignment;
613 bottom = b->discard_granularity + alignment;
614
615 /* Verify that top and bottom intervals line up */
616 if (max(top, bottom) & (min(top, bottom) - 1))
617 t->discard_misaligned = 1;
618 }
619
620 t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
621 b->max_discard_sectors);
622 t->discard_granularity = max(t->discard_granularity,
623 b->discard_granularity);
624 t->discard_alignment = lcm(t->discard_alignment, alignment) &
625 (t->discard_granularity - 1);
626 }
627
628 return ret;
629 }
630 EXPORT_SYMBOL(blk_stack_limits);
631
632 /**
633 * bdev_stack_limits - adjust queue limits for stacked drivers
634 * @t: the stacking driver limits (top device)
635 * @bdev: the component block_device (bottom)
636 * @start: first data sector within component device
637 *
638 * Description:
639 * Merges queue limits for a top device and a block_device. Returns
640 * 0 if alignment didn't change. Returns -1 if adding the bottom
641 * device caused misalignment.
642 */
643 int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
644 sector_t start)
645 {
646 struct request_queue *bq = bdev_get_queue(bdev);
647
648 start += get_start_sect(bdev);
649
650 return blk_stack_limits(t, &bq->limits, start);
651 }
652 EXPORT_SYMBOL(bdev_stack_limits);
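/*
 * Usage sketch (illustrative; "table", its fields and the device count
 * are hypothetical stacking-driver state): a DM/MD-style driver starts
 * from the defaults and folds in each component device, noting any
 * alignment problem.
 *
 *	struct queue_limits limits;
 *	int i;
 *
 *	blk_set_default_limits(&limits);
 *
 *	for (i = 0; i < table->num_devices; i++)
 *		if (bdev_stack_limits(&limits, table->bdev[i],
 *				      table->start[i]) < 0)
 *			printk(KERN_WARNING "component %d is misaligned\n", i);
 */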
653
654 /**
655 * disk_stack_limits - adjust queue limits for stacked drivers
656 * @disk: MD/DM gendisk (top)
657 * @bdev: the underlying block device (bottom)
658 * @offset: offset to beginning of data within component device
659 *
660 * Description:
661 * Merges the limits for a top level gendisk and a bottom level
662 * block_device.
663 */
664 void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
665 sector_t offset)
666 {
667 struct request_queue *t = disk->queue;
668 struct request_queue *b = bdev_get_queue(bdev);
669
670 if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
671 char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
672
673 disk_name(disk, 0, top);
674 bdevname(bdev, bottom);
675
676 printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
677 top, bottom);
678 }
679
680 if (!t->queue_lock)
681 WARN_ON_ONCE(1);
682 else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
683 unsigned long flags;
684
685 spin_lock_irqsave(t->queue_lock, flags);
686 if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
687 queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
688 spin_unlock_irqrestore(t->queue_lock, flags);
689 }
690 }
691 EXPORT_SYMBOL(disk_stack_limits);
692
693 /**
694 * blk_queue_dma_pad - set pad mask
695 * @q: the request queue for the device
696 * @mask: pad mask
697 *
698 * Set dma pad mask.
699 *
700 * Appending pad buffer to a request modifies the last entry of a
701 * scatter list such that it includes the pad buffer.
702 **/
703 void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
704 {
705 q->dma_pad_mask = mask;
706 }
707 EXPORT_SYMBOL(blk_queue_dma_pad);
708
709 /**
710 * blk_queue_update_dma_pad - update pad mask
711 * @q: the request queue for the device
712 * @mask: pad mask
713 *
714 * Update dma pad mask.
715 *
716 * Appending pad buffer to a request modifies the last entry of a
717 * scatter list such that it includes the pad buffer.
718 **/
719 void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
720 {
721 if (mask > q->dma_pad_mask)
722 q->dma_pad_mask = mask;
723 }
724 EXPORT_SYMBOL(blk_queue_update_dma_pad);
725
726 /**
727 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
728 * @q: the request queue for the device
729 * @dma_drain_needed: fn which returns non-zero if drain is necessary
730 * @buf: physically contiguous buffer
731 * @size: size of the buffer in bytes
732 *
733 * Some devices have excess DMA problems and can't simply discard (or
734 * zero fill) the unwanted piece of the transfer. They have to have a
735 * real area of memory to transfer it into. The use case for this is
736 * ATAPI devices in DMA mode. If the packet command causes a transfer
737 * bigger than the transfer size, some HBAs will lock up if there
738 * aren't DMA elements to contain the excess transfer. What this API
739 * does is adjust the queue so that the buf is always appended
740 * silently to the scatterlist.
741 *
742 * Note: This routine adjusts max_hw_segments to make room for
743 * appending the drain buffer. If you call
744 * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
745 * calling this routine, you must set the limit to one fewer than your
746 * device can support; otherwise there won't be room for the drain
747 * buffer.
748 */
749 int blk_queue_dma_drain(struct request_queue *q,
750 dma_drain_needed_fn *dma_drain_needed,
751 void *buf, unsigned int size)
752 {
753 if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
754 return -EINVAL;
755 /* make room for appending the drain */
756 blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
757 blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
758 q->dma_drain_needed = dma_drain_needed;
759 q->dma_drain_buffer = buf;
760 q->dma_drain_size = size;
761
762 return 0;
763 }
764 EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
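/*
 * Usage sketch (illustrative; my_drain_needed(), "buf" and the 256-byte
 * size are hypothetical, loosely following the ATAPI case described
 * above): allocate the drain area once and register it before I/O
 * starts.
 *
 *	static int my_drain_needed(struct request *rq)
 *	{
 *		return rq->cmd_type == REQ_TYPE_BLOCK_PC;
 *	}
 *
 *	buf = kmalloc(256, GFP_KERNEL);
 *	if (buf)
 *		blk_queue_dma_drain(q, my_drain_needed, buf, 256);
 */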
765
766 /**
767 * blk_queue_segment_boundary - set boundary rules for segment merging
768 * @q: the request queue for the device
769 * @mask: the memory boundary mask
770 **/
771 void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
772 {
773 if (mask < PAGE_CACHE_SIZE - 1) {
774 mask = PAGE_CACHE_SIZE - 1;
775 printk(KERN_INFO "%s: set to minimum %lx\n",
776 __func__, mask);
777 }
778
779 q->limits.seg_boundary_mask = mask;
780 }
781 EXPORT_SYMBOL(blk_queue_segment_boundary);
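/*
 * Usage sketch (illustrative): hardware whose DMA engine cannot cross a
 * 64KB boundary within a single segment would set:
 *
 *	blk_queue_segment_boundary(q, 0xffff);
 */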
782
783 /**
784 * blk_queue_dma_alignment - set dma length and memory alignment
785 * @q: the request queue for the device
786 * @mask: alignment mask
787 *
788 * Description:
789 * Set required memory and length alignment for direct DMA transactions.
790 * This is used when building direct I/O requests for the queue.
791 *
792 **/
793 void blk_queue_dma_alignment(struct request_queue *q, int mask)
794 {
795 q->dma_alignment = mask;
796 }
797 EXPORT_SYMBOL(blk_queue_dma_alignment);
798
799 /**
800 * blk_queue_update_dma_alignment - update dma length and memory alignment
801 * @q: the request queue for the device
802 * @mask: alignment mask
803 *
804 * Description:
805 * Update required memory and length alignment for direct DMA transactions.
806 * If the requested alignment is larger than the current alignment, then
807 * the current queue alignment is updated to the new value; otherwise it
808 * is left alone. The design of this is to allow multiple objects
809 * (driver, device, transport, etc.) to set their respective
810 * alignments without having them interfere.
811 *
812 **/
813 void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
814 {
815 BUG_ON(mask > PAGE_SIZE);
816
817 if (mask > q->dma_alignment)
818 q->dma_alignment = mask;
819 }
820 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
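/*
 * Usage sketch (illustrative; the values are hypothetical): a driver
 * that needs 4-byte aligned buffers sets the base alignment, and a
 * transport that later requires 512-byte alignment raises it; the
 * larger mask always wins.
 *
 *	blk_queue_dma_alignment(q, 4 - 1);
 *	blk_queue_update_dma_alignment(q, 512 - 1);
 */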
821
822 static int __init blk_settings_init(void)
823 {
824 blk_max_low_pfn = max_low_pfn - 1;
825 blk_max_pfn = max_pfn - 1;
826 return 0;
827 }
828 subsys_initcall(blk_settings_init);