/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;
/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q: queue
 * @pfn: prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used, for
 * instance, to build a CDB from the request data.
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
        q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);
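
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * driver could install its prepare_request hook while setting up its
 * queue. "mydrv_prep_fn" is a made-up name; BLKPREP_OK is the normal
 * "request is ready" return value for a prep_rq_fn.
 *
 *      static int mydrv_prep_fn(struct request_queue *q, struct request *rq)
 *      {
 *              mydrv_build_cdb(rq);    (made-up helper)
 *              return BLKPREP_OK;
 *      }
 *
 *      blk_queue_prep_rq(q, mydrv_prep_fn);
 */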

/**
 * blk_queue_set_discard - set a discard_sectors function for queue
 * @q: queue
 * @dfn: prepare_discard function
 *
 * It's possible for a queue to register a discard callback which is used
 * to transform a discard request into the appropriate type for the
 * hardware. If none is registered, then discard requests are failed
 * with %EOPNOTSUPP.
 *
 */
void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn)
{
        q->prepare_discard_fn = dfn;
}
EXPORT_SYMBOL(blk_queue_set_discard);

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q: queue
 * @mbfn: merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bios sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bios. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
        q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
        q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
        q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
        q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim: the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state. Can be used by
 *   stacking drivers like DM that stage table swaps and reuse an
 *   existing device queue.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
        lim->max_phys_segments = MAX_PHYS_SEGMENTS;
        lim->max_hw_segments = MAX_HW_SEGMENTS;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->max_segment_size = MAX_SEGMENT_SIZE;
        lim->max_sectors = lim->max_hw_sectors = SAFE_MAX_SECTORS;
        lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
        lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
        lim->alignment_offset = 0;
        lim->io_opt = 0;
        lim->misaligned = 0;
        lim->no_cluster = 0;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q: the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready. This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them. This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
        /*
         * set defaults
         */
        q->nr_requests = BLKDEV_MAX_RQ;

        q->make_request_fn = mfn;
        blk_queue_dma_alignment(q, 511);
        blk_queue_congestion_threshold(q);
        q->nr_batching = BLK_BATCH_REQ;

        q->unplug_thresh = 4;                   /* hmm */
        q->unplug_delay = (3 * HZ) / 1000;      /* 3 milliseconds */
        if (q->unplug_delay == 0)
                q->unplug_delay = 1;

        q->unplug_timer.function = blk_unplug_timeout;
        q->unplug_timer.data = (unsigned long)q;

        blk_set_default_limits(&q->limits);

        /*
         * If the caller didn't supply a lock, fall back to our embedded
         * per-queue locks
         */
        if (!q->queue_lock)
                q->queue_lock = &q->__queue_lock;

        /*
         * by default assume old behaviour and bounce for any highmem page
         */
        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
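
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * bio-based virtual device could bypass the request queue like this.
 * "myvdev_make_request" and "myvdev_handle_bio" are made-up names.
 *
 *      static int myvdev_make_request(struct request_queue *q, struct bio *bio)
 *      {
 *              struct myvdev *dev = q->queuedata;
 *
 *              myvdev_handle_bio(dev, bio);
 *              return 0;
 *      }
 *
 *      blk_queue_make_request(q, myvdev_make_request);
 */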

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @dma_mask: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
        unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
        int dma = 0;

        q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
        /*
         * Assume anything <= 4GB can be handled by IOMMU. Actually
         * some IOMMUs can handle everything, but I don't know of a
         * way to test this here.
         */
        if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
        q->limits.bounce_pfn = max_low_pfn;
#else
        if (b_pfn < blk_max_low_pfn)
                dma = 1;
        q->limits.bounce_pfn = b_pfn;
#endif
        if (dma) {
                init_emergency_isa_pool();
                q->bounce_gfp = GFP_NOIO | GFP_DMA;
                q->limits.bounce_pfn = b_pfn;
        }
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
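
/*
 * Illustrative only: a driver whose hardware can only DMA to the low
 * 4GB might request bouncing for pages above that boundary.
 * DMA_BIT_MASK() is assumed to come from <linux/dma-mapping.h>.
 *
 *      blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
 */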

/**
 * blk_queue_max_sectors - set max sectors for a request for this queue
 * @q: the request queue for the device
 * @max_sectors: max sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of
 *    received requests.
 **/
void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
{
        if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
                max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_sectors);
        }

        if (BLK_DEF_MAX_SECTORS > max_sectors)
                q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors;
        else {
                q->limits.max_sectors = BLK_DEF_MAX_SECTORS;
                q->limits.max_hw_sectors = max_sectors;
        }
}
EXPORT_SYMBOL(blk_queue_max_sectors);
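
/*
 * Illustrative only: a controller that can transfer at most 64KB per
 * request would pass 128 (64KB / 512 bytes per sector). Values above
 * BLK_DEF_MAX_SECTORS raise only the hardware limit, not the default
 * per-request limit.
 *
 *      blk_queue_max_sectors(q, 128);
 */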

void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
{
        if (BLK_DEF_MAX_SECTORS > max_sectors)
                q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS;
        else
                q->limits.max_hw_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

/**
 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
 * @q: the request queue for the device
 * @max_segments: max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    physical data segments in a request. This would be the largest sized
 *    scatter list the driver could handle.
 **/
void blk_queue_max_phys_segments(struct request_queue *q,
                                 unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_segments);
        }

        q->limits.max_phys_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_phys_segments);

/**
 * blk_queue_max_hw_segments - set max hw segments for a request for this queue
 * @q: the request queue for the device
 * @max_segments: max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request. This would be the largest number of
 *    address/length pairs the host adapter can actually give at once
 *    to the device.
 **/
void blk_queue_max_hw_segments(struct request_queue *q,
                               unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_segments);
        }

        q->limits.max_hw_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_hw_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q: the request queue for the device
 * @max_size: max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
        if (max_size < PAGE_CACHE_SIZE) {
                max_size = PAGE_CACHE_SIZE;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_size);
        }

        q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q: the request queue for the device
 * @size: the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address. The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
        q->limits.logical_block_size = size;

        if (q->limits.physical_block_size < size)
                q->limits.physical_block_size = size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);
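
/*
 * Illustrative only: a driver for a device that addresses native
 * 4096-byte sectors would report that here; physical_block_size and
 * io_min are raised to at least the logical block size as a side
 * effect.
 *
 *      blk_queue_logical_block_size(q, 4096);
 */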

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q: the request queue for the device
 * @size: the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
{
        q->limits.physical_block_size = size;

        if (q->limits.physical_block_size < q->limits.logical_block_size)
                q->limits.physical_block_size = q->limits.logical_block_size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q: the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset. Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
        q->limits.alignment_offset =
                offset & (q->limits.physical_block_size - 1);
        q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);
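
/*
 * Illustrative only: a 512e drive (512-byte logical, 4096-byte physical
 * blocks) manufactured so that legacy LBA 63 starts a physical block
 * has its first physical block boundary 7 logical sectors (3584 bytes)
 * into the LBA space, so its driver would report:
 *
 *      blk_queue_alignment_offset(q, 3584);
 */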

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min: smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size. This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
        limits->io_min = min;

        if (limits->io_min < limits->logical_block_size)
                limits->io_min = limits->logical_block_size;

        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q: the request queue for the device
 * @min: smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty. For disk drives this is often the
 *   physical block size. For RAID arrays it is often the stripe chunk
 *   size. A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
        blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);
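
/*
 * Illustrative only: a software RAID driver with a 64KB chunk size
 * might advertise that chunk as the minimum efficient I/O unit on the
 * array's queue ("mddev" used here purely as an example name):
 *
 *      blk_queue_io_min(mddev->queue, 64 * 1024);
 */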

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt: optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O. This is rarely reported
 *   for disk drives. For RAID arrays it is usually the stripe width or
 *   the internal track size. A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
        limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q: the request queue for the device
 * @opt: optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O. This is rarely reported
 *   for disk drives. For RAID arrays it is usually the stripe width or
 *   the internal track size. A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
        blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
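
/*
 * Illustrative only: the same RAID array with a 64KB chunk and four
 * data disks per stripe might report the full 256KB stripe width as
 * its optimal I/O size ("mddev" again a made-up example name):
 *
 *      blk_queue_io_opt(mddev->queue, 4 * 64 * 1024);
 */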

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t: the stacking driver (top)
 * @b: the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
        blk_stack_limits(&t->limits, &b->limits, 0);

        if (!t->queue_lock)
                WARN_ON_ONCE(1);
        else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
                unsigned long flags;
                spin_lock_irqsave(t->queue_lock, flags);
                queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
                spin_unlock_irqrestore(t->queue_lock, flags);
        }
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top)
 * @b: the underlying queue limits (bottom)
 * @offset: offset to beginning of data within component device
 *
 * Description:
 *    Merges two queue_limit structs. Returns 0 if alignment didn't
 *    change. Returns -1 if adding the bottom device caused
 *    misalignment.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t offset)
{
        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);

        t->max_phys_segments = min_not_zero(t->max_phys_segments,
                                            b->max_phys_segments);

        t->max_hw_segments = min_not_zero(t->max_hw_segments,
                                          b->max_hw_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);

        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);

        t->physical_block_size = max(t->physical_block_size,
                                     b->physical_block_size);

        t->io_min = max(t->io_min, b->io_min);
        t->no_cluster |= b->no_cluster;

        /* Bottom device offset aligned? */
        if (offset &&
            (offset & (b->physical_block_size - 1)) != b->alignment_offset) {
                t->misaligned = 1;
                return -1;
        }

        /* If top has no alignment offset, inherit from bottom */
        if (!t->alignment_offset)
                t->alignment_offset =
                        b->alignment_offset & (b->physical_block_size - 1);

        /* Top device aligned on logical block boundary? */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->misaligned = 1;
                return -1;
        }

        /* Find lcm() of optimal I/O size */
        if (t->io_opt && b->io_opt)
                t->io_opt = (t->io_opt * b->io_opt) / gcd(t->io_opt, b->io_opt);
        else if (b->io_opt)
                t->io_opt = b->io_opt;

        /* Verify that optimal I/O size is a multiple of io_min */
        if (t->io_min && t->io_opt % t->io_min)
                return -1;

        return 0;
}
EXPORT_SYMBOL(blk_stack_limits);
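
/*
 * Illustrative only: when stacking a device with io_opt = 192KB on top
 * of one with io_opt = 128KB, the merged optimal size becomes their
 * least common multiple: gcd(196608, 131072) = 65536, so
 * io_opt = 196608 * 131072 / 65536 = 393216 bytes (384KB).
 */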

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk: MD/DM gendisk (top)
 * @bdev: the underlying block device (bottom)
 * @offset: offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for the top and bottom devices and logs a
 *    warning if adding the bottom device caused the top device to
 *    become misaligned.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                       sector_t offset)
{
        struct request_queue *t = disk->queue;
        struct request_queue *b = bdev_get_queue(bdev);

        offset += get_start_sect(bdev) << 9;

        if (blk_stack_limits(&t->limits, &b->limits, offset) < 0) {
                char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

                disk_name(disk, 0, top);
                bdevname(bdev, bottom);

                printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
                       top, bottom);
        }

        if (!t->queue_lock)
                WARN_ON_ONCE(1);
        else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
                unsigned long flags;

                spin_lock_irqsave(t->queue_lock, flags);
                if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
                        queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
                spin_unlock_irqrestore(t->queue_lock, flags);
        }
}
EXPORT_SYMBOL(disk_stack_limits);
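
/*
 * Illustrative only: a stacking driver such as MD or DM would call this
 * for every component device it sits on, passing the byte offset of the
 * component's data area so alignment can be checked. The names below
 * are examples, not a reference to actual callers:
 *
 *      disk_stack_limits(mddev->gendisk, rdev->bdev,
 *                        rdev->data_offset << 9);
 */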

/**
 * blk_queue_dma_pad - set pad mask
 * @q: the request queue for the device
 * @mask: pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
        q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q: the request queue for the device
 * @mask: pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
        if (mask > q->dma_pad_mask)
                q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q: the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf: physically contiguous buffer
 * @size: size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer. They have to have a
 * real area of memory to transfer it into. The use case for this is
 * ATAPI devices in DMA mode. If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer. What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for
 * appending the drain buffer. If you call
 * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
 * calling this routine, you must set the limit to one fewer than your
 * device can support otherwise there won't be room for the drain
 * buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
                        dma_drain_needed_fn *dma_drain_needed,
                        void *buf, unsigned int size)
{
        if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
                return -EINVAL;
        /* make room for appending the drain */
        blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
        blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
        q->dma_drain_needed = dma_drain_needed;
        q->dma_drain_buffer = buf;
        q->dma_drain_size = size;

        return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
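
/*
 * Illustrative only: an ATAPI-style driver might allocate a small drain
 * buffer and register a predicate that drains only packet commands.
 * "mydrv_drain_needed" and MYDRV_DRAIN_SIZE are made-up names.
 *
 *      static int mydrv_drain_needed(struct request *rq)
 *      {
 *              return blk_pc_request(rq);
 *      }
 *
 *      buf = kmalloc(MYDRV_DRAIN_SIZE, GFP_KERNEL);
 *      if (buf)
 *              blk_queue_dma_drain(q, mydrv_drain_needed, buf,
 *                                  MYDRV_DRAIN_SIZE);
 */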

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q: the request queue for the device
 * @mask: the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
        if (mask < PAGE_CACHE_SIZE - 1) {
                mask = PAGE_CACHE_SIZE - 1;
                printk(KERN_INFO "%s: set to minimum %lx\n",
                       __func__, mask);
        }

        q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q: the request queue for the device
 * @mask: alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
        q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);
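
/*
 * Illustrative only: a driver whose hardware requires direct-I/O user
 * buffers and lengths to be aligned to 4 bytes would pass a mask with
 * the low two bits set:
 *
 *      blk_queue_dma_alignment(q, 0x3);
 */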

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q: the request queue for the device
 * @mask: alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone. The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
        BUG_ON(mask > PAGE_SIZE);

        if (mask > q->dma_alignment)
                q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

static int __init blk_settings_init(void)
{
        blk_max_low_pfn = max_low_pfn - 1;
        blk_max_pfn = max_pfn - 1;
        return 0;
}
subsys_initcall(blk_settings_init);