git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blobdiff - block/blk-settings.c
BCM270X_DT: Add bcm2708-rpi-0-w.dts
[mirror_ubuntu-zesty-kernel.git] / block / blk-settings.c
index f679ae12284351fdb53f8cfc80a2a662269c164f..529e55f52a03d7126fbbb77d0eef771e3d7ebe2d 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/gfp.h>
 
 #include "blk.h"
+#include "blk-wbt.h"
 
 unsigned long blk_max_low_pfn;
 EXPORT_SYMBOL(blk_max_low_pfn);
@@ -95,6 +96,7 @@ void blk_set_default_limits(struct queue_limits *lim)
        lim->max_dev_sectors = 0;
        lim->chunk_sectors = 0;
        lim->max_write_same_sectors = 0;
+       lim->max_write_zeroes_sectors = 0;
        lim->max_discard_sectors = 0;
        lim->max_hw_discard_sectors = 0;
        lim->discard_granularity = 0;
@@ -107,6 +109,7 @@ void blk_set_default_limits(struct queue_limits *lim)
        lim->io_opt = 0;
        lim->misaligned = 0;
        lim->cluster = 1;
+       lim->zoned = BLK_ZONED_NONE;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
@@ -130,6 +133,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
        lim->max_sectors = UINT_MAX;
        lim->max_dev_sectors = UINT_MAX;
        lim->max_write_same_sectors = UINT_MAX;
+       lim->max_write_zeroes_sectors = UINT_MAX;
 }
 EXPORT_SYMBOL(blk_set_stacking_limits);
 
@@ -249,6 +253,7 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
        max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
        max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
        limits->max_sectors = max_sectors;
+       q->backing_dev_info.io_pages = max_sectors >> (PAGE_SHIFT - 9);
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
@@ -297,6 +302,19 @@ void blk_queue_max_write_same_sectors(struct request_queue *q,
 }
 EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
 
+/**
+ * blk_queue_max_write_zeroes_sectors - set max sectors for a single
+ *                                      write zeroes
+ * @q:  the request queue for the device
+ * @max_write_zeroes_sectors: maximum number of sectors to write per command
+ **/
+void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
+               unsigned int max_write_zeroes_sectors)
+{
+       q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
+
 /**
  * blk_queue_max_segments - set max hw segments for a request for this queue
  * @q:  the request queue for the device
@@ -525,6 +543,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
        t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
        t->max_write_same_sectors = min(t->max_write_same_sectors,
                                        b->max_write_same_sectors);
+       t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
+                                       b->max_write_zeroes_sectors);
        t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
 
        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
@@ -630,6 +650,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                        t->discard_granularity;
        }
 
+       if (b->chunk_sectors)
+               t->chunk_sectors = min_not_zero(t->chunk_sectors,
+                                               b->chunk_sectors);
+
        return ret;
 }
 EXPORT_SYMBOL(blk_stack_limits);
@@ -831,6 +855,19 @@ void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
+/**
+ * blk_set_queue_depth - tell the block layer about the device queue depth
+ * @q:         the request queue for the device
+ * @depth:             queue depth
+ *
+ */
+void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
+{
+       q->queue_depth = depth;
+       wbt_set_queue_depth(q->rq_wb, depth);
+}
+EXPORT_SYMBOL(blk_set_queue_depth);
+
 /**
  * blk_queue_write_cache - configure queue's write cache
  * @q:         the request queue for the device
@@ -851,6 +888,8 @@ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
        else
                queue_flag_clear(QUEUE_FLAG_FUA, q);
        spin_unlock_irq(q->queue_lock);
+
+       wbt_set_write_cache(q->rq_wb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
 }
 EXPORT_SYMBOL_GPL(blk_queue_write_cache);