])
])
-dnl #
-dnl # See if kernel supports block multi-queue and blk_status_t.
-dnl # blk_status_t represents the new status codes introduced in the 4.13
-dnl # kernel patch:
-dnl #
-dnl # block: introduce new block status code type
-dnl #
-dnl # We do not currently support the "old" block multi-queue interfaces from
-dnl # prior kernels.
-dnl #
-AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_MQ], [
- ZFS_LINUX_TEST_SRC([blk_mq], [
- #include <linux/blk-mq.h>
- ], [
- struct blk_mq_tag_set tag_set __attribute__ ((unused)) = {0};
- (void) blk_mq_alloc_tag_set(&tag_set);
- return BLK_STS_OK;
- ], [])
+AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_MQ_RQ_HCTX], [
ZFS_LINUX_TEST_SRC([blk_mq_rq_hctx], [
#include <linux/blk-mq.h>
#include <linux/blkdev.h>
], [])
])
-AC_DEFUN([ZFS_AC_KERNEL_BLK_MQ], [
- AC_MSG_CHECKING([whether block multiqueue with blk_status_t is available])
- ZFS_LINUX_TEST_RESULT([blk_mq], [
+AC_DEFUN([ZFS_AC_KERNEL_BLK_MQ_RQ_HCTX], [
+ AC_MSG_CHECKING([whether block multiqueue hardware context is cached in struct request])
+ ZFS_LINUX_TEST_RESULT([blk_mq_rq_hctx], [
AC_MSG_RESULT(yes)
- AC_DEFINE(HAVE_BLK_MQ, 1, [block multiqueue is available])
- AC_MSG_CHECKING([whether block multiqueue hardware context is cached in struct request])
- ZFS_LINUX_TEST_RESULT([blk_mq_rq_hctx], [
- AC_MSG_RESULT(yes)
- AC_DEFINE(HAVE_BLK_MQ_RQ_HCTX, 1, [block multiqueue hardware context is cached in struct request])
- ], [
- AC_MSG_RESULT(no)
- ])
+ AC_DEFINE(HAVE_BLK_MQ_RQ_HCTX, 1, [block multiqueue hardware context is cached in struct request])
], [
AC_MSG_RESULT(no)
])
ZFS_AC_KERNEL_SRC_BLK_QUEUE_SECURE_ERASE
ZFS_AC_KERNEL_SRC_BLK_QUEUE_MAX_HW_SECTORS
ZFS_AC_KERNEL_SRC_BLK_QUEUE_MAX_SEGMENTS
- ZFS_AC_KERNEL_SRC_BLK_MQ
+ ZFS_AC_KERNEL_SRC_BLK_MQ_RQ_HCTX
])
AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE], [
ZFS_AC_KERNEL_BLK_QUEUE_SECURE_ERASE
ZFS_AC_KERNEL_BLK_QUEUE_MAX_HW_SECTORS
ZFS_AC_KERNEL_BLK_QUEUE_MAX_SEGMENTS
- ZFS_AC_KERNEL_BLK_MQ
+ ZFS_AC_KERNEL_BLK_MQ_RQ_HCTX
])
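
Note: the body of the blk_mq_rq_hctx conftest is elided in the hunk above. A minimal sketch of what it has to exercise is shown below; the function name blk_mq_rq_hctx_probe is invented for illustration, while the mq_hctx field itself is the one the zvol code dereferences later in this patch.

/*
 * Hypothetical stand-alone equivalent of the conftest body: it only
 * compiles when struct request caches its hardware context, which is
 * what HAVE_BLK_MQ_RQ_HCTX records.
 */
#include <linux/blk-mq.h>
#include <linux/blkdev.h>

static void
blk_mq_rq_hctx_probe(struct request *rq, struct blk_mq_hw_ctx *hctx)
{
	rq->mq_hctx = hctx;	/* fails to build on kernels without the cache */
}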
#include <linux/major.h>
#include <linux/msdos_fs.h> /* for SECTOR_* */
#include <linux/bio.h>
-
-#ifdef HAVE_BLK_MQ
#include <linux/blk-mq.h>
-#endif
-
/*
* 6.11 API
static inline int
io_data_dir(struct bio *bio, struct request *rq)
{
-#ifdef HAVE_BLK_MQ
if (rq != NULL) {
if (op_is_write(req_op(rq))) {
			return (WRITE);
		} else {
			return (READ);
}
}
-#else
- ASSERT3P(rq, ==, NULL);
-#endif
return (bio_data_dir(bio));
}
static inline int
io_is_flush(struct bio *bio, struct request *rq)
{
-#ifdef HAVE_BLK_MQ
if (rq != NULL)
return (req_op(rq) == REQ_OP_FLUSH);
-#else
- ASSERT3P(rq, ==, NULL);
-#endif
return (bio_is_flush(bio));
}
static inline int
io_is_discard(struct bio *bio, struct request *rq)
{
-#ifdef HAVE_BLK_MQ
if (rq != NULL)
return (req_op(rq) == REQ_OP_DISCARD);
-#else
- ASSERT3P(rq, ==, NULL);
-#endif
return (bio_is_discard(bio));
}
static inline int
io_is_secure_erase(struct bio *bio, struct request *rq)
{
-#ifdef HAVE_BLK_MQ
if (rq != NULL)
return (req_op(rq) == REQ_OP_SECURE_ERASE);
-#else
- ASSERT3P(rq, ==, NULL);
-#endif
return (bio_is_secure_erase(bio));
}
static inline int
io_is_fua(struct bio *bio, struct request *rq)
{
-#ifdef HAVE_BLK_MQ
if (rq != NULL)
return (rq->cmd_flags & REQ_FUA);
-#else
- ASSERT3P(rq, ==, NULL);
-#endif
return (bio_is_fua(bio));
}
static inline uint64_t
io_offset(struct bio *bio, struct request *rq)
{
-#ifdef HAVE_BLK_MQ
if (rq != NULL)
return (blk_rq_pos(rq) << 9);
-#else
- ASSERT3P(rq, ==, NULL);
-#endif
return (BIO_BI_SECTOR(bio) << 9);
}
static inline uint64_t
io_size(struct bio *bio, struct request *rq)
{
-#ifdef HAVE_BLK_MQ
if (rq != NULL)
return (blk_rq_bytes(rq));
-#else
- ASSERT3P(rq, ==, NULL);
-#endif
return (BIO_BI_SIZE(bio));
}
static inline int
io_has_data(struct bio *bio, struct request *rq)
{
-#ifdef HAVE_BLK_MQ
if (rq != NULL)
return (bio_has_data(rq->bio));
-#else
- ASSERT3P(rq, ==, NULL);
-#endif
return (bio_has_data(bio));
}
#endif /* _ZFS_BLKDEV_H */
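
Note: the io_* helpers above give zvol a single code path for both submission interfaces; callers pass the BIO and the request, and exactly one of them is expected to be non-NULL. A hypothetical caller sketch (names invented for illustration, assuming only the helpers defined in this header):

/* Dispatch on whichever of bio/rq is non-NULL. */
static void
example_dispatch(struct bio *bio, struct request *rq)
{
	uint64_t off = io_offset(bio, rq);	/* byte offset on the zvol */
	uint64_t len = io_size(bio, rq);	/* byte count */

	if (io_is_flush(bio, rq)) {
		/* flush, no data payload */
	} else if (io_is_discard(bio, rq) || io_is_secure_erase(bio, rq)) {
		/* punch a hole over [off, off + len) */
	} else if (io_data_dir(bio, rq) == WRITE) {
		/* write [off, off + len), honoring io_is_fua() */
	} else {
		/* read [off, off + len) */
	}
}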
return (0);
}
-#ifdef HAVE_BLK_MQ
static void
zfs_copy_bvec(void *p, size_t skip, size_t cnt, zfs_uio_rw_t rw,
struct bio_vec *bv)
}
return (0);
}
-#endif
static int
zfs_uiomove_bvec(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio)
{
-#ifdef HAVE_BLK_MQ
if (uio->rq != NULL)
return (zfs_uiomove_bvec_rq(p, n, rw, uio));
-#else
- ASSERT3P(uio->rq, ==, NULL);
-#endif
return (zfs_uiomove_bvec_impl(p, n, rw, uio));
}
#include <linux/blkdev_compat.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/workqueue.h>
-
-#ifdef HAVE_BLK_MQ
#include <linux/blk-mq.h>
-#endif
static void zvol_request_impl(zvol_state_t *zv, struct bio *bio,
struct request *rq, boolean_t force_sync);
#endif
static unsigned int zvol_threads = 0;
-#ifdef HAVE_BLK_MQ
static unsigned int zvol_blk_mq_threads = 0;
static unsigned int zvol_blk_mq_actual_threads;
static boolean_t zvol_use_blk_mq = B_FALSE;
* read and write tests to a zvol in an NVMe pool (with 16 CPUs).
*/
static unsigned int zvol_blk_mq_blocks_per_thread = 8;
-#endif
static unsigned int zvol_num_taskqs = 0;
/*
* Finalize our BIO or request.
*/
-#ifdef HAVE_BLK_MQ
#define END_IO(zv, bio, rq, error) do { \
if (bio) { \
		bio->bi_status = errno_to_bi_status(-error); \
		bio_endio(bio); \
	} else { \
		blk_mq_end_request(rq, errno_to_bi_status(error)); \
} \
} while (0)
-#else
-#define END_IO(zv, bio, rq, error) do { \
- bio->bi_status = errno_to_bi_status(-error); \
- bio_endio(bio); \
-} while (0)
-#endif
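
Note: a hedged usage sketch of the unified END_IO macro. The negated errno in the call mirrors the -error convention visible in the macro's bio branch; the helper name is invented and not part of the patch.

/* Hypothetical completion helper: error is a positive errno or 0. */
static void
example_end_io(zvol_state_t *zv, struct bio *bio, struct request *rq,
    int error)
{
	/* Negate so the bio branch's errno_to_bi_status(-error) sees +errno. */
	END_IO(zv, bio, rq, -error);
}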
-#ifdef HAVE_BLK_MQ
static unsigned int zvol_blk_mq_queue_depth = BLKDEV_DEFAULT_RQ;
static unsigned int zvol_actual_blk_mq_queue_depth;
-#endif
struct zvol_state_os {
struct gendisk *zvo_disk; /* generic disk */
struct request_queue *zvo_queue; /* request queue */
dev_t zvo_dev; /* device id */
-#ifdef HAVE_BLK_MQ
struct blk_mq_tag_set tag_set;
-#endif
/* Set from the global 'zvol_use_blk_mq' at zvol load */
boolean_t use_blk_mq;
kmem_free(task, sizeof (*task));
}
-#ifdef HAVE_BLK_MQ
-
/*
* This is called when a new block multiqueue request comes in. A request
* contains one or more BIOs.
return (blk_mq_alloc_tag_set(&zso->tag_set));
}
-#endif /* HAVE_BLK_MQ */
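
Note: the handler referenced by the comment above is a blk-mq .queue_rq callback. A minimal sketch of such a callback is shown below; the use of queuedata to recover the zvol and the B_FALSE force_sync argument are assumptions for illustration, not copied from the patch.

/* Hypothetical .queue_rq callback: hand the request to the zvol code. */
static blk_status_t
example_queue_rq(struct blk_mq_hw_ctx *hctx,
    const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	zvol_state_t *zv = rq->q->queuedata;	/* assumed: queuedata is the zvol */

	blk_mq_start_request(rq);

	/* No bio is passed; the request carries its own bio chain. */
	zvol_request_impl(zv, NULL, rq, B_FALSE);

	return (BLK_STS_OK);
}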
/*
* Given a path, return TRUE if path is a ZVOL.
uint_t blk_mq_hw_queue = 0;
uint_t tq_idx;
uint_t taskq_hash;
-#ifdef HAVE_BLK_MQ
if (rq)
#ifdef HAVE_BLK_MQ_RQ_HCTX
blk_mq_hw_queue = rq->mq_hctx->queue_num;
#else
blk_mq_hw_queue =
rq->q->queue_hw_ctx[rq->q->mq_map[rq->cpu]]->queue_num;
-#endif
#endif
taskq_hash = cityhash4((uintptr_t)zv, offset >> ZVOL_TASKQ_OFFSET_SHIFT,
blk_mq_hw_queue, 0);
* the correct number of segments for the volblocksize and
* number of chunks you want.
*/
-#ifdef HAVE_BLK_MQ
if (zvol_blk_mq_blocks_per_thread != 0) {
unsigned int chunks;
chunks = MIN(zvol_blk_mq_blocks_per_thread, UINT16_MAX);
limits->zql_max_segment_size = UINT_MAX;
}
} else {
-#endif
limits->zql_max_segments = UINT16_MAX;
limits->zql_max_segment_size = UINT_MAX;
}
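
Note: to make the segment sizing comment above concrete, here is a worked example under the assumption (the actual assignment is elided in this hunk) that max_segments is derived as volblocksize * chunks / PAGE_SIZE; the helper name is invented.

/* Hypothetical helper showing the assumed sizing rule. */
static unsigned int
example_max_segments(unsigned int volblocksize, unsigned int chunks)
{
	/* e.g. (16384 * 8) / 4096 == 32 segments per request */
	return ((volblocksize * chunks) / PAGE_SIZE);
}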
static int
zvol_alloc_blk_mq(zvol_state_t *zv, zvol_queue_limits_t *limits)
{
-#ifdef HAVE_BLK_MQ
struct zvol_state_os *zso = zv->zv_zso;
/* Allocate our blk-mq tag_set */
#endif
zvol_queue_limits_apply(limits, zso->zvo_queue);
-#endif
return (0);
}
mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&zv->zv_removing_cv, NULL, CV_DEFAULT, NULL);
-#ifdef HAVE_BLK_MQ
zv->zv_zso->use_blk_mq = zvol_use_blk_mq;
-#endif
zvol_queue_limits_t limits;
zvol_queue_limits_init(&limits, zv, zv->zv_zso->use_blk_mq);
put_disk(zv->zv_zso->zvo_disk);
#endif
-#ifdef HAVE_BLK_MQ
if (zv->zv_zso->use_blk_mq)
blk_mq_free_tag_set(&zv->zv_zso->tag_set);
-#endif
ida_simple_remove(&zvol_ida,
MINOR(zv->zv_zso->zvo_dev) >> ZVOL_MINOR_BITS);
return (error);
}
-#ifdef HAVE_BLK_MQ
if (zvol_blk_mq_queue_depth == 0) {
zvol_actual_blk_mq_queue_depth = BLKDEV_DEFAULT_RQ;
} else {
zvol_blk_mq_actual_threads = MIN(MAX(zvol_blk_mq_threads, 1),
1024);
}
-#endif
+
for (uint_t i = 0; i < num_tqs; i++) {
char name[32];
(void) snprintf(name, sizeof (name), "%s_tq-%u",
module_param(zvol_volmode, uint, 0644);
MODULE_PARM_DESC(zvol_volmode, "Default volmode property value");
-#ifdef HAVE_BLK_MQ
module_param(zvol_blk_mq_queue_depth, uint, 0644);
MODULE_PARM_DESC(zvol_blk_mq_queue_depth, "Default blk-mq queue depth");
module_param(zvol_blk_mq_blocks_per_thread, uint, 0644);
MODULE_PARM_DESC(zvol_blk_mq_blocks_per_thread,
"Process volblocksize blocks per thread");
-#endif
#ifndef HAVE_BLKDEV_GET_ERESTARTSYS
module_param(zvol_open_timeout_ms, uint, 0644);