git.proxmox.com Git - ceph.git/blobdiff - ceph/src/spdk/lib/nvme/nvme_ns_cmd.c
update source to Ceph Pacific 16.2.2
[ceph.git] / ceph / src / spdk / lib / nvme / nvme_ns_cmd.c
index 0ed3b14080eb3240a6992a0432c1b3c2aa20345d..eaa825fa88b9c8cfb43a0b68ad6a737cb927c57c 100644 (file)
@@ -42,8 +42,8 @@ static inline struct nvme_request *_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns,
 
 
 static bool
-spdk_nvme_ns_check_request_length(uint32_t lba_count, uint32_t sectors_per_max_io,
-                                 uint32_t sectors_per_stripe, uint32_t qdepth)
+nvme_ns_check_request_length(uint32_t lba_count, uint32_t sectors_per_max_io,
+                            uint32_t sectors_per_stripe, uint32_t qdepth)
 {
        uint32_t child_per_io = UINT32_MAX;
 
@@ -62,78 +62,6 @@ spdk_nvme_ns_check_request_length(uint32_t lba_count, uint32_t sectors_per_max_i
        return child_per_io >= qdepth;
 }
 
-static void
-nvme_cb_complete_child(void *child_arg, const struct spdk_nvme_cpl *cpl)
-{
-       struct nvme_request *child = child_arg;
-       struct nvme_request *parent = child->parent;
-
-       nvme_request_remove_child(parent, child);
-
-       if (spdk_nvme_cpl_is_error(cpl)) {
-               memcpy(&parent->parent_status, cpl, sizeof(*cpl));
-       }
-
-       if (parent->num_children == 0) {
-               nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
-                                     parent, &parent->parent_status);
-               nvme_free_request(parent);
-       }
-}
-
-static void
-nvme_request_add_child(struct nvme_request *parent, struct nvme_request *child)
-{
-       assert(parent->num_children != UINT16_MAX);
-
-       if (parent->num_children == 0) {
-               /*
-                * Defer initialization of the children TAILQ since it falls
-                *  on a separate cacheline.  This ensures we do not touch this
-                *  cacheline except on request splitting cases, which are
-                *  relatively rare.
-                */
-               TAILQ_INIT(&parent->children);
-               parent->parent = NULL;
-               memset(&parent->parent_status, 0, sizeof(struct spdk_nvme_cpl));
-       }
-
-       parent->num_children++;
-       TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);
-       child->parent = parent;
-       child->cb_fn = nvme_cb_complete_child;
-       child->cb_arg = child;
-}
-
-void
-nvme_request_remove_child(struct nvme_request *parent, struct nvme_request *child)
-{
-       assert(parent != NULL);
-       assert(child != NULL);
-       assert(child->parent == parent);
-       assert(parent->num_children != 0);
-
-       parent->num_children--;
-       TAILQ_REMOVE(&parent->children, child, child_tailq);
-}
-
-static void
-nvme_request_free_children(struct nvme_request *req)
-{
-       struct nvme_request *child, *tmp;
-
-       if (req->num_children == 0) {
-               return;
-       }
-
-       /* free all child nvme_request */
-       TAILQ_FOREACH_SAFE(child, &req->children, child_tailq, tmp) {
-               nvme_request_remove_child(req, child);
-               nvme_request_free_children(child);
-               nvme_free_request(child);
-       }
-}
-
 static struct nvme_request *
 _nvme_add_child_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
                        const struct nvme_payload *payload,
@@ -201,6 +129,18 @@ _nvme_ns_cmd_split_request(struct spdk_nvme_ns *ns,
        return req;
 }
 
+static inline bool
+_is_io_flags_valid(uint32_t io_flags)
+{
+       if (io_flags & ~SPDK_NVME_IO_FLAGS_VALID_MASK) {
+               /* Invalid io_flags */
+               SPDK_ERRLOG("Invalid io_flags 0x%x\n", io_flags);
+               return false;
+       }
+
+       return true;
+}
+
 static void
 _nvme_ns_cmd_setup_request(struct spdk_nvme_ns *ns, struct nvme_request *req,
                           uint32_t opc, uint64_t lba, uint32_t lba_count,
@@ -208,6 +148,8 @@ _nvme_ns_cmd_setup_request(struct spdk_nvme_ns *ns, struct nvme_request *req,
 {
        struct spdk_nvme_cmd    *cmd;
 
+       assert(_is_io_flags_valid(io_flags));
+
        cmd = &req->cmd;
        cmd->opc = opc;
        cmd->nsid = ns->id;
@@ -223,8 +165,10 @@ _nvme_ns_cmd_setup_request(struct spdk_nvme_ns *ns, struct nvme_request *req,
                }
        }
 
+       cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
+
        cmd->cdw12 = lba_count - 1;
-       cmd->cdw12 |= io_flags;
+       cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);
 
        cmd->cdw15 = apptag_mask;
        cmd->cdw15 = (cmd->cdw15 << 16 | apptag);
@@ -447,13 +391,6 @@ _nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        uint32_t                sectors_per_max_io;
        uint32_t                sectors_per_stripe;
 
-       if (io_flags & 0xFFFF) {
-               /* The bottom 16 bits must be empty */
-               SPDK_ERRLOG("io_flags 0x%x bottom 16 bits is not empty\n",
-                           io_flags);
-               return NULL;
-       }
-
        sector_size = ns->extended_lba_size;
        sectors_per_max_io = ns->sectors_per_max_io;
        sectors_per_stripe = ns->sectors_per_stripe;
@@ -465,7 +402,8 @@ _nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
                sector_size -= 8;
        }
 
-       req = nvme_allocate_request(qpair, payload, lba_count * sector_size, cb_fn, cb_arg);
+       req = nvme_allocate_request(qpair, payload, lba_count * sector_size, lba_count * ns->md_size,
+                                   cb_fn, cb_arg);
        if (req == NULL) {
                return NULL;
        }
@@ -516,6 +454,10 @@ spdk_nvme_ns_cmd_compare(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        struct nvme_request *req;
        struct nvme_payload payload;
 
+       if (!_is_io_flags_valid(io_flags)) {
+               return -EINVAL;
+       }
+
        payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
 
        req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
@@ -524,10 +466,10 @@ spdk_nvme_ns_cmd_compare(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
                              0, true);
        if (req != NULL) {
                return nvme_qpair_submit_request(qpair, req);
-       } else if (spdk_nvme_ns_check_request_length(lba_count,
-                       ns->sectors_per_max_io,
-                       ns->sectors_per_stripe,
-                       qpair->ctrlr->opts.io_queue_requests)) {
+       } else if (nvme_ns_check_request_length(lba_count,
+                                               ns->sectors_per_max_io,
+                                               ns->sectors_per_stripe,
+                                               qpair->ctrlr->opts.io_queue_requests)) {
                return -EINVAL;
        } else {
                return -ENOMEM;
@@ -545,6 +487,10 @@ spdk_nvme_ns_cmd_compare_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair
        struct nvme_request *req;
        struct nvme_payload payload;
 
+       if (!_is_io_flags_valid(io_flags)) {
+               return -EINVAL;
+       }
+
        payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
 
        req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
@@ -553,10 +499,10 @@ spdk_nvme_ns_cmd_compare_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair
                              apptag_mask, apptag, true);
        if (req != NULL) {
                return nvme_qpair_submit_request(qpair, req);
-       } else if (spdk_nvme_ns_check_request_length(lba_count,
-                       ns->sectors_per_max_io,
-                       ns->sectors_per_stripe,
-                       qpair->ctrlr->opts.io_queue_requests)) {
+       } else if (nvme_ns_check_request_length(lba_count,
+                                               ns->sectors_per_max_io,
+                                               ns->sectors_per_stripe,
+                                               qpair->ctrlr->opts.io_queue_requests)) {
                return -EINVAL;
        } else {
                return -ENOMEM;
@@ -573,6 +519,10 @@ spdk_nvme_ns_cmd_comparev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair
        struct nvme_request *req;
        struct nvme_payload payload;
 
+       if (!_is_io_flags_valid(io_flags)) {
+               return -EINVAL;
+       }
+
        if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
                return -EINVAL;
        }
@@ -584,10 +534,45 @@ spdk_nvme_ns_cmd_comparev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair
                              io_flags, 0, 0, true);
        if (req != NULL) {
                return nvme_qpair_submit_request(qpair, req);
-       } else if (spdk_nvme_ns_check_request_length(lba_count,
-                       ns->sectors_per_max_io,
-                       ns->sectors_per_stripe,
-                       qpair->ctrlr->opts.io_queue_requests)) {
+       } else if (nvme_ns_check_request_length(lba_count,
+                                               ns->sectors_per_max_io,
+                                               ns->sectors_per_stripe,
+                                               qpair->ctrlr->opts.io_queue_requests)) {
+               return -EINVAL;
+       } else {
+               return -ENOMEM;
+       }
+}
+
+int
+spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
+                                 uint64_t lba, uint32_t lba_count,
+                                 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
+                                 spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
+                                 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
+                                 uint16_t apptag_mask, uint16_t apptag)
+{
+       struct nvme_request *req;
+       struct nvme_payload payload;
+
+       if (!_is_io_flags_valid(io_flags)) {
+               return -EINVAL;
+       }
+
+       if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
+               return -EINVAL;
+       }
+
+       payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
+
+       req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
+                             SPDK_NVME_OPC_COMPARE, io_flags, apptag_mask, apptag, true);
+       if (req != NULL) {
+               return nvme_qpair_submit_request(qpair, req);
+       } else if (nvme_ns_check_request_length(lba_count,
+                                               ns->sectors_per_max_io,
+                                               ns->sectors_per_stripe,
+                                               qpair->ctrlr->opts.io_queue_requests)) {
                return -EINVAL;
        } else {
                return -ENOMEM;
@@ -603,6 +588,10 @@ spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, vo
        struct nvme_request *req;
        struct nvme_payload payload;
 
+       if (!_is_io_flags_valid(io_flags)) {
+               return -EINVAL;
+       }
+
        payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
 
        req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
@@ -610,10 +599,10 @@ spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, vo
                              0, true);
        if (req != NULL) {
                return nvme_qpair_submit_request(qpair, req);
-       } else if (spdk_nvme_ns_check_request_length(lba_count,
-                       ns->sectors_per_max_io,
-                       ns->sectors_per_stripe,
-                       qpair->ctrlr->opts.io_queue_requests)) {
+       } else if (nvme_ns_check_request_length(lba_count,
+                                               ns->sectors_per_max_io,
+                                               ns->sectors_per_stripe,
+                                               qpair->ctrlr->opts.io_queue_requests)) {
                return -EINVAL;
        } else {
                return -ENOMEM;
@@ -630,6 +619,10 @@ spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *q
        struct nvme_request *req;
        struct nvme_payload payload;
 
+       if (!_is_io_flags_valid(io_flags)) {
+               return -EINVAL;
+       }
+
        payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
 
        req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
@@ -637,10 +630,10 @@ spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *q
                              apptag_mask, apptag, true);
        if (req != NULL) {
                return nvme_qpair_submit_request(qpair, req);
-       } else if (spdk_nvme_ns_check_request_length(lba_count,
-                       ns->sectors_per_max_io,
-                       ns->sectors_per_stripe,
-                       qpair->ctrlr->opts.io_queue_requests)) {
+       } else if (nvme_ns_check_request_length(lba_count,
+                                               ns->sectors_per_max_io,
+                                               ns->sectors_per_stripe,
+                                               qpair->ctrlr->opts.io_queue_requests)) {
                return -EINVAL;
        } else {
                return -ENOMEM;
@@ -657,6 +650,10 @@ spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        struct nvme_request *req;
        struct nvme_payload payload;
 
+       if (!_is_io_flags_valid(io_flags)) {
+               return -EINVAL;
+       }
+
        if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
                return -EINVAL;
        }
@@ -667,10 +664,10 @@ spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
                              io_flags, 0, 0, true);
        if (req != NULL) {
                return nvme_qpair_submit_request(qpair, req);
-       } else if (spdk_nvme_ns_check_request_length(lba_count,
-                       ns->sectors_per_max_io,
-                       ns->sectors_per_stripe,
-                       qpair->ctrlr->opts.io_queue_requests)) {
+       } else if (nvme_ns_check_request_length(lba_count,
+                                               ns->sectors_per_max_io,
+                                               ns->sectors_per_stripe,
+                                               qpair->ctrlr->opts.io_queue_requests)) {
                return -EINVAL;
        } else {
                return -ENOMEM;
@@ -688,6 +685,10 @@ spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *
        struct nvme_request *req;
        struct nvme_payload payload;
 
+       if (!_is_io_flags_valid(io_flags)) {
+               return -EINVAL;
+       }
+
        if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
                return -EINVAL;
        }
@@ -698,10 +699,10 @@ spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *
                              io_flags, apptag_mask, apptag, true);
        if (req != NULL) {
                return nvme_qpair_submit_request(qpair, req);
-       } else if (spdk_nvme_ns_check_request_length(lba_count,
-                       ns->sectors_per_max_io,
-                       ns->sectors_per_stripe,
-                       qpair->ctrlr->opts.io_queue_requests)) {
+       } else if (nvme_ns_check_request_length(lba_count,
+                                               ns->sectors_per_max_io,
+                                               ns->sectors_per_stripe,
+                                               qpair->ctrlr->opts.io_queue_requests)) {
                return -EINVAL;
        } else {
                return -ENOMEM;
@@ -717,16 +718,20 @@ spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        struct nvme_request *req;
        struct nvme_payload payload;
 
+       if (!_is_io_flags_valid(io_flags)) {
+               return -EINVAL;
+       }
+
        payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
 
        req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
                              io_flags, 0, 0, true);
        if (req != NULL) {
                return nvme_qpair_submit_request(qpair, req);
-       } else if (spdk_nvme_ns_check_request_length(lba_count,
-                       ns->sectors_per_max_io,
-                       ns->sectors_per_stripe,
-                       qpair->ctrlr->opts.io_queue_requests)) {
+       } else if (nvme_ns_check_request_length(lba_count,
+                                               ns->sectors_per_max_io,
+                                               ns->sectors_per_stripe,
+                                               qpair->ctrlr->opts.io_queue_requests)) {
                return -EINVAL;
        } else {
                return -ENOMEM;
@@ -742,16 +747,20 @@ spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *
        struct nvme_request *req;
        struct nvme_payload payload;
 
+       if (!_is_io_flags_valid(io_flags)) {
+               return -EINVAL;
+       }
+
        payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
 
        req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
                              io_flags, apptag_mask, apptag, true);
        if (req != NULL) {
                return nvme_qpair_submit_request(qpair, req);
-       } else if (spdk_nvme_ns_check_request_length(lba_count,
-                       ns->sectors_per_max_io,
-                       ns->sectors_per_stripe,
-                       qpair->ctrlr->opts.io_queue_requests)) {
+       } else if (nvme_ns_check_request_length(lba_count,
+                                               ns->sectors_per_max_io,
+                                               ns->sectors_per_stripe,
+                                               qpair->ctrlr->opts.io_queue_requests)) {
                return -EINVAL;
        } else {
                return -ENOMEM;
@@ -768,6 +777,10 @@ spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
        struct nvme_request *req;
        struct nvme_payload payload;
 
+       if (!_is_io_flags_valid(io_flags)) {
+               return -EINVAL;
+       }
+
        if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
                return -EINVAL;
        }
@@ -778,10 +791,10 @@ spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
                              io_flags, 0, 0, true);
        if (req != NULL) {
                return nvme_qpair_submit_request(qpair, req);
-       } else if (spdk_nvme_ns_check_request_length(lba_count,
-                       ns->sectors_per_max_io,
-                       ns->sectors_per_stripe,
-                       qpair->ctrlr->opts.io_queue_requests)) {
+       } else if (nvme_ns_check_request_length(lba_count,
+                                               ns->sectors_per_max_io,
+                                               ns->sectors_per_stripe,
+                                               qpair->ctrlr->opts.io_queue_requests)) {
                return -EINVAL;
        } else {
                return -ENOMEM;
@@ -799,6 +812,10 @@ spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair
        struct nvme_request *req;
        struct nvme_payload payload;
 
+       if (!_is_io_flags_valid(io_flags)) {
+               return -EINVAL;
+       }
+
        if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
                return -EINVAL;
        }
@@ -809,10 +826,10 @@ spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair
                              io_flags, apptag_mask, apptag, true);
        if (req != NULL) {
                return nvme_qpair_submit_request(qpair, req);
-       } else if (spdk_nvme_ns_check_request_length(lba_count,
-                       ns->sectors_per_max_io,
-                       ns->sectors_per_stripe,
-                       qpair->ctrlr->opts.io_queue_requests)) {
+       } else if (nvme_ns_check_request_length(lba_count,
+                                               ns->sectors_per_max_io,
+                                               ns->sectors_per_stripe,
+                                               qpair->ctrlr->opts.io_queue_requests)) {
                return -EINVAL;
        } else {
                return -ENOMEM;
@@ -829,6 +846,10 @@ spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *q
        struct spdk_nvme_cmd    *cmd;
        uint64_t                *tmp_lba;
 
+       if (!_is_io_flags_valid(io_flags)) {
+               return -EINVAL;
+       }
+
        if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
                return -EINVAL;
        }
@@ -845,7 +866,37 @@ spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *q
        tmp_lba = (uint64_t *)&cmd->cdw10;
        *tmp_lba = lba;
        cmd->cdw12 = lba_count - 1;
-       cmd->cdw12 |= io_flags;
+       cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
+       cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);
+
+       return nvme_qpair_submit_request(qpair, req);
+}
+
+int
+spdk_nvme_ns_cmd_write_uncorrectable(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
+                                    uint64_t lba, uint32_t lba_count,
+                                    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+       struct nvme_request     *req;
+       struct spdk_nvme_cmd    *cmd;
+       uint64_t                *tmp_lba;
+
+       if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
+               return -EINVAL;
+       }
+
+       req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
+       if (req == NULL) {
+               return -ENOMEM;
+       }
+
+       cmd = &req->cmd;
+       cmd->opc = SPDK_NVME_OPC_WRITE_UNCORRECTABLE;
+       cmd->nsid = ns->id;
+
+       tmp_lba = (uint64_t *)&cmd->cdw10;
+       *tmp_lba = lba;
+       cmd->cdw12 = lba_count - 1;
 
        return nvme_qpair_submit_request(qpair, req);
 }
@@ -878,7 +929,7 @@ spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qp
        cmd->opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
        cmd->nsid = ns->id;
 
-       cmd->cdw10 = num_ranges - 1;
+       cmd->cdw10_bits.dsm.nr = num_ranges - 1;
        cmd->cdw11 = type;
 
        return nvme_qpair_submit_request(qpair, req);
@@ -926,12 +977,9 @@ spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
        cmd->opc = SPDK_NVME_OPC_RESERVATION_REGISTER;
        cmd->nsid = ns->id;
 
-       /* Bits 0-2 */
-       cmd->cdw10 = action;
-       /* Bit 3 */
-       cmd->cdw10 |= ignore_key ? 1 << 3 : 0;
-       /* Bits 30-31 */
-       cmd->cdw10 |= (uint32_t)cptpl << 30;
+       cmd->cdw10_bits.resv_register.rrega = action;
+       cmd->cdw10_bits.resv_register.iekey = ignore_key;
+       cmd->cdw10_bits.resv_register.cptpl = cptpl;
 
        return nvme_qpair_submit_request(qpair, req);
 }
@@ -959,12 +1007,9 @@ spdk_nvme_ns_cmd_reservation_release(struct spdk_nvme_ns *ns,
        cmd->opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
        cmd->nsid = ns->id;
 
-       /* Bits 0-2 */
-       cmd->cdw10 = action;
-       /* Bit 3 */
-       cmd->cdw10 |= ignore_key ? 1 << 3 : 0;
-       /* Bits 8-15 */
-       cmd->cdw10 |= (uint32_t)type << 8;
+       cmd->cdw10_bits.resv_release.rrela = action;
+       cmd->cdw10_bits.resv_release.iekey = ignore_key;
+       cmd->cdw10_bits.resv_release.rtype = type;
 
        return nvme_qpair_submit_request(qpair, req);
 }
@@ -992,12 +1037,9 @@ spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
        cmd->opc = SPDK_NVME_OPC_RESERVATION_ACQUIRE;
        cmd->nsid = ns->id;
 
-       /* Bits 0-2 */
-       cmd->cdw10 = action;
-       /* Bit 3 */
-       cmd->cdw10 |= ignore_key ? 1 << 3 : 0;
-       /* Bits 8-15 */
-       cmd->cdw10 |= (uint32_t)type << 8;
+       cmd->cdw10_bits.resv_acquire.racqa = action;
+       cmd->cdw10_bits.resv_acquire.iekey = ignore_key;
+       cmd->cdw10_bits.resv_acquire.rtype = type;
 
        return nvme_qpair_submit_request(qpair, req);
 }