static bool
-spdk_nvme_ns_check_request_length(uint32_t lba_count, uint32_t sectors_per_max_io,
- uint32_t sectors_per_stripe, uint32_t qdepth)
+nvme_ns_check_request_length(uint32_t lba_count, uint32_t sectors_per_max_io,
+ uint32_t sectors_per_stripe, uint32_t qdepth)
{
uint32_t child_per_io = UINT32_MAX;
return child_per_io >= qdepth;
}
-static void
-nvme_cb_complete_child(void *child_arg, const struct spdk_nvme_cpl *cpl)
-{
- struct nvme_request *child = child_arg;
- struct nvme_request *parent = child->parent;
-
- nvme_request_remove_child(parent, child);
-
- if (spdk_nvme_cpl_is_error(cpl)) {
- memcpy(&parent->parent_status, cpl, sizeof(*cpl));
- }
-
- if (parent->num_children == 0) {
- nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
- parent, &parent->parent_status);
- nvme_free_request(parent);
- }
-}
-
-static void
-nvme_request_add_child(struct nvme_request *parent, struct nvme_request *child)
-{
- assert(parent->num_children != UINT16_MAX);
-
- if (parent->num_children == 0) {
- /*
- * Defer initialization of the children TAILQ since it falls
- * on a separate cacheline. This ensures we do not touch this
- * cacheline except on request splitting cases, which are
- * relatively rare.
- */
- TAILQ_INIT(&parent->children);
- parent->parent = NULL;
- memset(&parent->parent_status, 0, sizeof(struct spdk_nvme_cpl));
- }
-
- parent->num_children++;
- TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);
- child->parent = parent;
- child->cb_fn = nvme_cb_complete_child;
- child->cb_arg = child;
-}
-
-void
-nvme_request_remove_child(struct nvme_request *parent, struct nvme_request *child)
-{
- assert(parent != NULL);
- assert(child != NULL);
- assert(child->parent == parent);
- assert(parent->num_children != 0);
-
- parent->num_children--;
- TAILQ_REMOVE(&parent->children, child, child_tailq);
-}
-
-static void
-nvme_request_free_children(struct nvme_request *req)
-{
- struct nvme_request *child, *tmp;
-
- if (req->num_children == 0) {
- return;
- }
-
- /* free all child nvme_request */
- TAILQ_FOREACH_SAFE(child, &req->children, child_tailq, tmp) {
- nvme_request_remove_child(req, child);
- nvme_request_free_children(child);
- nvme_free_request(child);
- }
-}
-
static struct nvme_request *
_nvme_add_child_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
const struct nvme_payload *payload,
return req;
}
+/*
+ * Validate that io_flags contains no bits outside
+ * SPDK_NVME_IO_FLAGS_VALID_MASK.  Logs and returns false if any
+ * invalid bit is set; returns true otherwise.  Used both as a
+ * runtime guard in the public submit paths and as an assert in
+ * internal request setup.
+ */
+static inline bool
+_is_io_flags_valid(uint32_t io_flags)
+{
+	if (io_flags & ~SPDK_NVME_IO_FLAGS_VALID_MASK) {
+		/* Invalid io_flags */
+		SPDK_ERRLOG("Invalid io_flags 0x%x\n", io_flags);
+		return false;
+	}
+
+	return true;
+}
+
static void
_nvme_ns_cmd_setup_request(struct spdk_nvme_ns *ns, struct nvme_request *req,
uint32_t opc, uint64_t lba, uint32_t lba_count,
{
struct spdk_nvme_cmd *cmd;
+ assert(_is_io_flags_valid(io_flags));
+
cmd = &req->cmd;
cmd->opc = opc;
cmd->nsid = ns->id;
}
}
+ cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
+
cmd->cdw12 = lba_count - 1;
- cmd->cdw12 |= io_flags;
+ cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);
cmd->cdw15 = apptag_mask;
cmd->cdw15 = (cmd->cdw15 << 16 | apptag);
uint32_t sectors_per_max_io;
uint32_t sectors_per_stripe;
- if (io_flags & 0xFFFF) {
- /* The bottom 16 bits must be empty */
- SPDK_ERRLOG("io_flags 0x%x bottom 16 bits is not empty\n",
- io_flags);
- return NULL;
- }
-
sector_size = ns->extended_lba_size;
sectors_per_max_io = ns->sectors_per_max_io;
sectors_per_stripe = ns->sectors_per_stripe;
sector_size -= 8;
}
- req = nvme_allocate_request(qpair, payload, lba_count * sector_size, cb_fn, cb_arg);
+ req = nvme_allocate_request(qpair, payload, lba_count * sector_size, lba_count * ns->md_size,
+ cb_fn, cb_arg);
if (req == NULL) {
return NULL;
}
struct nvme_request *req;
struct nvme_payload payload;
+ if (!_is_io_flags_valid(io_flags)) {
+ return -EINVAL;
+ }
+
payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
0, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
- } else if (spdk_nvme_ns_check_request_length(lba_count,
- ns->sectors_per_max_io,
- ns->sectors_per_stripe,
- qpair->ctrlr->opts.io_queue_requests)) {
+ } else if (nvme_ns_check_request_length(lba_count,
+ ns->sectors_per_max_io,
+ ns->sectors_per_stripe,
+ qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;
struct nvme_request *req;
struct nvme_payload payload;
+ if (!_is_io_flags_valid(io_flags)) {
+ return -EINVAL;
+ }
+
payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
apptag_mask, apptag, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
- } else if (spdk_nvme_ns_check_request_length(lba_count,
- ns->sectors_per_max_io,
- ns->sectors_per_stripe,
- qpair->ctrlr->opts.io_queue_requests)) {
+ } else if (nvme_ns_check_request_length(lba_count,
+ ns->sectors_per_max_io,
+ ns->sectors_per_stripe,
+ qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;
struct nvme_request *req;
struct nvme_payload payload;
+ if (!_is_io_flags_valid(io_flags)) {
+ return -EINVAL;
+ }
+
if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
return -EINVAL;
}
io_flags, 0, 0, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
- } else if (spdk_nvme_ns_check_request_length(lba_count,
- ns->sectors_per_max_io,
- ns->sectors_per_stripe,
- qpair->ctrlr->opts.io_queue_requests)) {
+ } else if (nvme_ns_check_request_length(lba_count,
+ ns->sectors_per_max_io,
+ ns->sectors_per_stripe,
+ qpair->ctrlr->opts.io_queue_requests)) {
+ return -EINVAL;
+ } else {
+ return -ENOMEM;
+ }
+}
+
+int
+spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
+ uint64_t lba, uint32_t lba_count,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
+ spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
+ spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
+ uint16_t apptag_mask, uint16_t apptag)
+{
+ struct nvme_request *req;
+ struct nvme_payload payload;
+
+ if (!_is_io_flags_valid(io_flags)) {
+ return -EINVAL;
+ }
+
+ if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
+ return -EINVAL;
+ }
+
+ payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
+
+ req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
+ SPDK_NVME_OPC_COMPARE, io_flags, apptag_mask, apptag, true);
+ if (req != NULL) {
+ return nvme_qpair_submit_request(qpair, req);
+ } else if (nvme_ns_check_request_length(lba_count,
+ ns->sectors_per_max_io,
+ ns->sectors_per_stripe,
+ qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;
struct nvme_request *req;
struct nvme_payload payload;
+ if (!_is_io_flags_valid(io_flags)) {
+ return -EINVAL;
+ }
+
payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
0, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
- } else if (spdk_nvme_ns_check_request_length(lba_count,
- ns->sectors_per_max_io,
- ns->sectors_per_stripe,
- qpair->ctrlr->opts.io_queue_requests)) {
+ } else if (nvme_ns_check_request_length(lba_count,
+ ns->sectors_per_max_io,
+ ns->sectors_per_stripe,
+ qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;
struct nvme_request *req;
struct nvme_payload payload;
+ if (!_is_io_flags_valid(io_flags)) {
+ return -EINVAL;
+ }
+
payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
apptag_mask, apptag, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
- } else if (spdk_nvme_ns_check_request_length(lba_count,
- ns->sectors_per_max_io,
- ns->sectors_per_stripe,
- qpair->ctrlr->opts.io_queue_requests)) {
+ } else if (nvme_ns_check_request_length(lba_count,
+ ns->sectors_per_max_io,
+ ns->sectors_per_stripe,
+ qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;
struct nvme_request *req;
struct nvme_payload payload;
+ if (!_is_io_flags_valid(io_flags)) {
+ return -EINVAL;
+ }
+
if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
return -EINVAL;
}
io_flags, 0, 0, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
- } else if (spdk_nvme_ns_check_request_length(lba_count,
- ns->sectors_per_max_io,
- ns->sectors_per_stripe,
- qpair->ctrlr->opts.io_queue_requests)) {
+ } else if (nvme_ns_check_request_length(lba_count,
+ ns->sectors_per_max_io,
+ ns->sectors_per_stripe,
+ qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;
struct nvme_request *req;
struct nvme_payload payload;
+ if (!_is_io_flags_valid(io_flags)) {
+ return -EINVAL;
+ }
+
if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
return -EINVAL;
}
io_flags, apptag_mask, apptag, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
- } else if (spdk_nvme_ns_check_request_length(lba_count,
- ns->sectors_per_max_io,
- ns->sectors_per_stripe,
- qpair->ctrlr->opts.io_queue_requests)) {
+ } else if (nvme_ns_check_request_length(lba_count,
+ ns->sectors_per_max_io,
+ ns->sectors_per_stripe,
+ qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;
struct nvme_request *req;
struct nvme_payload payload;
+ if (!_is_io_flags_valid(io_flags)) {
+ return -EINVAL;
+ }
+
payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
io_flags, 0, 0, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
- } else if (spdk_nvme_ns_check_request_length(lba_count,
- ns->sectors_per_max_io,
- ns->sectors_per_stripe,
- qpair->ctrlr->opts.io_queue_requests)) {
+ } else if (nvme_ns_check_request_length(lba_count,
+ ns->sectors_per_max_io,
+ ns->sectors_per_stripe,
+ qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;
struct nvme_request *req;
struct nvme_payload payload;
+ if (!_is_io_flags_valid(io_flags)) {
+ return -EINVAL;
+ }
+
payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
io_flags, apptag_mask, apptag, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
- } else if (spdk_nvme_ns_check_request_length(lba_count,
- ns->sectors_per_max_io,
- ns->sectors_per_stripe,
- qpair->ctrlr->opts.io_queue_requests)) {
+ } else if (nvme_ns_check_request_length(lba_count,
+ ns->sectors_per_max_io,
+ ns->sectors_per_stripe,
+ qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;
struct nvme_request *req;
struct nvme_payload payload;
+ if (!_is_io_flags_valid(io_flags)) {
+ return -EINVAL;
+ }
+
if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
return -EINVAL;
}
io_flags, 0, 0, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
- } else if (spdk_nvme_ns_check_request_length(lba_count,
- ns->sectors_per_max_io,
- ns->sectors_per_stripe,
- qpair->ctrlr->opts.io_queue_requests)) {
+ } else if (nvme_ns_check_request_length(lba_count,
+ ns->sectors_per_max_io,
+ ns->sectors_per_stripe,
+ qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;
struct nvme_request *req;
struct nvme_payload payload;
+ if (!_is_io_flags_valid(io_flags)) {
+ return -EINVAL;
+ }
+
if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
return -EINVAL;
}
io_flags, apptag_mask, apptag, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
- } else if (spdk_nvme_ns_check_request_length(lba_count,
- ns->sectors_per_max_io,
- ns->sectors_per_stripe,
- qpair->ctrlr->opts.io_queue_requests)) {
+ } else if (nvme_ns_check_request_length(lba_count,
+ ns->sectors_per_max_io,
+ ns->sectors_per_stripe,
+ qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;
struct spdk_nvme_cmd *cmd;
uint64_t *tmp_lba;
+ if (!_is_io_flags_valid(io_flags)) {
+ return -EINVAL;
+ }
+
if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
return -EINVAL;
}
tmp_lba = (uint64_t *)&cmd->cdw10;
*tmp_lba = lba;
cmd->cdw12 = lba_count - 1;
- cmd->cdw12 |= io_flags;
+ cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
+ cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);
+
+ return nvme_qpair_submit_request(qpair, req);
+}
+
+/*
+ * Submit a Write Uncorrectable command marking lba_count blocks
+ * starting at lba as invalid.  lba_count is 1-based and limited to
+ * 65536 (the command's NDLB field is a 0's-based 16-bit value).
+ *
+ * Returns 0 on successful submission, -EINVAL for an out-of-range
+ * lba_count, or -ENOMEM if no request object is available.
+ */
+int
+spdk_nvme_ns_cmd_write_uncorrectable(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
+				     uint64_t lba, uint32_t lba_count,
+				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+	struct nvme_request *req;
+	struct spdk_nvme_cmd *cmd;
+	uint64_t *tmp_lba;
+
+	/* NDLB is 0's-based and 16 bits wide, so at most 65536 blocks. */
+	if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
+		return -EINVAL;
+	}
+
+	/* No data transfer for this command; a null payload suffices. */
+	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
+	if (req == NULL) {
+		return -ENOMEM;
+	}
+
+	cmd = &req->cmd;
+	cmd->opc = SPDK_NVME_OPC_WRITE_UNCORRECTABLE;
+	cmd->nsid = ns->id;
+
+	/* cdw10/cdw11 together hold the 64-bit starting LBA. */
+	tmp_lba = (uint64_t *)&cmd->cdw10;
+	*tmp_lba = lba;
+	/* cdw12 low 16 bits: 0's-based number of logical blocks. */
+	cmd->cdw12 = lba_count - 1;
	return nvme_qpair_submit_request(qpair, req);
}
cmd->opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
cmd->nsid = ns->id;
- cmd->cdw10 = num_ranges - 1;
+ cmd->cdw10_bits.dsm.nr = num_ranges - 1;
cmd->cdw11 = type;
return nvme_qpair_submit_request(qpair, req);
cmd->opc = SPDK_NVME_OPC_RESERVATION_REGISTER;
cmd->nsid = ns->id;
- /* Bits 0-2 */
- cmd->cdw10 = action;
- /* Bit 3 */
- cmd->cdw10 |= ignore_key ? 1 << 3 : 0;
- /* Bits 30-31 */
- cmd->cdw10 |= (uint32_t)cptpl << 30;
+ cmd->cdw10_bits.resv_register.rrega = action;
+ cmd->cdw10_bits.resv_register.iekey = ignore_key;
+ cmd->cdw10_bits.resv_register.cptpl = cptpl;
return nvme_qpair_submit_request(qpair, req);
}
cmd->opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
cmd->nsid = ns->id;
- /* Bits 0-2 */
- cmd->cdw10 = action;
- /* Bit 3 */
- cmd->cdw10 |= ignore_key ? 1 << 3 : 0;
- /* Bits 8-15 */
- cmd->cdw10 |= (uint32_t)type << 8;
+ cmd->cdw10_bits.resv_release.rrela = action;
+ cmd->cdw10_bits.resv_release.iekey = ignore_key;
+ cmd->cdw10_bits.resv_release.rtype = type;
return nvme_qpair_submit_request(qpair, req);
}
cmd->opc = SPDK_NVME_OPC_RESERVATION_ACQUIRE;
cmd->nsid = ns->id;
- /* Bits 0-2 */
- cmd->cdw10 = action;
- /* Bit 3 */
- cmd->cdw10 |= ignore_key ? 1 << 3 : 0;
- /* Bits 8-15 */
- cmd->cdw10 |= (uint32_t)type << 8;
+ cmd->cdw10_bits.resv_acquire.racqa = action;
+ cmd->cdw10_bits.resv_acquire.iekey = ignore_key;
+ cmd->cdw10_bits.resv_acquire.rtype = type;
return nvme_qpair_submit_request(qpair, req);
}