ublk_queue_cmd(ubq, req);
}
-static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+static inline int ublk_check_cmd_op(u32 cmd_op)
+{
+ u32 ioc_type = _IOC_TYPE(cmd_op);
+
+ if (!IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u')
+ return -EOPNOTSUPP;
+
+ if (ioc_type != 'u' && ioc_type != 0)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
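
For reference (not part of the diff): the check above works because the new
ublk opcodes are built with the ioctl encoding macros, so _IOC_TYPE() yields
'u', while the legacy opcodes are bare small constants whose _IOC_TYPE() is 0.
A minimal userspace sketch, with made-up DEMO_* names standing in for the real
UAPI definitions:

    /* cc -o ioc_type_demo ioc_type_demo.c */
    #include <stdio.h>
    #include <linux/ioctl.h>

    struct demo_payload {
            unsigned long long addr;
            unsigned int len;
    };

    /* New-style opcode: ioctl encoding, type byte 'u'. */
    #define DEMO_U_FETCH       _IOWR('u', 0x20, struct demo_payload)
    /* Legacy-style opcode: a bare constant, so its _IOC_TYPE() is 0. */
    #define DEMO_LEGACY_FETCH  0x20

    int main(void)
    {
            /* Prints 0x75 ('u') for the new encoding and 0 for the legacy one. */
            printf("_IOC_TYPE(new)    = %#x\n", (unsigned)_IOC_TYPE(DEMO_U_FETCH));
            printf("_IOC_TYPE(legacy) = %#x\n", (unsigned)_IOC_TYPE(DEMO_LEGACY_FETCH));
            return 0;
    }

With that, the first test rejects anything that is not 'u'-encoded when the
legacy-opcode config is disabled, and the second accepts only the two known
encodings when it is enabled.
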
+static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
+ unsigned int issue_flags,
- struct ublksrv_io_cmd *ub_cmd)
++ const struct ublksrv_io_cmd *ub_cmd)
{
- const struct ublksrv_io_cmd *ub_cmd = io_uring_sqe_cmd(cmd->sqe);
struct ublk_device *ub = cmd->file->private_data;
struct ublk_queue *ubq;
struct ublk_io *io;
return -EIOCBQUEUED;
}
- struct ublksrv_io_cmd *ub_src = (struct ublksrv_io_cmd *) cmd->cmd;
- struct ublksrv_io_cmd ub_cmd;
-
+static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
- ub_cmd.q_id = READ_ONCE(ub_src->q_id);
- ub_cmd.tag = READ_ONCE(ub_src->tag);
- ub_cmd.result = READ_ONCE(ub_src->result);
- ub_cmd.addr = READ_ONCE(ub_src->addr);
+ /*
+ * Not necessary for async retry, but let's keep it simple and always
+ * copy the values to avoid any potential reuse.
+ */
++ const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
++ const struct ublksrv_io_cmd ub_cmd = {
++ .q_id = READ_ONCE(ub_src->q_id),
++ .tag = READ_ONCE(ub_src->tag),
++ .result = READ_ONCE(ub_src->result),
++ .addr = READ_ONCE(ub_src->addr)
++ };
+
+ return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
+}
+
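
Read as a merge result rather than line by line, the single-'+' and '++'
additions above combine into the following ublk_ch_uring_cmd() (reconstructed
from this hunk only, indentation approximated):

    static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
    {
            /*
             * Not necessary for async retry, but let's keep it simple and always
             * copy the values to avoid any potential reuse.
             */
            const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
            const struct ublksrv_io_cmd ub_cmd = {
                    .q_id = READ_ONCE(ub_src->q_id),
                    .tag = READ_ONCE(ub_src->tag),
                    .result = READ_ONCE(ub_src->result),
                    .addr = READ_ONCE(ub_src->addr)
            };

            return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
    }

The SQE payload is snapshotted with READ_ONCE() into an on-stack const copy
before anything acts on it, so a retried invocation (or a reused source
buffer) can never change values the handler has already looked at.
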
static const struct file_operations ublk_ch_fops = {
.owner = THIS_MODULE,
.open = ublk_ch_open,
static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
struct ublk_device *ub = NULL;
+ u32 cmd_op = cmd->cmd_op;
int ret = -EINVAL;
if (issue_flags & IO_URING_F_NONBLOCK)
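
Both hunks replace the old open-coded (struct ublksrv_*_cmd *)cmd->cmd cast
with the io_uring_sqe_cmd() accessor and route the opcode through cmd->cmd_op.
A minimal sketch of the same two idioms in an unrelated, made-up driver
(demo_* names are invented; header locations match the tree this diff is
against, newer trees may expose the io_uring_cmd bits from
<linux/io_uring/cmd.h> instead):

    #include <linux/compiler.h>
    #include <linux/fs.h>
    #include <linux/io_uring.h>
    #include <linux/ioctl.h>
    #include <linux/module.h>
    #include <linux/types.h>

    struct demo_cmd_payload {
            __u64   addr;
            __u32   len;
            __u32   flags;
    };

    static int demo_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
    {
            /* Payload lives in the SQE command area; use the accessor, not cmd->cmd. */
            const struct demo_cmd_payload *payload = io_uring_sqe_cmd(cmd->sqe);

            /* Accept only opcodes encoded with this driver's ioctl type ('D' here). */
            if (_IOC_TYPE(cmd->cmd_op) != 'D')
                    return -EOPNOTSUPP;

            if (!READ_ONCE(payload->len))
                    return -EINVAL;

            /*
             * Returning anything other than -EIOCBQUEUED completes the command
             * with that value as the CQE result.
             */
            return 0;
    }

    static const struct file_operations demo_fops = {
            .owner          = THIS_MODULE,
            .uring_cmd      = demo_uring_cmd,
    };
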