return ns;
}
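+/*
+ * Put a request back on the requeue list and, unless the queue has been
+ * stopped (see nvme_stop_queues), kick the requeue list so the request is
+ * dispatched again once the queue is running.
+ */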
+void nvme_requeue_req(struct request *req)
+{
+ unsigned long flags;
+
+ blk_mq_requeue_request(req);
+ spin_lock_irqsave(req->q->queue_lock, flags);
+ if (!blk_queue_stopped(req->q))
+ blk_mq_kick_requeue_list(req->q);
+ spin_unlock_irqrestore(req->q->queue_lock, flags);
+}
+
struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, unsigned int flags)
{
return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
case NVME_IOCTL_SUBMIT_IO:
return nvme_submit_io(ns, (void __user *)arg);
+#ifdef CONFIG_BLK_DEV_NVME_SCSI
case SG_GET_VERSION_NUM:
return nvme_sg_get_version_num((void __user *)arg);
case SG_IO:
return nvme_sg_io(ns, (void __user *)arg);
+#endif
default:
return -ENOTTY;
}
ns->type = NVME_NS_LIGHTNVM;
}
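+ /*
+ * Copy the namespace identifiers from the Identify Namespace data: the
+ * EUI64 field exists from NVMe 1.1 on, the NGUID field from NVMe 1.2 on.
+ * They are exported through the eui/uuid sysfs attributes added below.
+ */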
+ if (ns->ctrl->vs >= NVME_VS(1, 1))
+ memcpy(ns->eui, id->eui64, sizeof(ns->eui));
+ if (ns->ctrl->vs >= NVME_VS(1, 2))
+ memcpy(ns->uuid, id->nguid, sizeof(ns->uuid));
+
old_ms = ns->ms;
lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
ns->lba_shift = id->lbaf[lbaf].ds;
return 0;
}
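+/*
+ * Handle the deprecated NVME_IOCTL_IO_CMD on the controller character
+ * device.  This is only allowed when the controller has a single
+ * namespace; a reference to that namespace is held while the command runs.
+ */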
+static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
+{
+ struct nvme_ns *ns;
+ int ret;
+
+ mutex_lock(&ctrl->namespaces_mutex);
+ if (list_empty(&ctrl->namespaces)) {
+ ret = -ENOTTY;
+ goto out_unlock;
+ }
+
+ ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
+ if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
+ dev_warn(ctrl->dev,
+ "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ dev_warn(ctrl->dev,
+ "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
+ kref_get(&ns->kref);
+ mutex_unlock(&ctrl->namespaces_mutex);
+
+ ret = nvme_user_cmd(ctrl, ns, argp);
+ nvme_put_ns(ns);
+ return ret;
+
+out_unlock:
+ mutex_unlock(&ctrl->namespaces_mutex);
+ return ret;
+}
+
static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct nvme_ctrl *ctrl = file->private_data;
void __user *argp = (void __user *)arg;
- struct nvme_ns *ns;
switch (cmd) {
case NVME_IOCTL_ADMIN_CMD:
return nvme_user_cmd(ctrl, NULL, argp);
case NVME_IOCTL_IO_CMD:
- if (list_empty(&ctrl->namespaces))
- return -ENOTTY;
- ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
- return nvme_user_cmd(ctrl, ns, argp);
+ return nvme_dev_user_cmd(ctrl, argp);
case NVME_IOCTL_RESET:
dev_warn(ctrl->dev, "resetting controller\n");
return ctrl->ops->reset_ctrl(ctrl);
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
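+/*
+ * Per-namespace identification attributes (uuid, eui, nsid), exported in
+ * sysfs under the namespace block device.
+ */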
+static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+ return sprintf(buf, "%pU\n", ns->uuid);
+}
+static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);
+
+static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+ return sprintf(buf, "%8ph\n", ns->eui);
+}
+static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);
+
+static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+ return sprintf(buf, "%d\n", ns->ns_id);
+}
+static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
+
+static struct attribute *nvme_ns_attrs[] = {
+ &dev_attr_uuid.attr,
+ &dev_attr_eui.attr,
+ &dev_attr_nsid.attr,
+ NULL,
+};
+
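+/*
+ * Only expose the uuid/eui attributes when the controller reported a
+ * non-zero NGUID/EUI64 for this namespace.
+ */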
+static umode_t nvme_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+
+ if (a == &dev_attr_uuid.attr) {
+ if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
+ return 0;
+ }
+ if (a == &dev_attr_eui.attr) {
+ if (!memchr_inv(ns->eui, 0, sizeof(ns->eui)))
+ return 0;
+ }
+ return a->mode;
+}
+
+static const struct attribute_group nvme_ns_attr_group = {
+ .attrs = nvme_ns_attrs,
+ .is_visible = nvme_attrs_are_visible,
+};
+
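+/*
+ * Read-only controller attributes generated from the fixed-size identify
+ * strings (model, serial, firmware revision).
+ */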
+#define nvme_show_function(field) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
+ return sprintf(buf, "%.*s\n", (int)sizeof(ctrl->field), ctrl->field); \
+} \
+static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
+
+nvme_show_function(model);
+nvme_show_function(serial);
+nvme_show_function(firmware_rev);
+
+static struct attribute *nvme_dev_attrs[] = {
+ &dev_attr_reset_controller.attr,
+ &dev_attr_model.attr,
+ &dev_attr_serial.attr,
+ &dev_attr_firmware_rev.attr,
+ NULL
+};
+
+static struct attribute_group nvme_dev_attrs_group = {
+ .attrs = nvme_dev_attrs,
+};
+
+static const struct attribute_group *nvme_dev_attr_groups[] = {
+ &nvme_dev_attrs_group,
+ NULL,
+};
+
static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
{
struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
{
struct nvme_ns *ns;
+ lockdep_assert_held(&ctrl->namespaces_mutex);
+
list_for_each_entry(ns, &ctrl->namespaces, list) {
if (ns->ns_id == nsid)
return ns;
struct gendisk *disk;
int node = dev_to_node(ctrl->dev);
+ lockdep_assert_held(&ctrl->namespaces_mutex);
+
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
if (!ns)
return;
list_add_tail(&ns->list, &ctrl->namespaces);
kref_get(&ctrl->kref);
- if (ns->type != NVME_NS_LIGHTNVM)
- add_disk(ns->disk);
+ if (ns->type == NVME_NS_LIGHTNVM)
+ return;
+ add_disk(ns->disk);
+ if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
+ &nvme_ns_attr_group))
+ pr_warn("%s: failed to create sysfs group for identification\n",
+ ns->disk->disk_name);
return;
out_free_disk:
kfree(disk);
bool kill = nvme_io_incapable(ns->ctrl) &&
!blk_queue_dying(ns->queue);
- if (kill)
+ lockdep_assert_held(&ns->ctrl->namespaces_mutex);
+
+ if (kill) {
blk_set_queue_dying(ns->queue);
+
+ /*
+ * The controller was shut down first if we got here through
+ * device removal. The shutdown may requeue outstanding
+ * requests. These need to be aborted immediately so
+ * del_gendisk doesn't block indefinitely for their completion.
+ */
+ blk_mq_abort_requeue_list(ns->queue);
+ }
if (ns->disk->flags & GENHD_FL_UP) {
if (blk_get_integrity(ns->disk))
blk_integrity_unregister(ns->disk);
+ sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
+ &nvme_ns_attr_group);
del_gendisk(ns->disk);
}
if (kill || !blk_queue_dying(ns->queue)) {
struct nvme_ns *ns, *next;
unsigned i;
+ lockdep_assert_held(&ctrl->namespaces_mutex);
+
for (i = 1; i <= nn; i++)
nvme_validate_ns(ctrl, i);
if (nvme_identify_ctrl(ctrl, &id))
return;
+ mutex_lock(&ctrl->namespaces_mutex);
nn = le32_to_cpu(id->nn);
if (ctrl->vs >= NVME_VS(1, 1) &&
!(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
__nvme_scan_namespaces(ctrl, le32_to_cpup(&id->nn));
done:
list_sort(NULL, &ctrl->namespaces, ns_cmp);
+ mutex_unlock(&ctrl->namespaces_mutex);
kfree(id);
}
{
struct nvme_ns *ns, *next;
+ mutex_lock(&ctrl->namespaces_mutex);
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
nvme_ns_remove(ns);
+ mutex_unlock(&ctrl->namespaces_mutex);
}
static DEFINE_IDA(nvme_instance_ida);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
- device_remove_file(ctrl->device, &dev_attr_reset_controller);
device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));
spin_lock(&dev_list_lock);
int ret;
INIT_LIST_HEAD(&ctrl->namespaces);
+ mutex_init(&ctrl->namespaces_mutex);
kref_init(&ctrl->kref);
ctrl->dev = dev;
ctrl->ops = ops;
if (ret)
goto out;
- ctrl->device = device_create(nvme_class, ctrl->dev,
+ ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
MKDEV(nvme_char_major, ctrl->instance),
- dev, "nvme%d", ctrl->instance);
+ dev, nvme_dev_attr_groups,
+ "nvme%d", ctrl->instance);
if (IS_ERR(ctrl->device)) {
ret = PTR_ERR(ctrl->device);
goto out_release_instance;
get_device(ctrl->device);
dev_set_drvdata(ctrl->device, ctrl);
- ret = device_create_file(ctrl->device, &dev_attr_reset_controller);
- if (ret)
- goto out_put_device;
-
spin_lock(&dev_list_lock);
list_add_tail(&ctrl->node, &nvme_ctrl_list);
spin_unlock(&dev_list_lock);
return 0;
-
-out_put_device:
- put_device(ctrl->device);
- device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));
out_release_instance:
nvme_release_instance(ctrl);
out:
return ret;
}
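+/*
+ * Mark every namespace queue as stopped and quiesce it: cancel pending
+ * requeue work and stop the hardware queues so no further requests are
+ * dispatched to the device.
+ */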
+void nvme_stop_queues(struct nvme_ctrl *ctrl)
+{
+ struct nvme_ns *ns;
+
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+ spin_lock_irq(ns->queue->queue_lock);
+ queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
+ spin_unlock_irq(ns->queue->queue_lock);
+
+ blk_mq_cancel_requeue_work(ns->queue);
+ blk_mq_stop_hw_queues(ns->queue);
+ }
+ mutex_unlock(&ctrl->namespaces_mutex);
+}
+
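+/*
+ * Undo nvme_stop_queues: clear the stopped flag, restart the hardware
+ * queues and kick the requeue list so previously requeued requests are
+ * resubmitted.
+ */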
+void nvme_start_queues(struct nvme_ctrl *ctrl)
+{
+ struct nvme_ns *ns;
+
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+ queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
+ blk_mq_start_stopped_hw_queues(ns->queue, true);
+ blk_mq_kick_requeue_list(ns->queue);
+ }
+ mutex_unlock(&ctrl->namespaces_mutex);
+}
+
int __init nvme_core_init(void)
{
int result;