//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <scsi/scsi_cmnd.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/numa.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

struct virtio_blk_vq {
        struct virtqueue *vq;
        spinlock_t lock;
        char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
        struct virtio_device *vdev;

        /* The disk structure for the kernel. */
        struct gendisk *disk;

        /* Block layer tags. */
        struct blk_mq_tag_set tag_set;

        /* Process context for config space updates */
        struct work_struct config_work;
        /* What the host tells us, plus 2 for the header and status trailer. */
        unsigned int sg_elems;

        /* Ida index - used to track minor number allocations. */
        int index;

        /* num of vqs */
        int num_vqs;
        struct virtio_blk_vq *vqs;
};

struct virtblk_req {
        struct request *req;
        struct virtio_blk_outhdr out_hdr;
        struct virtio_scsi_inhdr in_hdr;
        u8 status;
        struct scatterlist sg[];
};

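/* Translate the status byte returned by the device into a Linux errno. */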
static inline int virtblk_result(struct virtblk_req *vbr)
{
        switch (vbr->status) {
        case VIRTIO_BLK_S_OK:
                return 0;
        case VIRTIO_BLK_S_UNSUPP:
                return -ENOTTY;
        default:
                return -EIO;
        }
}

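/*
 * Add one request to the virtqueue.  The buffers are laid out as:
 *
 *   out_hdr | [scsi cmd] | [data out] | [data in] | [sense, inhdr] | status
 *
 * where the bracketed segments are only present for SCSI passthrough
 * commands (cmd, sense, inhdr) or when the request carries data.
 */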
static int __virtblk_add_req(struct virtqueue *vq,
                             struct virtblk_req *vbr,
                             struct scatterlist *data_sg,
                             bool have_data)
{
        struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
        unsigned int num_out = 0, num_in = 0;
        __virtio32 type = vbr->out_hdr.type & ~cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT);

        sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
        sgs[num_out++] = &hdr;

        /*
         * If this is a packet command we need a couple of additional headers.
         * Behind the normal outhdr we put a segment with the scsi command
         * block, and before the normal inhdr we put the sense data and the
         * inhdr with additional status information.
         */
        if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
                sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len);
                sgs[num_out++] = &cmd;
        }

        if (have_data) {
                if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
                        sgs[num_out++] = data_sg;
                else
                        sgs[num_out + num_in++] = data_sg;
        }

        if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
                sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
                sgs[num_out + num_in++] = &sense;
                sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
                sgs[num_out + num_in++] = &inhdr;
        }

        sg_init_one(&status, &vbr->status, sizeof(vbr->status));
        sgs[num_out + num_in++] = &status;

        return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

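/* Completion path, called via blk_mq_complete_request() from virtblk_done(). */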
static inline void virtblk_request_done(struct request *req)
{
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        struct virtio_blk *vblk = req->q->queuedata;
        int error = virtblk_result(vbr);

        if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
                req->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
                req->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
                req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
        } else if (req->cmd_type == REQ_TYPE_SPECIAL) {
                req->errors = (error != 0);
        }

        blk_mq_end_request(req, error);
}

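/*
 * Virtqueue callback: reap all completed requests under the per-queue lock
 * and restart any hardware queues that were stopped while the ring was full.
 */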
static void virtblk_done(struct virtqueue *vq)
{
        struct virtio_blk *vblk = vq->vdev->priv;
        bool req_done = false;
        int qid = vq->index;
        struct virtblk_req *vbr;
        unsigned long flags;
        unsigned int len;

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
                        blk_mq_complete_request(vbr->req);
                        req_done = true;
                }
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));

        /* In case queue is stopped waiting for more buffers. */
        if (req_done)
                blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

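/*
 * blk-mq .queue_rq handler: fill in the virtio-blk request header for the
 * command type, map the data pages into the per-request scatterlist, add the
 * buffers to the matching virtqueue and kick the host when appropriate.
 */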
static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                           const struct blk_mq_queue_data *bd)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct request *req = bd->rq;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        unsigned long flags;
        unsigned int num;
        int qid = hctx->queue_num;
        int err;
        bool notify = false;

        BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

        vbr->req = req;
        if (req->cmd_flags & REQ_FLUSH) {
                vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH);
                vbr->out_hdr.sector = 0;
                vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
        } else {
                switch (req->cmd_type) {
                case REQ_TYPE_FS:
                        vbr->out_hdr.type = 0;
                        vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, blk_rq_pos(vbr->req));
                        vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
                        break;
                case REQ_TYPE_BLOCK_PC:
                        vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_SCSI_CMD);
                        vbr->out_hdr.sector = 0;
                        vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
                        break;
                case REQ_TYPE_SPECIAL:
                        vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
                        vbr->out_hdr.sector = 0;
                        vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
                        break;
                default:
                        /* We don't put anything else in the queue. */
                        BUG();
                }
        }

        blk_mq_start_request(req);

        num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
        if (num) {
                if (rq_data_dir(vbr->req) == WRITE)
                        vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
                else
                        vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
        }

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
        if (err) {
                virtqueue_kick(vblk->vqs[qid].vq);
                blk_mq_stop_hw_queue(hctx);
                spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
                /* Out of mem doesn't actually happen, since we fall back
                 * to direct descriptors */
                if (err == -ENOMEM || err == -ENOSPC)
                        return BLK_MQ_RQ_QUEUE_BUSY;
                return BLK_MQ_RQ_QUEUE_ERROR;
        }

        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
                notify = true;
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

        if (notify)
                virtqueue_notify(vblk->vqs[qid].vq);
        return BLK_MQ_RQ_QUEUE_OK;
}

/* Return the device ID (serial number) string for *disk in *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
        struct virtio_blk *vblk = disk->private_data;
        struct request *req;
        struct bio *bio;
        int err;

        bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
                           GFP_KERNEL);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
        if (IS_ERR(req)) {
                bio_put(bio);
                return PTR_ERR(req);
        }

        req->cmd_type = REQ_TYPE_SPECIAL;
        err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
        blk_put_request(req);

        return err;
}

static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
                         unsigned int cmd, unsigned long data)
{
        struct gendisk *disk = bdev->bd_disk;
        struct virtio_blk *vblk = disk->private_data;

        /*
         * Only allow the generic SCSI ioctls if the host can support it.
         */
        if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
                return -ENOTTY;

        return scsi_cmd_blk_ioctl(bdev, mode, cmd,
                                  (void __user *)data);
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
        struct virtio_blk *vblk = bd->bd_disk->private_data;

        /* see if the host passed in geometry config */
        if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.cylinders, &geo->cylinders);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.heads, &geo->heads);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.sectors, &geo->sectors);
        } else {
                /* some standard values, similar to sd */
                geo->heads = 1 << 6;
                geo->sectors = 1 << 5;
                geo->cylinders = get_capacity(bd->bd_disk) >> 11;
        }
        return 0;
}

static const struct block_device_operations virtblk_fops = {
        .ioctl = virtblk_ioctl,
        .owner = THIS_MODULE,
        .getgeo = virtblk_getgeo,
};

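/* With PART_BITS == 4 each disk reserves 16 minor numbers (disk + partitions). */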
static int index_to_minor(int index)
{
        return index << PART_BITS;
}

static int minor_to_index(int minor)
{
        return minor >> PART_BITS;
}

static ssize_t virtblk_serial_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        int err;

        /* sysfs gives us a PAGE_SIZE buffer */
        BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

        buf[VIRTIO_BLK_ID_BYTES] = '\0';
        err = virtblk_get_id(disk, buf);
        if (!err)
                return strlen(buf);

        if (err == -EIO) /* Unsupported? Make it empty. */
                return 0;

        return err;
}

static DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);

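/*
 * Handle a capacity change notification from the host: re-read the capacity
 * from the config space, resize the gendisk and emit a RESIZE uevent.
 */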
static void virtblk_config_changed_work(struct work_struct *work)
{
        struct virtio_blk *vblk =
                container_of(work, struct virtio_blk, config_work);
        struct virtio_device *vdev = vblk->vdev;
        struct request_queue *q = vblk->disk->queue;
        char cap_str_2[10], cap_str_10[10];
        char *envp[] = { "RESIZE=1", NULL };
        u64 capacity, size;

        /* Host must always specify the capacity. */
        virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

        /* If capacity is too big, truncate with warning. */
        if ((sector_t)capacity != capacity) {
                dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
                         (unsigned long long)capacity);
                capacity = (sector_t)-1;
        }

        size = capacity * queue_logical_block_size(q);
        string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
        string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

        dev_notice(&vdev->dev,
                   "new size: %llu %d-byte logical blocks (%s/%s)\n",
                   (unsigned long long)capacity,
                   queue_logical_block_size(q),
                   cap_str_10, cap_str_2);

        set_capacity(vblk->disk, capacity);
        revalidate_disk(vblk->disk);
        kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        queue_work(virtblk_wq, &vblk->config_work);
}

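/*
 * Allocate and discover the virtqueues.  If VIRTIO_BLK_F_MQ is negotiated the
 * host advertises the number of queues in the config space; otherwise a
 * single request queue is used.
 */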
static int init_vq(struct virtio_blk *vblk)
{
        int err = 0;
        int i;
        vq_callback_t **callbacks;
        const char **names;
        struct virtqueue **vqs;
        unsigned short num_vqs;
        struct virtio_device *vdev = vblk->vdev;

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
                                   struct virtio_blk_config, num_queues,
                                   &num_vqs);
        if (err)
                num_vqs = 1;

        /* From here on, any allocation failure must report -ENOMEM rather
         * than leaking a stale value through the error gotos below. */
        err = -ENOMEM;
        vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL);
        if (!vblk->vqs)
                goto out;

        names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL);
        if (!names)
                goto err_names;

        callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL);
        if (!callbacks)
                goto err_callbacks;

        vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL);
        if (!vqs)
                goto err_vqs;

        for (i = 0; i < num_vqs; i++) {
                callbacks[i] = virtblk_done;
                snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
                names[i] = vblk->vqs[i].name;
        }

        /* Discover virtqueues and write information to configuration. */
        err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
        if (err)
                goto err_find_vqs;

        for (i = 0; i < num_vqs; i++) {
                spin_lock_init(&vblk->vqs[i].lock);
                vblk->vqs[i].vq = vqs[i];
        }
        vblk->num_vqs = num_vqs;

err_find_vqs:
        kfree(vqs);
err_vqs:
        kfree(callbacks);
err_callbacks:
        kfree(names);
err_names:
        if (err)
                kfree(vblk->vqs);
out:
        return err;
}

/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
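/* For example: index 0 maps to "vda", 25 to "vdz", 26 to "vdaa" and 701 to "vdzz". */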
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
        const int base = 'z' - 'a' + 1;
        char *begin = buf + strlen(prefix);
        char *end = buf + buflen;
        char *p;
        int unit;

        p = end - 1;
        *p = '\0';
        unit = base;
        do {
                if (p == begin)
                        return -EINVAL;
                *--p = 'a' + (index % unit);
                index = (index / unit) - 1;
        } while (index >= 0);

        memmove(begin, p, end - p);
        memcpy(buf, prefix, strlen(prefix));

        return 0;
}

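/*
 * Writeback cache mode: prefer the value in the config space when
 * VIRTIO_BLK_F_CONFIG_WCE is negotiated, otherwise fall back to the
 * VIRTIO_BLK_F_WCE / VIRTIO_F_VERSION_1 feature bits.
 */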
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
        u8 writeback;
        int err;

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
                                   struct virtio_blk_config, wce,
                                   &writeback);
        if (err)
                writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE) ||
                            virtio_has_feature(vdev, VIRTIO_F_VERSION_1);

        return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
        u8 writeback = virtblk_get_cache_mode(vdev);
        struct virtio_blk *vblk = vdev->priv;

        if (writeback)
                blk_queue_flush(vblk->disk->queue, REQ_FLUSH);
        else
                blk_queue_flush(vblk->disk->queue, 0);

        revalidate_disk(vblk->disk);
}

static const char *const virtblk_cache_types[] = {
        "write through", "write back"
};

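/*
 * sysfs "cache_type" attribute: writing "write through" or "write back"
 * updates the wce field in the config space and the queue flush setting.
 */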
static ssize_t
virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;
        int i;

        BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
        for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
                if (sysfs_streq(buf, virtblk_cache_types[i]))
                        break;

        if (i < 0)
                return -EINVAL;

        virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
        virtblk_update_cache_mode(vdev);
        return count;
}

static ssize_t
virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        u8 writeback = virtblk_get_cache_mode(vblk->vdev);

        BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
        return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

static const struct device_attribute dev_attr_cache_type_ro =
        __ATTR(cache_type, S_IRUGO,
               virtblk_cache_type_show, NULL);
static const struct device_attribute dev_attr_cache_type_rw =
        __ATTR(cache_type, S_IRUGO|S_IWUSR,
               virtblk_cache_type_show, virtblk_cache_type_store);

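/* Called once per request when the tag set is allocated: set up the scatterlist. */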
static int virtblk_init_request(void *data, struct request *rq,
                                unsigned int hctx_idx, unsigned int request_idx,
                                unsigned int numa_node)
{
        struct virtio_blk *vblk = data;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

        sg_init_table(vbr->sg, vblk->sg_elems);
        return 0;
}

static struct blk_mq_ops virtio_mq_ops = {
        .queue_rq = virtio_queue_rq,
        .map_queue = blk_mq_map_queue,
        .complete = virtblk_request_done,
        .init_request = virtblk_init_request,
};

static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);

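/*
 * Device probe: read the device configuration, set up the virtqueues and the
 * blk-mq tag set, create the gendisk, apply the size/topology limits the host
 * advertises and finally register the disk and its sysfs attributes.
 */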
static int virtblk_probe(struct virtio_device *vdev)
{
        struct virtio_blk *vblk;
        struct request_queue *q;
        int err, index;

        u64 cap;
        u32 v, blk_size, sg_elems, opt_io_size;
        u16 min_io_size;
        u8 physical_block_exp, alignment_offset;

        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
        }

        err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
                             GFP_KERNEL);
        if (err < 0)
                goto out;
        index = err;

        /* We need to know how many segments before we allocate. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
                                   struct virtio_blk_config, seg_max,
                                   &sg_elems);

        /* We need at least one SG element, whatever they say. */
        if (err || !sg_elems)
                sg_elems = 1;

        /* We need extra sg elements at head and tail. */
        sg_elems += 2;
        vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
        if (!vblk) {
                err = -ENOMEM;
                goto out_free_index;
        }

        vblk->vdev = vdev;
        vblk->sg_elems = sg_elems;

        INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

        err = init_vq(vblk);
        if (err)
                goto out_free_vblk;

        /* FIXME: How many partitions?  How long is a piece of string? */
        vblk->disk = alloc_disk(1 << PART_BITS);
        if (!vblk->disk) {
                err = -ENOMEM;
                goto out_free_vq;
        }

        /* Default queue sizing is to fill the ring. */
        if (!virtblk_queue_depth) {
                virtblk_queue_depth = vblk->vqs[0].vq->num_free;
                /* ... but without indirect descs, we use 2 descs per req */
                if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
                        virtblk_queue_depth /= 2;
        }

        memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
        vblk->tag_set.ops = &virtio_mq_ops;
        vblk->tag_set.queue_depth = virtblk_queue_depth;
        vblk->tag_set.numa_node = NUMA_NO_NODE;
        vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        vblk->tag_set.cmd_size =
                sizeof(struct virtblk_req) +
                sizeof(struct scatterlist) * sg_elems;
        vblk->tag_set.driver_data = vblk;
        vblk->tag_set.nr_hw_queues = vblk->num_vqs;

        err = blk_mq_alloc_tag_set(&vblk->tag_set);
        if (err)
                goto out_put_disk;

        q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
        if (IS_ERR(q)) {
                err = -ENOMEM;
                goto out_free_tags;
        }

        q->queuedata = vblk;

        virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

        vblk->disk->major = major;
        vblk->disk->first_minor = index_to_minor(index);
        vblk->disk->private_data = vblk;
        vblk->disk->fops = &virtblk_fops;
        vblk->disk->driverfs_dev = &vdev->dev;
        vblk->index = index;

        /* configure queue flush support */
        virtblk_update_cache_mode(vdev);

        /* If disk is read-only in the host, the guest should obey */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
                set_disk_ro(vblk->disk, 1);

        /* Host must always specify the capacity. */
        virtio_cread(vdev, struct virtio_blk_config, capacity, &cap);

        /* If capacity is too big, truncate with warning. */
        if ((sector_t)cap != cap) {
                dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
                         (unsigned long long)cap);
                cap = (sector_t)-1;
        }
        set_capacity(vblk->disk, cap);

        /* We can handle whatever the host told us to handle. */
        blk_queue_max_segments(q, vblk->sg_elems - 2);

        /* No need to bounce any requests */
        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

        /* No real sector limit. */
        blk_queue_max_hw_sectors(q, -1U);

        /* Host can optionally specify maximum segment size and number of
         * segments. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
                                   struct virtio_blk_config, size_max, &v);
        if (!err)
                blk_queue_max_segment_size(q, v);
        else
                blk_queue_max_segment_size(q, -1U);

        /* Host can optionally specify the block size of the device */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
                                   struct virtio_blk_config, blk_size,
                                   &blk_size);
        if (!err)
                blk_queue_logical_block_size(q, blk_size);
        else
                blk_size = queue_logical_block_size(q);

        /* Use topology information if available */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, physical_block_exp,
                                   &physical_block_exp);
        if (!err && physical_block_exp)
                blk_queue_physical_block_size(q,
                                blk_size * (1 << physical_block_exp));

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, alignment_offset,
                                   &alignment_offset);
        if (!err && alignment_offset)
                blk_queue_alignment_offset(q, blk_size * alignment_offset);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, min_io_size,
                                   &min_io_size);
        if (!err && min_io_size)
                blk_queue_io_min(q, blk_size * min_io_size);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, opt_io_size,
                                   &opt_io_size);
        if (!err && opt_io_size)
                blk_queue_io_opt(q, blk_size * opt_io_size);

        virtio_device_ready(vdev);

        add_disk(vblk->disk);
        err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
        if (err)
                goto out_del_disk;

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
                err = device_create_file(disk_to_dev(vblk->disk),
                                         &dev_attr_cache_type_rw);
        else
                err = device_create_file(disk_to_dev(vblk->disk),
                                         &dev_attr_cache_type_ro);
        if (err)
                goto out_del_disk;
        return 0;

out_del_disk:
        del_gendisk(vblk->disk);
        blk_cleanup_queue(vblk->disk->queue);
out_free_tags:
        blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
        put_disk(vblk->disk);
out_free_vq:
        vdev->config->del_vqs(vdev);
out_free_vblk:
        kfree(vblk);
out_free_index:
        ida_simple_remove(&vd_index_ida, index);
out:
        return err;
}

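/*
 * Device removal: tear down sysfs/gendisk state, reset the device to stop the
 * virtqueues, and only release the IDA index once the last reference to the
 * disk is gone.
 */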
static void virtblk_remove(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int index = vblk->index;
        int refc;

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        del_gendisk(vblk->disk);
        blk_cleanup_queue(vblk->disk->queue);

        blk_mq_free_tag_set(&vblk->tag_set);

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
        put_disk(vblk->disk);
        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);
        kfree(vblk);

        /* Only free device id if we don't have any users */
        if (refc == 1)
                ida_simple_remove(&vd_index_ida, index);
}

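/*
 * Suspend/resume support: freeze resets the device and deletes the
 * virtqueues; restore recreates them and restarts the stopped queues.
 */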
#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Ensure we don't receive any more interrupts */
        vdev->config->reset(vdev);

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        blk_mq_stop_hw_queues(vblk->disk->queue);

        vdev->config->del_vqs(vdev);
        return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int ret;

        ret = init_vq(vdev->priv);
        if (ret)
                return ret;

        virtio_device_ready(vdev);

        blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
        return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features_legacy[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
        VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ,
};

static unsigned int features[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
        VIRTIO_BLK_F_TOPOLOGY,
        VIRTIO_BLK_F_MQ,
};

static struct virtio_driver virtio_blk = {
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .feature_table_legacy = features_legacy,
        .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
        .driver.name = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table = id_table,
        .probe = virtblk_probe,
        .remove = virtblk_remove,
        .config_changed = virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze = virtblk_freeze,
        .restore = virtblk_restore,
#endif
};

static int __init init(void)
{
        int error;

        virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
        if (!virtblk_wq)
                return -ENOMEM;

        major = register_blkdev(0, "virtblk");
        if (major < 0) {
                error = major;
                goto out_destroy_workqueue;
        }

        error = register_virtio_driver(&virtio_blk);
        if (error)
                goto out_unregister_blkdev;
        return 0;

out_unregister_blkdev:
        unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
        destroy_workqueue(virtblk_wq);
        return error;
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_blk);
        unregister_blkdev(major, "virtblk");
        destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");