//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <scsi/scsi_cmnd.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

struct virtio_blk_vq {
        struct virtqueue *vq;
        spinlock_t lock;
        char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
        struct virtio_device *vdev;

        /* The disk structure for the kernel. */
        struct gendisk *disk;

        /* Block layer tags. */
        struct blk_mq_tag_set tag_set;

        /* Process context for config space updates */
        struct work_struct config_work;
        /* What host tells us, plus 2 for header & tail. */
        unsigned int sg_elems;

        /* Ida index - used to track minor number allocations. */
        int index;

        /* num of vqs */
        int num_vqs;
        struct virtio_blk_vq *vqs;
};

struct virtblk_req {
#ifdef CONFIG_VIRTIO_BLK_SCSI
        struct scsi_request sreq;       /* for SCSI passthrough, must be first */
        u8 sense[SCSI_SENSE_BUFFERSIZE];
        struct virtio_scsi_inhdr in_hdr;
#endif
        struct virtio_blk_outhdr out_hdr;
        u8 status;
        struct scatterlist sg[];
};

static inline int virtblk_result(struct virtblk_req *vbr)
{
        switch (vbr->status) {
        case VIRTIO_BLK_S_OK:
                return 0;
        case VIRTIO_BLK_S_UNSUPP:
                return -ENOTTY;
        default:
                return -EIO;
        }
}

/*
 * If this is a packet command we need a couple of additional headers.  Behind
 * the normal outhdr we put a segment with the scsi command block, and before
 * the normal inhdr we put the sense data and the inhdr with additional status
 * information.
 */
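/*
 * For illustration, the descriptor chain built below for a SCSI command that
 * reads data looks roughly like this (device-readable segments first, then
 * device-writable ones):
 *
 *   device-readable:  [virtio_blk_outhdr][scsi cmd]
 *   device-writable:  [data..][sense][virtio_scsi_inhdr][status byte]
 *
 * For a write (VIRTIO_BLK_T_OUT), the data segment moves to the
 * device-readable half instead.
 */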
#ifdef CONFIG_VIRTIO_BLK_SCSI
static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr,
                struct scatterlist *data_sg, bool have_data)
{
        struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
        unsigned int num_out = 0, num_in = 0;

        sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
        sgs[num_out++] = &hdr;
        sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len);
        sgs[num_out++] = &cmd;

        if (have_data) {
                if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
                        sgs[num_out++] = data_sg;
                else
                        sgs[num_out + num_in++] = data_sg;
        }

        sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
        sgs[num_out + num_in++] = &sense;
        sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
        sgs[num_out + num_in++] = &inhdr;
        sg_init_one(&status, &vbr->status, sizeof(vbr->status));
        sgs[num_out + num_in++] = &status;

        return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

static inline void virtblk_scsi_request_done(struct request *req)
{
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        struct virtio_blk *vblk = req->q->queuedata;
        struct scsi_request *sreq = &vbr->sreq;

        sreq->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
        sreq->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
        sreq->result = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
}

static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
                         unsigned int cmd, unsigned long data)
{
        struct gendisk *disk = bdev->bd_disk;
        struct virtio_blk *vblk = disk->private_data;

        /*
         * Only allow the generic SCSI ioctls if the host can support it.
         */
        if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
                return -ENOTTY;

        return scsi_cmd_blk_ioctl(bdev, mode, cmd,
                                  (void __user *)data);
}
#else
static inline int virtblk_add_req_scsi(struct virtqueue *vq,
                struct virtblk_req *vbr, struct scatterlist *data_sg,
                bool have_data)
{
        return -EIO;
}
static inline void virtblk_scsi_request_done(struct request *req)
{
}
#define virtblk_ioctl   NULL
#endif /* CONFIG_VIRTIO_BLK_SCSI */

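/*
 * The non-SCSI fast path below builds a much shorter chain; roughly:
 *
 *   device-readable:  [virtio_blk_outhdr]([data..] if a write)
 *   device-writable:  ([data..] if a read)[status byte]
 */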
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
                struct scatterlist *data_sg, bool have_data)
{
        struct scatterlist hdr, status, *sgs[3];
        unsigned int num_out = 0, num_in = 0;

        sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
        sgs[num_out++] = &hdr;

        if (have_data) {
                if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
                        sgs[num_out++] = data_sg;
                else
                        sgs[num_out + num_in++] = data_sg;
        }

        sg_init_one(&status, &vbr->status, sizeof(vbr->status));
        sgs[num_out + num_in++] = &status;

        return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

static inline void virtblk_request_done(struct request *req)
{
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

        switch (req_op(req)) {
        case REQ_OP_SCSI_IN:
        case REQ_OP_SCSI_OUT:
                virtblk_scsi_request_done(req);
                break;
        }

        blk_mq_end_request(req, virtblk_result(vbr));
}

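/*
 * Virtqueue interrupt callback.  The disable_cb/get_buf/enable_cb loop is
 * the usual virtio completion pattern: drain the ring with callbacks
 * disabled, then re-enable and loop again if the device slipped in more
 * buffers in the meantime, so no completion is ever missed.
 */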
static void virtblk_done(struct virtqueue *vq)
{
        struct virtio_blk *vblk = vq->vdev->priv;
        bool req_done = false;
        int qid = vq->index;
        struct virtblk_req *vbr;
        unsigned long flags;
        unsigned int len;

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
                        struct request *req = blk_mq_rq_from_pdu(vbr);

                        blk_mq_complete_request(req);
                        req_done = true;
                }
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));

        /* In case queue is stopped waiting for more buffers. */
        if (req_done)
                blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

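/*
 * ->queue_rq: translate one block layer request into a virtio-blk request
 * and add it to this hw queue's virtqueue.  A full ring is reported as
 * BLK_MQ_RQ_QUEUE_BUSY so blk-mq stops the queue and retries later; the
 * device is notified ("kicked") only on the last request of a batch.
 */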
static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                           const struct blk_mq_queue_data *bd)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct request *req = bd->rq;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        unsigned long flags;
        unsigned int num;
        int qid = hctx->queue_num;
        int err;
        bool notify = false;
        u32 type;

        BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

        switch (req_op(req)) {
        case REQ_OP_READ:
        case REQ_OP_WRITE:
                type = 0;
                break;
        case REQ_OP_FLUSH:
                type = VIRTIO_BLK_T_FLUSH;
                break;
        case REQ_OP_SCSI_IN:
        case REQ_OP_SCSI_OUT:
                type = VIRTIO_BLK_T_SCSI_CMD;
                break;
        case REQ_OP_DRV_IN:
                type = VIRTIO_BLK_T_GET_ID;
                break;
        default:
                WARN_ON_ONCE(1);
                return BLK_MQ_RQ_QUEUE_ERROR;
        }

        vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
        vbr->out_hdr.sector = type ?
                0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
        vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

        blk_mq_start_request(req);

        num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
        if (num) {
                if (rq_data_dir(req) == WRITE)
                        vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
                else
                        vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
        }

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        if (req_op(req) == REQ_OP_SCSI_IN || req_op(req) == REQ_OP_SCSI_OUT)
                err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
        else
                err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
        if (err) {
                virtqueue_kick(vblk->vqs[qid].vq);
                blk_mq_stop_hw_queue(hctx);
                spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
                /* Out of mem doesn't actually happen, since we fall back
                 * to direct descriptors */
                if (err == -ENOMEM || err == -ENOSPC)
                        return BLK_MQ_RQ_QUEUE_BUSY;
                return BLK_MQ_RQ_QUEUE_ERROR;
        }

        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
                notify = true;
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

        if (notify)
                virtqueue_notify(vblk->vqs[qid].vq);
        return BLK_MQ_RQ_QUEUE_OK;
}

/* Return the ID (serial number) string for *disk in *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
        struct virtio_blk *vblk = disk->private_data;
        struct request_queue *q = vblk->disk->queue;
        struct request *req;
        int err;

        req = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
        if (IS_ERR(req))
                return PTR_ERR(req);

        err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
        if (err)
                goto out;

        blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
        err = virtblk_result(blk_mq_rq_to_pdu(req));
out:
        blk_put_request(req);
        return err;
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
        struct virtio_blk *vblk = bd->bd_disk->private_data;

        /* see if the host passed in geometry config */
        if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.cylinders, &geo->cylinders);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.heads, &geo->heads);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.sectors, &geo->sectors);
        } else {
                /* some standard values, similar to sd */
                geo->heads = 1 << 6;
                geo->sectors = 1 << 5;
                geo->cylinders = get_capacity(bd->bd_disk) >> 11;
        }
        return 0;
}

static const struct block_device_operations virtblk_fops = {
        .ioctl  = virtblk_ioctl,
        .owner  = THIS_MODULE,
        .getgeo = virtblk_getgeo,
};

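/*
 * With PART_BITS = 4 each disk owns 16 minor numbers (the disk itself plus
 * up to 15 partitions), so for example index 2 maps to minor 32.
 */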
static int index_to_minor(int index)
{
        return index << PART_BITS;
}

static int minor_to_index(int minor)
{
        return minor >> PART_BITS;
}

static ssize_t virtblk_serial_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        int err;

        /* sysfs gives us a PAGE_SIZE buffer */
        BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

        buf[VIRTIO_BLK_ID_BYTES] = '\0';
        err = virtblk_get_id(disk, buf);
        if (!err)
                return strlen(buf);

        if (err == -EIO) /* Unsupported? Make it empty. */
                return 0;

        return err;
}

static DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);

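/*
 * Config-change worker: runs in process context, re-reads the capacity
 * from config space, resizes the disk and emits a RESIZE=1 uevent so
 * userspace (e.g. udev rules) can react.
 */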
static void virtblk_config_changed_work(struct work_struct *work)
{
        struct virtio_blk *vblk =
                container_of(work, struct virtio_blk, config_work);
        struct virtio_device *vdev = vblk->vdev;
        struct request_queue *q = vblk->disk->queue;
        char cap_str_2[10], cap_str_10[10];
        char *envp[] = { "RESIZE=1", NULL };
        u64 capacity;

        /* Host must always specify the capacity. */
        virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

        /* If capacity is too big, truncate with warning. */
        if ((sector_t)capacity != capacity) {
                dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
                         (unsigned long long)capacity);
                capacity = (sector_t)-1;
        }

        string_get_size(capacity, queue_logical_block_size(q),
                        STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
        string_get_size(capacity, queue_logical_block_size(q),
                        STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

        dev_notice(&vdev->dev,
                   "new size: %llu %d-byte logical blocks (%s/%s)\n",
                   (unsigned long long)capacity,
                   queue_logical_block_size(q),
                   cap_str_10, cap_str_2);

        set_capacity(vblk->disk, capacity);
        revalidate_disk(vblk->disk);
        kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        queue_work(virtblk_wq, &vblk->config_work);
}

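/*
 * Discover and allocate the request virtqueues.  The queue count comes
 * from config space when VIRTIO_BLK_F_MQ was negotiated; otherwise we
 * fall back to a single queue.
 */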
static int init_vq(struct virtio_blk *vblk)
{
        int err;
        int i;
        vq_callback_t **callbacks;
        const char **names;
        struct virtqueue **vqs;
        unsigned short num_vqs;
        struct virtio_device *vdev = vblk->vdev;
        struct irq_affinity desc = { 0, };

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
                                   struct virtio_blk_config, num_queues,
                                   &num_vqs);
        if (err)
                num_vqs = 1;

        vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
        if (!vblk->vqs)
                return -ENOMEM;

        names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
        callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
        vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
        if (!names || !callbacks || !vqs) {
                err = -ENOMEM;
                goto out;
        }

        for (i = 0; i < num_vqs; i++) {
                callbacks[i] = virtblk_done;
                snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
                names[i] = vblk->vqs[i].name;
        }

        /* Discover virtqueues and write information to configuration. */
        err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
        if (err)
                goto out;

        for (i = 0; i < num_vqs; i++) {
                spin_lock_init(&vblk->vqs[i].lock);
                vblk->vqs[i].vq = vqs[i];
        }
        vblk->num_vqs = num_vqs;

out:
        kfree(vqs);
        kfree(callbacks);
        kfree(names);
        if (err)
                kfree(vblk->vqs);
        return err;
}

/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
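/*
 * For illustration, the scheme below yields: index 0 -> "vda",
 * 25 -> "vdz", 26 -> "vdaa", 701 -> "vdzz", 702 -> "vdaaa".
 */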
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
        const int base = 'z' - 'a' + 1;
        char *begin = buf + strlen(prefix);
        char *end = buf + buflen;
        char *p;
        int unit;

        p = end - 1;
        *p = '\0';
        unit = base;
        do {
                if (p == begin)
                        return -EINVAL;
                *--p = 'a' + (index % unit);
                index = (index / unit) - 1;
        } while (index >= 0);

        memmove(begin, p, end - p);
        memcpy(buf, prefix, strlen(prefix));

        return 0;
}

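/*
 * Cache mode handling: prefer the writeback bit from config space when
 * VIRTIO_BLK_F_CONFIG_WCE was negotiated; otherwise infer write-back
 * from the presence of VIRTIO_BLK_F_FLUSH.
 */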
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
        u8 writeback;
        int err;

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
                                   struct virtio_blk_config, wce,
                                   &writeback);

        /*
         * If WCE is not configurable and flush is not available,
         * assume no writeback cache is in use.
         */
        if (err)
                writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

        return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
        u8 writeback = virtblk_get_cache_mode(vdev);
        struct virtio_blk *vblk = vdev->priv;

        blk_queue_write_cache(vblk->disk->queue, writeback, false);
        revalidate_disk(vblk->disk);
}

static const char *const virtblk_cache_types[] = {
        "write through", "write back"
};

static ssize_t
virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;
        int i;

        BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
        for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
                if (sysfs_streq(buf, virtblk_cache_types[i]))
                        break;

        if (i < 0)
                return -EINVAL;

        virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
        virtblk_update_cache_mode(vdev);
        return count;
}

static ssize_t
virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        u8 writeback = virtblk_get_cache_mode(vblk->vdev);

        BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
        return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

static const struct device_attribute dev_attr_cache_type_ro =
        __ATTR(cache_type, S_IRUGO,
               virtblk_cache_type_show, NULL);
static const struct device_attribute dev_attr_cache_type_rw =
        __ATTR(cache_type, S_IRUGO|S_IWUSR,
               virtblk_cache_type_show, virtblk_cache_type_store);

static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
                unsigned int hctx_idx, unsigned int numa_node)
{
        struct virtio_blk *vblk = set->driver_data;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

#ifdef CONFIG_VIRTIO_BLK_SCSI
        vbr->sreq.sense = vbr->sense;
#endif
        sg_init_table(vbr->sg, vblk->sg_elems);
        return 0;
}

static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
        struct virtio_blk *vblk = set->driver_data;

        return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
}

static const struct blk_mq_ops virtio_mq_ops = {
        .queue_rq       = virtio_queue_rq,
        .complete       = virtblk_request_done,
        .init_request   = virtblk_init_request,
        .map_queues     = virtblk_map_queues,
};

static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);

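/*
 * Device probe: negotiate features, set up the virtqueues and the blk-mq
 * tag set, create the gendisk, apply block size and topology limits read
 * from config space, and finally publish the disk and its sysfs attributes.
 */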
static int virtblk_probe(struct virtio_device *vdev)
{
        struct virtio_blk *vblk;
        struct request_queue *q;
        int err, index;

        u64 cap;
        u32 v, blk_size, sg_elems, opt_io_size;
        u16 min_io_size;
        u8 physical_block_exp, alignment_offset;

        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
        }

        err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
                             GFP_KERNEL);
        if (err < 0)
                goto out;
        index = err;

        /* We need to know how many segments before we allocate. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
                                   struct virtio_blk_config, seg_max,
                                   &sg_elems);

        /* We need at least one SG element, whatever they say. */
        if (err || !sg_elems)
                sg_elems = 1;

        /* We need extra sg elements at head and tail. */
        sg_elems += 2;
        vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
        if (!vblk) {
                err = -ENOMEM;
                goto out_free_index;
        }

        vblk->vdev = vdev;
        vblk->sg_elems = sg_elems;

        INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

        err = init_vq(vblk);
        if (err)
                goto out_free_vblk;

        /* FIXME: How many partitions?  How long is a piece of string? */
        vblk->disk = alloc_disk(1 << PART_BITS);
        if (!vblk->disk) {
                err = -ENOMEM;
                goto out_free_vq;
        }

        /* Default queue sizing is to fill the ring. */
        if (!virtblk_queue_depth) {
                virtblk_queue_depth = vblk->vqs[0].vq->num_free;
                /* ... but without indirect descs, we use 2 descs per req */
                if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
                        virtblk_queue_depth /= 2;
        }

        memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
        vblk->tag_set.ops = &virtio_mq_ops;
        vblk->tag_set.queue_depth = virtblk_queue_depth;
        vblk->tag_set.numa_node = NUMA_NO_NODE;
        vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        vblk->tag_set.cmd_size =
                sizeof(struct virtblk_req) +
                sizeof(struct scatterlist) * sg_elems;
        vblk->tag_set.driver_data = vblk;
        vblk->tag_set.nr_hw_queues = vblk->num_vqs;

        err = blk_mq_alloc_tag_set(&vblk->tag_set);
        if (err)
                goto out_put_disk;

        q = blk_mq_init_queue(&vblk->tag_set);
        if (IS_ERR(q)) {
                err = -ENOMEM;
                goto out_free_tags;
        }
        vblk->disk->queue = q;

        q->queuedata = vblk;

        virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

        vblk->disk->major = major;
        vblk->disk->first_minor = index_to_minor(index);
        vblk->disk->private_data = vblk;
        vblk->disk->fops = &virtblk_fops;
        vblk->disk->flags |= GENHD_FL_EXT_DEVT;
        vblk->index = index;

        /* configure queue flush support */
        virtblk_update_cache_mode(vdev);

        /* If disk is read-only in the host, the guest should obey */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
                set_disk_ro(vblk->disk, 1);

        /* Host must always specify the capacity. */
        virtio_cread(vdev, struct virtio_blk_config, capacity, &cap);

        /* If capacity is too big, truncate with warning. */
        if ((sector_t)cap != cap) {
                dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
                         (unsigned long long)cap);
                cap = (sector_t)-1;
        }
        set_capacity(vblk->disk, cap);

        /* We can handle whatever the host told us to handle. */
        blk_queue_max_segments(q, vblk->sg_elems - 2);

        /* No need to bounce any requests */
        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

        /* No real sector limit. */
        blk_queue_max_hw_sectors(q, -1U);

        /* Host can optionally specify maximum segment size and number of
         * segments. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
                                   struct virtio_blk_config, size_max, &v);
        if (!err)
                blk_queue_max_segment_size(q, v);
        else
                blk_queue_max_segment_size(q, -1U);

        /* Host can optionally specify the block size of the device */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
                                   struct virtio_blk_config, blk_size,
                                   &blk_size);
        if (!err)
                blk_queue_logical_block_size(q, blk_size);
        else
                blk_size = queue_logical_block_size(q);

        /* Use topology information if available */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, physical_block_exp,
                                   &physical_block_exp);
        if (!err && physical_block_exp)
                blk_queue_physical_block_size(q,
                                blk_size * (1 << physical_block_exp));

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, alignment_offset,
                                   &alignment_offset);
        if (!err && alignment_offset)
                blk_queue_alignment_offset(q, blk_size * alignment_offset);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, min_io_size,
                                   &min_io_size);
        if (!err && min_io_size)
                blk_queue_io_min(q, blk_size * min_io_size);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, opt_io_size,
                                   &opt_io_size);
        if (!err && opt_io_size)
                blk_queue_io_opt(q, blk_size * opt_io_size);

        virtio_device_ready(vdev);

        device_add_disk(&vdev->dev, vblk->disk);
        err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
        if (err)
                goto out_del_disk;

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
                err = device_create_file(disk_to_dev(vblk->disk),
                                         &dev_attr_cache_type_rw);
        else
                err = device_create_file(disk_to_dev(vblk->disk),
                                         &dev_attr_cache_type_ro);
        if (err)
                goto out_del_disk;
        return 0;

out_del_disk:
        del_gendisk(vblk->disk);
        blk_cleanup_queue(vblk->disk->queue);
out_free_tags:
        blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
        put_disk(vblk->disk);
out_free_vq:
        vdev->config->del_vqs(vdev);
out_free_vblk:
        kfree(vblk);
out_free_index:
        ida_simple_remove(&vd_index_ida, index);
out:
        return err;
}

static void virtblk_remove(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int index = vblk->index;
        int refc;

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        del_gendisk(vblk->disk);
        blk_cleanup_queue(vblk->disk->queue);

        blk_mq_free_tag_set(&vblk->tag_set);

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
        put_disk(vblk->disk);
        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);
        kfree(vblk);

        /* Only free device id if we don't have any users */
        if (refc == 1)
                ida_simple_remove(&vd_index_ida, index);
}

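/*
 * Suspend/resume: freeze resets the device (no interrupts after that) and
 * tears down the virtqueues; restore rebuilds them and restarts the
 * stopped blk-mq hardware queues.
 */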
#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Ensure we don't receive any more interrupts */
        vdev->config->reset(vdev);

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        blk_mq_stop_hw_queues(vblk->disk->queue);

        vdev->config->del_vqs(vdev);
        return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int ret;

        ret = init_vq(vdev->priv);
        if (ret)
                return ret;

        virtio_device_ready(vdev);

        blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
        return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features_legacy[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
#ifdef CONFIG_VIRTIO_BLK_SCSI
        VIRTIO_BLK_F_SCSI,
#endif
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ,
};
static unsigned int features[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ,
};

static struct virtio_driver virtio_blk = {
        .feature_table                  = features,
        .feature_table_size             = ARRAY_SIZE(features),
        .feature_table_legacy           = features_legacy,
        .feature_table_size_legacy      = ARRAY_SIZE(features_legacy),
        .driver.name                    = KBUILD_MODNAME,
        .driver.owner                   = THIS_MODULE,
        .id_table                       = id_table,
        .probe                          = virtblk_probe,
        .remove                         = virtblk_remove,
        .config_changed                 = virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze                         = virtblk_freeze,
        .restore                        = virtblk_restore,
#endif
};

static int __init init(void)
{
        int error;

        virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
        if (!virtblk_wq)
                return -ENOMEM;

        major = register_blkdev(0, "virtblk");
        if (major < 0) {
                error = major;
                goto out_destroy_workqueue;
        }

        error = register_virtio_driver(&virtio_blk);
        if (error)
                goto out_unregister_blkdev;
        return 0;

out_unregister_blkdev:
        unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
        destroy_workqueue(virtblk_wq);
        return error;
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_blk);
        unregister_blkdev(major, "virtblk");
        destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");