]>
Commit | Line | Data |
---|---|---|
e467cde2 RR |
1 | //#define DEBUG |
2 | #include <linux/spinlock.h> | |
5a0e3ad6 | 3 | #include <linux/slab.h> |
e467cde2 RR |
4 | #include <linux/blkdev.h> |
5 | #include <linux/hdreg.h> | |
0c8d44f2 | 6 | #include <linux/module.h> |
4678d6f9 | 7 | #include <linux/mutex.h> |
e467cde2 RR |
8 | #include <linux/virtio.h> |
9 | #include <linux/virtio_blk.h> | |
3d1266c7 | 10 | #include <linux/scatterlist.h> |
7a7c924c | 11 | #include <linux/string_helpers.h> |
6917f83f | 12 | #include <scsi/scsi_cmnd.h> |
5087a50e | 13 | #include <linux/idr.h> |
3d1266c7 | 14 | |
4f3bf19c | 15 | #define PART_BITS 4 |
e467cde2 | 16 | |
a98755c5 AH |
17 | static bool use_bio; |
18 | module_param(use_bio, bool, S_IRUGO); | |
19 | ||
5087a50e MT |
20 | static int major; |
21 | static DEFINE_IDA(vd_index_ida); | |
22 | ||
7a7c924c | 23 | struct workqueue_struct *virtblk_wq; |
4f3bf19c | 24 | |
e467cde2 RR |
/*
 * Per-device state for one virtio-blk device.  Allocated in virtblk_probe()
 * with a trailing scatterlist of sg_elems entries.
 */
struct virtio_blk
{
	struct virtio_device *vdev;
	struct virtqueue *vq;
	/* Bio-path submitters sleep here until a virtqueue slot frees up. */
	wait_queue_head_t queue_wait;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Pool the virtblk_req descriptors are allocated from. */
	mempool_t *pool;

	/* Process context for config space updates */
	struct work_struct config_work;

	/* Lock for config space updates */
	struct mutex config_lock;

	/* enable config space updates */
	bool config_enable;

	/* What host tells us, plus 2 for header & tailer. */
	unsigned int sg_elems;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* Scatterlist: can be too big for stack. */
	struct scatterlist sg[/*sg_elems*/];
};
/*
 * One in-flight request.  Exactly one of req (request-queue path) or
 * bio (use_bio path) is non-NULL; virtblk_done() dispatches on vbr->bio.
 */
struct virtblk_req
{
	struct request *req;		/* request-queue path only */
	struct bio *bio;		/* bio path only */
	struct virtio_blk_outhdr out_hdr;
	struct virtio_scsi_inhdr in_hdr;	/* extra status for SCSI passthrough */
	struct work_struct work;	/* deferred flush/data resubmission */
	struct virtio_blk *vblk;
	int flags;			/* VBLK_* bits, bio path only */
	u8 status;			/* status byte the host writes back */
	struct scatterlist sg[];	/* bio path: per-request scatterlist */
};
67 | ||
/* Bits for virtblk_req.flags (bio path). */
enum {
	VBLK_IS_FLUSH = 1,	/* this request is itself a flush */
	VBLK_REQ_FLUSH = 2,	/* bio asked for a preflush (REQ_FLUSH) */
	VBLK_REQ_DATA = 4,	/* bio carries payload data */
	VBLK_REQ_FUA = 8,	/* bio asked for a post-write flush (REQ_FUA) */
};
74 | ||
a98755c5 AH |
75 | static inline int virtblk_result(struct virtblk_req *vbr) |
76 | { | |
77 | switch (vbr->status) { | |
78 | case VIRTIO_BLK_S_OK: | |
79 | return 0; | |
80 | case VIRTIO_BLK_S_UNSUPP: | |
81 | return -ENOTTY; | |
82 | default: | |
83 | return -EIO; | |
84 | } | |
85 | } | |
86 | ||
/*
 * Allocate a request descriptor from the device's mempool.
 * Returns NULL when the allocation fails (callers retry later).
 */
static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk,
						    gfp_t gfp_mask)
{
	struct virtblk_req *vbr;

	vbr = mempool_alloc(vblk->pool, gfp_mask);
	if (!vbr)
		return NULL;

	vbr->vblk = vblk;
	/* Bio path keeps a per-request scatterlist in the descriptor. */
	if (use_bio)
		sg_init_table(vbr->sg, vblk->sg_elems);

	return vbr;
}
102 | ||
/*
 * Build the scatterlist chain for one request and post it on the virtqueue.
 * Layout (device-visible order): out_hdr [scsi cmd] [data out] | [data in]
 * [sense] [scsi inhdr] status.  Returns virtqueue_add_sgs()'s result
 * (negative when the ring is full).
 */
static int __virtblk_add_req(struct virtqueue *vq,
			     struct virtblk_req *vbr,
			     struct scatterlist *data_sg,
			     bool have_data)
{
	struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
	unsigned int num_out = 0, num_in = 0;
	/* Strip the direction bit to recover the request type. */
	int type = vbr->out_hdr.type & ~VIRTIO_BLK_T_OUT;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;

	/*
	 * If this is a packet command we need a couple of additional headers.
	 * Behind the normal outhdr we put a segment with the scsi command
	 * block, and before the normal inhdr we put the sense data and the
	 * inhdr with additional status information.
	 */
	if (type == VIRTIO_BLK_T_SCSI_CMD) {
		sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len);
		sgs[num_out++] = &cmd;
	}

	if (have_data) {
		/* Writes are device-readable, reads are device-writable. */
		if (vbr->out_hdr.type & VIRTIO_BLK_T_OUT)
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	if (type == VIRTIO_BLK_T_SCSI_CMD) {
		sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
		sgs[num_out + num_in++] = &sense;
		sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
		sgs[num_out + num_in++] = &inhdr;
	}

	/* Status byte always comes last so the host can report completion. */
	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
145 | ||
0a11cc36 | 146 | static void virtblk_add_req(struct virtblk_req *vbr, bool have_data) |
5ee21a52 PB |
147 | { |
148 | struct virtio_blk *vblk = vbr->vblk; | |
c85a1f91 | 149 | DEFINE_WAIT(wait); |
5ee21a52 | 150 | int ret; |
c85a1f91 | 151 | |
5ee21a52 | 152 | spin_lock_irq(vblk->disk->queue->queue_lock); |
20af3cfd | 153 | while (unlikely((ret = __virtblk_add_req(vblk->vq, vbr, vbr->sg, |
0a11cc36 | 154 | have_data)) < 0)) { |
c85a1f91 AH |
155 | prepare_to_wait_exclusive(&vblk->queue_wait, &wait, |
156 | TASK_UNINTERRUPTIBLE); | |
157 | ||
5ee21a52 PB |
158 | spin_unlock_irq(vblk->disk->queue->queue_lock); |
159 | io_schedule(); | |
c85a1f91 | 160 | spin_lock_irq(vblk->disk->queue->queue_lock); |
c85a1f91 | 161 | |
5ee21a52 | 162 | finish_wait(&vblk->queue_wait, &wait); |
c85a1f91 AH |
163 | } |
164 | ||
c85a1f91 AH |
165 | virtqueue_kick(vblk->vq); |
166 | spin_unlock_irq(vblk->disk->queue->queue_lock); | |
167 | } | |
168 | ||
5ee21a52 | 169 | static void virtblk_bio_send_flush(struct virtblk_req *vbr) |
c85a1f91 | 170 | { |
c85a1f91 AH |
171 | vbr->flags |= VBLK_IS_FLUSH; |
172 | vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH; | |
173 | vbr->out_hdr.sector = 0; | |
174 | vbr->out_hdr.ioprio = 0; | |
c85a1f91 | 175 | |
0a11cc36 | 176 | virtblk_add_req(vbr, false); |
c85a1f91 AH |
177 | } |
178 | ||
5ee21a52 | 179 | static void virtblk_bio_send_data(struct virtblk_req *vbr) |
c85a1f91 AH |
180 | { |
181 | struct virtio_blk *vblk = vbr->vblk; | |
c85a1f91 | 182 | struct bio *bio = vbr->bio; |
0a11cc36 | 183 | bool have_data; |
c85a1f91 AH |
184 | |
185 | vbr->flags &= ~VBLK_IS_FLUSH; | |
186 | vbr->out_hdr.type = 0; | |
187 | vbr->out_hdr.sector = bio->bi_sector; | |
188 | vbr->out_hdr.ioprio = bio_prio(bio); | |
189 | ||
0a11cc36 RR |
190 | if (blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg)) { |
191 | have_data = true; | |
8f39db9d | 192 | if (bio->bi_rw & REQ_WRITE) |
c85a1f91 | 193 | vbr->out_hdr.type |= VIRTIO_BLK_T_OUT; |
8f39db9d | 194 | else |
c85a1f91 | 195 | vbr->out_hdr.type |= VIRTIO_BLK_T_IN; |
0a11cc36 RR |
196 | } else |
197 | have_data = false; | |
c85a1f91 | 198 | |
0a11cc36 | 199 | virtblk_add_req(vbr, have_data); |
c85a1f91 AH |
200 | } |
201 | ||
202 | static void virtblk_bio_send_data_work(struct work_struct *work) | |
203 | { | |
204 | struct virtblk_req *vbr; | |
205 | ||
206 | vbr = container_of(work, struct virtblk_req, work); | |
207 | ||
208 | virtblk_bio_send_data(vbr); | |
209 | } | |
210 | ||
211 | static void virtblk_bio_send_flush_work(struct work_struct *work) | |
212 | { | |
213 | struct virtblk_req *vbr; | |
214 | ||
215 | vbr = container_of(work, struct virtblk_req, work); | |
216 | ||
217 | virtblk_bio_send_flush(vbr); | |
218 | } | |
219 | ||
220 | static inline void virtblk_request_done(struct virtblk_req *vbr) | |
a98755c5 | 221 | { |
c85a1f91 | 222 | struct virtio_blk *vblk = vbr->vblk; |
a98755c5 AH |
223 | struct request *req = vbr->req; |
224 | int error = virtblk_result(vbr); | |
225 | ||
226 | if (req->cmd_type == REQ_TYPE_BLOCK_PC) { | |
227 | req->resid_len = vbr->in_hdr.residual; | |
228 | req->sense_len = vbr->in_hdr.sense_len; | |
229 | req->errors = vbr->in_hdr.errors; | |
230 | } else if (req->cmd_type == REQ_TYPE_SPECIAL) { | |
231 | req->errors = (error != 0); | |
232 | } | |
233 | ||
234 | __blk_end_request_all(req, error); | |
235 | mempool_free(vbr, vblk->pool); | |
236 | } | |
237 | ||
c85a1f91 | 238 | static inline void virtblk_bio_flush_done(struct virtblk_req *vbr) |
a98755c5 | 239 | { |
c85a1f91 AH |
240 | struct virtio_blk *vblk = vbr->vblk; |
241 | ||
242 | if (vbr->flags & VBLK_REQ_DATA) { | |
243 | /* Send out the actual write data */ | |
244 | INIT_WORK(&vbr->work, virtblk_bio_send_data_work); | |
245 | queue_work(virtblk_wq, &vbr->work); | |
246 | } else { | |
247 | bio_endio(vbr->bio, virtblk_result(vbr)); | |
248 | mempool_free(vbr, vblk->pool); | |
249 | } | |
250 | } | |
251 | ||
252 | static inline void virtblk_bio_data_done(struct virtblk_req *vbr) | |
253 | { | |
254 | struct virtio_blk *vblk = vbr->vblk; | |
255 | ||
256 | if (unlikely(vbr->flags & VBLK_REQ_FUA)) { | |
257 | /* Send out a flush before end the bio */ | |
258 | vbr->flags &= ~VBLK_REQ_DATA; | |
259 | INIT_WORK(&vbr->work, virtblk_bio_send_flush_work); | |
260 | queue_work(virtblk_wq, &vbr->work); | |
261 | } else { | |
262 | bio_endio(vbr->bio, virtblk_result(vbr)); | |
263 | mempool_free(vbr, vblk->pool); | |
264 | } | |
265 | } | |
266 | ||
267 | static inline void virtblk_bio_done(struct virtblk_req *vbr) | |
268 | { | |
269 | if (unlikely(vbr->flags & VBLK_IS_FLUSH)) | |
270 | virtblk_bio_flush_done(vbr); | |
271 | else | |
272 | virtblk_bio_data_done(vbr); | |
a98755c5 AH |
273 | } |
274 | ||
275 | static void virtblk_done(struct virtqueue *vq) | |
e467cde2 RR |
276 | { |
277 | struct virtio_blk *vblk = vq->vdev->priv; | |
c85a1f91 | 278 | bool bio_done = false, req_done = false; |
e467cde2 | 279 | struct virtblk_req *vbr; |
e467cde2 | 280 | unsigned long flags; |
a98755c5 | 281 | unsigned int len; |
e467cde2 | 282 | |
2c95a329 | 283 | spin_lock_irqsave(vblk->disk->queue->queue_lock, flags); |
bb811108 AH |
284 | do { |
285 | virtqueue_disable_cb(vq); | |
286 | while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) { | |
287 | if (vbr->bio) { | |
288 | virtblk_bio_done(vbr); | |
289 | bio_done = true; | |
290 | } else { | |
291 | virtblk_request_done(vbr); | |
292 | req_done = true; | |
293 | } | |
33659ebb | 294 | } |
bb811108 | 295 | } while (!virtqueue_enable_cb(vq)); |
e467cde2 | 296 | /* In case queue is stopped waiting for more buffers. */ |
a98755c5 AH |
297 | if (req_done) |
298 | blk_start_queue(vblk->disk->queue); | |
2c95a329 | 299 | spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags); |
a98755c5 AH |
300 | |
301 | if (bio_done) | |
302 | wake_up(&vblk->queue_wait); | |
303 | } | |
304 | ||
e467cde2 RR |
305 | static bool do_req(struct request_queue *q, struct virtio_blk *vblk, |
306 | struct request *req) | |
307 | { | |
20af3cfd | 308 | unsigned int num; |
e467cde2 RR |
309 | struct virtblk_req *vbr; |
310 | ||
a98755c5 | 311 | vbr = virtblk_alloc_req(vblk, GFP_ATOMIC); |
e467cde2 RR |
312 | if (!vbr) |
313 | /* When another request finishes we'll try again. */ | |
314 | return false; | |
315 | ||
316 | vbr->req = req; | |
a98755c5 | 317 | vbr->bio = NULL; |
dd40e456 FT |
318 | if (req->cmd_flags & REQ_FLUSH) { |
319 | vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH; | |
4cb2ea28 | 320 | vbr->out_hdr.sector = 0; |
321 | vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); | |
dd40e456 FT |
322 | } else { |
323 | switch (req->cmd_type) { | |
324 | case REQ_TYPE_FS: | |
325 | vbr->out_hdr.type = 0; | |
326 | vbr->out_hdr.sector = blk_rq_pos(vbr->req); | |
327 | vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); | |
328 | break; | |
329 | case REQ_TYPE_BLOCK_PC: | |
330 | vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD; | |
f1b0ef06 CH |
331 | vbr->out_hdr.sector = 0; |
332 | vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); | |
333 | break; | |
dd40e456 FT |
334 | case REQ_TYPE_SPECIAL: |
335 | vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID; | |
336 | vbr->out_hdr.sector = 0; | |
337 | vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); | |
338 | break; | |
339 | default: | |
340 | /* We don't put anything else in the queue. */ | |
341 | BUG(); | |
f1b0ef06 | 342 | } |
e467cde2 RR |
343 | } |
344 | ||
20af3cfd | 345 | num = blk_rq_map_sg(q, vbr->req, vblk->sg); |
1cde26f9 | 346 | if (num) { |
20af3cfd | 347 | if (rq_data_dir(vbr->req) == WRITE) |
1cde26f9 | 348 | vbr->out_hdr.type |= VIRTIO_BLK_T_OUT; |
20af3cfd | 349 | else |
1cde26f9 | 350 | vbr->out_hdr.type |= VIRTIO_BLK_T_IN; |
e467cde2 RR |
351 | } |
352 | ||
20af3cfd | 353 | if (__virtblk_add_req(vblk->vq, vbr, vblk->sg, num) < 0) { |
e467cde2 RR |
354 | mempool_free(vbr, vblk->pool); |
355 | return false; | |
356 | } | |
357 | ||
e467cde2 RR |
358 | return true; |
359 | } | |
360 | ||
a98755c5 | 361 | static void virtblk_request(struct request_queue *q) |
e467cde2 | 362 | { |
6c3b46f7 | 363 | struct virtio_blk *vblk = q->queuedata; |
e467cde2 RR |
364 | struct request *req; |
365 | unsigned int issued = 0; | |
366 | ||
9934c8c0 | 367 | while ((req = blk_peek_request(q)) != NULL) { |
0864b79a | 368 | BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); |
e467cde2 RR |
369 | |
370 | /* If this request fails, stop queue and wait for something to | |
371 | finish to restart it. */ | |
372 | if (!do_req(q, vblk, req)) { | |
373 | blk_stop_queue(q); | |
374 | break; | |
375 | } | |
9934c8c0 | 376 | blk_start_request(req); |
e467cde2 RR |
377 | issued++; |
378 | } | |
379 | ||
380 | if (issued) | |
09ec6b69 | 381 | virtqueue_kick(vblk->vq); |
e467cde2 RR |
382 | } |
383 | ||
a98755c5 AH |
384 | static void virtblk_make_request(struct request_queue *q, struct bio *bio) |
385 | { | |
386 | struct virtio_blk *vblk = q->queuedata; | |
a98755c5 AH |
387 | struct virtblk_req *vbr; |
388 | ||
389 | BUG_ON(bio->bi_phys_segments + 2 > vblk->sg_elems); | |
a98755c5 AH |
390 | |
391 | vbr = virtblk_alloc_req(vblk, GFP_NOIO); | |
392 | if (!vbr) { | |
393 | bio_endio(bio, -ENOMEM); | |
394 | return; | |
395 | } | |
396 | ||
397 | vbr->bio = bio; | |
c85a1f91 AH |
398 | vbr->flags = 0; |
399 | if (bio->bi_rw & REQ_FLUSH) | |
400 | vbr->flags |= VBLK_REQ_FLUSH; | |
401 | if (bio->bi_rw & REQ_FUA) | |
402 | vbr->flags |= VBLK_REQ_FUA; | |
403 | if (bio->bi_size) | |
404 | vbr->flags |= VBLK_REQ_DATA; | |
405 | ||
406 | if (unlikely(vbr->flags & VBLK_REQ_FLUSH)) | |
407 | virtblk_bio_send_flush(vbr); | |
408 | else | |
409 | virtblk_bio_send_data(vbr); | |
a98755c5 AH |
410 | } |
411 | ||
4cb2ea28 | 412 | /* return id (s/n) string for *disk to *id_str |
413 | */ | |
414 | static int virtblk_get_id(struct gendisk *disk, char *id_str) | |
415 | { | |
416 | struct virtio_blk *vblk = disk->private_data; | |
417 | struct request *req; | |
418 | struct bio *bio; | |
e4c4776d | 419 | int err; |
4cb2ea28 | 420 | |
421 | bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES, | |
422 | GFP_KERNEL); | |
423 | if (IS_ERR(bio)) | |
424 | return PTR_ERR(bio); | |
425 | ||
426 | req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL); | |
427 | if (IS_ERR(req)) { | |
428 | bio_put(bio); | |
429 | return PTR_ERR(req); | |
430 | } | |
431 | ||
432 | req->cmd_type = REQ_TYPE_SPECIAL; | |
e4c4776d MS |
433 | err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false); |
434 | blk_put_request(req); | |
435 | ||
436 | return err; | |
4cb2ea28 | 437 | } |
438 | ||
fe5a50a1 CH |
439 | static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, |
440 | unsigned int cmd, unsigned long data) | |
e467cde2 | 441 | { |
1cde26f9 HR |
442 | struct gendisk *disk = bdev->bd_disk; |
443 | struct virtio_blk *vblk = disk->private_data; | |
444 | ||
445 | /* | |
446 | * Only allow the generic SCSI ioctls if the host can support it. | |
447 | */ | |
448 | if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI)) | |
d9ecdea7 | 449 | return -ENOTTY; |
1cde26f9 | 450 | |
577ebb37 PB |
451 | return scsi_cmd_blk_ioctl(bdev, mode, cmd, |
452 | (void __user *)data); | |
e467cde2 RR |
453 | } |
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	struct virtio_blk_geometry vgeo;
	int err;

	/* see if the host passed in geometry config */
	err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY,
				offsetof(struct virtio_blk_config, geometry),
				&vgeo);

	if (!err) {
		geo->heads = vgeo.heads;
		geo->sectors = vgeo.sectors;
		geo->cylinders = vgeo.cylinders;
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
	return 0;
}
479 | ||
83d5cde4 | 480 | static const struct block_device_operations virtblk_fops = { |
8a6cfeb6 | 481 | .ioctl = virtblk_ioctl, |
135da0b0 CB |
482 | .owner = THIS_MODULE, |
483 | .getgeo = virtblk_getgeo, | |
e467cde2 RR |
484 | }; |
/* Each device index owns a contiguous run of 1 << PART_BITS minors. */
static int index_to_minor(int index)
{
	return index << PART_BITS;
}

/* Inverse of index_to_minor(). */
static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}
/*
 * sysfs "serial" attribute: read the serial-number string from the host.
 * An unsupported command (-EIO) is shown as an empty attribute.
 */
static ssize_t virtblk_serial_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	/* The ID string from the host need not be NUL-terminated. */
	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported?  Make it empty. */
		return 0;

	return err;
}
DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
516 | ||
7a7c924c CH |
517 | static void virtblk_config_changed_work(struct work_struct *work) |
518 | { | |
519 | struct virtio_blk *vblk = | |
520 | container_of(work, struct virtio_blk, config_work); | |
521 | struct virtio_device *vdev = vblk->vdev; | |
522 | struct request_queue *q = vblk->disk->queue; | |
523 | char cap_str_2[10], cap_str_10[10]; | |
9d9598b8 | 524 | char *envp[] = { "RESIZE=1", NULL }; |
7a7c924c CH |
525 | u64 capacity, size; |
526 | ||
4678d6f9 MT |
527 | mutex_lock(&vblk->config_lock); |
528 | if (!vblk->config_enable) | |
529 | goto done; | |
530 | ||
7a7c924c CH |
531 | /* Host must always specify the capacity. */ |
532 | vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity), | |
533 | &capacity, sizeof(capacity)); | |
534 | ||
535 | /* If capacity is too big, truncate with warning. */ | |
536 | if ((sector_t)capacity != capacity) { | |
537 | dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n", | |
538 | (unsigned long long)capacity); | |
539 | capacity = (sector_t)-1; | |
540 | } | |
541 | ||
542 | size = capacity * queue_logical_block_size(q); | |
543 | string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); | |
544 | string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); | |
545 | ||
546 | dev_notice(&vdev->dev, | |
547 | "new size: %llu %d-byte logical blocks (%s/%s)\n", | |
548 | (unsigned long long)capacity, | |
549 | queue_logical_block_size(q), | |
550 | cap_str_10, cap_str_2); | |
551 | ||
552 | set_capacity(vblk->disk, capacity); | |
e9986f30 | 553 | revalidate_disk(vblk->disk); |
9d9598b8 | 554 | kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp); |
4678d6f9 MT |
555 | done: |
556 | mutex_unlock(&vblk->config_lock); | |
7a7c924c CH |
557 | } |
558 | ||
559 | static void virtblk_config_changed(struct virtio_device *vdev) | |
560 | { | |
561 | struct virtio_blk *vblk = vdev->priv; | |
562 | ||
563 | queue_work(virtblk_wq, &vblk->config_work); | |
564 | } | |
/* Find the device's single request virtqueue.  Returns 0 or -errno. */
static int init_vq(struct virtio_blk *vblk)
{
	int err = 0;

	/* We expect one virtqueue, for output. */
	vblk->vq = virtio_find_single_vq(vblk->vdev, virtblk_done, "requests");
	if (IS_ERR(vblk->vq))
		err = PTR_ERR(vblk->vq);

	return err;
}
/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 *
 * Encodes @index as a base-26 letter suffix (vda, vdb, ..., vdz, vdaa, ...)
 * after @prefix into @buf.  Returns 0 on success, -EINVAL if @buflen is
 * too small to hold the result.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int radix = 'z' - 'a' + 1;
	char *suffix_start = buf + strlen(prefix);
	char *limit = buf + buflen;
	char *cursor = limit - 1;

	*cursor = '\0';
	/* Emit digits least-significant first, right-aligned in the buffer. */
	for (;;) {
		if (cursor == suffix_start)
			return -EINVAL;
		*--cursor = 'a' + (index % radix);
		index = index / radix - 1;
		if (index < 0)
			break;
	}

	/* Slide the suffix down next to the prefix, then write the prefix. */
	memmove(suffix_start, cursor, limit - cursor);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}
605 | ||
cd5d5038 PB |
606 | static int virtblk_get_cache_mode(struct virtio_device *vdev) |
607 | { | |
608 | u8 writeback; | |
609 | int err; | |
610 | ||
611 | err = virtio_config_val(vdev, VIRTIO_BLK_F_CONFIG_WCE, | |
612 | offsetof(struct virtio_blk_config, wce), | |
613 | &writeback); | |
614 | if (err) | |
615 | writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE); | |
616 | ||
617 | return writeback; | |
618 | } | |
619 | ||
620 | static void virtblk_update_cache_mode(struct virtio_device *vdev) | |
621 | { | |
622 | u8 writeback = virtblk_get_cache_mode(vdev); | |
623 | struct virtio_blk *vblk = vdev->priv; | |
624 | ||
c85a1f91 | 625 | if (writeback) |
cd5d5038 PB |
626 | blk_queue_flush(vblk->disk->queue, REQ_FLUSH); |
627 | else | |
628 | blk_queue_flush(vblk->disk->queue, 0); | |
629 | ||
630 | revalidate_disk(vblk->disk); | |
631 | } | |
632 | ||
633 | static const char *const virtblk_cache_types[] = { | |
634 | "write through", "write back" | |
635 | }; | |
636 | ||
637 | static ssize_t | |
638 | virtblk_cache_type_store(struct device *dev, struct device_attribute *attr, | |
639 | const char *buf, size_t count) | |
640 | { | |
641 | struct gendisk *disk = dev_to_disk(dev); | |
642 | struct virtio_blk *vblk = disk->private_data; | |
643 | struct virtio_device *vdev = vblk->vdev; | |
644 | int i; | |
645 | u8 writeback; | |
646 | ||
647 | BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE)); | |
648 | for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; ) | |
649 | if (sysfs_streq(buf, virtblk_cache_types[i])) | |
650 | break; | |
651 | ||
652 | if (i < 0) | |
653 | return -EINVAL; | |
654 | ||
655 | writeback = i; | |
656 | vdev->config->set(vdev, | |
657 | offsetof(struct virtio_blk_config, wce), | |
658 | &writeback, sizeof(writeback)); | |
659 | ||
660 | virtblk_update_cache_mode(vdev); | |
661 | return count; | |
662 | } | |
663 | ||
664 | static ssize_t | |
665 | virtblk_cache_type_show(struct device *dev, struct device_attribute *attr, | |
666 | char *buf) | |
667 | { | |
668 | struct gendisk *disk = dev_to_disk(dev); | |
669 | struct virtio_blk *vblk = disk->private_data; | |
670 | u8 writeback = virtblk_get_cache_mode(vblk->vdev); | |
671 | ||
672 | BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types)); | |
673 | return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]); | |
674 | } | |
675 | ||
676 | static const struct device_attribute dev_attr_cache_type_ro = | |
677 | __ATTR(cache_type, S_IRUGO, | |
678 | virtblk_cache_type_show, NULL); | |
679 | static const struct device_attribute dev_attr_cache_type_rw = | |
680 | __ATTR(cache_type, S_IRUGO|S_IWUSR, | |
681 | virtblk_cache_type_show, virtblk_cache_type_store); | |
682 | ||
8d85fce7 | 683 | static int virtblk_probe(struct virtio_device *vdev) |
e467cde2 RR |
684 | { |
685 | struct virtio_blk *vblk; | |
69740c8b | 686 | struct request_queue *q; |
5087a50e | 687 | int err, index; |
a98755c5 AH |
688 | int pool_size; |
689 | ||
e467cde2 | 690 | u64 cap; |
69740c8b CH |
691 | u32 v, blk_size, sg_elems, opt_io_size; |
692 | u16 min_io_size; | |
693 | u8 physical_block_exp, alignment_offset; | |
e467cde2 | 694 | |
5087a50e MT |
695 | err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS), |
696 | GFP_KERNEL); | |
697 | if (err < 0) | |
698 | goto out; | |
699 | index = err; | |
4f3bf19c | 700 | |
0864b79a RR |
701 | /* We need to know how many segments before we allocate. */ |
702 | err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX, | |
703 | offsetof(struct virtio_blk_config, seg_max), | |
704 | &sg_elems); | |
a5b365a6 CH |
705 | |
706 | /* We need at least one SG element, whatever they say. */ | |
707 | if (err || !sg_elems) | |
0864b79a RR |
708 | sg_elems = 1; |
709 | ||
710 | /* We need an extra sg elements at head and tail. */ | |
711 | sg_elems += 2; | |
712 | vdev->priv = vblk = kmalloc(sizeof(*vblk) + | |
713 | sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL); | |
e467cde2 RR |
714 | if (!vblk) { |
715 | err = -ENOMEM; | |
5087a50e | 716 | goto out_free_index; |
e467cde2 RR |
717 | } |
718 | ||
a98755c5 | 719 | init_waitqueue_head(&vblk->queue_wait); |
e467cde2 | 720 | vblk->vdev = vdev; |
0864b79a RR |
721 | vblk->sg_elems = sg_elems; |
722 | sg_init_table(vblk->sg, vblk->sg_elems); | |
4678d6f9 | 723 | mutex_init(&vblk->config_lock); |
a98755c5 | 724 | |
7a7c924c | 725 | INIT_WORK(&vblk->config_work, virtblk_config_changed_work); |
4678d6f9 | 726 | vblk->config_enable = true; |
e467cde2 | 727 | |
6abd6e5a AS |
728 | err = init_vq(vblk); |
729 | if (err) | |
e467cde2 | 730 | goto out_free_vblk; |
e467cde2 | 731 | |
a98755c5 AH |
732 | pool_size = sizeof(struct virtblk_req); |
733 | if (use_bio) | |
734 | pool_size += sizeof(struct scatterlist) * sg_elems; | |
735 | vblk->pool = mempool_create_kmalloc_pool(1, pool_size); | |
e467cde2 RR |
736 | if (!vblk->pool) { |
737 | err = -ENOMEM; | |
738 | goto out_free_vq; | |
739 | } | |
740 | ||
e467cde2 | 741 | /* FIXME: How many partitions? How long is a piece of string? */ |
4f3bf19c | 742 | vblk->disk = alloc_disk(1 << PART_BITS); |
e467cde2 RR |
743 | if (!vblk->disk) { |
744 | err = -ENOMEM; | |
4f3bf19c | 745 | goto out_mempool; |
e467cde2 RR |
746 | } |
747 | ||
a98755c5 | 748 | q = vblk->disk->queue = blk_init_queue(virtblk_request, NULL); |
69740c8b | 749 | if (!q) { |
e467cde2 RR |
750 | err = -ENOMEM; |
751 | goto out_put_disk; | |
752 | } | |
753 | ||
a98755c5 AH |
754 | if (use_bio) |
755 | blk_queue_make_request(q, virtblk_make_request); | |
69740c8b | 756 | q->queuedata = vblk; |
7d116b62 | 757 | |
c0aa3e09 | 758 | virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); |
d50ed907 | 759 | |
e467cde2 | 760 | vblk->disk->major = major; |
d50ed907 | 761 | vblk->disk->first_minor = index_to_minor(index); |
e467cde2 RR |
762 | vblk->disk->private_data = vblk; |
763 | vblk->disk->fops = &virtblk_fops; | |
c4839346 | 764 | vblk->disk->driverfs_dev = &vdev->dev; |
5087a50e | 765 | vblk->index = index; |
4f3bf19c | 766 | |
02c42b7a | 767 | /* configure queue flush support */ |
cd5d5038 | 768 | virtblk_update_cache_mode(vdev); |
e467cde2 | 769 | |
3ef53609 CB |
770 | /* If disk is read-only in the host, the guest should obey */ |
771 | if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) | |
772 | set_disk_ro(vblk->disk, 1); | |
773 | ||
a586d4f6 | 774 | /* Host must always specify the capacity. */ |
72e61eb4 RR |
775 | vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity), |
776 | &cap, sizeof(cap)); | |
e467cde2 RR |
777 | |
778 | /* If capacity is too big, truncate with warning. */ | |
779 | if ((sector_t)cap != cap) { | |
780 | dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n", | |
781 | (unsigned long long)cap); | |
782 | cap = (sector_t)-1; | |
783 | } | |
784 | set_capacity(vblk->disk, cap); | |
785 | ||
0864b79a | 786 | /* We can handle whatever the host told us to handle. */ |
ee714f2d | 787 | blk_queue_max_segments(q, vblk->sg_elems-2); |
0864b79a | 788 | |
4eff3cae | 789 | /* No need to bounce any requests */ |
69740c8b | 790 | blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); |
4eff3cae | 791 | |
4b7f7e20 | 792 | /* No real sector limit. */ |
ee714f2d | 793 | blk_queue_max_hw_sectors(q, -1U); |
4b7f7e20 | 794 | |
a586d4f6 RR |
795 | /* Host can optionally specify maximum segment size and number of |
796 | * segments. */ | |
797 | err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX, | |
798 | offsetof(struct virtio_blk_config, size_max), | |
799 | &v); | |
e467cde2 | 800 | if (!err) |
69740c8b | 801 | blk_queue_max_segment_size(q, v); |
4b7f7e20 | 802 | else |
69740c8b | 803 | blk_queue_max_segment_size(q, -1U); |
e467cde2 | 804 | |
066f4d82 CB |
805 | /* Host can optionally specify the block size of the device */ |
806 | err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE, | |
807 | offsetof(struct virtio_blk_config, blk_size), | |
808 | &blk_size); | |
809 | if (!err) | |
69740c8b CH |
810 | blk_queue_logical_block_size(q, blk_size); |
811 | else | |
812 | blk_size = queue_logical_block_size(q); | |
813 | ||
814 | /* Use topology information if available */ | |
815 | err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, | |
816 | offsetof(struct virtio_blk_config, physical_block_exp), | |
817 | &physical_block_exp); | |
818 | if (!err && physical_block_exp) | |
819 | blk_queue_physical_block_size(q, | |
820 | blk_size * (1 << physical_block_exp)); | |
821 | ||
822 | err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, | |
823 | offsetof(struct virtio_blk_config, alignment_offset), | |
824 | &alignment_offset); | |
825 | if (!err && alignment_offset) | |
826 | blk_queue_alignment_offset(q, blk_size * alignment_offset); | |
827 | ||
828 | err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, | |
829 | offsetof(struct virtio_blk_config, min_io_size), | |
830 | &min_io_size); | |
831 | if (!err && min_io_size) | |
832 | blk_queue_io_min(q, blk_size * min_io_size); | |
833 | ||
834 | err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, | |
835 | offsetof(struct virtio_blk_config, opt_io_size), | |
836 | &opt_io_size); | |
837 | if (!err && opt_io_size) | |
838 | blk_queue_io_opt(q, blk_size * opt_io_size); | |
839 | ||
e467cde2 | 840 | add_disk(vblk->disk); |
a5eb9e4f RH |
841 | err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial); |
842 | if (err) | |
843 | goto out_del_disk; | |
844 | ||
cd5d5038 PB |
845 | if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) |
846 | err = device_create_file(disk_to_dev(vblk->disk), | |
847 | &dev_attr_cache_type_rw); | |
848 | else | |
849 | err = device_create_file(disk_to_dev(vblk->disk), | |
850 | &dev_attr_cache_type_ro); | |
851 | if (err) | |
852 | goto out_del_disk; | |
e467cde2 RR |
853 | return 0; |
854 | ||
a5eb9e4f RH |
855 | out_del_disk: |
856 | del_gendisk(vblk->disk); | |
857 | blk_cleanup_queue(vblk->disk->queue); | |
e467cde2 RR |
858 | out_put_disk: |
859 | put_disk(vblk->disk); | |
e467cde2 RR |
860 | out_mempool: |
861 | mempool_destroy(vblk->pool); | |
862 | out_free_vq: | |
d2a7ddda | 863 | vdev->config->del_vqs(vdev); |
e467cde2 RR |
864 | out_free_vblk: |
865 | kfree(vblk); | |
5087a50e MT |
866 | out_free_index: |
867 | ida_simple_remove(&vd_index_ida, index); | |
e467cde2 RR |
868 | out: |
869 | return err; | |
870 | } | |
871 | ||
8d85fce7 | 872 | static void virtblk_remove(struct virtio_device *vdev) |
e467cde2 RR |
873 | { |
874 | struct virtio_blk *vblk = vdev->priv; | |
5087a50e | 875 | int index = vblk->index; |
f4953fe6 | 876 | int refc; |
e467cde2 | 877 | |
4678d6f9 MT |
878 | /* Prevent config work handler from accessing the device. */ |
879 | mutex_lock(&vblk->config_lock); | |
880 | vblk->config_enable = false; | |
881 | mutex_unlock(&vblk->config_lock); | |
7a7c924c | 882 | |
02e2b124 | 883 | del_gendisk(vblk->disk); |
483001c7 | 884 | blk_cleanup_queue(vblk->disk->queue); |
02e2b124 | 885 | |
6e5aa7ef RR |
886 | /* Stop all the virtqueues. */ |
887 | vdev->config->reset(vdev); | |
888 | ||
4678d6f9 MT |
889 | flush_work(&vblk->config_work); |
890 | ||
f4953fe6 | 891 | refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount); |
e467cde2 | 892 | put_disk(vblk->disk); |
e467cde2 | 893 | mempool_destroy(vblk->pool); |
d2a7ddda | 894 | vdev->config->del_vqs(vdev); |
e467cde2 | 895 | kfree(vblk); |
f4953fe6 AG |
896 | |
897 | /* Only free device id if we don't have any users */ | |
898 | if (refc == 1) | |
899 | ida_simple_remove(&vd_index_ida, index); | |
e467cde2 RR |
900 | } |
901 | ||
f8fb5bc2 AS |
#ifdef CONFIG_PM
/*
 * virtblk_freeze - quiesce the device for suspend/hibernate.
 *
 * Resets the device first so no further interrupts arrive, disables the
 * config-change work handler, stops and drains the block request queue,
 * and finally deletes the virtqueues.  Returns 0 (cannot fail).
 */
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	vdev->config->reset(vdev);

	/* Prevent config work handler from accessing the device. */
	mutex_lock(&vblk->config_lock);
	vblk->config_enable = false;
	mutex_unlock(&vblk->config_lock);

	/* An already-queued handler may still run; wait it out. */
	flush_work(&vblk->config_work);

	/* Stop the queue under its lock, then wait for it to go idle. */
	spin_lock_irq(vblk->disk->queue->queue_lock);
	blk_stop_queue(vblk->disk->queue);
	spin_unlock_irq(vblk->disk->queue->queue_lock);
	blk_sync_queue(vblk->disk->queue);

	vdev->config->del_vqs(vdev);
	return 0;
}

/*
 * virtblk_restore - bring the device back after resume.
 *
 * Re-creates the virtqueue and restarts the request queue.  Returns the
 * result of init_vq(); on failure the queue is left stopped.
 *
 * NOTE(review): config_enable is flipped on before init_vq() completes —
 * presumably no config interrupt can arrive until the vq setup re-enables
 * the device; confirm against the virtio core.
 */
static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	vblk->config_enable = true;
	ret = init_vq(vdev->priv);
	if (!ret) {
		spin_lock_irq(vblk->disk->queue->queue_lock);
		blk_start_queue(vblk->disk->queue);
		spin_unlock_irq(vblk->disk->queue->queue_lock);
	}
	return ret;
}
#endif
941 | ||
/* Match any device that advertises the virtio block device ID. */
static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
946 | ||
/* Feature bits this driver understands and will negotiate with the host. */
static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
	VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE
};
952 | ||
/* Virtio driver registration: probe/remove, config-change and PM hooks. */
static struct virtio_driver virtio_blk = {
	.feature_table		= features,
	.feature_table_size	= ARRAY_SIZE(features),
	.driver.name		= KBUILD_MODNAME,
	.driver.owner		= THIS_MODULE,
	.id_table		= id_table,
	.probe			= virtblk_probe,
	.remove			= virtblk_remove,
	.config_changed		= virtblk_config_changed,
#ifdef CONFIG_PM
	/* Suspend/resume support only when power management is built in. */
	.freeze			= virtblk_freeze,
	.restore		= virtblk_restore,
#endif
};
967 | ||
968 | static int __init init(void) | |
969 | { | |
7a7c924c CH |
970 | int error; |
971 | ||
972 | virtblk_wq = alloc_workqueue("virtio-blk", 0, 0); | |
973 | if (!virtblk_wq) | |
974 | return -ENOMEM; | |
975 | ||
4f3bf19c | 976 | major = register_blkdev(0, "virtblk"); |
7a7c924c CH |
977 | if (major < 0) { |
978 | error = major; | |
979 | goto out_destroy_workqueue; | |
980 | } | |
981 | ||
982 | error = register_virtio_driver(&virtio_blk); | |
983 | if (error) | |
984 | goto out_unregister_blkdev; | |
985 | return 0; | |
986 | ||
987 | out_unregister_blkdev: | |
988 | unregister_blkdev(major, "virtblk"); | |
989 | out_destroy_workqueue: | |
990 | destroy_workqueue(virtblk_wq); | |
991 | return error; | |
e467cde2 RR |
992 | } |
993 | ||
994 | static void __exit fini(void) | |
995 | { | |
4f3bf19c | 996 | unregister_blkdev(major, "virtblk"); |
e467cde2 | 997 | unregister_virtio_driver(&virtio_blk); |
7a7c924c | 998 | destroy_workqueue(virtblk_wq); |
e467cde2 RR |
999 | } |
/* Standard module entry/exit hookup and metadata. */
module_init(init);
module_exit(fini);

/* Lets udev/modprobe auto-load this module for matching virtio devices. */
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");