/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>
enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
};
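/*
 * One blk_shadow entry exists per I/O ring slot.  It keeps the exact
 * blkif request that was posted and the block-layer request it came
 * from, so that in-flight I/O can be replayed onto a fresh ring after a
 * suspend/resume (see blkif_recover()).
 */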
struct blk_shadow {
	struct blkif_request req;
	struct request *request;
	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;

#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)
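/*
 * __CONST_RING_SIZE() computes how many request/response slots fit in
 * the single shared page backing the I/O ring; BLK_RING_SIZE therefore
 * also sizes the shadow array and bounds the request-id freelist.
 */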
/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
	spinlock_t io_lock;
	struct mutex mutex;
	struct xenbus_device *xbdev;
	struct gendisk *gd;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	int ring_ref;
	struct blkif_front_ring ring;
	struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int evtchn, irq;
	struct request_queue *rq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct blk_shadow shadow[BLK_RING_SIZE];
	unsigned long shadow_free;
	unsigned int feature_flush;
	unsigned int flush_op;
	unsigned int feature_discard:1;
	unsigned int feature_secdiscard:1;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	int is_ready;
};
static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);
#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
	(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
#define GRANT_INVALID_REF	0

#define PARTS_PER_DISK		16
#define PARTS_PER_EXT_DISK	256

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define EXT_SHIFT 28
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
#define EMULATED_HD_DISK_MINOR_OFFSET (0)
#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
#define EMULATED_SD_DISK_MINOR_OFFSET (0)
#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)

#define DEV_NAME	"xvd"	/* name in /dev */
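/*
 * Free shadow entries are chained through the otherwise-unused id field:
 * shadow_free holds the index of the first free entry and each free
 * entry's req.u.rw.id holds the index of the next one.
 */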
static int get_id_from_freelist(struct blkfront_info *info)
{
	unsigned long free = info->shadow_free;
	BUG_ON(free >= BLK_RING_SIZE);
	info->shadow_free = info->shadow[free].req.u.rw.id;
	info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
	return free;
}
static void add_id_to_freelist(struct blkfront_info *info,
			       unsigned long id)
{
	info->shadow[id].req.u.rw.id = info->shadow_free;
	info->shadow[id].request = NULL;
	info->shadow_free = id;
}
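/*
 * All vbds share the single XENVBD_MAJOR major, so the minor-number
 * space is parcelled out from a dynamically grown bitmap, one run of
 * PARTS_PER_DISK (or PARTS_PER_EXT_DISK) minors per disk.
 */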
static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;
	int rc;

	if (end > nr_minors) {
		unsigned long *bitmap, *old;

		bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
				 GFP_KERNEL);
		if (bitmap == NULL)
			return -ENOMEM;

		spin_lock(&minor_lock);
		if (end > nr_minors) {
			old = minors;
			memcpy(bitmap, minors,
			       BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
			minors = bitmap;
			nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
		} else
			old = bitmap;
		spin_unlock(&minor_lock);
		kfree(old);
	}

	rc = -EBUSY;
	spin_lock(&minor_lock);
	if (find_next_bit(minors, end, minor) >= end) {
		bitmap_set(minors, minor, nr);
		rc = 0;
	}
	spin_unlock(&minor_lock);

	return rc;
}
static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;

	BUG_ON(end > nr_minors);
	spin_lock(&minor_lock);
	bitmap_clear(minors, minor, nr);
	spin_unlock(&minor_lock);
}
static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
	schedule_work(&info->work);
}
static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	/* We don't have real geometry info, but let's at least return
	   values consistent with the size of the device */
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}
static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned command, unsigned long argument)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;
	int i;

	dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
		command, (long)argument);

	switch (command) {
	case CDROMMULTISESSION:
		dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY: {
		struct gendisk *gd = info->gd;
		if (gd->flags & GENHD_FL_CD)
			return 0;
		return -EINVAL;
	}

	default:
		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
		  command);*/
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}
/*
 * Generate a Xen blkfront IO request from a blk layer request.  Reads
 * and writes are handled as expected.
 *
 * @req: a request struct
 */
static int blkif_queue_request(struct request *req)
{
	struct blkfront_info *info = req->rq_disk->private_data;
	unsigned long buffer_mfn;
	struct blkif_request *ring_req;
	unsigned long id;
	unsigned int fsect, lsect;
	int i, ref;
	grant_ref_t gref_head;
	struct scatterlist *sg;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	if (gnttab_alloc_grant_references(
		BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
		gnttab_request_free_callback(
			&info->callback,
			blkif_restart_queue_callback,
			info,
			BLKIF_MAX_SEGMENTS_PER_REQUEST);
		return 1;
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = get_id_from_freelist(info);
	info->shadow[id].request = req;

	ring_req->u.rw.id = id;
	ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
	ring_req->u.rw.handle = info->handle;

	ring_req->operation = rq_data_dir(req) ?
		BLKIF_OP_WRITE : BLKIF_OP_READ;

	if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
		/*
		 * Ideally we can do an unordered flush-to-disk. In case the
		 * backend only supports barriers, use that. A barrier request
		 * is a superset of FUA, so we can implement it the same
		 * way.  (It's also a FLUSH+FUA, since it is
		 * guaranteed ordered WRT previous writes.)
		 */
		ring_req->operation = info->flush_op;
	}

	if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
		/* id, sector_number and handle are set above. */
		ring_req->operation = BLKIF_OP_DISCARD;
		ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
		if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
			ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
		else
			ring_req->u.discard.flag = 0;
	} else {
		ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req,
							   info->sg);
		BUG_ON(ring_req->u.rw.nr_segments >
		       BLKIF_MAX_SEGMENTS_PER_REQUEST);

		for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) {
			buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
			fsect = sg->offset >> 9;
			lsect = fsect + (sg->length >> 9) - 1;
			/* install a grant reference. */
			ref = gnttab_claim_grant_reference(&gref_head);
			BUG_ON(ref == -ENOSPC);

			gnttab_grant_foreign_access_ref(
					ref,
					info->xbdev->otherend_id,
					buffer_mfn,
					rq_data_dir(req));

			info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
			ring_req->u.rw.seg[i] =
					(struct blkif_request_segment) {
						.gref       = ref,
						.first_sect = fsect,
						.last_sect  = lsect };
		}
	}

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	gnttab_free_grant_references(gref_head);

	return 0;
}
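/*
 * blkif_queue_request() only advances the private producer index;
 * nothing becomes visible to the backend until the caller pushes the
 * batch with flush_requests(), so several requests can be queued per
 * notification.
 */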
static inline void flush_requests(struct blkfront_info *info)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

	if (notify)
		notify_remote_via_irq(info->irq);
}
/*
 * do_blkif_request
 *  read a block; request is in a request queue
 */
static void do_blkif_request(struct request_queue *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	pr_debug("Entered do_blkif_request\n");

	queued = 0;

	while ((req = blk_peek_request(rq)) != NULL) {
		info = req->rq_disk->private_data;

		if (RING_FULL(&info->ring))
			goto wait;

		blk_start_request(req);

		if ((req->cmd_type != REQ_TYPE_FS) ||
		    ((req->cmd_flags & (REQ_FLUSH | REQ_FUA)) &&
		    !info->flush_op)) {
			__blk_end_request_all(req, -EIO);
			continue;
		}

		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
			 "(%u/%u) buffer:%p [%s]\n",
			 req, req->cmd, (unsigned long)blk_rq_pos(req),
			 blk_rq_cur_sectors(req), blk_rq_sectors(req),
			 req->buffer, rq_data_dir(req) ? "write" : "read");

		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}
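/*
 * When the ring is full the queue is stopped ('wait' above) and later
 * restarted from kick_pending_request_queues() once the interrupt
 * handler has retired responses and freed ring slots.
 */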
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
{
	struct request_queue *rq;
	struct blkfront_info *info = gd->private_data;

	rq = blk_init_queue(do_blkif_request, &info->io_lock);
	if (rq == NULL)
		return -1;

	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);

	if (info->feature_discard) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
		blk_queue_max_discard_sectors(rq, get_capacity(gd));
		rq->limits.discard_granularity = info->discard_granularity;
		rq->limits.discard_alignment = info->discard_alignment;
		if (info->feature_secdiscard)
			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
	}

	/* Hard sector size and max sectors impersonate the equiv. hardware. */
	blk_queue_logical_block_size(rq, sector_size);
	blk_queue_max_hw_sectors(rq, 512);

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
	blk_queue_max_segment_size(rq, PAGE_SIZE);

	/* Ensure a merged request will fit in a single I/O ring slot. */
	blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);

	/* Make sure buffer addresses are sector-aligned. */
	blk_queue_dma_alignment(rq, 511);

	/* Make sure we don't use bounce buffers. */
	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);

	gd->queue = rq;

	return 0;
}
static void xlvbd_flush(struct blkfront_info *info)
{
	blk_queue_flush(info->rq, info->feature_flush);
	printk(KERN_INFO "blkfront: %s: %s: %s\n",
	       info->gd->disk_name,
	       info->flush_op == BLKIF_OP_WRITE_BARRIER ?
		"barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ?
		"flush diskcache" : "barrier or flush"),
	       info->feature_flush ? "enabled" : "disabled");
}
static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
{
	int major;

	major = BLKIF_MAJOR(vdevice);
	*minor = BLKIF_MINOR(vdevice);
	switch (major) {
	case XEN_IDE0_MAJOR:
		*offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
		*minor = ((*minor / 64) * PARTS_PER_DISK) +
			EMULATED_HD_DISK_MINOR_OFFSET;
		break;
	case XEN_IDE1_MAJOR:
		*offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
		*minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
			EMULATED_HD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK0_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK1_MAJOR:
	case XEN_SCSI_DISK2_MAJOR:
	case XEN_SCSI_DISK3_MAJOR:
	case XEN_SCSI_DISK4_MAJOR:
	case XEN_SCSI_DISK5_MAJOR:
	case XEN_SCSI_DISK6_MAJOR:
	case XEN_SCSI_DISK7_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) +
			((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
			EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor +
			((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
			EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK8_MAJOR:
	case XEN_SCSI_DISK9_MAJOR:
	case XEN_SCSI_DISK10_MAJOR:
	case XEN_SCSI_DISK11_MAJOR:
	case XEN_SCSI_DISK12_MAJOR:
	case XEN_SCSI_DISK13_MAJOR:
	case XEN_SCSI_DISK14_MAJOR:
	case XEN_SCSI_DISK15_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) +
			((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
			EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor +
			((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
			EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XENVBD_MAJOR:
		*offset = *minor / PARTS_PER_DISK;
		break;
	default:
		printk(KERN_WARNING "blkfront: your disk configuration is "
				"incorrect, please use an xvd device instead\n");
		return -ENODEV;
	}
	return 0;
}
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
			       struct blkfront_info *info,
			       u16 vdisk_info, u16 sector_size)
{
	struct gendisk *gd;
	int nr_minors = 1;
	int err;
	unsigned int offset;
	int minor;
	int nr_parts;

	BUG_ON(info->gd != NULL);
	BUG_ON(info->rq != NULL);

	if ((info->vdevice>>EXT_SHIFT) > 1) {
		/* this is above the extended range; something is wrong */
		printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
		return -ENODEV;
	}

	if (!VDEV_IS_EXTENDED(info->vdevice)) {
		err = xen_translate_vdev(info->vdevice, &minor, &offset);
		if (err)
			return err;
		nr_parts = PARTS_PER_DISK;
	} else {
		minor = BLKIF_MINOR_EXT(info->vdevice);
		nr_parts = PARTS_PER_EXT_DISK;
		offset = minor / nr_parts;
		if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
			printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
					"emulated IDE disks,\n\t choose an xvd device name"
					"from xvde on\n", info->vdevice);
	}
	err = -ENODEV;

	if ((minor % nr_parts) == 0)
		nr_minors = nr_parts;

	err = xlbd_reserve_minors(minor, nr_minors);
	if (err)
		goto out;
	err = -ENODEV;

	gd = alloc_disk(nr_minors);
	if (gd == NULL)
		goto release;

	if (nr_minors > 1) {
		if (offset < 26)
			sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
		else
			sprintf(gd->disk_name, "%s%c%c", DEV_NAME,
				'a' + ((offset / 26)-1), 'a' + (offset % 26));
	} else {
		if (offset < 26)
			sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
				'a' + offset,
				minor & (nr_parts - 1));
		else
			sprintf(gd->disk_name, "%s%c%c%d", DEV_NAME,
				'a' + ((offset / 26) - 1),
				'a' + (offset % 26),
				minor & (nr_parts - 1));
	}

	gd->major = XENVBD_MAJOR;
	gd->first_minor = minor;
	gd->fops = &xlvbd_block_fops;
	gd->private_data = info;
	gd->driverfs_dev = &(info->xbdev->dev);
	set_capacity(gd, capacity);

	if (xlvbd_init_blk_queue(gd, sector_size)) {
		del_gendisk(gd);
		goto release;
	}

	info->rq = gd->queue;
	info->gd = gd;

	xlvbd_flush(info);

	if (vdisk_info & VDISK_READONLY)
		set_disk_ro(gd, 1);

	if (vdisk_info & VDISK_REMOVABLE)
		gd->flags |= GENHD_FL_REMOVABLE;

	if (vdisk_info & VDISK_CDROM)
		gd->flags |= GENHD_FL_CD;

	return 0;

 release:
	xlbd_release_minors(minor, nr_minors);
 out:
	return err;
}
static void xlvbd_release_gendisk(struct blkfront_info *info)
{
	unsigned int minor, nr_minors;
	unsigned long flags;

	if (info->rq == NULL)
		return;

	spin_lock_irqsave(&info->io_lock, flags);

	/* No more blkif_request(). */
	blk_stop_queue(info->rq);

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irqrestore(&info->io_lock, flags);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work_sync(&info->work);

	del_gendisk(info->gd);

	minor = info->gd->first_minor;
	nr_minors = info->gd->minors;
	xlbd_release_minors(minor, nr_minors);

	blk_cleanup_queue(info->rq);
	info->rq = NULL;

	put_disk(info->gd);
	info->gd = NULL;
}
static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring)) {
		/* Re-enable calldowns. */
		blk_start_queue(info->rq);
		/* Kick things off immediately. */
		do_blkif_request(info->rq);
	}
}
static void blkif_restart_queue(struct work_struct *work)
{
	struct blkfront_info *info = container_of(work, struct blkfront_info, work);

	spin_lock_irq(&info->io_lock);
	if (info->connected == BLKIF_STATE_CONNECTED)
		kick_pending_request_queues(info);
	spin_unlock_irq(&info->io_lock);
}
static void blkif_free(struct blkfront_info *info, int suspend)
{
	/* Prevent new requests being issued until we fix things up. */
	spin_lock_irq(&info->io_lock);
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	/* No more blkif_request(). */
	if (info->rq)
		blk_stop_queue(info->rq);
	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irq(&info->io_lock);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work_sync(&info->work);

	/* Free resources associated with old device channel. */
	if (info->ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->ring_ref, 0,
					  (unsigned long)info->ring.sring);
		info->ring_ref = GRANT_INVALID_REF;
		info->ring.sring = NULL;
	}
	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->evtchn = info->irq = 0;
}
static void blkif_completion(struct blk_shadow *s)
{
	int i;
	/*
	 * Do not call this for BLKIF_OP_DISCARD: in the discard request
	 * layout the field that overlays nr_segments holds the discard
	 * flag, not a segment count.
	 */
	for (i = 0; i < s->req.u.rw.nr_segments; i++)
		gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
}
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
	struct request *req;
	struct blkif_response *bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_info *info = (struct blkfront_info *)dev_id;
	int error;

	spin_lock_irqsave(&info->io_lock, flags);

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
		spin_unlock_irqrestore(&info->io_lock, flags);
		return IRQ_HANDLED;
	}

 again:
	rp = info->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		unsigned long id;

		bret = RING_GET_RESPONSE(&info->ring, i);
		id   = bret->id;
		req  = info->shadow[id].request;

		if (bret->operation != BLKIF_OP_DISCARD)
			blkif_completion(&info->shadow[id]);

		add_id_to_freelist(info, id);

		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
		switch (bret->operation) {
		case BLKIF_OP_DISCARD:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				struct request_queue *rq = info->rq;
				printk(KERN_WARNING "blkfront: %s: discard op failed\n",
					   info->gd->disk_name);
				error = -EOPNOTSUPP;
				info->feature_discard = 0;
				info->feature_secdiscard = 0;
				queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
				queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
			}
			__blk_end_request_all(req, error);
			break;
		case BLKIF_OP_FLUSH_DISKCACHE:
		case BLKIF_OP_WRITE_BARRIER:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				printk(KERN_WARNING "blkfront: %s: write %s op failed\n",
				       info->flush_op == BLKIF_OP_WRITE_BARRIER ?
				       "barrier" : "flush disk cache",
				       info->gd->disk_name);
				error = -EOPNOTSUPP;
			}
			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
				     info->shadow[id].req.u.rw.nr_segments == 0)) {
				printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
				       info->flush_op == BLKIF_OP_WRITE_BARRIER ?
				       "barrier" : "flush disk cache",
				       info->gd->disk_name);
				error = -EOPNOTSUPP;
			}
			if (unlikely(error)) {
				if (error == -EOPNOTSUPP)
					error = 0;
				info->feature_flush = 0;
				info->flush_op = 0;
				xlvbd_flush(info);
			}
			/* fall through */
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret->status != BLKIF_RSP_OKAY))
				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
					"request: %x\n", bret->status);

			__blk_end_request_all(req, error);
			break;
		default:
			BUG();
		}
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		info->ring.sring->rsp_event = i + 1;

	kick_pending_request_queues(info);

	spin_unlock_irqrestore(&info->io_lock, flags);

	return IRQ_HANDLED;
}
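/*
 * The 'again' loop in blkif_interrupt() is the standard Xen ring race
 * check: RING_FINAL_CHECK_FOR_RESPONSES() re-arms rsp_event and reports
 * whether new responses slipped in while we were draining, so no
 * notification is lost.
 */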
static int setup_blkring(struct xenbus_device *dev,
			 struct blkfront_info *info)
{
	struct blkif_sring *sring;
	int err;

	info->ring_ref = GRANT_INVALID_REF;

	sring = (struct blkif_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);

	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
	if (err < 0) {
		free_page((unsigned long)sring);
		info->ring.sring = NULL;
		goto fail;
	}
	info->ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn,
					blkif_interrupt,
					IRQF_SAMPLE_RANDOM, "blkif", info);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
				 "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	info->irq = err;

	return 0;
fail:
	blkif_free(info, 0);
	return err;
}
/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_blkring(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	err = xenbus_printf(xbt, dev->nodename,
			    "ring-ref", "%u", info->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (err) {
		message = "writing protocol";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_blkring;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(dev, err, "%s", message);
 destroy_blkring:
	blkif_free(info, 0);
 out:
	return err;
}
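/*
 * xenbus_transaction_end() returns -EAGAIN when the transaction raced
 * with a concurrent xenstore update, hence the 'again' retry loop;
 * any other failure tears the ring back down via destroy_blkring.
 */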
/*
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err, vdevice, i;
	struct blkfront_info *info;

	/* FIXME: Use dynamic device id if this is not set. */
	err = xenbus_scanf(XBT_NIL, dev->nodename,
			   "virtual-device", "%i", &vdevice);
	if (err != 1) {
		/* go looking in the extended area instead */
		err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
				   "%i", &vdevice);
		if (err != 1) {
			xenbus_dev_fatal(dev, err, "reading virtual-device");
			return err;
		}
	}

	if (xen_hvm_domain()) {
		char *type;
		int len;
		/* no unplug has been done: do not hook devices != xen vbds */
		if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) {
			int major;

			if (!VDEV_IS_EXTENDED(vdevice))
				major = BLKIF_MAJOR(vdevice);
			else
				major = XENVBD_MAJOR;

			if (major != XENVBD_MAJOR) {
				printk(KERN_INFO
						"%s: HVM does not support vbd %d as xen block device\n",
						__FUNCTION__, vdevice);
				return -ENODEV;
			}
		}
		/* do not create a PV cdrom device if we are an HVM guest */
		type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
		if (IS_ERR(type))
			return -ENODEV;
		if (strncmp(type, "cdrom", 5) == 0) {
			kfree(type);
			return -ENODEV;
		}
		kfree(type);
	}
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}

	mutex_init(&info->mutex);
	spin_lock_init(&info->io_lock);
	info->xbdev = dev;
	info->vdevice = vdevice;
	info->connected = BLKIF_STATE_DISCONNECTED;
	INIT_WORK(&info->work, blkif_restart_queue);

	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.u.rw.id = i+1;
	info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;

	/* Front end dir is a number, which is used as the id. */
	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
	dev_set_drvdata(&dev->dev, info);

	err = talk_to_blkback(dev, info);
	if (err) {
		kfree(info);
		dev_set_drvdata(&dev->dev, NULL);
		return err;
	}

	return 0;
}
static int blkif_recover(struct blkfront_info *info)
{
	int i;
	struct blkif_request *req;
	struct blk_shadow *copy;
	int j;

	/* Stage 1: Make a safe copy of the shadow state. */
	copy = kmalloc(sizeof(info->shadow),
		       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
	if (!copy)
		return -ENOMEM;
	memcpy(copy, info->shadow, sizeof(info->shadow));

	/* Stage 2: Set up free list. */
	memset(&info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.u.rw.id = i+1;
	info->shadow_free = info->ring.req_prod_pvt;
	info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;

	/* Stage 3: Find pending requests and requeue them. */
	for (i = 0; i < BLK_RING_SIZE; i++) {
		/* Not in use? */
		if (!copy[i].request)
			continue;

		/* Grab a request slot and copy shadow state into it. */
		req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
		*req = copy[i].req;

		/* We get a new request id, and must reset the shadow state. */
		req->u.rw.id = get_id_from_freelist(info);
		memcpy(&info->shadow[req->u.rw.id], &copy[i], sizeof(copy[i]));

		if (req->operation != BLKIF_OP_DISCARD) {
		/* Rewrite any grant references invalidated by susp/resume. */
			for (j = 0; j < req->u.rw.nr_segments; j++)
				gnttab_grant_foreign_access_ref(
					req->u.rw.seg[j].gref,
					info->xbdev->otherend_id,
					pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]),
					rq_data_dir(info->shadow[req->u.rw.id].request));
		}
		info->shadow[req->u.rw.id].req = *req;

		info->ring.req_prod_pvt++;
	}

	kfree(copy);

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	spin_lock_irq(&info->io_lock);

	/* Now safe for us to use the shared ring */
	info->connected = BLKIF_STATE_CONNECTED;

	/* Send off requeued requests */
	flush_requests(info);

	/* Kick any other new requests queued since we resumed */
	kick_pending_request_queues(info);

	spin_unlock_irq(&info->io_lock);

	return 0;
}
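/*
 * Recovery works because every in-flight request was captured in the
 * shadow array before the suspend: re-granting the original data pages
 * and re-posting the saved requests makes the reconnect invisible to
 * upper layers, which simply see their I/O complete late.
 */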
/*
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
	int err;

	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);

	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

	err = talk_to_blkback(dev, info);
	if (info->connected == BLKIF_STATE_SUSPENDED && !err)
		err = blkif_recover(info);

	return err;
}
static void
blkfront_closing(struct blkfront_info *info)
{
	struct xenbus_device *xbdev = info->xbdev;
	struct block_device *bdev = NULL;

	mutex_lock(&info->mutex);

	if (xbdev->state == XenbusStateClosing) {
		mutex_unlock(&info->mutex);
		return;
	}

	if (info->gd)
		bdev = bdget_disk(info->gd, 0);

	mutex_unlock(&info->mutex);

	if (!bdev) {
		xenbus_frontend_closed(xbdev);
		return;
	}

	mutex_lock(&bdev->bd_mutex);

	if (bdev->bd_openers) {
		xenbus_dev_error(xbdev, -EBUSY,
				 "Device in use; refusing to close");
		xenbus_switch_state(xbdev, XenbusStateClosing);
	} else {
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(xbdev);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
}
static void blkfront_setup_discard(struct blkfront_info *info)
{
	int err;
	char *type;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	unsigned int discard_secure;

	type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL);
	if (IS_ERR(type))
		return;

	info->feature_secdiscard = 0;
	if (strncmp(type, "phy", 3) == 0) {
		err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			"discard-granularity", "%u", &discard_granularity,
			"discard-alignment", "%u", &discard_alignment,
			NULL);
		if (!err) {
			info->feature_discard = 1;
			info->discard_granularity = discard_granularity;
			info->discard_alignment = discard_alignment;
		}
		err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "discard-secure", "%d", &discard_secure,
			    NULL);
		if (!err)
			info->feature_secdiscard = discard_secure;

	} else if (strncmp(type, "file", 4) == 0)
		info->feature_discard = 1;

	kfree(type);
}
/*
 * Invoked when the backend is finally 'ready' (and has produced the
 * details about the physical device: #sectors, size, etc).
 */
static void blkfront_connect(struct blkfront_info *info)
{
	unsigned long long sectors;
	unsigned long sector_size;
	unsigned int binfo;
	int err;
	int barrier, flush, discard;

	switch (info->connected) {
	case BLKIF_STATE_CONNECTED:
		/*
		 * Potentially, the back-end may be signalling
		 * a capacity change; update the capacity.
		 */
		err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				   "sectors", "%Lu", &sectors);
		if (XENBUS_EXIST_ERR(err))
			return;
		printk(KERN_INFO "Setting capacity to %Lu\n",
		       sectors);
		set_capacity(info->gd, sectors);
		revalidate_disk(info->gd);

		/* fall through */
	case BLKIF_STATE_SUSPENDED:
		return;

	default:
		break;
	}

	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
		__func__, info->xbdev->otherend);

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "sectors", "%llu", &sectors,
			    "info", "%u", &binfo,
			    "sector-size", "%lu", &sector_size,
			    NULL);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err,
				 "reading backend fields at %s",
				 info->xbdev->otherend);
		return;
	}

	info->feature_flush = 0;
	info->flush_op = 0;

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-barrier", "%d", &barrier,
			    NULL);

	/*
	 * If there's no "feature-barrier" defined, then it means
	 * we're dealing with a very old backend which writes
	 * synchronously; nothing to do.
	 *
	 * If there are barriers, then we use flush.
	 */
	if (!err && barrier) {
		info->feature_flush = REQ_FLUSH | REQ_FUA;
		info->flush_op = BLKIF_OP_WRITE_BARRIER;
	}
	/*
	 * And if there is "feature-flush-cache" use that above
	 * barriers.
	 */
	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-flush-cache", "%d", &flush,
			    NULL);

	if (!err && flush) {
		info->feature_flush = REQ_FLUSH;
		info->flush_op = BLKIF_OP_FLUSH_DISKCACHE;
	}

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-discard", "%d", &discard,
			    NULL);

	if (!err && discard)
		blkfront_setup_discard(info);

	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
				 info->xbdev->otherend);
		return;
	}

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Kick pending requests. */
	spin_lock_irq(&info->io_lock);
	info->connected = BLKIF_STATE_CONNECTED;
	kick_pending_request_queues(info);
	spin_unlock_irq(&info->io_lock);

	add_disk(info->gd);

	info->is_ready = 1;
}
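/*
 * Note the ordering of the flush negotiation above: "feature-barrier"
 * is probed first, then "feature-flush-cache" overrides it, so a
 * backend offering both gets the cheaper unordered flush rather than a
 * full barrier.
 */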
/*
 * Callback received when the backend's state changes.
 */
static void blkback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateConnected:
		blkfront_connect(info);
		break;

	case XenbusStateClosing:
		blkfront_closing(info);
		break;
	}
}
static int blkfront_remove(struct xenbus_device *xbdev)
{
	struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
	struct block_device *bdev = NULL;
	struct gendisk *disk;

	dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);

	blkif_free(info, 0);

	mutex_lock(&info->mutex);

	disk = info->gd;
	if (disk)
		bdev = bdget_disk(disk, 0);

	info->xbdev = NULL;
	mutex_unlock(&info->mutex);

	if (!bdev) {
		kfree(info);
		return 0;
	}

	/*
	 * The xbdev was removed before we reached the Closed
	 * state. See if it's safe to remove the disk. If the bdev
	 * isn't closed yet, we let release take care of it.
	 */

	mutex_lock(&bdev->bd_mutex);
	info = disk->private_data;

	dev_warn(disk_to_dev(disk),
		 "%s was hot-unplugged, %d stale handles\n",
		 xbdev->nodename, bdev->bd_openers);

	if (info && !bdev->bd_openers) {
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);

	return 0;
}
static int blkfront_is_ready(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	return info->is_ready && info->xbdev;
}
static int blkif_open(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	struct blkfront_info *info;
	int err = 0;

	mutex_lock(&blkfront_mutex);

	info = disk->private_data;
	if (!info) {
		/* xbdev gone */
		err = -ERESTARTSYS;
		goto out;
	}

	mutex_lock(&info->mutex);

	if (!info->gd)
		/* xbdev is closed */
		err = -ERESTARTSYS;

	mutex_unlock(&info->mutex);

out:
	mutex_unlock(&blkfront_mutex);
	return err;
}
static int blkif_release(struct gendisk *disk, fmode_t mode)
{
	struct blkfront_info *info = disk->private_data;
	struct block_device *bdev;
	struct xenbus_device *xbdev;

	mutex_lock(&blkfront_mutex);

	bdev = bdget_disk(disk, 0);

	if (bdev->bd_openers)
		goto out;

	/*
	 * Check if we have been instructed to close. We will have
	 * deferred this request, because the bdev was still open.
	 */

	mutex_lock(&info->mutex);
	xbdev = info->xbdev;

	if (xbdev && xbdev->state == XenbusStateClosing) {
		/* pending switch to state closed */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(info->xbdev);
	}

	mutex_unlock(&info->mutex);

	if (!xbdev) {
		/* sudden device removal */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

out:
	bdput(bdev);
	mutex_unlock(&blkfront_mutex);
	return 0;
}
static const struct block_device_operations xlvbd_block_fops =
{
	.owner = THIS_MODULE,
	.open = blkif_open,
	.release = blkif_release,
	.getgeo = blkif_getgeo,
	.ioctl = blkif_ioctl,
};
static const struct xenbus_device_id blkfront_ids[] = {
	{ "vbd" },
	{ "" }
};

static DEFINE_XENBUS_DRIVER(blkfront, ,
	.probe = blkfront_probe,
	.remove = blkfront_remove,
	.resume = blkfront_resume,
	.otherend_changed = blkback_changed,
	.is_ready = blkfront_is_ready,
);
static int __init xlblk_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	if (xen_hvm_domain() && !xen_platform_pci_unplug)
		return -ENODEV;

	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
		printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
		       XENVBD_MAJOR, DEV_NAME);
		return -ENODEV;
	}

	ret = xenbus_register_frontend(&blkfront_driver);
	if (ret) {
		unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
		return ret;
	}

	return 0;
}
module_init(xlblk_init);
static void __exit xlblk_exit(void)
{
	xenbus_unregister_driver(&blkfront_driver);
}
module_exit(xlblk_exit);
MODULE_DESCRIPTION("Xen virtual block device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
MODULE_ALIAS("xen:vbd");
MODULE_ALIAS("xenblk");