/*
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/list.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

/*
 * The minimal size of segment supported by the block framework is PAGE_SIZE.
 * When Linux is using a different page size than Xen, it may not be possible
 * to put all the data in a single segment.
 * This can happen when the backend doesn't support indirect descriptors and
 * therefore the maximum amount of data that a request can carry is
 * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE = 44KB
 *
 * Note that we only support one extra request. So the Linux page size
 * should be <= ( 2 * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) =
 * 88KB.
 */
#define HAS_EXTRA_REQ (BLKIF_MAX_SEGMENTS_PER_REQUEST < XEN_PFN_PER_PAGE)
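
/*
 * Worked example: with BLKIF_MAX_SEGMENTS_PER_REQUEST == 11 and 4KB Xen
 * pages, a single request carries at most 11 * 4KB = 44KB.  On an arm64
 * kernel with 64KB Linux pages, XEN_PFN_PER_PAGE is 16 > 11, so
 * HAS_EXTRA_REQ is true and one Linux page needs two ring requests
 * (16 grants split 11 + 5 between them).
 */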
enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
};

struct grant {
	grant_ref_t gref;
	struct page *page;
	struct list_head node;
};

enum blk_req_status {
	REQ_WAITING,
	REQ_DONE,
	REQ_ERROR,
	REQ_EOPNOTSUPP,
};

struct blk_shadow {
	struct blkif_request req;
	struct request *request;
	struct grant **grants_used;
	struct grant **indirect_grants;
	struct scatterlist *sg;
	unsigned int num_sg;
	enum blk_req_status status;

	#define NO_ASSOCIATED_ID ~0UL
	/*
	 * Id of the sibling if we ever need 2 requests when handling a
	 * block I/O request
	 */
	unsigned long associated_id;
};

struct blkif_req {
	int	error;
};

static inline struct blkif_req *blkif_req(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;

/*
 * Maximum number of segments in indirect requests, the actual value used by
 * the frontend driver is the minimum of this value and the value provided
 * by the backend driver.
 */

static unsigned int xen_blkif_max_segments = 32;
module_param_named(max_indirect_segments, xen_blkif_max_segments, uint,
		   S_IRUGO);
MODULE_PARM_DESC(max_indirect_segments,
		 "Maximum amount of segments in indirect requests (default is 32)");

static unsigned int xen_blkif_max_queues = 4;
module_param_named(max_queues, xen_blkif_max_queues, uint, S_IRUGO);
MODULE_PARM_DESC(max_queues, "Maximum number of hardware queues/rings used per virtual disk");

/*
 * Maximum order of pages to be used for the shared ring between front and
 * backend, 4KB page granularity is used.
 */
static unsigned int xen_blkif_max_ring_order;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");

#define BLK_RING_SIZE(info)	\
	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)

#define BLK_MAX_RING_SIZE	\
	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * XENBUS_MAX_RING_GRANTS)
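
/*
 * For example, with a single 4KB ring page each blkif request slot is a
 * 112-byte union, so __CONST_RING_SIZE rounds the usable entry count
 * down to a power of two and BLK_RING_SIZE() == 32 slots.  The shadow
 * array below is sized with BLK_MAX_RING_SIZE, i.e. for the largest
 * ring (XENBUS_MAX_RING_GRANTS pages) that can ever be negotiated.
 */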

/*
 * ring-ref%u i=(-1UL) would take 11 characters + 'ring-ref' is 8, so 19
 * characters are enough. Define to 20 to keep consistent with backend.
 */
#define RINGREF_NAME_LEN (20)
/*
 * queue-%u would take 7 + 10(UINT_MAX) = 17 characters.
 */
#define QUEUE_NAME_LEN (17)

/*
 * Per-ring info.
 * Every blkfront device can associate with one or more blkfront_ring_info,
 * depending on how many hardware queues/rings are to be used.
 */
struct blkfront_ring_info {
	/* Lock to protect data in every ring buffer. */
	spinlock_t ring_lock;
	struct blkif_front_ring ring;
	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
	unsigned int evtchn, irq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct blk_shadow shadow[BLK_MAX_RING_SIZE];
	struct list_head indirect_pages;
	struct list_head grants;
	unsigned int persistent_gnts_c;
	unsigned long shadow_free;
	struct blkfront_info *dev_info;
};

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
	struct mutex mutex;
	struct xenbus_device *xbdev;
	struct gendisk *gd;
	u16 sector_size;
	unsigned int physical_sector_size;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	/* Number of pages per ring buffer. */
	unsigned int nr_ring_pages;
	struct request_queue *rq;
	unsigned int feature_flush:1;
	unsigned int feature_fua:1;
	unsigned int feature_discard:1;
	unsigned int feature_secdiscard:1;
	unsigned int feature_persistent:1;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	/* Number of 4KB segments handled */
	unsigned int max_indirect_segments;
	struct blk_mq_tag_set tag_set;
	struct blkfront_ring_info *rinfo;
	unsigned int nr_rings;
	/* Save incomplete reqs and bios for migration. */
	struct list_head requests;
	struct bio_list bio_list;
};

static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);

#define GRANT_INVALID_REF	0

#define PARTS_PER_DISK		16
#define PARTS_PER_EXT_DISK	256

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define EXT_SHIFT 28
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
#define EMULATED_HD_DISK_MINOR_OFFSET (0)
#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
#define EMULATED_SD_DISK_MINOR_OFFSET (0)
#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)

#define DEV_NAME	"xvd"	/* name in /dev */

/*
 * Grants are always the same size as a Xen page (i.e 4KB).
 * A physical segment is always the same size as a Linux page.
 * Number of grants per physical segment
 */
#define GRANTS_PER_PSEG	(PAGE_SIZE / XEN_PAGE_SIZE)

#define GRANTS_PER_INDIRECT_FRAME \
	(XEN_PAGE_SIZE / sizeof(struct blkif_request_segment))

#define PSEGS_PER_INDIRECT_FRAME	\
	(GRANTS_PER_INDIRECT_FRAME / GRANTS_PER_PSEG)

#define INDIRECT_GREFS(_grants)		\
	DIV_ROUND_UP(_grants, GRANTS_PER_INDIRECT_FRAME)

#define GREFS(_psegs)	((_psegs) * GRANTS_PER_PSEG)
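
/*
 * Worked example: each blkif_request_segment is 8 bytes, so one 4KB
 * indirect frame holds GRANTS_PER_INDIRECT_FRAME == 512 grants.  On a
 * 64KB-page kernel GRANTS_PER_PSEG == 16, so a single Linux segment
 * consumes 16 grants and INDIRECT_GREFS(16) == 1 indirect frame.
 */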

static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
static void blkfront_gather_backend_features(struct blkfront_info *info);
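
/*
 * The shadow free list is threaded through the otherwise unused
 * req.u.rw.id fields: entry i stores the index of the next free entry
 * and shadow_free points at the head.  talk_to_blkback() seeds the
 * chain with shadow[j].req.u.rw.id = j + 1 and terminates it with the
 * 0x0fffffff sentinel, so allocation and release below are O(1).
 */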

static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
{
	unsigned long free = rinfo->shadow_free;

	BUG_ON(free >= BLK_RING_SIZE(rinfo->dev_info));
	rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id;
	rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
	return free;
}

static int add_id_to_freelist(struct blkfront_ring_info *rinfo,
			      unsigned long id)
{
	if (rinfo->shadow[id].req.u.rw.id != id)
		return -EINVAL;
	if (rinfo->shadow[id].request == NULL)
		return -EINVAL;
	rinfo->shadow[id].req.u.rw.id  = rinfo->shadow_free;
	rinfo->shadow[id].request = NULL;
	rinfo->shadow_free = id;
	return 0;
}

static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
{
	struct blkfront_info *info = rinfo->dev_info;
	struct page *granted_page;
	struct grant *gnt_list_entry, *n;
	int i = 0;

	while (i < num) {
		gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
		if (!gnt_list_entry)
			goto out_of_memory;

		if (info->feature_persistent) {
			granted_page = alloc_page(GFP_NOIO);
			if (!granted_page) {
				kfree(gnt_list_entry);
				goto out_of_memory;
			}
			gnt_list_entry->page = granted_page;
		}

		gnt_list_entry->gref = GRANT_INVALID_REF;
		list_add(&gnt_list_entry->node, &rinfo->grants);
		i++;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(gnt_list_entry, n,
				 &rinfo->grants, node) {
		list_del(&gnt_list_entry->node);
		if (info->feature_persistent)
			__free_page(gnt_list_entry->page);
		kfree(gnt_list_entry);
	}
	return -ENOMEM;
}

static struct grant *get_free_grant(struct blkfront_ring_info *rinfo)
{
	struct grant *gnt_list_entry;

	BUG_ON(list_empty(&rinfo->grants));
	gnt_list_entry = list_first_entry(&rinfo->grants, struct grant,
					  node);
	list_del(&gnt_list_entry->node);

	if (gnt_list_entry->gref != GRANT_INVALID_REF)
		rinfo->persistent_gnts_c--;

	return gnt_list_entry;
}

static inline void grant_foreign_access(const struct grant *gnt_list_entry,
					const struct blkfront_info *info)
{
	gnttab_page_grant_foreign_access_ref_one(gnt_list_entry->gref,
						 info->xbdev->otherend_id,
						 gnt_list_entry->page,
						 0);
}

static struct grant *get_grant(grant_ref_t *gref_head,
			       unsigned long gfn,
			       struct blkfront_ring_info *rinfo)
{
	struct grant *gnt_list_entry = get_free_grant(rinfo);
	struct blkfront_info *info = rinfo->dev_info;

	if (gnt_list_entry->gref != GRANT_INVALID_REF)
		return gnt_list_entry;

	/* Assign a gref to this page */
	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
	BUG_ON(gnt_list_entry->gref == -ENOSPC);
	if (info->feature_persistent)
		grant_foreign_access(gnt_list_entry, info);
	else {
		/* Grant access to the GFN passed by the caller */
		gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
						info->xbdev->otherend_id,
						gfn, 0);
	}

	return gnt_list_entry;
}
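
/*
 * A grant coming off the free list with a valid gref is a persistent
 * grant left mapped by the backend; it can be reused as-is with no new
 * grant-table operation.  Only entries with GRANT_INVALID_REF need a
 * fresh reference claimed from the gref_head pool that was reserved in
 * blkif_queue_rw_req().
 */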

static struct grant *get_indirect_grant(grant_ref_t *gref_head,
					struct blkfront_ring_info *rinfo)
{
	struct grant *gnt_list_entry = get_free_grant(rinfo);
	struct blkfront_info *info = rinfo->dev_info;

	if (gnt_list_entry->gref != GRANT_INVALID_REF)
		return gnt_list_entry;

	/* Assign a gref to this page */
	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
	BUG_ON(gnt_list_entry->gref == -ENOSPC);
	if (!info->feature_persistent) {
		struct page *indirect_page;

		/* Fetch a pre-allocated page to use for indirect grefs */
		BUG_ON(list_empty(&rinfo->indirect_pages));
		indirect_page = list_first_entry(&rinfo->indirect_pages,
						 struct page, lru);
		list_del(&indirect_page->lru);
		gnt_list_entry->page = indirect_page;
	}
	grant_foreign_access(gnt_list_entry, info);

	return gnt_list_entry;
}

static const char *op_name(int op)
{
	static const char *const names[] = {
		[BLKIF_OP_READ] = "read",
		[BLKIF_OP_WRITE] = "write",
		[BLKIF_OP_WRITE_BARRIER] = "barrier",
		[BLKIF_OP_FLUSH_DISKCACHE] = "flush",
		[BLKIF_OP_DISCARD] = "discard" };

	if (op < 0 || op >= ARRAY_SIZE(names))
		return "unknown";

	return names[op] ?: "reserved";
}

static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;
	int rc;

	if (end > nr_minors) {
		unsigned long *bitmap, *old;

		bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
				 GFP_KERNEL);
		if (bitmap == NULL)
			return -ENOMEM;

		spin_lock(&minor_lock);
		if (end > nr_minors) {
			old = minors;
			memcpy(bitmap, minors,
			       BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
			minors = bitmap;
			nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
		} else
			old = bitmap;
		spin_unlock(&minor_lock);
		kfree(old);
	}

	spin_lock(&minor_lock);
	if (find_next_bit(minors, end, minor) >= end) {
		bitmap_set(minors, minor, nr);
		rc = 0;
	} else
		rc = -EBUSY;
	spin_unlock(&minor_lock);

	return rc;
}

static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;

	BUG_ON(end > nr_minors);
	spin_lock(&minor_lock);
	bitmap_clear(minors, minor, nr);
	spin_unlock(&minor_lock);
}

static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)arg;
	schedule_work(&rinfo->work);
}

static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	/* We don't have real geometry info, but let's at least return
	   values consistent with the size of the device */
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}

static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned command, unsigned long argument)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;
	int i;

	dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
		command, (long)argument);

	switch (command) {
	case CDROMMULTISESSION:
		dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;
	case CDROM_GET_CAPABILITY: {
		struct gendisk *gd = info->gd;
		if (gd->flags & GENHD_FL_CD)
			return 0;
		return -EINVAL;
	}
	default:
		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
		  command);*/
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}

static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
					    struct request *req,
					    struct blkif_request **ring_req)
{
	unsigned long id;

	*ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
	rinfo->ring.req_prod_pvt++;

	id = get_id_from_freelist(rinfo);
	rinfo->shadow[id].request = req;
	rinfo->shadow[id].status = REQ_WAITING;
	rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;

	(*ring_req)->u.rw.id = id;

	return id;
}

static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
{
	struct blkfront_info *info = rinfo->dev_info;
	struct blkif_request *ring_req;
	unsigned long id;

	/* Fill out a communications ring structure. */
	id = blkif_ring_get_request(rinfo, req, &ring_req);

	ring_req->operation = BLKIF_OP_DISCARD;
	ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
	ring_req->u.discard.id = id;
	ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
	if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)
		ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
	else
		ring_req->u.discard.flag = 0;

	/* Keep a private copy so we can reissue requests when recovering. */
	rinfo->shadow[id].req = *ring_req;

	return 0;
}

struct setup_rw_req {
	unsigned int grant_idx;
	struct blkif_request_segment *segments;
	struct blkfront_ring_info *rinfo;
	struct blkif_request *ring_req;
	grant_ref_t gref_head;
	unsigned int id;
	/* Only used when persistent grant is used and it's a read request */
	bool need_copy;
	unsigned int bvec_off;
	char *bvec_data;

	bool require_extra_req;
	struct blkif_request *extra_ring_req;
};

static void blkif_setup_rw_req_grant(unsigned long gfn, unsigned int offset,
				     unsigned int len, void *data)
{
	struct setup_rw_req *setup = data;
	int n, ref;
	struct grant *gnt_list_entry;
	unsigned int fsect, lsect;
	/* Convenient aliases */
	unsigned int grant_idx = setup->grant_idx;
	struct blkif_request *ring_req = setup->ring_req;
	struct blkfront_ring_info *rinfo = setup->rinfo;
	/*
	 * We always use the shadow of the first request to store the list
	 * of grants associated to the block I/O request. This makes the
	 * completion easier to handle even if the block I/O request is
	 * split.
	 */
	struct blk_shadow *shadow = &rinfo->shadow[setup->id];

	if (unlikely(setup->require_extra_req &&
		     grant_idx >= BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		/*
		 * We are using the second request, setup grant_idx
		 * to be the index of the segment array.
		 */
		grant_idx -= BLKIF_MAX_SEGMENTS_PER_REQUEST;
		ring_req = setup->extra_ring_req;
	}

	if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
	    (grant_idx % GRANTS_PER_INDIRECT_FRAME == 0)) {
		if (setup->segments)
			kunmap_atomic(setup->segments);

		n = grant_idx / GRANTS_PER_INDIRECT_FRAME;
		gnt_list_entry = get_indirect_grant(&setup->gref_head, rinfo);
		shadow->indirect_grants[n] = gnt_list_entry;
		setup->segments = kmap_atomic(gnt_list_entry->page);
		ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
	}

	gnt_list_entry = get_grant(&setup->gref_head, gfn, rinfo);
	ref = gnt_list_entry->gref;
	/*
	 * All the grants are stored in the shadow of the first
	 * request. Therefore we have to use the global index.
	 */
	shadow->grants_used[setup->grant_idx] = gnt_list_entry;

	if (setup->need_copy) {
		void *shared_data;

		shared_data = kmap_atomic(gnt_list_entry->page);
		/*
		 * this does not wipe data stored outside the
		 * range sg->offset..sg->offset+sg->length.
		 * Therefore, blkback *could* see data from
		 * previous requests. This is OK as long as
		 * persistent grants are shared with just one
		 * domain. It may need refactoring if this
		 * changes
		 */
		memcpy(shared_data + offset,
		       setup->bvec_data + setup->bvec_off,
		       len);

		kunmap_atomic(shared_data);
		setup->bvec_off += len;
	}

	fsect = offset >> 9;
	lsect = fsect + (len >> 9) - 1;
	if (ring_req->operation != BLKIF_OP_INDIRECT) {
		ring_req->u.rw.seg[grant_idx] =
			(struct blkif_request_segment) {
				.gref       = ref,
				.first_sect = fsect,
				.last_sect  = lsect };
	} else {
		setup->segments[grant_idx % GRANTS_PER_INDIRECT_FRAME] =
			(struct blkif_request_segment) {
				.gref       = ref,
				.first_sect = fsect,
				.last_sect  = lsect };
	}

	(setup->grant_idx)++;
}

static void blkif_setup_extra_req(struct blkif_request *first,
				  struct blkif_request *second)
{
	uint16_t nr_segments = first->u.rw.nr_segments;

	/*
	 * The second request is only present when the first request uses
	 * all its segments. It is always a continuation of the first one.
	 */
	first->u.rw.nr_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;

	second->u.rw.nr_segments = nr_segments - BLKIF_MAX_SEGMENTS_PER_REQUEST;
	second->u.rw.sector_number = first->u.rw.sector_number +
		(BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) / 512;

	second->u.rw.handle = first->u.rw.handle;
	second->operation = first->operation;
}
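
/*
 * Worked example: with 11 maximum segments per request and 4KB Xen
 * pages, the second request starts (11 * 4096) / 512 = 88 sectors
 * after the first one.
 */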

static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
{
	struct blkfront_info *info = rinfo->dev_info;
	struct blkif_request *ring_req, *extra_ring_req = NULL;
	unsigned long id, extra_id = NO_ASSOCIATED_ID;
	bool require_extra_req = false;
	int i;
	struct setup_rw_req setup = {
		.grant_idx = 0,
		.segments = NULL,
		.rinfo = rinfo,
		.need_copy = rq_data_dir(req) && info->feature_persistent,
	};

	/*
	 * Used to store if we are able to queue the request by just using
	 * existing persistent grants, or if we have to get new grants,
	 * as there are not sufficiently many free.
	 */
	struct scatterlist *sg;
	int num_sg, max_grefs, num_grant;

	max_grefs = req->nr_phys_segments * GRANTS_PER_PSEG;
	if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
		/*
		 * If we are using indirect segments we need to account
		 * for the indirect grefs used in the request.
		 */
		max_grefs += INDIRECT_GREFS(max_grefs);

	/*
	 * We have to reserve 'max_grefs' grants because persistent
	 * grants are shared by all rings.
	 */
	if (max_grefs > 0)
		if (gnttab_alloc_grant_references(max_grefs, &setup.gref_head) < 0) {
			gnttab_request_free_callback(
				&rinfo->callback,
				blkif_restart_queue_callback,
				rinfo,
				max_grefs);
			return 1;
		}

	/* Fill out a communications ring structure. */
	id = blkif_ring_get_request(rinfo, req, &ring_req);

	num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
	num_grant = 0;
	/* Calculate the number of grants used */
	for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
		num_grant += gnttab_count_grant(sg->offset, sg->length);

	require_extra_req = info->max_indirect_segments == 0 &&
		num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST;
	BUG_ON(!HAS_EXTRA_REQ && require_extra_req);

	rinfo->shadow[id].num_sg = num_sg;
	if (num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST &&
	    likely(!require_extra_req)) {
		/*
		 * The indirect operation can only be a BLKIF_OP_READ or
		 * BLKIF_OP_WRITE
		 */
		BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
		ring_req->operation = BLKIF_OP_INDIRECT;
		ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
			BLKIF_OP_WRITE : BLKIF_OP_READ;
		ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
		ring_req->u.indirect.handle = info->handle;
		ring_req->u.indirect.nr_segments = num_grant;
	} else {
		ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
		ring_req->u.rw.handle = info->handle;
		ring_req->operation = rq_data_dir(req) ?
			BLKIF_OP_WRITE : BLKIF_OP_READ;
		if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
			/*
			 * Ideally we can do an unordered flush-to-disk.
			 * In case the backend only supports barriers, use that.
			 * A barrier request is a superset of FUA, so we can
			 * implement it the same way.  (It's also a FLUSH+FUA,
			 * since it is guaranteed ordered WRT previous writes.)
			 */
			if (info->feature_flush && info->feature_fua)
				ring_req->operation =
					BLKIF_OP_WRITE_BARRIER;
			else if (info->feature_flush)
				ring_req->operation =
					BLKIF_OP_FLUSH_DISKCACHE;
			else
				ring_req->operation = 0;
		}
		ring_req->u.rw.nr_segments = num_grant;
		if (unlikely(require_extra_req)) {
			extra_id = blkif_ring_get_request(rinfo, req,
							  &extra_ring_req);
			/*
			 * Only the first request contains the scatter-gather
			 * list.
			 */
			rinfo->shadow[extra_id].num_sg = 0;

			blkif_setup_extra_req(ring_req, extra_ring_req);

			/* Link the 2 requests together */
			rinfo->shadow[extra_id].associated_id = id;
			rinfo->shadow[id].associated_id = extra_id;
		}
	}

	setup.ring_req = ring_req;
	setup.id = id;

	setup.require_extra_req = require_extra_req;
	if (unlikely(require_extra_req))
		setup.extra_ring_req = extra_ring_req;

	for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) {
		BUG_ON(sg->offset + sg->length > PAGE_SIZE);

		if (setup.need_copy) {
			setup.bvec_off = sg->offset;
			setup.bvec_data = kmap_atomic(sg_page(sg));
		}

		gnttab_foreach_grant_in_range(sg_page(sg),
					      sg->offset,
					      sg->length,
					      blkif_setup_rw_req_grant,
					      &setup);

		if (setup.need_copy)
			kunmap_atomic(setup.bvec_data);
	}
	if (setup.segments)
		kunmap_atomic(setup.segments);

	/* Keep a private copy so we can reissue requests when recovering. */
	rinfo->shadow[id].req = *ring_req;
	if (unlikely(require_extra_req))
		rinfo->shadow[extra_id].req = *extra_ring_req;

	if (max_grefs > 0)
		gnttab_free_grant_references(setup.gref_head);

	return 0;
}
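
/*
 * Grant accounting example: on a 4KB-page kernel with the default 32
 * indirect segments, a fully merged request maps up to 32 grants plus
 * INDIRECT_GREFS(32) == 1 grant for the indirect frame itself, so 33
 * references are reserved up front.
 */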

/*
 * Generate a Xen blkfront IO request from a blk layer request.  Reads
 * and writes are handled as expected.
 *
 * @req: a request struct
 */
static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo)
{
	if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	if (unlikely(req_op(req) == REQ_OP_DISCARD ||
		     req_op(req) == REQ_OP_SECURE_ERASE))
		return blkif_queue_discard_req(req, rinfo);
	else
		return blkif_queue_rw_req(req, rinfo);
}

static inline void flush_requests(struct blkfront_ring_info *rinfo)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);

	if (notify)
		notify_remote_via_irq(rinfo->irq);
}

static inline bool blkif_request_flush_invalid(struct request *req,
					       struct blkfront_info *info)
{
	return (blk_rq_is_passthrough(req) ||
		((req_op(req) == REQ_OP_FLUSH) &&
		 !info->feature_flush) ||
		((req->cmd_flags & REQ_FUA) &&
		 !info->feature_fua));
}

static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *qd)
{
	unsigned long flags;
	int qid = hctx->queue_num;
	struct blkfront_info *info = hctx->queue->queuedata;
	struct blkfront_ring_info *rinfo = NULL;

	BUG_ON(info->nr_rings <= qid);
	rinfo = &info->rinfo[qid];
	blk_mq_start_request(qd->rq);
	spin_lock_irqsave(&rinfo->ring_lock, flags);
	if (RING_FULL(&rinfo->ring))
		goto out_busy;

	if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
		goto out_err;

	if (blkif_queue_request(qd->rq, rinfo))
		goto out_busy;

	flush_requests(rinfo);
	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
	return BLK_MQ_RQ_QUEUE_OK;

out_err:
	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
	return BLK_MQ_RQ_QUEUE_ERROR;

out_busy:
	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
	blk_mq_stop_hw_queue(hctx);
	return BLK_MQ_RQ_QUEUE_BUSY;
}
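
/*
 * The three return values map to distinct blk-mq behaviours:
 * BLK_MQ_RQ_QUEUE_OK means the request was placed on the ring,
 * BLK_MQ_RQ_QUEUE_ERROR fails it immediately (invalid flush/FUA for
 * this backend), and BLK_MQ_RQ_QUEUE_BUSY re-queues it once the
 * stopped hardware queue is restarted by
 * kick_pending_request_queues() or the grant-table free callback.
 */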

static void blkif_complete_rq(struct request *rq)
{
	blk_mq_end_request(rq, blkif_req(rq)->error);
}

static const struct blk_mq_ops blkfront_mq_ops = {
	.queue_rq = blkif_queue_rq,
	.complete = blkif_complete_rq,
};

static void blkif_set_queue_limits(struct blkfront_info *info)
{
	struct request_queue *rq = info->rq;
	struct gendisk *gd = info->gd;
	unsigned int segments = info->max_indirect_segments ? :
				BLKIF_MAX_SEGMENTS_PER_REQUEST;

	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);

	if (info->feature_discard) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
		blk_queue_max_discard_sectors(rq, get_capacity(gd));
		rq->limits.discard_granularity = info->discard_granularity;
		rq->limits.discard_alignment = info->discard_alignment;
		if (info->feature_secdiscard)
			queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
	}

	/* Hard sector size and max sectors impersonate the equiv. hardware. */
	blk_queue_logical_block_size(rq, info->sector_size);
	blk_queue_physical_block_size(rq, info->physical_sector_size);
	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
	blk_queue_max_segment_size(rq, PAGE_SIZE);

	/* Ensure a merged request will fit in a single I/O ring slot. */
	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);

	/* Make sure buffer addresses are sector-aligned. */
	blk_queue_dma_alignment(rq, 511);

	/* Make sure we don't use bounce buffers. */
	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
}
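
/*
 * Example of the resulting limits: with 32 indirect segments and 4KB
 * Xen pages, max_hw_sectors is (32 * 4096) / 512 = 256 sectors, i.e.
 * 128KB per request; without indirect descriptors this drops to
 * (11 * 4096) / 512 = 88 sectors (44KB).
 */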

static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
				unsigned int physical_sector_size)
{
	struct request_queue *rq;
	struct blkfront_info *info = gd->private_data;

	memset(&info->tag_set, 0, sizeof(info->tag_set));
	info->tag_set.ops = &blkfront_mq_ops;
	info->tag_set.nr_hw_queues = info->nr_rings;
	if (HAS_EXTRA_REQ && info->max_indirect_segments == 0) {
		/*
		 * When indirect descriptors are not supported, the I/O
		 * request will be split into multiple requests on the ring.
		 * To avoid problems when sending the requests, halve the
		 * depth of the queue.
		 */
		info->tag_set.queue_depth = BLK_RING_SIZE(info) / 2;
	} else
		info->tag_set.queue_depth = BLK_RING_SIZE(info);
	info->tag_set.numa_node = NUMA_NO_NODE;
	info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	info->tag_set.cmd_size = sizeof(struct blkif_req);
	info->tag_set.driver_data = info;

	if (blk_mq_alloc_tag_set(&info->tag_set))
		return -EINVAL;
	rq = blk_mq_init_queue(&info->tag_set);
	if (IS_ERR(rq)) {
		blk_mq_free_tag_set(&info->tag_set);
		return PTR_ERR(rq);
	}

	rq->queuedata = info;
	info->rq = gd->queue = rq;
	info->gd = gd;
	info->sector_size = sector_size;
	info->physical_sector_size = physical_sector_size;
	blkif_set_queue_limits(info);

	return 0;
}

static const char *flush_info(struct blkfront_info *info)
{
	if (info->feature_flush && info->feature_fua)
		return "barrier: enabled;";
	else if (info->feature_flush)
		return "flush diskcache: enabled;";
	else
		return "barrier or flush: disabled;";
}

static void xlvbd_flush(struct blkfront_info *info)
{
	blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
			      info->feature_fua ? true : false);
	pr_info("blkfront: %s: %s %s %s %s %s\n",
		info->gd->disk_name, flush_info(info),
		"persistent grants:", info->feature_persistent ?
		"enabled;" : "disabled;", "indirect descriptors:",
		info->max_indirect_segments ? "enabled;" : "disabled;");
}

static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
{
	int major;

	major = BLKIF_MAJOR(vdevice);
	*minor = BLKIF_MINOR(vdevice);
	switch (major) {
	case XEN_IDE0_MAJOR:
		*offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
		*minor = ((*minor / 64) * PARTS_PER_DISK) +
			EMULATED_HD_DISK_MINOR_OFFSET;
		break;
	case XEN_IDE1_MAJOR:
		*offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
		*minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
			EMULATED_HD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK0_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK1_MAJOR:
	case XEN_SCSI_DISK2_MAJOR:
	case XEN_SCSI_DISK3_MAJOR:
	case XEN_SCSI_DISK4_MAJOR:
	case XEN_SCSI_DISK5_MAJOR:
	case XEN_SCSI_DISK6_MAJOR:
	case XEN_SCSI_DISK7_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) +
			((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
			EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor +
			((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
			EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK8_MAJOR:
	case XEN_SCSI_DISK9_MAJOR:
	case XEN_SCSI_DISK10_MAJOR:
	case XEN_SCSI_DISK11_MAJOR:
	case XEN_SCSI_DISK12_MAJOR:
	case XEN_SCSI_DISK13_MAJOR:
	case XEN_SCSI_DISK14_MAJOR:
	case XEN_SCSI_DISK15_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) +
			((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
			EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor +
			((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
			EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XENVBD_MAJOR:
		*offset = *minor / PARTS_PER_DISK;
		break;
	default:
		printk(KERN_WARNING "blkfront: your disk configuration is "
		       "incorrect, please use an xvd device instead\n");
		return -ENODEV;
	}
	return 0;
}

static char *encode_disk_name(char *ptr, unsigned int n)
{
	if (n >= 26)
		ptr = encode_disk_name(ptr, n / 26 - 1);
	*ptr = 'a' + n % 26;
	return ptr + 1;
}
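
/*
 * Example encodings (the "xvd" prefix is added by the caller):
 * n = 0 -> "a" (xvda), n = 25 -> "z" (xvdz), n = 26 -> "aa" (xvdaa),
 * following the same scheme as sd(4) disk naming.
 */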

static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
			       struct blkfront_info *info,
			       u16 vdisk_info, u16 sector_size,
			       unsigned int physical_sector_size)
{
	struct gendisk *gd;
	int nr_minors = 1;
	int err;
	unsigned int offset;
	int minor;
	int nr_parts;
	char *ptr;

	BUG_ON(info->gd != NULL);
	BUG_ON(info->rq != NULL);

	if ((info->vdevice>>EXT_SHIFT) > 1) {
		/* this is above the extended range; something is wrong */
		printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
		return -ENODEV;
	}

	if (!VDEV_IS_EXTENDED(info->vdevice)) {
		err = xen_translate_vdev(info->vdevice, &minor, &offset);
		if (err)
			return err;
		nr_parts = PARTS_PER_DISK;
	} else {
		minor = BLKIF_MINOR_EXT(info->vdevice);
		nr_parts = PARTS_PER_EXT_DISK;
		offset = minor / nr_parts;
		if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
			printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
					"emulated IDE disks,\n\t choose an xvd device name"
					"from xvde on\n", info->vdevice);
	}
	if (minor >> MINORBITS) {
		pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
			info->vdevice, minor);
		return -ENODEV;
	}

	if ((minor % nr_parts) == 0)
		nr_minors = nr_parts;

	err = xlbd_reserve_minors(minor, nr_minors);
	if (err)
		goto out;
	err = -ENODEV;

	gd = alloc_disk(nr_minors);
	if (gd == NULL)
		goto release;

	strcpy(gd->disk_name, DEV_NAME);
	ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
	BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
	if (nr_minors > 1)
		*ptr = 0;
	else
		snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
			 "%d", minor & (nr_parts - 1));

	gd->major = XENVBD_MAJOR;
	gd->first_minor = minor;
	gd->fops = &xlvbd_block_fops;
	gd->private_data = info;
	set_capacity(gd, capacity);

	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
		del_gendisk(gd);
		goto release;
	}

	xlvbd_flush(info);

	if (vdisk_info & VDISK_READONLY)
		set_disk_ro(gd, 1);

	if (vdisk_info & VDISK_REMOVABLE)
		gd->flags |= GENHD_FL_REMOVABLE;

	if (vdisk_info & VDISK_CDROM)
		gd->flags |= GENHD_FL_CD;

	return 0;

 release:
	xlbd_release_minors(minor, nr_minors);
 out:
	return err;
}

*info
)
1192 unsigned int minor
, nr_minors
, i
;
1194 if (info
->rq
== NULL
)
1197 /* No more blkif_request(). */
1198 blk_mq_stop_hw_queues(info
->rq
);
1200 for (i
= 0; i
< info
->nr_rings
; i
++) {
1201 struct blkfront_ring_info
*rinfo
= &info
->rinfo
[i
];
1203 /* No more gnttab callback work. */
1204 gnttab_cancel_free_callback(&rinfo
->callback
);
1206 /* Flush gnttab callback work. Must be done with no locks held. */
1207 flush_work(&rinfo
->work
);
1210 del_gendisk(info
->gd
);
1212 minor
= info
->gd
->first_minor
;
1213 nr_minors
= info
->gd
->minors
;
1214 xlbd_release_minors(minor
, nr_minors
);
1216 blk_cleanup_queue(info
->rq
);
1217 blk_mq_free_tag_set(&info
->tag_set
);
/* Caller must already hold rinfo->ring_lock. */
static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo)
{
	if (!RING_FULL(&rinfo->ring))
		blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true);
}

static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
{
	unsigned long flags;

	spin_lock_irqsave(&rinfo->ring_lock, flags);
	kick_pending_request_queues_locked(rinfo);
	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
}

static void blkif_restart_queue(struct work_struct *work)
{
	struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work);

	if (rinfo->dev_info->connected == BLKIF_STATE_CONNECTED)
		kick_pending_request_queues(rinfo);
}

static void blkif_free_ring(struct blkfront_ring_info *rinfo)
{
	struct grant *persistent_gnt, *n;
	struct blkfront_info *info = rinfo->dev_info;
	int i, j, segs;

	/*
	 * Remove indirect pages, this only happens when using indirect
	 * descriptors but not persistent grants
	 */
	if (!list_empty(&rinfo->indirect_pages)) {
		struct page *indirect_page, *n;

		BUG_ON(info->feature_persistent);
		list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
			list_del(&indirect_page->lru);
			__free_page(indirect_page);
		}
	}

	/* Remove all persistent grants. */
	if (!list_empty(&rinfo->grants)) {
		list_for_each_entry_safe(persistent_gnt, n,
					 &rinfo->grants, node) {
			list_del(&persistent_gnt->node);
			if (persistent_gnt->gref != GRANT_INVALID_REF) {
				gnttab_end_foreign_access(persistent_gnt->gref,
							  0, 0UL);
				rinfo->persistent_gnts_c--;
			}
			if (info->feature_persistent)
				__free_page(persistent_gnt->page);
			kfree(persistent_gnt);
		}
	}
	BUG_ON(rinfo->persistent_gnts_c != 0);

	for (i = 0; i < BLK_RING_SIZE(info); i++) {
		/*
		 * Clear persistent grants present in requests already
		 * on the shared ring
		 */
		if (!rinfo->shadow[i].request)
			goto free_shadow;

		segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
		       rinfo->shadow[i].req.u.indirect.nr_segments :
		       rinfo->shadow[i].req.u.rw.nr_segments;
		for (j = 0; j < segs; j++) {
			persistent_gnt = rinfo->shadow[i].grants_used[j];
			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
			if (info->feature_persistent)
				__free_page(persistent_gnt->page);
			kfree(persistent_gnt);
		}

		if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT)
			/*
			 * If this is not an indirect operation don't try to
			 * free indirect segments
			 */
			goto free_shadow;

		for (j = 0; j < INDIRECT_GREFS(segs); j++) {
			persistent_gnt = rinfo->shadow[i].indirect_grants[j];
			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
			__free_page(persistent_gnt->page);
			kfree(persistent_gnt);
		}

free_shadow:
		kfree(rinfo->shadow[i].grants_used);
		rinfo->shadow[i].grants_used = NULL;
		kfree(rinfo->shadow[i].indirect_grants);
		rinfo->shadow[i].indirect_grants = NULL;
		kfree(rinfo->shadow[i].sg);
		rinfo->shadow[i].sg = NULL;
	}

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&rinfo->callback);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work(&rinfo->work);

	/* Free resources associated with old device channel. */
	for (i = 0; i < info->nr_ring_pages; i++) {
		if (rinfo->ring_ref[i] != GRANT_INVALID_REF) {
			gnttab_end_foreign_access(rinfo->ring_ref[i], 0, 0);
			rinfo->ring_ref[i] = GRANT_INVALID_REF;
		}
	}
	free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
	rinfo->ring.sring = NULL;

	if (rinfo->irq)
		unbind_from_irqhandler(rinfo->irq, rinfo);
	rinfo->evtchn = rinfo->irq = 0;
}

static void blkif_free(struct blkfront_info *info, int suspend)
{
	unsigned int i;

	/* Prevent new requests being issued until we fix things up. */
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	/* No more blkif_request(). */
	if (info->rq)
		blk_mq_stop_hw_queues(info->rq);

	for (i = 0; i < info->nr_rings; i++)
		blkif_free_ring(&info->rinfo[i]);
}

struct copy_from_grant {
	const struct blk_shadow *s;
	unsigned int grant_idx;
	unsigned int bvec_offset;
	char *bvec_data;
};

static void blkif_copy_from_grant(unsigned long gfn, unsigned int offset,
				  unsigned int len, void *data)
{
	struct copy_from_grant *info = data;
	char *shared_data;
	/* Convenient aliases */
	const struct blk_shadow *s = info->s;

	shared_data = kmap_atomic(s->grants_used[info->grant_idx]->page);

	memcpy(info->bvec_data + info->bvec_offset,
	       shared_data + offset, len);

	info->bvec_offset += len;
	info->grant_idx++;

	kunmap_atomic(shared_data);
}

static enum blk_req_status blkif_rsp_to_req_status(int rsp)
{
	switch (rsp)
	{
	case BLKIF_RSP_OKAY:
		return REQ_DONE;
	case BLKIF_RSP_EOPNOTSUPP:
		return REQ_EOPNOTSUPP;
	case BLKIF_RSP_ERROR:
		/* Fallthrough. */
	default:
		return REQ_ERROR;
	}
}

/*
 * Get the final status of the block request based on two ring responses
 */
static int blkif_get_final_status(enum blk_req_status s1,
				  enum blk_req_status s2)
{
	BUG_ON(s1 == REQ_WAITING);
	BUG_ON(s2 == REQ_WAITING);

	if (s1 == REQ_ERROR || s2 == REQ_ERROR)
		return BLKIF_RSP_ERROR;
	else if (s1 == REQ_EOPNOTSUPP || s2 == REQ_EOPNOTSUPP)
		return BLKIF_RSP_EOPNOTSUPP;
	return BLKIF_RSP_OKAY;
}
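
/*
 * Combination rule for the two halves of a split request: any
 * REQ_ERROR yields BLKIF_RSP_ERROR, otherwise any REQ_EOPNOTSUPP
 * yields BLKIF_RSP_EOPNOTSUPP, and only two successful halves yield
 * BLKIF_RSP_OKAY.
 */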

static bool blkif_completion(unsigned long *id,
			     struct blkfront_ring_info *rinfo,
			     struct blkif_response *bret)
{
	int i = 0;
	struct scatterlist *sg;
	int num_sg, num_grant;
	struct blkfront_info *info = rinfo->dev_info;
	struct blk_shadow *s = &rinfo->shadow[*id];
	struct copy_from_grant data = {
		.grant_idx = 0,
	};

	num_grant = s->req.operation == BLKIF_OP_INDIRECT ?
		s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;

	/* The I/O request may be split in two. */
	if (unlikely(s->associated_id != NO_ASSOCIATED_ID)) {
		struct blk_shadow *s2 = &rinfo->shadow[s->associated_id];

		/* Keep the status of the current response in shadow. */
		s->status = blkif_rsp_to_req_status(bret->status);

		/* Wait for the second response if not yet here. */
		if (s2->status == REQ_WAITING)
			return 0;

		bret->status = blkif_get_final_status(s->status,
						      s2->status);

		/*
		 * All the grants are stored in the first shadow in order
		 * to make the completion code simpler.
		 */
		num_grant += s2->req.u.rw.nr_segments;

		/*
		 * The two responses may not come in order. Only the
		 * first request will store the scatter-gather list.
		 */
		if (s2->num_sg != 0) {
			/* Update "id" with the ID of the first response. */
			*id = s->associated_id;
			s = s2;
		}

		/*
		 * We no longer need the second request, so recycle
		 * it now.
		 */
		if (add_id_to_freelist(rinfo, s->associated_id))
			WARN(1, "%s: can't recycle the second part (id = %ld) of the request\n",
			     info->gd->disk_name, s->associated_id);
	}

	data.s = s;
	num_sg = s->num_sg;

	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
		for_each_sg(s->sg, sg, num_sg, i) {
			BUG_ON(sg->offset + sg->length > PAGE_SIZE);

			data.bvec_offset = sg->offset;
			data.bvec_data = kmap_atomic(sg_page(sg));

			gnttab_foreach_grant_in_range(sg_page(sg),
						      sg->offset,
						      sg->length,
						      blkif_copy_from_grant,
						      &data);

			kunmap_atomic(data.bvec_data);
		}
	}
	/* Add the persistent grant into the list of free grants */
	for (i = 0; i < num_grant; i++) {
		if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
			/*
			 * If the grant is still mapped by the backend (the
			 * backend has chosen to make this grant persistent)
			 * we add it at the head of the list, so it will be
			 * reused first.
			 */
			if (!info->feature_persistent)
				pr_alert_ratelimited("backend has not unmapped grant: %u\n",
						     s->grants_used[i]->gref);
			list_add(&s->grants_used[i]->node, &rinfo->grants);
			rinfo->persistent_gnts_c++;
		} else {
			/*
			 * If the grant is not mapped by the backend we end the
			 * foreign access and add it to the tail of the list,
			 * so it will not be picked again unless we run out of
			 * persistent grants.
			 */
			gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
			s->grants_used[i]->gref = GRANT_INVALID_REF;
			list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
		}
	}
	if (s->req.operation == BLKIF_OP_INDIRECT) {
		for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
			if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
				if (!info->feature_persistent)
					pr_alert_ratelimited("backend has not unmapped grant: %u\n",
							     s->indirect_grants[i]->gref);
				list_add(&s->indirect_grants[i]->node, &rinfo->grants);
				rinfo->persistent_gnts_c++;
			} else {
				struct page *indirect_page;

				gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
				/*
				 * Add the used indirect page back to the list of
				 * available pages for indirect grefs.
				 */
				if (!info->feature_persistent) {
					indirect_page = s->indirect_grants[i]->page;
					list_add(&indirect_page->lru, &rinfo->indirect_pages);
				}
				s->indirect_grants[i]->gref = GRANT_INVALID_REF;
				list_add_tail(&s->indirect_grants[i]->node, &rinfo->grants);
			}
		}
	}

	return 1;
}

static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
	struct request *req;
	struct blkif_response *bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
	struct blkfront_info *info = rinfo->dev_info;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return IRQ_HANDLED;

	spin_lock_irqsave(&rinfo->ring_lock, flags);
 again:
	rp = rinfo->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = rinfo->ring.rsp_cons; i != rp; i++) {
		unsigned long id;

		bret = RING_GET_RESPONSE(&rinfo->ring, i);
		id   = bret->id;
		/*
		 * The backend has messed up and given us an id that we would
		 * never have given to it (we stamp it up to BLK_RING_SIZE -
		 * look in get_id_from_freelist).
		 */
		if (id >= BLK_RING_SIZE(info)) {
			WARN(1, "%s: response to %s has incorrect id (%ld)\n",
			     info->gd->disk_name, op_name(bret->operation), id);
			/* We can't safely get the 'struct request' as
			 * the id is busted. */
			continue;
		}
		req  = rinfo->shadow[id].request;

		if (bret->operation != BLKIF_OP_DISCARD) {
			/*
			 * We may need to wait for an extra response if the
			 * I/O request is split in 2
			 */
			if (!blkif_completion(&id, rinfo, bret))
				continue;
		}

		if (add_id_to_freelist(rinfo, id)) {
			WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
			     info->gd->disk_name, op_name(bret->operation), id);
			continue;
		}

		blkif_req(req)->error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
		switch (bret->operation) {
		case BLKIF_OP_DISCARD:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				struct request_queue *rq = info->rq;
				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
					   info->gd->disk_name, op_name(bret->operation));
				blkif_req(req)->error = -EOPNOTSUPP;
				info->feature_discard = 0;
				info->feature_secdiscard = 0;
				queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
				queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
			}
			break;
		case BLKIF_OP_FLUSH_DISKCACHE:
		case BLKIF_OP_WRITE_BARRIER:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
				       info->gd->disk_name, op_name(bret->operation));
				blkif_req(req)->error = -EOPNOTSUPP;
			}
			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
				     rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
				printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
				       info->gd->disk_name, op_name(bret->operation));
				blkif_req(req)->error = -EOPNOTSUPP;
			}
			if (unlikely(blkif_req(req)->error)) {
				if (blkif_req(req)->error == -EOPNOTSUPP)
					blkif_req(req)->error = 0;
				info->feature_fua = 0;
				info->feature_flush = 0;
			}
			/* fall through */
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret->status != BLKIF_RSP_OKAY))
				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
					"request: %x\n", bret->status);

			break;
		default:
			BUG();
		}

		blk_mq_complete_request(req);
	}

	rinfo->ring.rsp_cons = i;

	if (i != rinfo->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		rinfo->ring.sring->rsp_event = i + 1;

	kick_pending_request_queues_locked(rinfo);

	spin_unlock_irqrestore(&rinfo->ring_lock, flags);

	return IRQ_HANDLED;
}

static int setup_blkring(struct xenbus_device *dev,
			 struct blkfront_ring_info *rinfo)
{
	struct blkif_sring *sring;
	int err, i;
	struct blkfront_info *info = rinfo->dev_info;
	unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE;
	grant_ref_t gref[XENBUS_MAX_RING_GRANTS];

	for (i = 0; i < info->nr_ring_pages; i++)
		rinfo->ring_ref[i] = GRANT_INVALID_REF;

	sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
						       get_order(ring_size));
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&rinfo->ring, sring, ring_size);

	err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
	if (err < 0) {
		free_pages((unsigned long)sring, get_order(ring_size));
		rinfo->ring.sring = NULL;
		goto fail;
	}
	for (i = 0; i < info->nr_ring_pages; i++)
		rinfo->ring_ref[i] = gref[i];

	err = xenbus_alloc_evtchn(dev, &rinfo->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0,
					"blkif", rinfo);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
				 "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	rinfo->irq = err;

	return 0;
fail:
	blkif_free(info, 0);
	return err;
}

/*
 * Write out per-ring/queue nodes including ring-ref and event-channel; each
 * ring buffer may span multiple pages depending on ->nr_ring_pages.
 */
static int write_per_ring_nodes(struct xenbus_transaction xbt,
				struct blkfront_ring_info *rinfo, const char *dir)
{
	int err;
	unsigned int i;
	const char *message = NULL;
	struct blkfront_info *info = rinfo->dev_info;

	if (info->nr_ring_pages == 1) {
		err = xenbus_printf(xbt, dir, "ring-ref", "%u", rinfo->ring_ref[0]);
		if (err) {
			message = "writing ring-ref";
			goto abort_transaction;
		}
	} else {
		for (i = 0; i < info->nr_ring_pages; i++) {
			char ring_ref_name[RINGREF_NAME_LEN];

			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
			err = xenbus_printf(xbt, dir, ring_ref_name,
					    "%u", rinfo->ring_ref[i]);
			if (err) {
				message = "writing ring-ref";
				goto abort_transaction;
			}
		}
	}

	err = xenbus_printf(xbt, dir, "event-channel", "%u", rinfo->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	return 0;

abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(info->xbdev, err, "%s", message);

	return err;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
	struct xenbus_transaction xbt;
	int err;
	unsigned int i, max_page_order;
	unsigned int ring_page_order;

	max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
					      "max-ring-page-order", 0);
	ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
	info->nr_ring_pages = 1 << ring_page_order;

	for (i = 0; i < info->nr_rings; i++) {
		struct blkfront_ring_info *rinfo = &info->rinfo[i];

		/* Create shared ring, alloc event channel. */
		err = setup_blkring(dev, rinfo);
		if (err)
			goto destroy_blkring;
	}

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	if (info->nr_ring_pages > 1) {
		err = xenbus_printf(xbt, dev->nodename, "ring-page-order", "%u",
				    ring_page_order);
		if (err) {
			message = "writing ring-page-order";
			goto abort_transaction;
		}
	}

	/* We already got the number of queues/rings in _probe */
	if (info->nr_rings == 1) {
		err = write_per_ring_nodes(xbt, &info->rinfo[0], dev->nodename);
		if (err)
			goto destroy_blkring;
	} else {
		char *path;
		size_t pathsize;

		err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues", "%u",
				    info->nr_rings);
		if (err) {
			message = "writing multi-queue-num-queues";
			goto abort_transaction;
		}

		pathsize = strlen(dev->nodename) + QUEUE_NAME_LEN;
		path = kmalloc(pathsize, GFP_KERNEL);
		if (!path) {
			err = -ENOMEM;
			message = "ENOMEM while writing ring references";
			goto abort_transaction;
		}

		for (i = 0; i < info->nr_rings; i++) {
			memset(path, 0, pathsize);
			snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i);
			err = write_per_ring_nodes(xbt, &info->rinfo[i], path);
			if (err) {
				kfree(path);
				goto destroy_blkring;
			}
		}
		kfree(path);
	}
	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (err) {
		message = "writing protocol";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "feature-persistent", "%u", 1);
	if (err)
		dev_warn(&dev->dev,
			 "writing persistent grants feature to xenbus");

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_blkring;
	}

	for (i = 0; i < info->nr_rings; i++) {
		unsigned int j;
		struct blkfront_ring_info *rinfo = &info->rinfo[i];

		for (j = 0; j < BLK_RING_SIZE(info); j++)
			rinfo->shadow[j].req.u.rw.id = j + 1;
		rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
	}
	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(dev, err, "%s", message);
 destroy_blkring:
	blkif_free(info, 0);

	kfree(info);
	dev_set_drvdata(&dev->dev, NULL);

	return err;
}
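
/*
 * Illustrative xenstore layout produced above for a two-queue device
 * with nr_ring_pages == 1 (node values below are examples only):
 *
 *   <nodename>/multi-queue-num-queues = "2"
 *   <nodename>/queue-0/ring-ref = "<gref>"
 *   <nodename>/queue-0/event-channel = "<port>"
 *   <nodename>/queue-1/ring-ref = "<gref>"
 *   <nodename>/queue-1/event-channel = "<port>"
 *   <nodename>/protocol = "<XEN_IO_PROTO_ABI_NATIVE>"
 *   <nodename>/feature-persistent = "1"
 */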

static int negotiate_mq(struct blkfront_info *info)
{
	unsigned int backend_max_queues;
	unsigned int i;

	BUG_ON(info->nr_rings);

	/* Check if backend supports multiple queues. */
	backend_max_queues = xenbus_read_unsigned(info->xbdev->otherend,
						  "multi-queue-max-queues", 1);
	info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
	/* We need at least one ring. */
	if (!info->nr_rings)
		info->nr_rings = 1;

	info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
	if (!info->rinfo) {
		xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
		return -ENOMEM;
	}

	for (i = 0; i < info->nr_rings; i++) {
		struct blkfront_ring_info *rinfo;

		rinfo = &info->rinfo[i];
		INIT_LIST_HEAD(&rinfo->indirect_pages);
		INIT_LIST_HEAD(&rinfo->grants);
		rinfo->dev_info = info;
		INIT_WORK(&rinfo->work, blkif_restart_queue);
		spin_lock_init(&rinfo->ring_lock);
	}
	return 0;
}

/*
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err, vdevice;
	struct blkfront_info *info;

	/* FIXME: Use dynamic device id if this is not set. */
	err = xenbus_scanf(XBT_NIL, dev->nodename,
			   "virtual-device", "%i", &vdevice);
	if (err != 1) {
		/* go looking in the extended area instead */
		err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
				   "%i", &vdevice);
		if (err != 1) {
			xenbus_dev_fatal(dev, err, "reading virtual-device");
			return err;
		}
	}

	if (xen_hvm_domain()) {
		char *type;
		int len;
		/* no unplug has been done: do not hook devices != xen vbds */
		if (xen_has_pv_and_legacy_disk_devices()) {
			int major;

			if (!VDEV_IS_EXTENDED(vdevice))
				major = BLKIF_MAJOR(vdevice);
			else
				major = XENVBD_MAJOR;

			if (major != XENVBD_MAJOR) {
				printk(KERN_INFO
					"%s: HVM does not support vbd %d as xen block device\n",
					__func__, vdevice);
				return -ENODEV;
			}
		}
		/* do not create a PV cdrom device if we are an HVM guest */
		type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
		if (IS_ERR(type))
			return -ENODEV;
		if (strncmp(type, "cdrom", 5) == 0) {
			kfree(type);
			return -ENODEV;
		}
		kfree(type);
	}
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}

	info->xbdev = dev;
	err = negotiate_mq(info);
	if (err) {
		kfree(info);
		return err;
	}

	mutex_init(&info->mutex);
	info->vdevice = vdevice;
	info->connected = BLKIF_STATE_DISCONNECTED;

	/* Front end dir is a number, which is used as the id. */
	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
	dev_set_drvdata(&dev->dev, info);

	return 0;
}

struct split_bio {
	struct bio *bio;
	atomic_t pending;
};

static void split_bio_end(struct bio *bio)
{
	struct split_bio *split_bio = bio->bi_private;

	if (atomic_dec_and_test(&split_bio->pending)) {
		split_bio->bio->bi_phys_segments = 0;
		split_bio->bio->bi_error = bio->bi_error;
		bio_endio(split_bio->bio);
		kfree(split_bio);
	}
	bio_put(bio);
}

static int blkif_recover(struct blkfront_info *info)
{
	unsigned int i, r_index;
	struct request *req, *n;
	int rc;
	struct bio *bio, *cloned_bio;
	unsigned int segs, offset;
	int pending, size;
	struct split_bio *split_bio;

	blkfront_gather_backend_features(info);
	/* Reset limits changed by blk_mq_update_nr_hw_queues(). */
	blkif_set_queue_limits(info);
	segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
	blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);

	for (r_index = 0; r_index < info->nr_rings; r_index++) {
		struct blkfront_ring_info *rinfo = &info->rinfo[r_index];

		rc = blkfront_setup_indirect(rinfo);
		if (rc)
			return rc;
	}
	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Now safe for us to use the shared ring */
	info->connected = BLKIF_STATE_CONNECTED;

	for (r_index = 0; r_index < info->nr_rings; r_index++) {
		struct blkfront_ring_info *rinfo;

		rinfo = &info->rinfo[r_index];
		/* Kick any other new requests queued since we resumed */
		kick_pending_request_queues(rinfo);
	}

	list_for_each_entry_safe(req, n, &info->requests, queuelist) {
		/* Requeue pending requests (flush or discard) */
		list_del_init(&req->queuelist);
		BUG_ON(req->nr_phys_segments > segs);
		blk_mq_requeue_request(req, false);
	}
	blk_mq_start_stopped_hw_queues(info->rq, true);
	blk_mq_kick_requeue_list(info->rq);

	while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
		/* Traverse the list of pending bios and re-queue them */
		if (bio_segments(bio) > segs) {
			/*
			 * This bio has more segments than what we can
			 * handle, we have to split it.
			 */
			pending = (bio_segments(bio) + segs - 1) / segs;
			split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO);
			BUG_ON(split_bio == NULL);
			atomic_set(&split_bio->pending, pending);
			split_bio->bio = bio;
			for (i = 0; i < pending; i++) {
				offset = (i * segs * XEN_PAGE_SIZE) >> 9;
				size = min((unsigned int)(segs * XEN_PAGE_SIZE) >> 9,
					   (unsigned int)bio_sectors(bio) - offset);
				cloned_bio = bio_clone(bio, GFP_NOIO);
				BUG_ON(cloned_bio == NULL);
				bio_trim(cloned_bio, offset, size);
				cloned_bio->bi_private = split_bio;
				cloned_bio->bi_end_io = split_bio_end;
				submit_bio(cloned_bio);
			}
			/*
			 * Now we have to wait for all those smaller bios to
			 * end, so we can also end the "parent" bio.
			 */
			continue;
		}
		/* We don't need to split this bio */
		submit_bio(bio);
	}

	return 0;
}

/*
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
	int err = 0;
	unsigned int i, j;

	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);

	bio_list_init(&info->bio_list);
	INIT_LIST_HEAD(&info->requests);
	for (i = 0; i < info->nr_rings; i++) {
		struct blkfront_ring_info *rinfo = &info->rinfo[i];
		struct bio_list merge_bio;
		struct blk_shadow *shadow = rinfo->shadow;

		for (j = 0; j < BLK_RING_SIZE(info); j++) {
			/* Not in use? */
			if (!shadow[j].request)
				continue;

			/*
			 * Get the bios in the request so we can re-queue them.
			 */
			if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
			    req_op(shadow[j].request) == REQ_OP_DISCARD ||
			    req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
			    shadow[j].request->cmd_flags & REQ_FUA) {
				/*
				 * Flush operations don't contain bios, so
				 * we need to requeue the whole request
				 *
				 * XXX: but this doesn't make any sense for a
				 * write with the FUA flag set..
				 */
				list_add(&shadow[j].request->queuelist, &info->requests);
				continue;
			}
			merge_bio.head = shadow[j].request->bio;
			merge_bio.tail = shadow[j].request->biotail;
			bio_list_merge(&info->bio_list, &merge_bio);
			shadow[j].request->bio = NULL;
			blk_mq_end_request(shadow[j].request, 0);
		}
	}

	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

	err = negotiate_mq(info);
	if (err)
		return err;

	err = talk_to_blkback(dev, info);
	if (!err)
		blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);

	/*
	 * We have to wait for the backend to switch to
	 * connected state, since we want to read which
	 * features it supports.
	 */

	return err;
}
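/*
 * Recovery sequence sketch (editor's summary of the code above):
 *
 *   blkfront_resume()                 stash in-flight bios and requests,
 *     -> blkif_free()                 tear down rings and grants,
 *     -> talk_to_blkback()            renegotiate with the backend;
 *   blkback_changed(Connected)
 *     -> blkfront_connect()
 *       -> blkif_recover()            re-submit the stashed work.
 *
 * The stashed bios are only re-submitted in blkif_recover(), after the
 * backend's features (and thus the usable segment count) are known again.
 */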
static void blkfront_closing(struct blkfront_info *info)
{
	struct xenbus_device *xbdev = info->xbdev;
	struct block_device *bdev = NULL;

	mutex_lock(&info->mutex);

	if (xbdev->state == XenbusStateClosing) {
		mutex_unlock(&info->mutex);
		return;
	}

	if (info->gd)
		bdev = bdget_disk(info->gd, 0);

	mutex_unlock(&info->mutex);

	if (bdev == NULL) {
		xenbus_frontend_closed(xbdev);
		return;
	}

	mutex_lock(&bdev->bd_mutex);

	if (bdev->bd_openers) {
		xenbus_dev_error(xbdev, -EBUSY,
				 "Device in use; refusing to close");
		xenbus_switch_state(xbdev, XenbusStateClosing);
	} else {
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(xbdev);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
}
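/*
 * Example of the deferred close above (editor's note): if a process still
 * holds /dev/xvda open when the toolstack initiates a detach, bd_openers is
 * non-zero, so we report -EBUSY and merely enter XenbusStateClosing; the
 * actual teardown then happens from blkif_release() once the last opener
 * drops the device.
 */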
static void blkfront_setup_discard(struct blkfront_info *info)
{
	int err;
	unsigned int discard_granularity;
	unsigned int discard_alignment;

	info->feature_discard = 1;
	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
		"discard-granularity", "%u", &discard_granularity,
		"discard-alignment", "%u", &discard_alignment,
		NULL);
	if (!err) {
		info->discard_granularity = discard_granularity;
		info->discard_alignment = discard_alignment;
	}
	info->feature_secdiscard =
		!!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
				       0);
}
static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
{
	unsigned int psegs, grants;
	int err, i;
	struct blkfront_info *info = rinfo->dev_info;

	if (info->max_indirect_segments == 0) {
		if (!HAS_EXTRA_REQ)
			grants = BLKIF_MAX_SEGMENTS_PER_REQUEST;
		else {
			/*
			 * When an extra req is required, the maximum
			 * grants supported is related to the size of the
			 * Linux block segment.
			 */
			grants = GRANTS_PER_PSEG;
		}
	} else
		grants = info->max_indirect_segments;
	psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);

	err = fill_grant_buffer(rinfo,
				(grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
	if (err)
		goto out_of_memory;

	if (!info->feature_persistent && info->max_indirect_segments) {
		/*
		 * We are using indirect descriptors but not persistent
		 * grants, we need to allocate a set of pages that can be
		 * used for mapping indirect grefs
		 */
		int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);

		BUG_ON(!list_empty(&rinfo->indirect_pages));
		for (i = 0; i < num; i++) {
			struct page *indirect_page = alloc_page(GFP_NOIO);
			if (!indirect_page)
				goto out_of_memory;
			list_add(&indirect_page->lru, &rinfo->indirect_pages);
		}
	}

	for (i = 0; i < BLK_RING_SIZE(info); i++) {
		rinfo->shadow[i].grants_used = kzalloc(
			sizeof(rinfo->shadow[i].grants_used[0]) * grants,
			GFP_NOIO);
		rinfo->shadow[i].sg = kzalloc(sizeof(rinfo->shadow[i].sg[0]) * psegs, GFP_NOIO);
		if (info->max_indirect_segments)
			rinfo->shadow[i].indirect_grants = kzalloc(
				sizeof(rinfo->shadow[i].indirect_grants[0]) *
				INDIRECT_GREFS(grants),
				GFP_NOIO);
		if ((rinfo->shadow[i].grants_used == NULL) ||
		    (rinfo->shadow[i].sg == NULL) ||
		    (info->max_indirect_segments &&
		     (rinfo->shadow[i].indirect_grants == NULL)))
			goto out_of_memory;
		sg_init_table(rinfo->shadow[i].sg, psegs);
	}

	return 0;

out_of_memory:
	for (i = 0; i < BLK_RING_SIZE(info); i++) {
		kfree(rinfo->shadow[i].grants_used);
		rinfo->shadow[i].grants_used = NULL;
		kfree(rinfo->shadow[i].sg);
		rinfo->shadow[i].sg = NULL;
		kfree(rinfo->shadow[i].indirect_grants);
		rinfo->shadow[i].indirect_grants = NULL;
	}
	if (!list_empty(&rinfo->indirect_pages)) {
		struct page *indirect_page, *n;
		list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
			list_del(&indirect_page->lru);
			__free_page(indirect_page);
		}
	}
	return -ENOMEM;
}
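/*
 * Worked example (editor's sketch, assuming 4 KiB pages on both Linux and
 * Xen, so GRANTS_PER_PSEG == 1): with max_indirect_segments == 256,
 * grants == 256 and psegs == DIV_ROUND_UP(256, 1) == 256. Each indirect
 * page holds XEN_PAGE_SIZE / sizeof(struct blkif_request_segment)
 * == 4096 / 8 == 512 segment entries, so INDIRECT_GREFS(256) == 1 and
 * fill_grant_buffer() pre-allocates (256 + 1) * BLK_RING_SIZE(info)
 * grants for this ring.
 */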
/*
 * Gather all backend feature-*
 */
static void blkfront_gather_backend_features(struct blkfront_info *info)
{
	unsigned int indirect_segments;

	info->feature_flush = 0;
	info->feature_fua = 0;

	/*
	 * If there's no "feature-barrier" defined, then it means
	 * we're dealing with a very old backend which writes
	 * synchronously; nothing to do.
	 *
	 * If there are barriers, then we use flush.
	 */
	if (xenbus_read_unsigned(info->xbdev->otherend, "feature-barrier", 0)) {
		info->feature_flush = 1;
		info->feature_fua = 1;
	}

	/*
	 * And if there is "feature-flush-cache" use that above
	 * barriers.
	 */
	if (xenbus_read_unsigned(info->xbdev->otherend, "feature-flush-cache",
				 0)) {
		info->feature_flush = 1;
		info->feature_fua = 0;
	}

	if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
		blkfront_setup_discard(info);

	info->feature_persistent =
		!!xenbus_read_unsigned(info->xbdev->otherend,
				       "feature-persistent", 0);

	indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
					"feature-max-indirect-segments", 0);
	if (indirect_segments > xen_blkif_max_segments)
		indirect_segments = xen_blkif_max_segments;
	if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
		indirect_segments = 0;
	info->max_indirect_segments = indirect_segments;
}
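/*
 * Resulting cache-control mapping (editor's summary of the checks above):
 *
 *   feature-barrier  feature-flush-cache   feature_flush  feature_fua
 *         0                  0                   0             0
 *         1                  0                   1             1
 *         x                  1                   1             0
 *
 * i.e. "feature-flush-cache" takes precedence over "feature-barrier".
 */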
/*
 * Invoked when the backend is finally 'ready' (and has provided the details
 * about the physical device - #sectors, size, etc).
 */
static void blkfront_connect(struct blkfront_info *info)
{
	unsigned long long sectors;
	unsigned long sector_size;
	unsigned int physical_sector_size;
	unsigned int binfo;
	char *envp[] = { "RESIZE=1", NULL };
	int err, i;

	switch (info->connected) {
	case BLKIF_STATE_CONNECTED:
		/*
		 * Potentially, the back-end may be signalling
		 * a capacity change; update the capacity.
		 */
		err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				   "sectors", "%Lu", &sectors);
		if (XENBUS_EXIST_ERR(err))
			return;
		printk(KERN_INFO "Setting capacity to %Lu\n",
		       sectors);
		set_capacity(info->gd, sectors);
		revalidate_disk(info->gd);
		kobject_uevent_env(&disk_to_dev(info->gd)->kobj,
				   KOBJ_CHANGE, envp);

		return;
	case BLKIF_STATE_SUSPENDED:
		/*
		 * If we are recovering from suspension, we need to wait
		 * for the backend to announce its features before
		 * reconnecting, at least we need to know if the backend
		 * supports indirect descriptors, and how many.
		 */
		blkif_recover(info);
		return;

	default:
		break;
	}

	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
		__func__, info->xbdev->otherend);

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "sectors", "%llu", &sectors,
			    "info", "%u", &binfo,
			    "sector-size", "%lu", &sector_size,
			    NULL);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err,
				 "reading backend fields at %s",
				 info->xbdev->otherend);
		return;
	}

	/*
	 * physical-sector-size is a newer field, so old backends may not
	 * provide this. Assume physical sector size to be the same as
	 * sector_size in that case.
	 */
	physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend,
						    "physical-sector-size",
						    sector_size);
	blkfront_gather_backend_features(info);
	for (i = 0; i < info->nr_rings; i++) {
		err = blkfront_setup_indirect(&info->rinfo[i]);
		if (err) {
			xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
					 info->xbdev->otherend);
			blkif_free(info, 0);
			break;
		}
	}

	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
				  physical_sector_size);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
				 info->xbdev->otherend);
		goto fail;
	}

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Kick pending requests. */
	info->connected = BLKIF_STATE_CONNECTED;
	for (i = 0; i < info->nr_rings; i++)
		kick_pending_request_queues(&info->rinfo[i]);

	device_add_disk(&info->xbdev->dev, info->gd);

	info->is_ready = 1;
	return;

fail:
	blkif_free(info, 0);
	return;
}
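/*
 * Online-resize example (editor's note): when the toolstack grows the
 * backing device it updates the backend's "sectors" node and re-writes the
 * backend state as Connected; the resulting watch lands in
 * blkback_changed(), which calls blkfront_connect() again. The
 * BLKIF_STATE_CONNECTED branch above then re-reads "sectors", calls
 * set_capacity()/revalidate_disk(), and emits a RESIZE=1 change uevent
 * that userspace (e.g. udev rules) can react to.
 */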
/*
 * Callback received when the backend's state changes.
 */
static void blkback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);

	switch (backend_state) {
	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (talk_to_blkback(dev, info))
			break;
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
		break;

	case XenbusStateConnected:
		/*
		 * talk_to_blkback sets state to XenbusStateInitialised
		 * and blkfront_connect sets it to XenbusStateConnected
		 * (if connection went OK).
		 *
		 * If the backend (or toolstack) decides to poke at backend
		 * state (and re-trigger the watch by setting the state repeatedly
		 * to XenbusStateConnected (4)) we need to deal with this.
		 * This is allowed as this is used to communicate to the guest
		 * that the size of disk has changed!
		 */
		if ((dev->state != XenbusStateInitialised) &&
		    (dev->state != XenbusStateConnected)) {
			if (talk_to_blkback(dev, info))
				break;
		}

		blkfront_connect(info);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		/* Missed the backend's Closing state -- fallthrough */
	case XenbusStateClosing:
		if (info)
			blkfront_closing(info);
		break;
	}
}
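/*
 * Typical state handshake (editor's sketch; see also blkfront_connect()):
 *
 *   frontend: Initialising
 *   backend:  InitWait        -> talk_to_blkback(), frontend: Initialised
 *   backend:  Connected       -> blkfront_connect(), frontend: Connected
 *   backend:  Closing/Closed  -> blkfront_closing()
 *
 * Repeated backend Connected events while already connected are the
 * capacity-change notifications handled in blkfront_connect().
 */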
static int blkfront_remove(struct xenbus_device *xbdev)
{
	struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
	struct block_device *bdev = NULL;
	struct gendisk *disk;

	dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);

	blkif_free(info, 0);

	mutex_lock(&info->mutex);

	disk = info->gd;
	if (disk)
		bdev = bdget_disk(disk, 0);

	info->xbdev = NULL;
	mutex_unlock(&info->mutex);

	if (!bdev) {
		kfree(info);
		return 0;
	}

	/*
	 * The xbdev was removed before we reached the Closed
	 * state. See if it's safe to remove the disk. If the bdev
	 * isn't closed yet, we let release take care of it.
	 */

	mutex_lock(&bdev->bd_mutex);
	info = disk->private_data;

	dev_warn(disk_to_dev(disk),
		 "%s was hot-unplugged, %d stale handles\n",
		 xbdev->nodename, bdev->bd_openers);

	if (info && !bdev->bd_openers) {
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);

	return 0;
}
static int blkfront_is_ready(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	return info->is_ready && info->xbdev;
}
static int blkif_open(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	struct blkfront_info *info;
	int err = 0;

	mutex_lock(&blkfront_mutex);

	info = disk->private_data;
	if (!info) {
		/* xbdev gone */
		err = -ERESTARTSYS;
		goto out;
	}

	mutex_lock(&info->mutex);

	if (!info->gd)
		/* xbdev is closed */
		err = -ERESTARTSYS;

	mutex_unlock(&info->mutex);

out:
	mutex_unlock(&blkfront_mutex);
	return err;
}
static void blkif_release(struct gendisk *disk, fmode_t mode)
{
	struct blkfront_info *info = disk->private_data;
	struct block_device *bdev;
	struct xenbus_device *xbdev;

	mutex_lock(&blkfront_mutex);

	bdev = bdget_disk(disk, 0);

	if (!bdev) {
		WARN(1, "Block device %s yanked out from us!\n", disk->disk_name);
		goto out_mutex;
	}
	if (bdev->bd_openers)
		goto out;

	/*
	 * Check if we have been instructed to close. We will have
	 * deferred this request, because the bdev was still open.
	 */

	mutex_lock(&info->mutex);
	xbdev = info->xbdev;

	if (xbdev && xbdev->state == XenbusStateClosing) {
		/* pending switch to state closed */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(info->xbdev);
	}

	mutex_unlock(&info->mutex);

	if (!xbdev) {
		/* sudden device removal */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

out:
	bdput(bdev);
out_mutex:
	mutex_unlock(&blkfront_mutex);
}
static const struct block_device_operations xlvbd_block_fops =
{
	.owner = THIS_MODULE,
	.open = blkif_open,
	.release = blkif_release,
	.getgeo = blkif_getgeo,
	.ioctl = blkif_ioctl,
};
static const struct xenbus_device_id blkfront_ids[] = {
	{ "vbd" },
	{ "" }
};
static struct xenbus_driver blkfront_driver = {
	.ids  = blkfront_ids,
	.probe = blkfront_probe,
	.remove = blkfront_remove,
	.resume = blkfront_resume,
	.otherend_changed = blkback_changed,
	.is_ready = blkfront_is_ready,
};
static int __init xlblk_init(void)
{
	int ret;
	int nr_cpus = num_online_cpus();

	if (!xen_domain())
		return -ENODEV;

	if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
		xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;

	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
			xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
		xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
	}

	if (xen_blkif_max_queues > nr_cpus) {
		pr_info("Invalid max_queues (%d), will use default max: %d.\n",
			xen_blkif_max_queues, nr_cpus);
		xen_blkif_max_queues = nr_cpus;
	}

	if (!xen_has_pv_disk_devices())
		return -ENODEV;

	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
		printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
		       XENVBD_MAJOR, DEV_NAME);
		return -ENODEV;
	}

	ret = xenbus_register_frontend(&blkfront_driver);
	if (ret) {
		unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
		return ret;
	}

	return 0;
}
module_init(xlblk_init);
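/*
 * Usage example (editor's note; the parameter names assume the
 * module_param_named() bindings declared earlier in this file):
 *
 *   modprobe xen-blkfront max_ring_page_order=4 max_queues=4
 *
 * would request 2^4 == 16 ring pages per ring and at most four hardware
 * queues; the checks above clamp these to XENBUS_MAX_RING_GRANT_ORDER and
 * num_online_cpus() respectively.
 */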
static void __exit xlblk_exit(void)
{
	xenbus_unregister_driver(&blkfront_driver);
	unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
}
module_exit(xlblk_exit);
MODULE_DESCRIPTION("Xen virtual block device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
MODULE_ALIAS("xen:vbd");
MODULE_ALIAS("xenblk");