/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/list.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
};

struct grant {
	grant_ref_t gref;
	struct page *page;
	struct list_head node;
};

struct blk_shadow {
	struct blkif_request req;
	struct request *request;
	struct grant **grants_used;
	struct grant **indirect_grants;
	struct scatterlist *sg;
};

struct split_bio {
	struct bio *bio;
	atomic_t pending;
};

static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;

/*
 * Maximum number of segments in indirect requests; the actual value used by
 * the frontend driver is the minimum of this value and the value provided
 * by the backend driver.
 */
static unsigned int xen_blkif_max_segments = 32;
module_param_named(max, xen_blkif_max_segments, int, S_IRUGO);
MODULE_PARM_DESC(max, "Maximum amount of segments in indirect requests (default is 32)");

/*
 * Maximum order of pages to be used for the shared ring between front and
 * backend; 4KB page granularity is used.
 */
static unsigned int xen_blkif_max_ring_order;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
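/*
 * Example (hypothetical values): both knobs are ordinary module
 * parameters, so something like
 *	modprobe xen-blkfront max=64 max_ring_page_order=2
 * would ask for 64-segment indirect requests and a 4-page shared ring;
 * both remain capped by whatever the backend actually advertises.
 */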

#define BLK_RING_SIZE(info) __CONST_RING_SIZE(blkif, PAGE_SIZE * (info)->nr_ring_pages)
#define BLK_MAX_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE * XENBUS_MAX_RING_PAGES)
/*
 * ring-ref%i i=(-1UL) would take 11 characters + 'ring-ref' is 8, so 19
 * characters are enough. Define to 20 to keep consistent with backend.
 */
#define RINGREF_NAME_LEN (20)

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
	spinlock_t io_lock;
	struct mutex mutex;
	struct xenbus_device *xbdev;
	struct gendisk *gd;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	int ring_ref[XENBUS_MAX_RING_PAGES];
	unsigned int nr_ring_pages;
	struct blkif_front_ring ring;
	unsigned int evtchn, irq;
	struct request_queue *rq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct blk_shadow shadow[BLK_MAX_RING_SIZE];
	struct list_head grants;
	struct list_head indirect_pages;
	unsigned int persistent_gnts_c;
	unsigned long shadow_free;
	unsigned int feature_flush;
	unsigned int feature_discard:1;
	unsigned int feature_secdiscard:1;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	unsigned int feature_persistent:1;
	unsigned int max_indirect_segments;
	int is_ready;
	struct blk_mq_tag_set tag_set;
};

static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);

#define GRANT_INVALID_REF	0

#define PARTS_PER_DISK		16
#define PARTS_PER_EXT_DISK	256

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define EXT_SHIFT 28
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
#define EMULATED_HD_DISK_MINOR_OFFSET (0)
#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
#define EMULATED_SD_DISK_MINOR_OFFSET (0)
#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)

#define DEV_NAME	"xvd"	/* name in /dev */

#define SEGS_PER_INDIRECT_FRAME \
	(PAGE_SIZE/sizeof(struct blkif_request_segment))
#define INDIRECT_GREFS(_segs) \
	((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)

static int blkfront_setup_indirect(struct blkfront_info *info);
static int blkfront_gather_backend_features(struct blkfront_info *info);

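/*
 * The shadow free list is threaded through the otherwise-unused
 * req.u.rw.id fields of inactive shadow entries: shadow_free holds the
 * index of the first free entry, and each free entry's id points at the
 * next one.
 */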
static int get_id_from_freelist(struct blkfront_info *info)
{
	unsigned long free = info->shadow_free;
	BUG_ON(free >= BLK_RING_SIZE(info));
	info->shadow_free = info->shadow[free].req.u.rw.id;
	info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
	return free;
}

static int add_id_to_freelist(struct blkfront_info *info,
			      unsigned long id)
{
	if (info->shadow[id].req.u.rw.id != id)
		return -EINVAL;
	if (info->shadow[id].request == NULL)
		return -EINVAL;
	info->shadow[id].req.u.rw.id = info->shadow_free;
	info->shadow[id].request = NULL;
	info->shadow_free = id;
	return 0;
}
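
/*
 * Grants live in info->grants as a single pool. With persistent grants
 * an entry keeps both its grant reference and its backing page across
 * requests; without them a page is only attached to an entry for the
 * lifetime of a single request.
 */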
static int fill_grant_buffer(struct blkfront_info *info, int num)
{
	struct page *granted_page;
	struct grant *gnt_list_entry, *n;
	int i = 0;

	while (i < num) {
		gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
		if (!gnt_list_entry)
			goto out_of_memory;

		if (info->feature_persistent) {
			granted_page = alloc_page(GFP_NOIO);
			if (!granted_page) {
				kfree(gnt_list_entry);
				goto out_of_memory;
			}
			gnt_list_entry->page = granted_page;
		}

		gnt_list_entry->gref = GRANT_INVALID_REF;
		list_add(&gnt_list_entry->node, &info->grants);
		i++;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(gnt_list_entry, n,
				 &info->grants, node) {
		list_del(&gnt_list_entry->node);
		if (info->feature_persistent)
			__free_page(gnt_list_entry->page);
		kfree(gnt_list_entry);
		i--;
	}
	BUG_ON(i != 0);
	return -ENOMEM;
}

static struct grant *get_free_grant(struct blkfront_info *info)
{
	struct grant *gnt_list_entry;

	BUG_ON(list_empty(&info->grants));
	gnt_list_entry = list_first_entry(&info->grants, struct grant,
					  node);
	list_del(&gnt_list_entry->node);

	if (gnt_list_entry->gref != GRANT_INVALID_REF)
		info->persistent_gnts_c--;

	return gnt_list_entry;
}

static inline void grant_foreign_access(const struct grant *gnt_list_entry,
					const struct blkfront_info *info)
{
	gnttab_page_grant_foreign_access_ref_one(gnt_list_entry->gref,
						 info->xbdev->otherend_id,
						 gnt_list_entry->page,
						 0);
}

static struct grant *get_grant(grant_ref_t *gref_head,
			       unsigned long gfn,
			       struct blkfront_info *info)
{
	struct grant *gnt_list_entry = get_free_grant(info);

	if (gnt_list_entry->gref != GRANT_INVALID_REF)
		return gnt_list_entry;

	/* Assign a gref to this page */
	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
	BUG_ON(gnt_list_entry->gref == -ENOSPC);
	if (info->feature_persistent)
		grant_foreign_access(gnt_list_entry, info);
	else {
		/* Grant access to the GFN passed by the caller */
		gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
						info->xbdev->otherend_id,
						gfn, 0);
	}

	return gnt_list_entry;
}

static struct grant *get_indirect_grant(grant_ref_t *gref_head,
					struct blkfront_info *info)
{
	struct grant *gnt_list_entry = get_free_grant(info);

	if (gnt_list_entry->gref != GRANT_INVALID_REF)
		return gnt_list_entry;

	/* Assign a gref to this page */
	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
	BUG_ON(gnt_list_entry->gref == -ENOSPC);
	if (!info->feature_persistent) {
		struct page *indirect_page;

		/* Fetch a pre-allocated page to use for indirect grefs */
		BUG_ON(list_empty(&info->indirect_pages));
		indirect_page = list_first_entry(&info->indirect_pages,
						 struct page, lru);
		list_del(&indirect_page->lru);
		gnt_list_entry->page = indirect_page;
	}
	grant_foreign_access(gnt_list_entry, info);

	return gnt_list_entry;
}

static const char *op_name(int op)
{
	static const char *const names[] = {
		[BLKIF_OP_READ] = "read",
		[BLKIF_OP_WRITE] = "write",
		[BLKIF_OP_WRITE_BARRIER] = "barrier",
		[BLKIF_OP_FLUSH_DISKCACHE] = "flush",
		[BLKIF_OP_DISCARD] = "discard" };

	if (op < 0 || op >= ARRAY_SIZE(names))
		return "unknown";

	if (!names[op])
		return "reserved";

	return names[op];
}

static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;
	int rc;

	if (end > nr_minors) {
		unsigned long *bitmap, *old;

		bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
				 GFP_KERNEL);
		if (bitmap == NULL)
			return -ENOMEM;

		spin_lock(&minor_lock);
		if (end > nr_minors) {
			old = minors;
			memcpy(bitmap, minors,
			       BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
			minors = bitmap;
			nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
		} else
			old = bitmap;
		spin_unlock(&minor_lock);
		kfree(old);
	}

	spin_lock(&minor_lock);
	if (find_next_bit(minors, end, minor) >= end) {
		bitmap_set(minors, minor, nr);
		rc = 0;
	} else
		rc = -EBUSY;
	spin_unlock(&minor_lock);

	return rc;
}

static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;

	BUG_ON(end > nr_minors);
	spin_lock(&minor_lock);
	bitmap_clear(minors, minor, nr);
	spin_unlock(&minor_lock);
}

static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
	schedule_work(&info->work);
}

static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	/* We don't have real geometry info, but let's at least return
	   values consistent with the size of the device */
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}

static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned command, unsigned long argument)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;
	int i;

	dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
		command, (long)argument);

	switch (command) {
	case CDROMMULTISESSION:
		dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY: {
		struct gendisk *gd = info->gd;
		if (gd->flags & GENHD_FL_CD)
			return 0;
		return -EINVAL;
	}

	default:
		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
		  command);*/
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}

static int blkif_queue_discard_req(struct request *req)
{
	struct blkfront_info *info = req->rq_disk->private_data;
	struct blkif_request *ring_req;
	unsigned long id;

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = get_id_from_freelist(info);
	info->shadow[id].request = req;

	ring_req->operation = BLKIF_OP_DISCARD;
	ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
	ring_req->u.discard.id = id;
	ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
	if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
		ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
	else
		ring_req->u.discard.flag = 0;

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	return 0;
}
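
/*
 * Read/write requests consume grants on two paths: each data segment
 * needs a grant for its page, and requests large enough to go indirect
 * additionally consume grants for the frames that hold the segment
 * descriptors themselves.
 */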
static int blkif_queue_rw_req(struct request *req)
{
	struct blkfront_info *info = req->rq_disk->private_data;
	struct blkif_request *ring_req;
	unsigned long id;
	unsigned int fsect, lsect;
	int i, ref, n;
	struct blkif_request_segment *segments = NULL;

	/*
	 * Used to store if we are able to queue the request by just using
	 * existing persistent grants, or if we have to get new grants,
	 * as there are not sufficiently many free.
	 */
	bool new_persistent_gnts;
	grant_ref_t gref_head;
	struct grant *gnt_list_entry = NULL;
	struct scatterlist *sg;
	int nseg, max_grefs;

	max_grefs = req->nr_phys_segments;
	if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
		/*
		 * If we are using indirect segments we need to account
		 * for the indirect grefs used in the request.
		 */
		max_grefs += INDIRECT_GREFS(req->nr_phys_segments);

	/* Check if we have enough grants to allocate a request */
	if (info->persistent_gnts_c < max_grefs) {
		new_persistent_gnts = 1;
		if (gnttab_alloc_grant_references(
		    max_grefs - info->persistent_gnts_c,
		    &gref_head) < 0) {
			gnttab_request_free_callback(
				&info->callback,
				blkif_restart_queue_callback,
				info,
				max_grefs);
			return 1;
		}
	} else
		new_persistent_gnts = 0;

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = get_id_from_freelist(info);
	info->shadow[id].request = req;

	BUG_ON(info->max_indirect_segments == 0 &&
	       req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
	BUG_ON(info->max_indirect_segments &&
	       req->nr_phys_segments > info->max_indirect_segments);
	nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg);
	ring_req->u.rw.id = id;
	if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
		/*
		 * The indirect operation can only be a BLKIF_OP_READ or
		 * BLKIF_OP_WRITE
		 */
		BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
		ring_req->operation = BLKIF_OP_INDIRECT;
		ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
			BLKIF_OP_WRITE : BLKIF_OP_READ;
		ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
		ring_req->u.indirect.handle = info->handle;
		ring_req->u.indirect.nr_segments = nseg;
	} else {
		ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
		ring_req->u.rw.handle = info->handle;
		ring_req->operation = rq_data_dir(req) ?
			BLKIF_OP_WRITE : BLKIF_OP_READ;
		if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
			/*
			 * Ideally we can do an unordered flush-to-disk.
			 * In case the backend only supports barriers, use
			 * that. A barrier request is a superset of FUA, so
			 * we can implement it the same way. (It's also a
			 * FLUSH+FUA, since it is guaranteed ordered WRT
			 * previous writes.)
			 */
			switch (info->feature_flush &
				((REQ_FLUSH|REQ_FUA))) {
			case REQ_FLUSH|REQ_FUA:
				ring_req->operation =
					BLKIF_OP_WRITE_BARRIER;
				break;
			case REQ_FLUSH:
				ring_req->operation =
					BLKIF_OP_FLUSH_DISKCACHE;
				break;
			default:
				ring_req->operation = 0;
			}
		}
		ring_req->u.rw.nr_segments = nseg;
	}
	for_each_sg(info->shadow[id].sg, sg, nseg, i) {
		fsect = sg->offset >> 9;
		lsect = fsect + (sg->length >> 9) - 1;

		if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
		    (i % SEGS_PER_INDIRECT_FRAME == 0)) {
			if (segments)
				kunmap_atomic(segments);

			n = i / SEGS_PER_INDIRECT_FRAME;
			gnt_list_entry = get_indirect_grant(&gref_head, info);
			info->shadow[id].indirect_grants[n] = gnt_list_entry;
			segments = kmap_atomic(gnt_list_entry->page);
			ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
		}

		gnt_list_entry = get_grant(&gref_head,
					   xen_page_to_gfn(sg_page(sg)),
					   info);
		ref = gnt_list_entry->gref;

		info->shadow[id].grants_used[i] = gnt_list_entry;

		if (rq_data_dir(req) && info->feature_persistent) {
			char *bvec_data;
			void *shared_data;

			BUG_ON(sg->offset + sg->length > PAGE_SIZE);

			shared_data = kmap_atomic(gnt_list_entry->page);
			bvec_data = kmap_atomic(sg_page(sg));

			/*
			 * this does not wipe data stored outside the
			 * range sg->offset..sg->offset+sg->length.
			 * Therefore, blkback *could* see data from
			 * previous requests. This is OK as long as
			 * persistent grants are shared with just one
			 * domain. It may need refactoring if this
			 * changes
			 */
			memcpy(shared_data + sg->offset,
			       bvec_data + sg->offset,
			       sg->length);

			kunmap_atomic(bvec_data);
			kunmap_atomic(shared_data);
		}
		if (ring_req->operation != BLKIF_OP_INDIRECT) {
			ring_req->u.rw.seg[i] =
				(struct blkif_request_segment) {
					.gref = ref,
					.first_sect = fsect,
					.last_sect = lsect };
		} else {
			n = i % SEGS_PER_INDIRECT_FRAME;
			segments[n] =
				(struct blkif_request_segment) {
					.gref = ref,
					.first_sect = fsect,
					.last_sect = lsect };
		}
	}
	if (segments)
		kunmap_atomic(segments);

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	if (new_persistent_gnts)
		gnttab_free_grant_references(gref_head);

	return 0;
}

/*
 * Generate a Xen blkfront IO request from a blk layer request.  Reads
 * and writes are handled as expected.
 *
 * @req: a request struct
 */
static int blkif_queue_request(struct request *req)
{
	struct blkfront_info *info = req->rq_disk->private_data;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE)))
		return blkif_queue_discard_req(req);
	else
		return blkif_queue_rw_req(req);
}

static inline void flush_requests(struct blkfront_info *info)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

	if (notify)
		notify_remote_via_irq(info->irq);
}

static inline bool blkif_request_flush_invalid(struct request *req,
					       struct blkfront_info *info)
{
	return ((req->cmd_type != REQ_TYPE_FS) ||
		((req->cmd_flags & REQ_FLUSH) &&
		 !(info->feature_flush & REQ_FLUSH)) ||
		((req->cmd_flags & REQ_FUA) &&
		 !(info->feature_flush & REQ_FUA)));
}

static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *qd)
{
	struct blkfront_info *info = qd->rq->rq_disk->private_data;

	blk_mq_start_request(qd->rq);
	spin_lock_irq(&info->io_lock);
	if (RING_FULL(&info->ring))
		goto out_busy;

	if (blkif_request_flush_invalid(qd->rq, info))
		goto out_err;

	if (blkif_queue_request(qd->rq))
		goto out_busy;

	flush_requests(info);
	spin_unlock_irq(&info->io_lock);
	return BLK_MQ_RQ_QUEUE_OK;

out_err:
	spin_unlock_irq(&info->io_lock);
	return BLK_MQ_RQ_QUEUE_ERROR;

out_busy:
	spin_unlock_irq(&info->io_lock);
	blk_mq_stop_hw_queue(hctx);
	return BLK_MQ_RQ_QUEUE_BUSY;
}
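
/*
 * A single blk-mq hardware queue is registered, with a depth equal to
 * the shared ring size, so a full ring maps directly onto a stopped
 * hardware queue (restarted later from kick_pending_request_queues()).
 */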
static struct blk_mq_ops blkfront_mq_ops = {
	.queue_rq = blkif_queue_rq,
	.map_queue = blk_mq_map_queue,
};

static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
				unsigned int physical_sector_size,
				unsigned int segments)
{
	struct request_queue *rq;
	struct blkfront_info *info = gd->private_data;

	memset(&info->tag_set, 0, sizeof(info->tag_set));
	info->tag_set.ops = &blkfront_mq_ops;
	info->tag_set.nr_hw_queues = 1;
	info->tag_set.queue_depth = BLK_RING_SIZE(info);
	info->tag_set.numa_node = NUMA_NO_NODE;
	info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	info->tag_set.cmd_size = 0;
	info->tag_set.driver_data = info;

	if (blk_mq_alloc_tag_set(&info->tag_set))
		return -1;
	rq = blk_mq_init_queue(&info->tag_set);
	if (IS_ERR(rq)) {
		blk_mq_free_tag_set(&info->tag_set);
		return -1;
	}

	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);

	if (info->feature_discard) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
		blk_queue_max_discard_sectors(rq, get_capacity(gd));
		rq->limits.discard_granularity = info->discard_granularity;
		rq->limits.discard_alignment = info->discard_alignment;
		if (info->feature_secdiscard)
			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
	}

	/* Hard sector size and max sectors impersonate the equiv. hardware. */
	blk_queue_logical_block_size(rq, sector_size);
	blk_queue_physical_block_size(rq, physical_sector_size);
	blk_queue_max_hw_sectors(rq, (segments * PAGE_SIZE) / 512);

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
	blk_queue_max_segment_size(rq, PAGE_SIZE);

	/* Ensure a merged request will fit in a single I/O ring slot. */
	blk_queue_max_segments(rq, segments);

	/* Make sure buffer addresses are sector-aligned. */
	blk_queue_dma_alignment(rq, 511);

	/* Make sure we don't use bounce buffers. */
	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);

	gd->queue = rq;

	return 0;
}

static const char *flush_info(unsigned int feature_flush)
{
	switch (feature_flush & ((REQ_FLUSH | REQ_FUA))) {
	case REQ_FLUSH|REQ_FUA:
		return "barrier: enabled;";
	case REQ_FLUSH:
		return "flush diskcache: enabled;";
	default:
		return "barrier or flush: disabled;";
	}
}

static void xlvbd_flush(struct blkfront_info *info)
{
	blk_queue_flush(info->rq, info->feature_flush);
	pr_info("blkfront: %s: %s %s %s %s %s\n",
		info->gd->disk_name, flush_info(info->feature_flush),
		"persistent grants:", info->feature_persistent ?
		"enabled;" : "disabled;", "indirect descriptors:",
		info->max_indirect_segments ? "enabled;" : "disabled;");
}

static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
{
	int major;

	major = BLKIF_MAJOR(vdevice);
	*minor = BLKIF_MINOR(vdevice);
	switch (major) {
	case XEN_IDE0_MAJOR:
		*offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
		*minor = ((*minor / 64) * PARTS_PER_DISK) +
			EMULATED_HD_DISK_MINOR_OFFSET;
		break;
	case XEN_IDE1_MAJOR:
		*offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
		*minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
			EMULATED_HD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK0_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK1_MAJOR:
	case XEN_SCSI_DISK2_MAJOR:
	case XEN_SCSI_DISK3_MAJOR:
	case XEN_SCSI_DISK4_MAJOR:
	case XEN_SCSI_DISK5_MAJOR:
	case XEN_SCSI_DISK6_MAJOR:
	case XEN_SCSI_DISK7_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) +
			((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
			EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor +
			((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
			EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK8_MAJOR:
	case XEN_SCSI_DISK9_MAJOR:
	case XEN_SCSI_DISK10_MAJOR:
	case XEN_SCSI_DISK11_MAJOR:
	case XEN_SCSI_DISK12_MAJOR:
	case XEN_SCSI_DISK13_MAJOR:
	case XEN_SCSI_DISK14_MAJOR:
	case XEN_SCSI_DISK15_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) +
			((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
			EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor +
			((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
			EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XENVBD_MAJOR:
		*offset = *minor / PARTS_PER_DISK;
		break;
	default:
		printk(KERN_WARNING "blkfront: your disk configuration is "
		       "incorrect, please use an xvd device instead\n");
		return -ENODEV;
	}
	return 0;
}

static char *encode_disk_name(char *ptr, unsigned int n)
{
	if (n >= 26)
		ptr = encode_disk_name(ptr, n / 26 - 1);
	*ptr = 'a' + n % 26;
	return ptr + 1;
}

static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
			       struct blkfront_info *info,
			       u16 vdisk_info, u16 sector_size,
			       unsigned int physical_sector_size)
{
	struct gendisk *gd;
	int nr_minors = 1;
	int err;
	unsigned int offset;
	int minor;
	int nr_parts;
	char *ptr;

	BUG_ON(info->gd != NULL);
	BUG_ON(info->rq != NULL);

	if ((info->vdevice>>EXT_SHIFT) > 1) {
		/* this is above the extended range; something is wrong */
		printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
		return -ENODEV;
	}

	if (!VDEV_IS_EXTENDED(info->vdevice)) {
		err = xen_translate_vdev(info->vdevice, &minor, &offset);
		if (err)
			return err;
		nr_parts = PARTS_PER_DISK;
	} else {
		minor = BLKIF_MINOR_EXT(info->vdevice);
		nr_parts = PARTS_PER_EXT_DISK;
		offset = minor / nr_parts;
		if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
			printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
			       "emulated IDE disks,\n\t choose an xvd device name "
			       "from xvde on\n", info->vdevice);
	}
	if (minor >> MINORBITS) {
		pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
			info->vdevice, minor);
		return -ENODEV;
	}

	if ((minor % nr_parts) == 0)
		nr_minors = nr_parts;

	err = xlbd_reserve_minors(minor, nr_minors);
	if (err)
		goto out;
	err = -ENODEV;

	gd = alloc_disk(nr_minors);
	if (gd == NULL)
		goto release;

	strcpy(gd->disk_name, DEV_NAME);
	ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
	BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
	if (nr_minors > 1)
		*ptr = 0;
	else
		snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
			 "%d", minor & (nr_parts - 1));

	gd->major = XENVBD_MAJOR;
	gd->first_minor = minor;
	gd->fops = &xlvbd_block_fops;
	gd->private_data = info;
	gd->driverfs_dev = &(info->xbdev->dev);
	set_capacity(gd, capacity);

	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
				 info->max_indirect_segments ? :
				 BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		del_gendisk(gd);
		goto release;
	}

	info->rq = gd->queue;
	info->gd = gd;

	xlvbd_flush(info);

	if (vdisk_info & VDISK_READONLY)
		set_disk_ro(gd, 1);

	if (vdisk_info & VDISK_REMOVABLE)
		gd->flags |= GENHD_FL_REMOVABLE;

	if (vdisk_info & VDISK_CDROM)
		gd->flags |= GENHD_FL_CD;

	return 0;

 release:
	xlbd_release_minors(minor, nr_minors);
 out:
	return err;
}

static void xlvbd_release_gendisk(struct blkfront_info *info)
{
	unsigned int minor, nr_minors;

	if (info->rq == NULL)
		return;

	/* No more blkif_request(). */
	blk_mq_stop_hw_queues(info->rq);

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work(&info->work);

	del_gendisk(info->gd);

	minor = info->gd->first_minor;
	nr_minors = info->gd->minors;
	xlbd_release_minors(minor, nr_minors);

	blk_cleanup_queue(info->rq);
	blk_mq_free_tag_set(&info->tag_set);
	info->rq = NULL;

	put_disk(info->gd);
	info->gd = NULL;
}

/* Must be called with io_lock held */
static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring))
		blk_mq_start_stopped_hw_queues(info->rq, true);
}

static void blkif_restart_queue(struct work_struct *work)
{
	struct blkfront_info *info = container_of(work, struct blkfront_info, work);

	spin_lock_irq(&info->io_lock);
	if (info->connected == BLKIF_STATE_CONNECTED)
		kick_pending_request_queues(info);
	spin_unlock_irq(&info->io_lock);
}

static void blkif_free(struct blkfront_info *info, int suspend)
{
	struct grant *persistent_gnt;
	struct grant *n;
	int i, j, segs;

	/* Prevent new requests being issued until we fix things up. */
	spin_lock_irq(&info->io_lock);
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	/* No more blkif_request(). */
	if (info->rq)
		blk_mq_stop_hw_queues(info->rq);

	/* Remove all persistent grants */
	if (!list_empty(&info->grants)) {
		list_for_each_entry_safe(persistent_gnt, n,
					 &info->grants, node) {
			list_del(&persistent_gnt->node);
			if (persistent_gnt->gref != GRANT_INVALID_REF) {
				gnttab_end_foreign_access(persistent_gnt->gref,
							  0, 0UL);
				info->persistent_gnts_c--;
			}
			if (info->feature_persistent)
				__free_page(persistent_gnt->page);
			kfree(persistent_gnt);
		}
	}
	BUG_ON(info->persistent_gnts_c != 0);

	/*
	 * Remove indirect pages, this only happens when using indirect
	 * descriptors but not persistent grants
	 */
	if (!list_empty(&info->indirect_pages)) {
		struct page *indirect_page, *n;

		BUG_ON(info->feature_persistent);
		list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
			list_del(&indirect_page->lru);
			__free_page(indirect_page);
		}
	}

	for (i = 0; i < BLK_RING_SIZE(info); i++) {
		/*
		 * Clear persistent grants present in requests already
		 * on the shared ring
		 */
		if (!info->shadow[i].request)
			goto free_shadow;

		segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
			info->shadow[i].req.u.indirect.nr_segments :
			info->shadow[i].req.u.rw.nr_segments;
		for (j = 0; j < segs; j++) {
			persistent_gnt = info->shadow[i].grants_used[j];
			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
			if (info->feature_persistent)
				__free_page(persistent_gnt->page);
			kfree(persistent_gnt);
		}

		if (info->shadow[i].req.operation != BLKIF_OP_INDIRECT)
			/*
			 * If this is not an indirect operation don't try to
			 * free indirect segments
			 */
			goto free_shadow;

		for (j = 0; j < INDIRECT_GREFS(segs); j++) {
			persistent_gnt = info->shadow[i].indirect_grants[j];
			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
			__free_page(persistent_gnt->page);
			kfree(persistent_gnt);
		}

free_shadow:
		kfree(info->shadow[i].grants_used);
		info->shadow[i].grants_used = NULL;
		kfree(info->shadow[i].indirect_grants);
		info->shadow[i].indirect_grants = NULL;
		kfree(info->shadow[i].sg);
		info->shadow[i].sg = NULL;
	}

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irq(&info->io_lock);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work(&info->work);

	/* Free resources associated with old device channel. */
	for (i = 0; i < info->nr_ring_pages; i++) {
		if (info->ring_ref[i] != GRANT_INVALID_REF) {
			gnttab_end_foreign_access(info->ring_ref[i], 0, 0);
			info->ring_ref[i] = GRANT_INVALID_REF;
		}
	}
	free_pages((unsigned long)info->ring.sring, get_order(info->nr_ring_pages * PAGE_SIZE));
	info->ring.sring = NULL;

	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->evtchn = info->irq = 0;
}

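/*
 * Completion path: for persistent-grant reads the payload is copied
 * back out of the shared pages, and every grant used by the request is
 * either kept (still mapped by the backend) or unmapped and recycled
 * into the free pool.
 */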
static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
			     struct blkif_response *bret)
{
	int i = 0;
	struct scatterlist *sg;
	char *bvec_data;
	void *shared_data;
	int nseg;

	nseg = s->req.operation == BLKIF_OP_INDIRECT ?
		s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;

	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
		for_each_sg(s->sg, sg, nseg, i) {
			BUG_ON(sg->offset + sg->length > PAGE_SIZE);
			shared_data = kmap_atomic(s->grants_used[i]->page);
			bvec_data = kmap_atomic(sg_page(sg));
			memcpy(bvec_data + sg->offset,
			       shared_data + sg->offset,
			       sg->length);
			kunmap_atomic(bvec_data);
			kunmap_atomic(shared_data);
		}
	}
	/* Add the persistent grant into the list of free grants */
	for (i = 0; i < nseg; i++) {
		if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
			/*
			 * If the grant is still mapped by the backend (the
			 * backend has chosen to make this grant persistent)
			 * we add it at the head of the list, so it will be
			 * reused first.
			 */
			if (!info->feature_persistent)
				pr_alert_ratelimited("backend has not unmapped grant: %u\n",
						     s->grants_used[i]->gref);
			list_add(&s->grants_used[i]->node, &info->grants);
			info->persistent_gnts_c++;
		} else {
			/*
			 * If the grant is not mapped by the backend we end the
			 * foreign access and add it to the tail of the list,
			 * so it will not be picked again unless we run out of
			 * persistent grants.
			 */
			gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
			s->grants_used[i]->gref = GRANT_INVALID_REF;
			list_add_tail(&s->grants_used[i]->node, &info->grants);
		}
	}
	if (s->req.operation == BLKIF_OP_INDIRECT) {
		for (i = 0; i < INDIRECT_GREFS(nseg); i++) {
			if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
				if (!info->feature_persistent)
					pr_alert_ratelimited("backend has not unmapped grant: %u\n",
							     s->indirect_grants[i]->gref);
				list_add(&s->indirect_grants[i]->node, &info->grants);
				info->persistent_gnts_c++;
			} else {
				struct page *indirect_page;

				gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
				/*
				 * Add the used indirect page back to the list of
				 * available pages for indirect grefs.
				 */
				if (!info->feature_persistent) {
					indirect_page = s->indirect_grants[i]->page;
					list_add(&indirect_page->lru, &info->indirect_pages);
				}
				s->indirect_grants[i]->gref = GRANT_INVALID_REF;
				list_add_tail(&s->indirect_grants[i]->node, &info->grants);
			}
		}
	}
}

static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
	struct request *req;
	struct blkif_response *bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_info *info = (struct blkfront_info *)dev_id;
	int error;

	spin_lock_irqsave(&info->io_lock, flags);

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
		spin_unlock_irqrestore(&info->io_lock, flags);
		return IRQ_HANDLED;
	}

 again:
	rp = info->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		unsigned long id;

		bret = RING_GET_RESPONSE(&info->ring, i);
		id = bret->id;
		/*
		 * The backend has messed up and given us an id that we would
		 * never have given to it (we stamp it up to BLK_RING_SIZE -
		 * look in get_id_from_freelist).
		 */
		if (id >= BLK_RING_SIZE(info)) {
			WARN(1, "%s: response to %s has incorrect id (%ld)\n",
			     info->gd->disk_name, op_name(bret->operation), id);
			/* We can't safely get the 'struct request' as
			 * the id is busted. */
			continue;
		}
		req = info->shadow[id].request;

		if (bret->operation != BLKIF_OP_DISCARD)
			blkif_completion(&info->shadow[id], info, bret);

		if (add_id_to_freelist(info, id)) {
			WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
			     info->gd->disk_name, op_name(bret->operation), id);
			continue;
		}

		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
		switch (bret->operation) {
		case BLKIF_OP_DISCARD:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				struct request_queue *rq = info->rq;
				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
				       info->gd->disk_name, op_name(bret->operation));
				error = -EOPNOTSUPP;
				info->feature_discard = 0;
				info->feature_secdiscard = 0;
				queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
				queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
			}
			blk_mq_complete_request(req, error);
			break;
		case BLKIF_OP_FLUSH_DISKCACHE:
		case BLKIF_OP_WRITE_BARRIER:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
				       info->gd->disk_name, op_name(bret->operation));
				error = -EOPNOTSUPP;
			}
			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
				     info->shadow[id].req.u.rw.nr_segments == 0)) {
				printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
				       info->gd->disk_name, op_name(bret->operation));
				error = -EOPNOTSUPP;
			}
			if (unlikely(error)) {
				if (error == -EOPNOTSUPP)
					error = 0;
				info->feature_flush = 0;
				xlvbd_flush(info);
			}
			/* fall through */
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret->status != BLKIF_RSP_OKAY))
				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
					"request: %x\n", bret->status);

			blk_mq_complete_request(req, error);
			break;
		default:
			BUG();
		}
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		info->ring.sring->rsp_event = i + 1;

	kick_pending_request_queues(info);

	spin_unlock_irqrestore(&info->io_lock, flags);

	return IRQ_HANDLED;
}

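/*
 * The shared ring may span several pages (nr_ring_pages); one grant
 * reference is produced per page and advertised to the backend via
 * xenstore, either as a single "ring-ref" or as "ring-ref%u" entries
 * plus "ring-page-order" (see talk_to_blkback() below).
 */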
static int setup_blkring(struct xenbus_device *dev,
			 struct blkfront_info *info)
{
	struct blkif_sring *sring;
	int err, i;
	unsigned long ring_size = info->nr_ring_pages * PAGE_SIZE;
	grant_ref_t gref[XENBUS_MAX_RING_PAGES];

	for (i = 0; i < info->nr_ring_pages; i++)
		info->ring_ref[i] = GRANT_INVALID_REF;

	sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
						       get_order(ring_size));
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, ring_size);

	err = xenbus_grant_ring(dev, info->ring.sring, info->nr_ring_pages, gref);
	if (err < 0) {
		free_pages((unsigned long)sring, get_order(ring_size));
		info->ring.sring = NULL;
		goto fail;
	}
	for (i = 0; i < info->nr_ring_pages; i++)
		info->ring_ref[i] = gref[i];

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn, blkif_interrupt, 0,
					"blkif", info);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
				 "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	info->irq = err;

	return 0;
fail:
	blkif_free(info, 0);
	return err;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
	struct xenbus_transaction xbt;
	int err, i;
	unsigned int max_page_order = 0;
	unsigned int ring_page_order = 0;

	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "max-ring-page-order", "%u", &max_page_order);
	if (err != 1)
		info->nr_ring_pages = 1;
	else {
		ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
		info->nr_ring_pages = 1 << ring_page_order;
	}

	/* Create shared ring, alloc event channel. */
	err = setup_blkring(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	if (info->nr_ring_pages == 1) {
		err = xenbus_printf(xbt, dev->nodename,
				    "ring-ref", "%u", info->ring_ref[0]);
		if (err) {
			message = "writing ring-ref";
			goto abort_transaction;
		}
	} else {
		err = xenbus_printf(xbt, dev->nodename,
				    "ring-page-order", "%u", ring_page_order);
		if (err) {
			message = "writing ring-page-order";
			goto abort_transaction;
		}

		for (i = 0; i < info->nr_ring_pages; i++) {
			char ring_ref_name[RINGREF_NAME_LEN];

			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
			err = xenbus_printf(xbt, dev->nodename, ring_ref_name,
					    "%u", info->ring_ref[i]);
			if (err) {
				message = "writing ring-ref";
				goto abort_transaction;
			}
		}
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (err) {
		message = "writing protocol";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "feature-persistent", "%u", 1);
	if (err)
		dev_warn(&dev->dev,
			 "writing persistent grants feature to xenbus");

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_blkring;
	}

	for (i = 0; i < BLK_RING_SIZE(info); i++)
		info->shadow[i].req.u.rw.id = i+1;
	info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(dev, err, "%s", message);
 destroy_blkring:
	blkif_free(info, 0);
 out:
	return err;
}

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err, vdevice;
	struct blkfront_info *info;

	/* FIXME: Use dynamic device id if this is not set. */
	err = xenbus_scanf(XBT_NIL, dev->nodename,
			   "virtual-device", "%i", &vdevice);
	if (err != 1) {
		/* go looking in the extended area instead */
		err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
				   "%i", &vdevice);
		if (err != 1) {
			xenbus_dev_fatal(dev, err, "reading virtual-device");
			return err;
		}
	}

	if (xen_hvm_domain()) {
		char *type;
		int len;
		/* no unplug has been done: do not hook devices != xen vbds */
		if (xen_has_pv_and_legacy_disk_devices()) {
			int major;

			if (!VDEV_IS_EXTENDED(vdevice))
				major = BLKIF_MAJOR(vdevice);
			else
				major = XENVBD_MAJOR;

			if (major != XENVBD_MAJOR) {
				printk(KERN_INFO
				       "%s: HVM does not support vbd %d as xen block device\n",
				       __func__, vdevice);
				return -ENODEV;
			}
		}
		/* do not create a PV cdrom device if we are an HVM guest */
		type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
		if (IS_ERR(type))
			return -ENODEV;
		if (strncmp(type, "cdrom", 5) == 0) {
			kfree(type);
			return -ENODEV;
		}
		kfree(type);
	}
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}

	mutex_init(&info->mutex);
	spin_lock_init(&info->io_lock);
	info->xbdev = dev;
	info->vdevice = vdevice;
	INIT_LIST_HEAD(&info->grants);
	INIT_LIST_HEAD(&info->indirect_pages);
	info->persistent_gnts_c = 0;
	info->connected = BLKIF_STATE_DISCONNECTED;
	INIT_WORK(&info->work, blkif_restart_queue);

	/* Front end dir is a number, which is used as the id. */
	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
	dev_set_drvdata(&dev->dev, info);

	return 0;
}

static void split_bio_end(struct bio *bio)
{
	struct split_bio *split_bio = bio->bi_private;

	if (atomic_dec_and_test(&split_bio->pending)) {
		split_bio->bio->bi_phys_segments = 0;
		split_bio->bio->bi_error = bio->bi_error;
		bio_endio(split_bio->bio);
		kfree(split_bio);
	}
	bio_put(bio);
}

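/*
 * Recovery after resume or backend restart: the shadow state is copied
 * aside, the ring state is reinitialised, and the in-flight requests
 * and bios are requeued. Bios that exceed the (possibly smaller) new
 * segment limit are split before being resubmitted.
 */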
static int blkif_recover(struct blkfront_info *info)
{
	int i;
	struct request *req, *n;
	struct blk_shadow *copy;
	int rc;
	struct bio *bio, *cloned_bio;
	struct bio_list bio_list, merge_bio;
	unsigned int segs, offset;
	int pending, size;
	struct split_bio *split_bio;
	struct list_head requests;

	/* Stage 1: Make a safe copy of the shadow state. */
	copy = kmemdup(info->shadow, sizeof(info->shadow),
		       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
	if (!copy)
		return -ENOMEM;

	/* Stage 2: Set up free list. */
	memset(&info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < BLK_RING_SIZE(info); i++)
		info->shadow[i].req.u.rw.id = i+1;
	info->shadow_free = info->ring.req_prod_pvt;
	info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;

	rc = blkfront_gather_backend_features(info);
	if (rc) {
		kfree(copy);
		return rc;
	}

	segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
	blk_queue_max_segments(info->rq, segs);
	bio_list_init(&bio_list);
	INIT_LIST_HEAD(&requests);
	for (i = 0; i < BLK_RING_SIZE(info); i++) {
		/* Not in use? */
		if (!copy[i].request)
			continue;

		/*
		 * Get the bios in the request so we can re-queue them.
		 */
		if (copy[i].request->cmd_flags &
		    (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
			/*
			 * Flush operations don't contain bios, so
			 * we need to requeue the whole request
			 */
			list_add(&copy[i].request->queuelist, &requests);
			continue;
		}
		merge_bio.head = copy[i].request->bio;
		merge_bio.tail = copy[i].request->biotail;
		bio_list_merge(&bio_list, &merge_bio);
		copy[i].request->bio = NULL;
		blk_end_request_all(copy[i].request, 0);
	}

	kfree(copy);

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	spin_lock_irq(&info->io_lock);

	/* Now safe for us to use the shared ring */
	info->connected = BLKIF_STATE_CONNECTED;

	/* Kick any other new requests queued since we resumed */
	kick_pending_request_queues(info);

	list_for_each_entry_safe(req, n, &requests, queuelist) {
		/* Requeue pending requests (flush or discard) */
		list_del_init(&req->queuelist);
		BUG_ON(req->nr_phys_segments > segs);
		blk_mq_requeue_request(req);
	}
	spin_unlock_irq(&info->io_lock);
	blk_mq_kick_requeue_list(info->rq);

	while ((bio = bio_list_pop(&bio_list)) != NULL) {
		/* Traverse the list of pending bios and re-queue them */
		if (bio_segments(bio) > segs) {
			/*
			 * This bio has more segments than what we can
			 * handle, we have to split it.
			 */
			pending = (bio_segments(bio) + segs - 1) / segs;
			split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO);
			BUG_ON(split_bio == NULL);
			atomic_set(&split_bio->pending, pending);
			split_bio->bio = bio;
			for (i = 0; i < pending; i++) {
				offset = (i * segs * PAGE_SIZE) >> 9;
				size = min((unsigned int)(segs * PAGE_SIZE) >> 9,
					   (unsigned int)bio_sectors(bio) - offset);
				cloned_bio = bio_clone(bio, GFP_NOIO);
				BUG_ON(cloned_bio == NULL);
				bio_trim(cloned_bio, offset, size);
				cloned_bio->bi_private = split_bio;
				cloned_bio->bi_end_io = split_bio_end;
				submit_bio(cloned_bio->bi_rw, cloned_bio);
			}
			/*
			 * Now we have to wait for all those smaller bios to
			 * end, so we can also end the "parent" bio.
			 */
			continue;
		}
		/* We don't need to split this bio */
		submit_bio(bio->bi_rw, bio);
	}

	return 0;
}
1666
1667/**
1668 * We are reconnecting to the backend, due to a suspend/resume, or a backend
1669 * driver restart. We tear down our blkif structure and recreate it, but
1670 * leave the device-layer structures intact so that this is transparent to the
1671 * rest of the kernel.
1672 */
1673static int blkfront_resume(struct xenbus_device *dev)
1674{
a1b4b12b 1675 struct blkfront_info *info = dev_get_drvdata(&dev->dev);
9f27ee59
JF
1676 int err;
1677
1678 dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
1679
1680 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
1681
203fd61f 1682 err = talk_to_blkback(dev, info);
402b27f9
RPM
1683
1684 /*
 1685 * We have to wait for the backend to switch to the
 1686 * connected state, since we want to read which
 1687 * features it supports.
1688 */
9f27ee59
JF
1689
1690 return err;
1691}
1692
b70f5fa0
DS
1693static void
1694blkfront_closing(struct blkfront_info *info)
1695{
1696 struct xenbus_device *xbdev = info->xbdev;
1697 struct block_device *bdev = NULL;
1698
1699 mutex_lock(&info->mutex);
1700
1701 if (xbdev->state == XenbusStateClosing) {
1702 mutex_unlock(&info->mutex);
1703 return;
1704 }
1705
1706 if (info->gd)
1707 bdev = bdget_disk(info->gd, 0);
1708
1709 mutex_unlock(&info->mutex);
1710
1711 if (!bdev) {
1712 xenbus_frontend_closed(xbdev);
1713 return;
1714 }
1715
1716 mutex_lock(&bdev->bd_mutex);
1717
7b32d104 1718 if (bdev->bd_openers) {
b70f5fa0
DS
1719 xenbus_dev_error(xbdev, -EBUSY,
1720 "Device in use; refusing to close");
1721 xenbus_switch_state(xbdev, XenbusStateClosing);
1722 } else {
1723 xlvbd_release_gendisk(info);
1724 xenbus_frontend_closed(xbdev);
1725 }
1726
1727 mutex_unlock(&bdev->bd_mutex);
1728 bdput(bdev);
1729}
9f27ee59 1730
ed30bf31
LD
1731static void blkfront_setup_discard(struct blkfront_info *info)
1732{
1733 int err;
ed30bf31
LD
1734 unsigned int discard_granularity;
1735 unsigned int discard_alignment;
5ea42986 1736 unsigned int discard_secure;
ed30bf31 1737
1c8cad6c
OH
1738 info->feature_discard = 1;
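	/*
	 * Both keys are byte values exported by the backend (illustrative
	 * example: discard-granularity = 4096 for a backing store with 4K
	 * allocation units, discard-alignment = 0).
	 */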
1739 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1740 "discard-granularity", "%u", &discard_granularity,
1741 "discard-alignment", "%u", &discard_alignment,
1742 NULL);
1743 if (!err) {
1744 info->discard_granularity = discard_granularity;
1745 info->discard_alignment = discard_alignment;
1746 }
1747 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1748 "discard-secure", "%d", &discard_secure,
1749 NULL);
1750 if (!err)
1751 info->feature_secdiscard = !!discard_secure;
ed30bf31
LD
1752}
1753
402b27f9
RPM
1754static int blkfront_setup_indirect(struct blkfront_info *info)
1755{
d50babbe 1756 unsigned int segs;
402b27f9
RPM
1757 int err, i;
1758
d50babbe 1759 if (info->max_indirect_segments == 0)
402b27f9 1760 segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
d50babbe 1761 else
402b27f9 1762 segs = info->max_indirect_segments;
402b27f9 1763
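	/*
	 * Each in-flight request needs one grant per data segment plus
	 * INDIRECT_GREFS(segs) grants for the indirect descriptor pages
	 * themselves, hence (segs + INDIRECT_GREFS(segs)) per ring slot.
	 */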
86839c56 1764 err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info));
402b27f9
RPM
1765 if (err)
1766 goto out_of_memory;
1767
bfe11d6d
RPM
1768 if (!info->feature_persistent && info->max_indirect_segments) {
1769 /*
1770 * We are using indirect descriptors but not persistent
 1771 * grants, so we need to allocate a set of pages that can be
 1772 * used for mapping indirect grefs.
1773 */
86839c56 1774 int num = INDIRECT_GREFS(segs) * BLK_RING_SIZE(info);
bfe11d6d
RPM
1775
1776 BUG_ON(!list_empty(&info->indirect_pages));
1777 for (i = 0; i < num; i++) {
1778 struct page *indirect_page = alloc_page(GFP_NOIO);
1779 if (!indirect_page)
1780 goto out_of_memory;
1781 list_add(&indirect_page->lru, &info->indirect_pages);
1782 }
1783 }
1784
86839c56 1785 for (i = 0; i < BLK_RING_SIZE(info); i++) {
402b27f9
RPM
1786 info->shadow[i].grants_used = kzalloc(
1787 sizeof(info->shadow[i].grants_used[0]) * segs,
1788 GFP_NOIO);
b7649158 1789 info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * segs, GFP_NOIO);
402b27f9
RPM
1790 if (info->max_indirect_segments)
1791 info->shadow[i].indirect_grants = kzalloc(
1792 sizeof(info->shadow[i].indirect_grants[0]) *
1793 INDIRECT_GREFS(segs),
1794 GFP_NOIO);
1795 if ((info->shadow[i].grants_used == NULL) ||
b7649158 1796 (info->shadow[i].sg == NULL) ||
402b27f9
RPM
1797 (info->max_indirect_segments &&
1798 (info->shadow[i].indirect_grants == NULL)))
1799 goto out_of_memory;
b7649158 1800 sg_init_table(info->shadow[i].sg, segs);
402b27f9
RPM
1801 }
1802
1803
1804 return 0;
1805
1806out_of_memory:
86839c56 1807 for (i = 0; i < BLK_RING_SIZE(info); i++) {
402b27f9
RPM
1808 kfree(info->shadow[i].grants_used);
1809 info->shadow[i].grants_used = NULL;
b7649158
RPM
1810 kfree(info->shadow[i].sg);
1811 info->shadow[i].sg = NULL;
402b27f9
RPM
1812 kfree(info->shadow[i].indirect_grants);
1813 info->shadow[i].indirect_grants = NULL;
1814 }
bfe11d6d
RPM
1815 if (!list_empty(&info->indirect_pages)) {
1816 struct page *indirect_page, *n;
1817 list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
1818 list_del(&indirect_page->lru);
1819 __free_page(indirect_page);
1820 }
1821 }
402b27f9
RPM
1822 return -ENOMEM;
1823}
1824
d50babbe
BL
1825/*
1826 * Gather all backend feature-*
1827 */
1828static int blkfront_gather_backend_features(struct blkfront_info *info)
1829{
1830 int err;
1831 int barrier, flush, discard, persistent;
1832 unsigned int indirect_segments;
1833
1834 info->feature_flush = 0;
1835
1836 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1837 "feature-barrier", "%d", &barrier,
1838 NULL);
1839
1840 /*
1841 * If there's no "feature-barrier" defined, then it means
1842 * we're dealing with a very old backend which writes
1843 * synchronously; nothing to do.
1844 *
1845 * If there are barriers, then we use flush.
1846 */
1847 if (!err && barrier)
1848 info->feature_flush = REQ_FLUSH | REQ_FUA;
1849 /*
 1850 * And if "feature-flush-cache" is present, use it in preference
 1851 * to barriers.
1852 */
1853 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1854 "feature-flush-cache", "%d", &flush,
1855 NULL);
1856
1857 if (!err && flush)
1858 info->feature_flush = REQ_FLUSH;
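	/*
	 * Note that "feature-flush-cache" maps to REQ_FLUSH alone,
	 * without REQ_FUA, unlike the barrier case above.
	 */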
1859
1860 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1861 "feature-discard", "%d", &discard,
1862 NULL);
1863
1864 if (!err && discard)
1865 blkfront_setup_discard(info);
1866
1867 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1868 "feature-persistent", "%u", &persistent,
1869 NULL);
1870 if (err)
1871 info->feature_persistent = 0;
1872 else
1873 info->feature_persistent = persistent;
1874
1875 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1876 "feature-max-indirect-segments", "%u", &indirect_segments,
1877 NULL);
1878 if (err)
1879 info->max_indirect_segments = 0;
1880 else
1881 info->max_indirect_segments = min(indirect_segments,
1882 xen_blkif_max_segments);
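	/* i.e. never exceed the driver-wide limit xen_blkif_max_segments. */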
1883
1884 return blkfront_setup_indirect(info);
1885}
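/*
 * The feature-* keys gathered above can be inspected from dom0 with,
 * e.g. (illustrative path, domid/devid depend on the guest):
 *   xenstore-ls /local/domain/0/backend/vbd/<domid>/<devid>
 */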
1886
9f27ee59
JF
1887/*
 1888 * Invoked when the backend is finally 'ready' (and has told us
1889 * the details about the physical device - #sectors, size, etc).
1890 */
1891static void blkfront_connect(struct blkfront_info *info)
1892{
1893 unsigned long long sectors;
1894 unsigned long sector_size;
7c4d7d71 1895 unsigned int physical_sector_size;
9f27ee59
JF
1896 unsigned int binfo;
1897 int err;
1898
1fa73be6
S
1899 switch (info->connected) {
1900 case BLKIF_STATE_CONNECTED:
1901 /*
1902 * Potentially, the back-end may be signalling
1903 * a capacity change; update the capacity.
1904 */
1905 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1906 "sectors", "%Lu", &sectors);
1907 if (XENBUS_EXIST_ERR(err))
1908 return;
1909 printk(KERN_INFO "Setting capacity to %Lu\n",
1910 sectors);
1911 set_capacity(info->gd, sectors);
2def141e 1912 revalidate_disk(info->gd);
1fa73be6 1913
402b27f9 1914 return;
1fa73be6 1915 case BLKIF_STATE_SUSPENDED:
402b27f9
RPM
1916 /*
1917 * If we are recovering from suspension, we need to wait
 1918 * for the backend to announce its features before
 1919 * reconnecting; at the very least we need to know whether the
 1920 * backend supports indirect descriptors, and how many.
1921 */
1922 blkif_recover(info);
9f27ee59
JF
1923 return;
1924
b4dddb49
JF
1925 default:
1926 break;
1fa73be6 1927 }
9f27ee59
JF
1928
1929 dev_dbg(&info->xbdev->dev, "%s:%s.\n",
1930 __func__, info->xbdev->otherend);
1931
1932 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1933 "sectors", "%llu", &sectors,
1934 "info", "%u", &binfo,
1935 "sector-size", "%lu", &sector_size,
1936 NULL);
1937 if (err) {
1938 xenbus_dev_fatal(info->xbdev, err,
1939 "reading backend fields at %s",
1940 info->xbdev->otherend);
1941 return;
1942 }
1943
7c4d7d71
SB
1944 /*
 1945 * physical-sector-size is a newer field, so old backends may not
1946 * provide this. Assume physical sector size to be the same as
1947 * sector_size in that case.
1948 */
1949 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1950 "physical-sector-size", "%u", &physical_sector_size);
1951 if (err != 1)
1952 physical_sector_size = sector_size;
1953
d50babbe 1954 err = blkfront_gather_backend_features(info);
402b27f9
RPM
1955 if (err) {
1956 xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
1957 info->xbdev->otherend);
1958 return;
1959 }
1960
7c4d7d71
SB
1961 err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
1962 physical_sector_size);
9f27ee59
JF
1963 if (err) {
1964 xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
1965 info->xbdev->otherend);
1966 return;
1967 }
1968
1969 xenbus_switch_state(info->xbdev, XenbusStateConnected);
1970
1971 /* Kick pending requests. */
3467811e 1972 spin_lock_irq(&info->io_lock);
9f27ee59
JF
1973 info->connected = BLKIF_STATE_CONNECTED;
1974 kick_pending_request_queues(info);
3467811e 1975 spin_unlock_irq(&info->io_lock);
9f27ee59
JF
1976
1977 add_disk(info->gd);
1d78d705
CL
1978
1979 info->is_ready = 1;
9f27ee59
JF
1980}
1981
9f27ee59
JF
1982/**
1983 * Callback received when the backend's state changes.
1984 */
203fd61f 1985static void blkback_changed(struct xenbus_device *dev,
9f27ee59
JF
1986 enum xenbus_state backend_state)
1987{
a1b4b12b 1988 struct blkfront_info *info = dev_get_drvdata(&dev->dev);
9f27ee59 1989
203fd61f 1990 dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);
9f27ee59
JF
1991
1992 switch (backend_state) {
9f27ee59 1993 case XenbusStateInitWait:
a9b54bb9
BL
1994 if (dev->state != XenbusStateInitialising)
1995 break;
8ab0144a
BL
1996 if (talk_to_blkback(dev, info)) {
1997 kfree(info);
1998 dev_set_drvdata(&dev->dev, NULL);
1999 break;
2000 }
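		/* fall through */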
2001 case XenbusStateInitialising:
9f27ee59 2002 case XenbusStateInitialised:
b78c9512
NI
2003 case XenbusStateReconfiguring:
2004 case XenbusStateReconfigured:
9f27ee59 2005 case XenbusStateUnknown:
9f27ee59
JF
2006 break;
2007
2008 case XenbusStateConnected:
2009 blkfront_connect(info);
2010 break;
2011
36613717
DV
2012 case XenbusStateClosed:
2013 if (dev->state == XenbusStateClosed)
2014 break;
2015 /* Missed the backend's Closing state -- fallthrough */
9f27ee59 2016 case XenbusStateClosing:
b70f5fa0 2017 blkfront_closing(info);
9f27ee59
JF
2018 break;
2019 }
2020}
2021
fa1bd359 2022static int blkfront_remove(struct xenbus_device *xbdev)
9f27ee59 2023{
fa1bd359
DS
2024 struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
2025 struct block_device *bdev = NULL;
2026 struct gendisk *disk;
9f27ee59 2027
fa1bd359 2028 dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
9f27ee59
JF
2029
2030 blkif_free(info, 0);
2031
fa1bd359
DS
2032 mutex_lock(&info->mutex);
2033
2034 disk = info->gd;
2035 if (disk)
2036 bdev = bdget_disk(disk, 0);
2037
2038 info->xbdev = NULL;
2039 mutex_unlock(&info->mutex);
2040
2041 if (!bdev) {
2042 kfree(info);
2043 return 0;
2044 }
2045
2046 /*
2047 * The xbdev was removed before we reached the Closed
2048 * state. See if it's safe to remove the disk. If the bdev
2049 * isn't closed yet, we let release take care of it.
2050 */
2051
2052 mutex_lock(&bdev->bd_mutex);
2053 info = disk->private_data;
2054
d54142c7
DS
2055 dev_warn(disk_to_dev(disk),
2056 "%s was hot-unplugged, %d stale handles\n",
2057 xbdev->nodename, bdev->bd_openers);
2058
7b32d104 2059 if (info && !bdev->bd_openers) {
fa1bd359
DS
2060 xlvbd_release_gendisk(info);
2061 disk->private_data = NULL;
0e345826 2062 kfree(info);
fa1bd359
DS
2063 }
2064
2065 mutex_unlock(&bdev->bd_mutex);
2066 bdput(bdev);
9f27ee59
JF
2067
2068 return 0;
2069}
2070
1d78d705
CL
2071static int blkfront_is_ready(struct xenbus_device *dev)
2072{
a1b4b12b 2073 struct blkfront_info *info = dev_get_drvdata(&dev->dev);
1d78d705 2074
5d7ed20e 2075 return info->is_ready && info->xbdev;
1d78d705
CL
2076}
2077
a63c848b 2078static int blkif_open(struct block_device *bdev, fmode_t mode)
9f27ee59 2079{
13961743
DS
2080 struct gendisk *disk = bdev->bd_disk;
2081 struct blkfront_info *info;
2082 int err = 0;
6e9624b8 2083
2a48fc0a 2084 mutex_lock(&blkfront_mutex);
6e9624b8 2085
13961743
DS
2086 info = disk->private_data;
2087 if (!info) {
2088 /* xbdev gone */
2089 err = -ERESTARTSYS;
2090 goto out;
2091 }
2092
2093 mutex_lock(&info->mutex);
2094
2095 if (!info->gd)
2096 /* xbdev is closed */
2097 err = -ERESTARTSYS;
2098
2099 mutex_unlock(&info->mutex);
2100
13961743 2101out:
2a48fc0a 2102 mutex_unlock(&blkfront_mutex);
13961743 2103 return err;
9f27ee59
JF
2104}
2105
db2a144b 2106static void blkif_release(struct gendisk *disk, fmode_t mode)
9f27ee59 2107{
a63c848b 2108 struct blkfront_info *info = disk->private_data;
7fd152f4
DS
2109 struct block_device *bdev;
2110 struct xenbus_device *xbdev;
2111
2a48fc0a 2112 mutex_lock(&blkfront_mutex);
7fd152f4
DS
2113
2114 bdev = bdget_disk(disk, 0);
7fd152f4 2115
2f089cb8
FP
2116 if (!bdev) {
2117 WARN(1, "Block device %s yanked out from us!\n", disk->disk_name);
2118 goto out_mutex;
2119 }
acfca3c6
DS
2120 if (bdev->bd_openers)
2121 goto out;
2122
7fd152f4
DS
2123 /*
2124 * Check if we have been instructed to close. We will have
2125 * deferred this request, because the bdev was still open.
2126 */
2127
2128 mutex_lock(&info->mutex);
2129 xbdev = info->xbdev;
2130
2131 if (xbdev && xbdev->state == XenbusStateClosing) {
2132 /* pending switch to state closed */
d54142c7 2133 dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
7fd152f4
DS
2134 xlvbd_release_gendisk(info);
2135 xenbus_frontend_closed(info->xbdev);
2136 }
2137
2138 mutex_unlock(&info->mutex);
2139
2140 if (!xbdev) {
2141 /* sudden device removal */
d54142c7 2142 dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
7fd152f4
DS
2143 xlvbd_release_gendisk(info);
2144 disk->private_data = NULL;
2145 kfree(info);
9f27ee59 2146 }
7fd152f4 2147
a4cc14ec 2148out:
dad5cf65 2149 bdput(bdev);
2f089cb8 2150out_mutex:
2a48fc0a 2151 mutex_unlock(&blkfront_mutex);
9f27ee59
JF
2152}
2153
83d5cde4 2154static const struct block_device_operations xlvbd_block_fops =
9f27ee59
JF
2155{
2156 .owner = THIS_MODULE,
a63c848b
AV
2157 .open = blkif_open,
2158 .release = blkif_release,
597592d9 2159 .getgeo = blkif_getgeo,
8a6cfeb6 2160 .ioctl = blkif_ioctl,
9f27ee59
JF
2161};
2162
2163
ec9c42ec 2164static const struct xenbus_device_id blkfront_ids[] = {
9f27ee59
JF
2165 { "vbd" },
2166 { "" }
2167};
2168
95afae48
DV
2169static struct xenbus_driver blkfront_driver = {
2170 .ids = blkfront_ids,
9f27ee59
JF
2171 .probe = blkfront_probe,
2172 .remove = blkfront_remove,
2173 .resume = blkfront_resume,
203fd61f 2174 .otherend_changed = blkback_changed,
1d78d705 2175 .is_ready = blkfront_is_ready,
95afae48 2176};
9f27ee59
JF
2177
2178static int __init xlblk_init(void)
2179{
469738e6
LE
2180 int ret;
2181
6e833587 2182 if (!xen_domain())
9f27ee59
JF
2183 return -ENODEV;
2184
86839c56
BL
2185 if (xen_blkif_max_ring_order > XENBUS_MAX_RING_PAGE_ORDER) {
2186 pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
2187 xen_blkif_max_ring_order, XENBUS_MAX_RING_PAGE_ORDER);
2188 xen_blkif_max_ring_order = 0;
2189 }
2190
51c71a3b 2191 if (!xen_has_pv_disk_devices())
b9136d20
IM
2192 return -ENODEV;
2193
9f27ee59
JF
2194 if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
2195 printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
2196 XENVBD_MAJOR, DEV_NAME);
2197 return -ENODEV;
2198 }
2199
73db144b 2200 ret = xenbus_register_frontend(&blkfront_driver);
469738e6
LE
2201 if (ret) {
2202 unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
2203 return ret;
2204 }
2205
2206 return 0;
9f27ee59
JF
2207}
2208module_init(xlblk_init);
2209
2210
5a60d0cd 2211static void __exit xlblk_exit(void)
9f27ee59 2212{
8605067f
JB
2213 xenbus_unregister_driver(&blkfront_driver);
2214 unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
2215 kfree(minors);
9f27ee59
JF
2216}
2217module_exit(xlblk_exit);
2218
2219MODULE_DESCRIPTION("Xen virtual block device frontend");
2220MODULE_LICENSE("GPL");
2221MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
d2f0c52b 2222MODULE_ALIAS("xen:vbd");
4f93f09b 2223MODULE_ALIAS("xenblk");