/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/list.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
};

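/*
 * A cached grant: pairs a grant reference with the page (pfn) it
 * covers.  Entries live on info->grants so that grants can be reused
 * across requests when the backend keeps them persistently mapped.
 */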
struct grant {
	grant_ref_t gref;
	unsigned long pfn;
	struct list_head node;
};

struct blk_shadow {
	struct blkif_request req;
	struct request *request;
	struct grant **grants_used;
	struct grant **indirect_grants;
	struct scatterlist *sg;
};

struct split_bio {
	struct bio *bio;
	atomic_t pending;
	int err;
};

static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;

/*
 * Maximum number of segments in indirect requests; the actual value used by
 * the frontend driver is the minimum of this value and the value provided
 * by the backend driver.
 */
static unsigned int xen_blkif_max_segments = 32;
module_param_named(max, xen_blkif_max_segments, int, S_IRUGO);
MODULE_PARM_DESC(max, "Maximum amount of segments in indirect requests (default is 32)");

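/*
 * Example (assuming the driver is built as the xen_blkfront module):
 * booting with xen_blkfront.max=64 raises the per-request segment cap,
 * subject to what the backend advertises.
 */
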
/*
 * Maximum order of pages to be used for the shared ring between front and
 * backend; 4KB page granularity is used.
 */
static unsigned int xen_blkif_max_ring_order;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");

#define BLK_RING_SIZE(info)	__CONST_RING_SIZE(blkif, PAGE_SIZE * (info)->nr_ring_pages)
#define BLK_MAX_RING_SIZE	__CONST_RING_SIZE(blkif, PAGE_SIZE * XENBUS_MAX_RING_PAGES)
/*
 * ring-ref%i i=(-1UL) would take 11 characters + 'ring-ref' is 8, so 19
 * characters are enough. Define to 20 to keep consistent with backend.
 */
#define RINGREF_NAME_LEN (20)

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
	spinlock_t io_lock;
	struct mutex mutex;
	struct xenbus_device *xbdev;
	struct gendisk *gd;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	int ring_ref[XENBUS_MAX_RING_PAGES];
	unsigned int nr_ring_pages;
	struct blkif_front_ring ring;
	unsigned int evtchn, irq;
	struct request_queue *rq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct blk_shadow shadow[BLK_MAX_RING_SIZE];
	struct list_head grants;
	struct list_head indirect_pages;
	unsigned int persistent_gnts_c;
	unsigned long shadow_free;
	unsigned int feature_flush;
	unsigned int feature_discard:1;
	unsigned int feature_secdiscard:1;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	unsigned int feature_persistent:1;
	unsigned int max_indirect_segments;
	int is_ready;
	struct blk_mq_tag_set tag_set;
};

static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);

#define GRANT_INVALID_REF	0

#define PARTS_PER_DISK		16
#define PARTS_PER_EXT_DISK	256

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define EXT_SHIFT 28
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
#define EMULATED_HD_DISK_MINOR_OFFSET (0)
#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
#define EMULATED_SD_DISK_MINOR_OFFSET (0)
#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)

#define DEV_NAME	"xvd"	/* name in /dev */

#define SEGS_PER_INDIRECT_FRAME \
	(PAGE_SIZE/sizeof(struct blkif_request_segment))
#define INDIRECT_GREFS(_segs) \
	((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)

static int blkfront_setup_indirect(struct blkfront_info *info);
static int blkfront_gather_backend_features(struct blkfront_info *info);

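/*
 * The shadow free list is threaded through the unused entries'
 * req.u.rw.id fields: shadow_free holds the index of the first free
 * entry and each free entry's id holds the index of the next one.
 */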
static int get_id_from_freelist(struct blkfront_info *info)
{
	unsigned long free = info->shadow_free;
	BUG_ON(free >= BLK_RING_SIZE(info));
	info->shadow_free = info->shadow[free].req.u.rw.id;
	info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
	return free;
}

static int add_id_to_freelist(struct blkfront_info *info,
			      unsigned long id)
{
	if (info->shadow[id].req.u.rw.id != id)
		return -EINVAL;
	if (info->shadow[id].request == NULL)
		return -EINVAL;
	info->shadow[id].req.u.rw.id = info->shadow_free;
	info->shadow[id].request = NULL;
	info->shadow_free = id;
	return 0;
}

static int fill_grant_buffer(struct blkfront_info *info, int num)
{
	struct page *granted_page;
	struct grant *gnt_list_entry, *n;
	int i = 0;

	while (i < num) {
		gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
		if (!gnt_list_entry)
			goto out_of_memory;

		if (info->feature_persistent) {
			granted_page = alloc_page(GFP_NOIO);
			if (!granted_page) {
				kfree(gnt_list_entry);
				goto out_of_memory;
			}
			gnt_list_entry->pfn = page_to_pfn(granted_page);
		}

		gnt_list_entry->gref = GRANT_INVALID_REF;
		list_add(&gnt_list_entry->node, &info->grants);
		i++;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(gnt_list_entry, n,
				 &info->grants, node) {
		list_del(&gnt_list_entry->node);
		if (info->feature_persistent)
			__free_page(pfn_to_page(gnt_list_entry->pfn));
		kfree(gnt_list_entry);
		i--;
	}
	BUG_ON(i != 0);
	return -ENOMEM;
}

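/*
 * Hand out a grant for one page of a request.  A still-granted entry
 * from the persistent list is reused as-is; otherwise a fresh grant
 * reference is claimed from gref_head and granted to the backend.
 */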
static struct grant *get_grant(grant_ref_t *gref_head,
			       unsigned long pfn,
			       struct blkfront_info *info)
{
	struct grant *gnt_list_entry;
	unsigned long buffer_mfn;

	BUG_ON(list_empty(&info->grants));
	gnt_list_entry = list_first_entry(&info->grants, struct grant,
					  node);
	list_del(&gnt_list_entry->node);

	if (gnt_list_entry->gref != GRANT_INVALID_REF) {
		info->persistent_gnts_c--;
		return gnt_list_entry;
	}

	/* Assign a gref to this page */
	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
	BUG_ON(gnt_list_entry->gref == -ENOSPC);
	if (!info->feature_persistent) {
		BUG_ON(!pfn);
		gnt_list_entry->pfn = pfn;
	}
	buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
	gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
					info->xbdev->otherend_id,
					buffer_mfn, 0);
	return gnt_list_entry;
}

static const char *op_name(int op)
{
	static const char *const names[] = {
		[BLKIF_OP_READ] = "read",
		[BLKIF_OP_WRITE] = "write",
		[BLKIF_OP_WRITE_BARRIER] = "barrier",
		[BLKIF_OP_FLUSH_DISKCACHE] = "flush",
		[BLKIF_OP_DISCARD] = "discard" };

	if (op < 0 || op >= ARRAY_SIZE(names))
		return "unknown";

	if (!names[op])
		return "reserved";

	return names[op];
}

static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;
	int rc;

	if (end > nr_minors) {
		unsigned long *bitmap, *old;

		bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
				 GFP_KERNEL);
		if (bitmap == NULL)
			return -ENOMEM;

		spin_lock(&minor_lock);
		if (end > nr_minors) {
			old = minors;
			memcpy(bitmap, minors,
			       BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
			minors = bitmap;
			nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
		} else
			old = bitmap;
		spin_unlock(&minor_lock);
		kfree(old);
	}

	spin_lock(&minor_lock);
	if (find_next_bit(minors, end, minor) >= end) {
		bitmap_set(minors, minor, nr);
		rc = 0;
	} else
		rc = -EBUSY;
	spin_unlock(&minor_lock);

	return rc;
}

static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;

	BUG_ON(end > nr_minors);
	spin_lock(&minor_lock);
	bitmap_clear(minors, minor, nr);
	spin_unlock(&minor_lock);
}

static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
	schedule_work(&info->work);
}

static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	/* We don't have real geometry info, but let's at least return
	   values consistent with the size of the device */
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}

static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned command, unsigned long argument)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;
	int i;

	dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
		command, (long)argument);

	switch (command) {
	case CDROMMULTISESSION:
		dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY: {
		struct gendisk *gd = info->gd;
		if (gd->flags & GENHD_FL_CD)
			return 0;
		return -EINVAL;
	}

	default:
		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
		  command);*/
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}

/*
 * Generate a Xen blkfront IO request from a blk layer request.  Reads
 * and writes are handled as expected.
 *
 * @req: a request struct
 */
static int blkif_queue_request(struct request *req)
{
	struct blkfront_info *info = req->rq_disk->private_data;
	struct blkif_request *ring_req;
	unsigned long id;
	unsigned int fsect, lsect;
	int i, ref, n;
	struct blkif_request_segment *segments = NULL;

	/*
	 * Used to store if we are able to queue the request by just using
	 * existing persistent grants, or if we have to get new grants,
	 * as there are not sufficiently many free.
	 */
	bool new_persistent_gnts;
	grant_ref_t gref_head;
	struct grant *gnt_list_entry = NULL;
	struct scatterlist *sg;
	int nseg, max_grefs;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	max_grefs = req->nr_phys_segments;
	if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
		/*
		 * If we are using indirect segments we need to account
		 * for the indirect grefs used in the request.
		 */
		max_grefs += INDIRECT_GREFS(req->nr_phys_segments);

	/* Check if we have enough grants to allocate a request */
	if (info->persistent_gnts_c < max_grefs) {
		new_persistent_gnts = 1;
		if (gnttab_alloc_grant_references(
		    max_grefs - info->persistent_gnts_c,
		    &gref_head) < 0) {
			gnttab_request_free_callback(
				&info->callback,
				blkif_restart_queue_callback,
				info,
				max_grefs);
			return 1;
		}
	} else
		new_persistent_gnts = 0;

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = get_id_from_freelist(info);
	info->shadow[id].request = req;

	if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
		ring_req->operation = BLKIF_OP_DISCARD;
		ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
		ring_req->u.discard.id = id;
		ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
		if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
			ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
		else
			ring_req->u.discard.flag = 0;
	} else {
		BUG_ON(info->max_indirect_segments == 0 &&
		       req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
		BUG_ON(info->max_indirect_segments &&
		       req->nr_phys_segments > info->max_indirect_segments);
		nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg);
		ring_req->u.rw.id = id;
		if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			/*
			 * The indirect operation can only be a BLKIF_OP_READ or
			 * BLKIF_OP_WRITE
			 */
			BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
			ring_req->operation = BLKIF_OP_INDIRECT;
			ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
				BLKIF_OP_WRITE : BLKIF_OP_READ;
			ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
			ring_req->u.indirect.handle = info->handle;
			ring_req->u.indirect.nr_segments = nseg;
		} else {
			ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
			ring_req->u.rw.handle = info->handle;
			ring_req->operation = rq_data_dir(req) ?
				BLKIF_OP_WRITE : BLKIF_OP_READ;
			if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
				/*
				 * Ideally we can do an unordered flush-to-disk.
				 * In case the backend only supports barriers,
				 * use that. A barrier request is a superset of
				 * FUA, so we can implement it the same way.
				 * (It's also a FLUSH+FUA, since it is
				 * guaranteed ordered WRT previous writes.)
				 */
				switch (info->feature_flush &
					((REQ_FLUSH|REQ_FUA))) {
				case REQ_FLUSH|REQ_FUA:
					ring_req->operation =
						BLKIF_OP_WRITE_BARRIER;
					break;
				case REQ_FLUSH:
					ring_req->operation =
						BLKIF_OP_FLUSH_DISKCACHE;
					break;
				default:
					ring_req->operation = 0;
				}
			}
			ring_req->u.rw.nr_segments = nseg;
		}
		for_each_sg(info->shadow[id].sg, sg, nseg, i) {
			fsect = sg->offset >> 9;
			lsect = fsect + (sg->length >> 9) - 1;

			if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
			    (i % SEGS_PER_INDIRECT_FRAME == 0)) {
				unsigned long uninitialized_var(pfn);

				if (segments)
					kunmap_atomic(segments);

				n = i / SEGS_PER_INDIRECT_FRAME;
				if (!info->feature_persistent) {
					struct page *indirect_page;

					/* Fetch a pre-allocated page to use for indirect grefs */
					BUG_ON(list_empty(&info->indirect_pages));
					indirect_page = list_first_entry(&info->indirect_pages,
									 struct page, lru);
					list_del(&indirect_page->lru);
					pfn = page_to_pfn(indirect_page);
				}
				gnt_list_entry = get_grant(&gref_head, pfn, info);
				info->shadow[id].indirect_grants[n] = gnt_list_entry;
				segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
				ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
			}

			gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info);
			ref = gnt_list_entry->gref;

			info->shadow[id].grants_used[i] = gnt_list_entry;

			if (rq_data_dir(req) && info->feature_persistent) {
				char *bvec_data;
				void *shared_data;

				BUG_ON(sg->offset + sg->length > PAGE_SIZE);

				shared_data = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
				bvec_data = kmap_atomic(sg_page(sg));

				/*
				 * this does not wipe data stored outside the
				 * range sg->offset..sg->offset+sg->length.
				 * Therefore, blkback *could* see data from
				 * previous requests. This is OK as long as
				 * persistent grants are shared with just one
				 * domain. It may need refactoring if this
				 * changes
				 */
				memcpy(shared_data + sg->offset,
				       bvec_data   + sg->offset,
				       sg->length);

				kunmap_atomic(bvec_data);
				kunmap_atomic(shared_data);
			}
			if (ring_req->operation != BLKIF_OP_INDIRECT) {
				ring_req->u.rw.seg[i] =
						(struct blkif_request_segment) {
							.gref       = ref,
							.first_sect = fsect,
							.last_sect  = lsect };
			} else {
				n = i % SEGS_PER_INDIRECT_FRAME;
				segments[n] =
					(struct blkif_request_segment) {
							.gref       = ref,
							.first_sect = fsect,
							.last_sect  = lsect };
			}
		}
		if (segments)
			kunmap_atomic(segments);
	}

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	if (new_persistent_gnts)
		gnttab_free_grant_references(gref_head);

	return 0;
}

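/*
 * Push queued requests to the shared ring and send an event-channel
 * notification only if RING_PUSH_REQUESTS_AND_CHECK_NOTIFY says the
 * backend actually needs waking.
 */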
static inline void flush_requests(struct blkfront_info *info)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

	if (notify)
		notify_remote_via_irq(info->irq);
}

static inline bool blkif_request_flush_invalid(struct request *req,
					       struct blkfront_info *info)
{
	return ((req->cmd_type != REQ_TYPE_FS) ||
		((req->cmd_flags & REQ_FLUSH) &&
		 !(info->feature_flush & REQ_FLUSH)) ||
		((req->cmd_flags & REQ_FUA) &&
		 !(info->feature_flush & REQ_FUA)));
}

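/*
 * blk-mq ->queue_rq handler: translate one struct request into a ring
 * slot.  Returns BUSY (and stops the hardware queue) when the ring is
 * full or grants are exhausted; kick_pending_request_queues() restarts
 * the queue once responses free up slots.
 */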
static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *qd)
{
	struct blkfront_info *info = qd->rq->rq_disk->private_data;

	blk_mq_start_request(qd->rq);
	spin_lock_irq(&info->io_lock);
	if (RING_FULL(&info->ring))
		goto out_busy;

	if (blkif_request_flush_invalid(qd->rq, info))
		goto out_err;

	if (blkif_queue_request(qd->rq))
		goto out_busy;

	flush_requests(info);
	spin_unlock_irq(&info->io_lock);
	return BLK_MQ_RQ_QUEUE_OK;

out_err:
	spin_unlock_irq(&info->io_lock);
	return BLK_MQ_RQ_QUEUE_ERROR;

out_busy:
	spin_unlock_irq(&info->io_lock);
	blk_mq_stop_hw_queue(hctx);
	return BLK_MQ_RQ_QUEUE_BUSY;
}

static struct blk_mq_ops blkfront_mq_ops = {
	.queue_rq = blkif_queue_rq,
	.map_queue = blk_mq_map_queue,
};

static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
				unsigned int physical_sector_size,
				unsigned int segments)
{
	struct request_queue *rq;
	struct blkfront_info *info = gd->private_data;

	memset(&info->tag_set, 0, sizeof(info->tag_set));
	info->tag_set.ops = &blkfront_mq_ops;
	info->tag_set.nr_hw_queues = 1;
	info->tag_set.queue_depth = BLK_RING_SIZE(info);
	info->tag_set.numa_node = NUMA_NO_NODE;
	info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	info->tag_set.cmd_size = 0;
	info->tag_set.driver_data = info;

	if (blk_mq_alloc_tag_set(&info->tag_set))
		return -1;
	rq = blk_mq_init_queue(&info->tag_set);
	if (IS_ERR(rq)) {
		blk_mq_free_tag_set(&info->tag_set);
		return -1;
	}

	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);

	if (info->feature_discard) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
		blk_queue_max_discard_sectors(rq, get_capacity(gd));
		rq->limits.discard_granularity = info->discard_granularity;
		rq->limits.discard_alignment = info->discard_alignment;
		if (info->feature_secdiscard)
			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
	}

	/* Hard sector size and max sectors impersonate the equiv. hardware. */
	blk_queue_logical_block_size(rq, sector_size);
	blk_queue_physical_block_size(rq, physical_sector_size);
	blk_queue_max_hw_sectors(rq, (segments * PAGE_SIZE) / 512);

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
	blk_queue_max_segment_size(rq, PAGE_SIZE);

	/* Ensure a merged request will fit in a single I/O ring slot. */
	blk_queue_max_segments(rq, segments);

	/* Make sure buffer addresses are sector-aligned. */
	blk_queue_dma_alignment(rq, 511);

	/* Make sure we don't use bounce buffers. */
	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);

	gd->queue = rq;

	return 0;
}

static const char *flush_info(unsigned int feature_flush)
{
	switch (feature_flush & ((REQ_FLUSH | REQ_FUA))) {
	case REQ_FLUSH|REQ_FUA:
		return "barrier: enabled;";
	case REQ_FLUSH:
		return "flush diskcache: enabled;";
	default:
		return "barrier or flush: disabled;";
	}
}

static void xlvbd_flush(struct blkfront_info *info)
{
	blk_queue_flush(info->rq, info->feature_flush);
	pr_info("blkfront: %s: %s %s %s %s %s\n",
		info->gd->disk_name, flush_info(info->feature_flush),
		"persistent grants:", info->feature_persistent ?
		"enabled;" : "disabled;", "indirect descriptors:",
		info->max_indirect_segments ? "enabled;" : "disabled;");
}

static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
{
	int major;
	major = BLKIF_MAJOR(vdevice);
	*minor = BLKIF_MINOR(vdevice);
	switch (major) {
	case XEN_IDE0_MAJOR:
		*offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
		*minor = ((*minor / 64) * PARTS_PER_DISK) +
			EMULATED_HD_DISK_MINOR_OFFSET;
		break;
	case XEN_IDE1_MAJOR:
		*offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
		*minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
			EMULATED_HD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK0_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK1_MAJOR:
	case XEN_SCSI_DISK2_MAJOR:
	case XEN_SCSI_DISK3_MAJOR:
	case XEN_SCSI_DISK4_MAJOR:
	case XEN_SCSI_DISK5_MAJOR:
	case XEN_SCSI_DISK6_MAJOR:
	case XEN_SCSI_DISK7_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) +
			((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
			EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor +
			((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
			EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK8_MAJOR:
	case XEN_SCSI_DISK9_MAJOR:
	case XEN_SCSI_DISK10_MAJOR:
	case XEN_SCSI_DISK11_MAJOR:
	case XEN_SCSI_DISK12_MAJOR:
	case XEN_SCSI_DISK13_MAJOR:
	case XEN_SCSI_DISK14_MAJOR:
	case XEN_SCSI_DISK15_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) +
			((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
			EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor +
			((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
			EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XENVBD_MAJOR:
		*offset = *minor / PARTS_PER_DISK;
		break;
	default:
		printk(KERN_WARNING "blkfront: your disk configuration is "
		       "incorrect, please use an xvd device instead\n");
		return -ENODEV;
	}
	return 0;
}

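/*
 * Encode the disk name suffix in bijective base 26: 0 -> "a",
 * 25 -> "z", 26 -> "aa", ...  Returns the position just past the last
 * character written.
 */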
static char *encode_disk_name(char *ptr, unsigned int n)
{
	if (n >= 26)
		ptr = encode_disk_name(ptr, n / 26 - 1);
	*ptr = 'a' + n % 26;
	return ptr + 1;
}

static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
			       struct blkfront_info *info,
			       u16 vdisk_info, u16 sector_size,
			       unsigned int physical_sector_size)
{
	struct gendisk *gd;
	int nr_minors = 1;
	int err;
	unsigned int offset;
	int minor;
	int nr_parts;
	char *ptr;

	BUG_ON(info->gd != NULL);
	BUG_ON(info->rq != NULL);

	if ((info->vdevice>>EXT_SHIFT) > 1) {
		/* this is above the extended range; something is wrong */
		printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
		return -ENODEV;
	}

	if (!VDEV_IS_EXTENDED(info->vdevice)) {
		err = xen_translate_vdev(info->vdevice, &minor, &offset);
		if (err)
			return err;
		nr_parts = PARTS_PER_DISK;
	} else {
		minor = BLKIF_MINOR_EXT(info->vdevice);
		nr_parts = PARTS_PER_EXT_DISK;
		offset = minor / nr_parts;
		if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
			printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
			       "emulated IDE disks,\n\t choose an xvd device name "
			       "from xvde on\n", info->vdevice);
	}
	if (minor >> MINORBITS) {
		pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
			info->vdevice, minor);
		return -ENODEV;
	}

	if ((minor % nr_parts) == 0)
		nr_minors = nr_parts;

	err = xlbd_reserve_minors(minor, nr_minors);
	if (err)
		goto out;
	err = -ENODEV;

	gd = alloc_disk(nr_minors);
	if (gd == NULL)
		goto release;

	strcpy(gd->disk_name, DEV_NAME);
	ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
	BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
	if (nr_minors > 1)
		*ptr = 0;
	else
		snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
			 "%d", minor & (nr_parts - 1));

	gd->major = XENVBD_MAJOR;
	gd->first_minor = minor;
	gd->fops = &xlvbd_block_fops;
	gd->private_data = info;
	gd->driverfs_dev = &(info->xbdev->dev);
	set_capacity(gd, capacity);

	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
				 info->max_indirect_segments ? :
				 BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		del_gendisk(gd);
		goto release;
	}

	info->rq = gd->queue;
	info->gd = gd;

	xlvbd_flush(info);

	if (vdisk_info & VDISK_READONLY)
		set_disk_ro(gd, 1);

	if (vdisk_info & VDISK_REMOVABLE)
		gd->flags |= GENHD_FL_REMOVABLE;

	if (vdisk_info & VDISK_CDROM)
		gd->flags |= GENHD_FL_CD;

	return 0;

 release:
	xlbd_release_minors(minor, nr_minors);
 out:
	return err;
}

static void xlvbd_release_gendisk(struct blkfront_info *info)
{
	unsigned int minor, nr_minors;

	if (info->rq == NULL)
		return;

	/* No more blkif_request(). */
	blk_mq_stop_hw_queues(info->rq);

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work(&info->work);

	del_gendisk(info->gd);

	minor = info->gd->first_minor;
	nr_minors = info->gd->minors;
	xlbd_release_minors(minor, nr_minors);

	blk_cleanup_queue(info->rq);
	blk_mq_free_tag_set(&info->tag_set);
	info->rq = NULL;

	put_disk(info->gd);
	info->gd = NULL;
}

/* Must be called with io_lock held */
static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring))
		blk_mq_start_stopped_hw_queues(info->rq, true);
}

static void blkif_restart_queue(struct work_struct *work)
{
	struct blkfront_info *info = container_of(work, struct blkfront_info, work);

	spin_lock_irq(&info->io_lock);
	if (info->connected == BLKIF_STATE_CONNECTED)
		kick_pending_request_queues(info);
	spin_unlock_irq(&info->io_lock);
}

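/*
 * Disconnect from the backend: stop the queues, drop all persistent
 * grants and indirect pages, free the shared ring and unbind the event
 * channel.  'suspend' selects whether we end up SUSPENDED (so the
 * state can be recovered after migration) or DISCONNECTED.
 */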
static void blkif_free(struct blkfront_info *info, int suspend)
{
	struct grant *persistent_gnt;
	struct grant *n;
	int i, j, segs;

	/* Prevent new requests being issued until we fix things up. */
	spin_lock_irq(&info->io_lock);
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	/* No more blkif_request(). */
	if (info->rq)
		blk_mq_stop_hw_queues(info->rq);

	/* Remove all persistent grants */
	if (!list_empty(&info->grants)) {
		list_for_each_entry_safe(persistent_gnt, n,
					 &info->grants, node) {
			list_del(&persistent_gnt->node);
			if (persistent_gnt->gref != GRANT_INVALID_REF) {
				gnttab_end_foreign_access(persistent_gnt->gref,
							  0, 0UL);
				info->persistent_gnts_c--;
			}
			if (info->feature_persistent)
				__free_page(pfn_to_page(persistent_gnt->pfn));
			kfree(persistent_gnt);
		}
	}
	BUG_ON(info->persistent_gnts_c != 0);

	/*
	 * Remove indirect pages, this only happens when using indirect
	 * descriptors but not persistent grants
	 */
	if (!list_empty(&info->indirect_pages)) {
		struct page *indirect_page, *n;

		BUG_ON(info->feature_persistent);
		list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
			list_del(&indirect_page->lru);
			__free_page(indirect_page);
		}
	}

	for (i = 0; i < BLK_RING_SIZE(info); i++) {
		/*
		 * Clear persistent grants present in requests already
		 * on the shared ring
		 */
		if (!info->shadow[i].request)
			goto free_shadow;

		segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
		       info->shadow[i].req.u.indirect.nr_segments :
		       info->shadow[i].req.u.rw.nr_segments;
		for (j = 0; j < segs; j++) {
			persistent_gnt = info->shadow[i].grants_used[j];
			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
			if (info->feature_persistent)
				__free_page(pfn_to_page(persistent_gnt->pfn));
			kfree(persistent_gnt);
		}

		if (info->shadow[i].req.operation != BLKIF_OP_INDIRECT)
			/*
			 * If this is not an indirect operation don't try to
			 * free indirect segments
			 */
			goto free_shadow;

		for (j = 0; j < INDIRECT_GREFS(segs); j++) {
			persistent_gnt = info->shadow[i].indirect_grants[j];
			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
			__free_page(pfn_to_page(persistent_gnt->pfn));
			kfree(persistent_gnt);
		}

free_shadow:
		kfree(info->shadow[i].grants_used);
		info->shadow[i].grants_used = NULL;
		kfree(info->shadow[i].indirect_grants);
		info->shadow[i].indirect_grants = NULL;
		kfree(info->shadow[i].sg);
		info->shadow[i].sg = NULL;
	}

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irq(&info->io_lock);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work(&info->work);

	/* Free resources associated with old device channel. */
	for (i = 0; i < info->nr_ring_pages; i++) {
		if (info->ring_ref[i] != GRANT_INVALID_REF) {
			gnttab_end_foreign_access(info->ring_ref[i], 0, 0);
			info->ring_ref[i] = GRANT_INVALID_REF;
		}
	}
	free_pages((unsigned long)info->ring.sring, get_order(info->nr_ring_pages * PAGE_SIZE));
	info->ring.sring = NULL;

	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->evtchn = info->irq = 0;
}

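/*
 * Post-process a completed request: for persistent-grant reads, copy
 * the data from the shared pages back into the bio pages, then recycle
 * every grant used by the request onto info->grants.
 */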
static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
			     struct blkif_response *bret)
{
	int i = 0;
	struct scatterlist *sg;
	char *bvec_data;
	void *shared_data;
	int nseg;

	nseg = s->req.operation == BLKIF_OP_INDIRECT ?
		s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;

	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
		for_each_sg(s->sg, sg, nseg, i) {
			BUG_ON(sg->offset + sg->length > PAGE_SIZE);
			shared_data = kmap_atomic(
				pfn_to_page(s->grants_used[i]->pfn));
			bvec_data = kmap_atomic(sg_page(sg));
			memcpy(bvec_data   + sg->offset,
			       shared_data + sg->offset,
			       sg->length);
			kunmap_atomic(bvec_data);
			kunmap_atomic(shared_data);
		}
	}
	/* Add the persistent grant into the list of free grants */
	for (i = 0; i < nseg; i++) {
		if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
			/*
			 * If the grant is still mapped by the backend (the
			 * backend has chosen to make this grant persistent)
			 * we add it at the head of the list, so it will be
			 * reused first.
			 */
			if (!info->feature_persistent)
				pr_alert_ratelimited("backend has not unmapped grant: %u\n",
						     s->grants_used[i]->gref);
			list_add(&s->grants_used[i]->node, &info->grants);
			info->persistent_gnts_c++;
		} else {
			/*
			 * If the grant is not mapped by the backend we end the
			 * foreign access and add it to the tail of the list,
			 * so it will not be picked again unless we run out of
			 * persistent grants.
			 */
			gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
			s->grants_used[i]->gref = GRANT_INVALID_REF;
			list_add_tail(&s->grants_used[i]->node, &info->grants);
		}
	}
	if (s->req.operation == BLKIF_OP_INDIRECT) {
		for (i = 0; i < INDIRECT_GREFS(nseg); i++) {
			if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
				if (!info->feature_persistent)
					pr_alert_ratelimited("backend has not unmapped grant: %u\n",
							     s->indirect_grants[i]->gref);
				list_add(&s->indirect_grants[i]->node, &info->grants);
				info->persistent_gnts_c++;
			} else {
				struct page *indirect_page;

				gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
				/*
				 * Add the used indirect page back to the list of
				 * available pages for indirect grefs.
				 */
				if (!info->feature_persistent) {
					indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
					list_add(&indirect_page->lru, &info->indirect_pages);
				}
				s->indirect_grants[i]->gref = GRANT_INVALID_REF;
				list_add_tail(&s->indirect_grants[i]->node, &info->grants);
			}
		}
	}
}

static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
	struct request *req;
	struct blkif_response *bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_info *info = (struct blkfront_info *)dev_id;

	spin_lock_irqsave(&info->io_lock, flags);

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
		spin_unlock_irqrestore(&info->io_lock, flags);
		return IRQ_HANDLED;
	}

 again:
	rp = info->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		unsigned long id;

		bret = RING_GET_RESPONSE(&info->ring, i);
		id   = bret->id;
		/*
		 * The backend has messed up and given us an id that we would
		 * never have given to it (we stamp it up to BLK_RING_SIZE;
		 * look in get_id_from_freelist).
		 */
		if (id >= BLK_RING_SIZE(info)) {
			WARN(1, "%s: response to %s has incorrect id (%ld)\n",
			     info->gd->disk_name, op_name(bret->operation), id);
			/* We can't safely get the 'struct request' as
			 * the id is busted. */
			continue;
		}
		req  = info->shadow[id].request;

		if (bret->operation != BLKIF_OP_DISCARD)
			blkif_completion(&info->shadow[id], info, bret);

		if (add_id_to_freelist(info, id)) {
			WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
			     info->gd->disk_name, op_name(bret->operation), id);
			continue;
		}

		req->errors = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
		switch (bret->operation) {
		case BLKIF_OP_DISCARD:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				struct request_queue *rq = info->rq;
				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
				       info->gd->disk_name, op_name(bret->operation));
				req->errors = -EOPNOTSUPP;
				info->feature_discard = 0;
				info->feature_secdiscard = 0;
				queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
				queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
			}
			blk_mq_complete_request(req);
			break;
		case BLKIF_OP_FLUSH_DISKCACHE:
		case BLKIF_OP_WRITE_BARRIER:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
				       info->gd->disk_name, op_name(bret->operation));
				req->errors = -EOPNOTSUPP;
			}
			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
				     info->shadow[id].req.u.rw.nr_segments == 0)) {
				printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
				       info->gd->disk_name, op_name(bret->operation));
				req->errors = -EOPNOTSUPP;
			}
			if (unlikely(req->errors)) {
				if (req->errors == -EOPNOTSUPP)
					req->errors = 0;
				info->feature_flush = 0;
				xlvbd_flush(info);
			}
			/* fall through */
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret->status != BLKIF_RSP_OKAY))
				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
					"request: %x\n", bret->status);

			blk_mq_complete_request(req);
			break;
		default:
			BUG();
		}
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		info->ring.sring->rsp_event = i + 1;

	kick_pending_request_queues(info);

	spin_unlock_irqrestore(&info->io_lock, flags);

	return IRQ_HANDLED;
}

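/*
 * Allocate the shared ring (nr_ring_pages pages), grant it to the
 * backend and bind the event channel used for completion interrupts.
 */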
static int setup_blkring(struct xenbus_device *dev,
			 struct blkfront_info *info)
{
	struct blkif_sring *sring;
	int err, i;
	unsigned long ring_size = info->nr_ring_pages * PAGE_SIZE;
	grant_ref_t gref[XENBUS_MAX_RING_PAGES];

	for (i = 0; i < info->nr_ring_pages; i++)
		info->ring_ref[i] = GRANT_INVALID_REF;

	sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
						       get_order(ring_size));
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, ring_size);

	err = xenbus_grant_ring(dev, info->ring.sring, info->nr_ring_pages, gref);
	if (err < 0) {
		free_pages((unsigned long)sring, get_order(ring_size));
		info->ring.sring = NULL;
		goto fail;
	}
	for (i = 0; i < info->nr_ring_pages; i++)
		info->ring_ref[i] = gref[i];

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn, blkif_interrupt, 0,
					"blkif", info);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
				 "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	info->irq = err;

	return 0;
fail:
	blkif_free(info, 0);
	return err;
}

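/*
 * Illustrative xenstore layout written by talk_to_blkback() below (the
 * key names are the ones actually used; the values are made-up
 * examples for nr_ring_pages == 2):
 *
 *   <nodename>/ring-page-order = "1"
 *   <nodename>/ring-ref0 = "8"
 *   <nodename>/ring-ref1 = "9"
 *   <nodename>/event-channel = "15"
 *   <nodename>/protocol = XEN_IO_PROTO_ABI_NATIVE
 *   <nodename>/feature-persistent = "1"
 */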
/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
	struct xenbus_transaction xbt;
	int err, i;
	unsigned int max_page_order = 0;
	unsigned int ring_page_order = 0;

	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "max-ring-page-order", "%u", &max_page_order);
	if (err != 1)
		info->nr_ring_pages = 1;
	else {
		ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
		info->nr_ring_pages = 1 << ring_page_order;
	}

	/* Create shared ring, alloc event channel. */
	err = setup_blkring(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	if (info->nr_ring_pages == 1) {
		err = xenbus_printf(xbt, dev->nodename,
				    "ring-ref", "%u", info->ring_ref[0]);
		if (err) {
			message = "writing ring-ref";
			goto abort_transaction;
		}
	} else {
		err = xenbus_printf(xbt, dev->nodename,
				    "ring-page-order", "%u", ring_page_order);
		if (err) {
			message = "writing ring-page-order";
			goto abort_transaction;
		}

		for (i = 0; i < info->nr_ring_pages; i++) {
			char ring_ref_name[RINGREF_NAME_LEN];

			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
			err = xenbus_printf(xbt, dev->nodename, ring_ref_name,
					    "%u", info->ring_ref[i]);
			if (err) {
				message = "writing ring-ref";
				goto abort_transaction;
			}
		}
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (err) {
		message = "writing protocol";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "feature-persistent", "%u", 1);
	if (err)
		dev_warn(&dev->dev,
			 "writing persistent grants feature to xenbus");

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_blkring;
	}

	for (i = 0; i < BLK_RING_SIZE(info); i++)
		info->shadow[i].req.u.rw.id = i+1;
	info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(dev, err, "%s", message);
 destroy_blkring:
	blkif_free(info, 0);
 out:
	return err;
}

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err, vdevice;
	struct blkfront_info *info;

	/* FIXME: Use dynamic device id if this is not set. */
	err = xenbus_scanf(XBT_NIL, dev->nodename,
			   "virtual-device", "%i", &vdevice);
	if (err != 1) {
		/* go looking in the extended area instead */
		err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
				   "%i", &vdevice);
		if (err != 1) {
			xenbus_dev_fatal(dev, err, "reading virtual-device");
			return err;
		}
	}

	if (xen_hvm_domain()) {
		char *type;
		int len;
		/* no unplug has been done: do not hook devices != xen vbds */
		if (xen_has_pv_and_legacy_disk_devices()) {
			int major;

			if (!VDEV_IS_EXTENDED(vdevice))
				major = BLKIF_MAJOR(vdevice);
			else
				major = XENVBD_MAJOR;

			if (major != XENVBD_MAJOR) {
				printk(KERN_INFO
				       "%s: HVM does not support vbd %d as xen block device\n",
				       __func__, vdevice);
				return -ENODEV;
			}
		}
		/* do not create a PV cdrom device if we are an HVM guest */
		type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
		if (IS_ERR(type))
			return -ENODEV;
		if (strncmp(type, "cdrom", 5) == 0) {
			kfree(type);
			return -ENODEV;
		}
		kfree(type);
	}
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}

	mutex_init(&info->mutex);
	spin_lock_init(&info->io_lock);
	info->xbdev = dev;
	info->vdevice = vdevice;
	INIT_LIST_HEAD(&info->grants);
	INIT_LIST_HEAD(&info->indirect_pages);
	info->persistent_gnts_c = 0;
	info->connected = BLKIF_STATE_DISCONNECTED;
	INIT_WORK(&info->work, blkif_restart_queue);

	/* Front end dir is a number, which is used as the id. */
	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
	dev_set_drvdata(&dev->dev, info);

	return 0;
}

static void split_bio_end(struct bio *bio, int error)
{
	struct split_bio *split_bio = bio->bi_private;

	if (error)
		split_bio->err = error;

	if (atomic_dec_and_test(&split_bio->pending)) {
		split_bio->bio->bi_phys_segments = 0;
		bio_endio(split_bio->bio, split_bio->err);
		kfree(split_bio);
	}
	bio_put(bio);
}

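/*
 * Replay requests that were in flight when the connection was lost:
 * re-queue whole flush/discard requests and re-submit the bios of
 * everything else, splitting bios that exceed the (possibly smaller)
 * new segment limit.
 */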
static int blkif_recover(struct blkfront_info *info)
{
	int i;
	struct request *req, *n;
	struct blk_shadow *copy;
	int rc;
	struct bio *bio, *cloned_bio;
	struct bio_list bio_list, merge_bio;
	unsigned int segs, offset;
	int pending, size;
	struct split_bio *split_bio;
	struct list_head requests;

	/* Stage 1: Make a safe copy of the shadow state. */
	copy = kmemdup(info->shadow, sizeof(info->shadow),
		       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
	if (!copy)
		return -ENOMEM;

	/* Stage 2: Set up free list. */
	memset(&info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < BLK_RING_SIZE(info); i++)
		info->shadow[i].req.u.rw.id = i+1;
	info->shadow_free = info->ring.req_prod_pvt;
	info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;

	rc = blkfront_gather_backend_features(info);
	if (rc) {
		kfree(copy);
		return rc;
	}

	segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
	blk_queue_max_segments(info->rq, segs);
	bio_list_init(&bio_list);
	INIT_LIST_HEAD(&requests);
	for (i = 0; i < BLK_RING_SIZE(info); i++) {
		/* Not in use? */
		if (!copy[i].request)
			continue;

		/*
		 * Get the bios in the request so we can re-queue them.
		 */
		if (copy[i].request->cmd_flags &
		    (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
			/*
			 * Flush operations don't contain bios, so
			 * we need to requeue the whole request
			 */
			list_add(&copy[i].request->queuelist, &requests);
			continue;
		}
		merge_bio.head = copy[i].request->bio;
		merge_bio.tail = copy[i].request->biotail;
		bio_list_merge(&bio_list, &merge_bio);
		copy[i].request->bio = NULL;
		blk_end_request_all(copy[i].request, 0);
	}

	kfree(copy);

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	spin_lock_irq(&info->io_lock);

	/* Now safe for us to use the shared ring */
	info->connected = BLKIF_STATE_CONNECTED;

	/* Kick any other new requests queued since we resumed */
	kick_pending_request_queues(info);

	list_for_each_entry_safe(req, n, &requests, queuelist) {
		/* Requeue pending requests (flush or discard) */
		list_del_init(&req->queuelist);
		BUG_ON(req->nr_phys_segments > segs);
		blk_mq_requeue_request(req);
	}
	spin_unlock_irq(&info->io_lock);
	blk_mq_kick_requeue_list(info->rq);

	while ((bio = bio_list_pop(&bio_list)) != NULL) {
		/* Traverse the list of pending bios and re-queue them */
		if (bio_segments(bio) > segs) {
			/*
			 * This bio has more segments than what we can
			 * handle, we have to split it.
			 */
			pending = (bio_segments(bio) + segs - 1) / segs;
			split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO);
			BUG_ON(split_bio == NULL);
			atomic_set(&split_bio->pending, pending);
			split_bio->bio = bio;
			for (i = 0; i < pending; i++) {
				offset = (i * segs * PAGE_SIZE) >> 9;
				size = min((unsigned int)(segs * PAGE_SIZE) >> 9,
					   (unsigned int)bio_sectors(bio) - offset);
				cloned_bio = bio_clone(bio, GFP_NOIO);
				BUG_ON(cloned_bio == NULL);
				bio_trim(cloned_bio, offset, size);
				cloned_bio->bi_private = split_bio;
				cloned_bio->bi_end_io = split_bio_end;
				submit_bio(cloned_bio->bi_rw, cloned_bio);
			}
			/*
			 * Now we have to wait for all those smaller bios to
			 * end, so we can also end the "parent" bio.
			 */
			continue;
		}
		/* We don't need to split this bio */
		submit_bio(bio->bi_rw, bio);
	}

	return 0;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
	int err;

	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);

	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

	err = talk_to_blkback(dev, info);

	/*
	 * We have to wait for the backend to switch to
	 * connected state, since we want to read which
	 * features it supports.
	 */

	return err;
}

static void
blkfront_closing(struct blkfront_info *info)
{
	struct xenbus_device *xbdev = info->xbdev;
	struct block_device *bdev = NULL;

	mutex_lock(&info->mutex);

	if (xbdev->state == XenbusStateClosing) {
		mutex_unlock(&info->mutex);
		return;
	}

	if (info->gd)
		bdev = bdget_disk(info->gd, 0);

	mutex_unlock(&info->mutex);

	if (!bdev) {
		xenbus_frontend_closed(xbdev);
		return;
	}

	mutex_lock(&bdev->bd_mutex);

	if (bdev->bd_openers) {
		xenbus_dev_error(xbdev, -EBUSY,
				 "Device in use; refusing to close");
		xenbus_switch_state(xbdev, XenbusStateClosing);
	} else {
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(xbdev);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
}

ed30bf31
LD
1675static void blkfront_setup_discard(struct blkfront_info *info)
1676{
1677 int err;
ed30bf31
LD
1678 unsigned int discard_granularity;
1679 unsigned int discard_alignment;
5ea42986 1680 unsigned int discard_secure;
ed30bf31 1681
1c8cad6c
OH
1682 info->feature_discard = 1;
1683 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1684 "discard-granularity", "%u", &discard_granularity,
1685 "discard-alignment", "%u", &discard_alignment,
1686 NULL);
1687 if (!err) {
1688 info->discard_granularity = discard_granularity;
1689 info->discard_alignment = discard_alignment;
1690 }
1691 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1692 "discard-secure", "%d", &discard_secure,
1693 NULL);
1694 if (!err)
1695 info->feature_secdiscard = !!discard_secure;
ed30bf31
LD
1696}
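
/*
 * Editor's note (illustrative values; the exact path depends on the
 * toolstack): the keys gathered above live in the backend's xenstore
 * directory, e.g.:
 *   backend/vbd/<domid>/<handle>/feature-discard     = "1"
 *   backend/vbd/<domid>/<handle>/discard-granularity = "4096"
 *   backend/vbd/<domid>/<handle>/discard-alignment   = "0"
 *   backend/vbd/<domid>/<handle>/discard-secure      = "1"
 * If the granularity/alignment pair cannot be read, feature_discard stays
 * set and the queue defaults are kept.
 */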
1697
402b27f9
RPM
1698static int blkfront_setup_indirect(struct blkfront_info *info)
1699{
d50babbe 1700 unsigned int segs;
402b27f9
RPM
1701 int err, i;
1702
d50babbe 1703 if (info->max_indirect_segments == 0)
402b27f9 1704 segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
d50babbe 1705 else
402b27f9 1706 segs = info->max_indirect_segments;
402b27f9 1707
86839c56 1708 err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info));
402b27f9
RPM
1709 if (err)
1710 goto out_of_memory;
1711
bfe11d6d
RPM
1712 if (!info->feature_persistent && info->max_indirect_segments) {
1713 /*
1714 * We are using indirect descriptors but not persistent
1715 * grants, so we need to allocate a set of pages that can
1716 * be used for mapping indirect grefs.
1717 */
86839c56 1718 int num = INDIRECT_GREFS(segs) * BLK_RING_SIZE(info);
bfe11d6d
RPM
1719
1720 BUG_ON(!list_empty(&info->indirect_pages));
1721 for (i = 0; i < num; i++) {
1722 struct page *indirect_page = alloc_page(GFP_NOIO);
1723 if (!indirect_page)
1724 goto out_of_memory;
1725 list_add(&indirect_page->lru, &info->indirect_pages);
1726 }
1727 }
1728
86839c56 1729 for (i = 0; i < BLK_RING_SIZE(info); i++) {
402b27f9
RPM
1730 info->shadow[i].grants_used = kzalloc(
1731 sizeof(info->shadow[i].grants_used[0]) * segs,
1732 GFP_NOIO);
b7649158 1733 info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * segs, GFP_NOIO);
402b27f9
RPM
1734 if (info->max_indirect_segments)
1735 info->shadow[i].indirect_grants = kzalloc(
1736 sizeof(info->shadow[i].indirect_grants[0]) *
1737 INDIRECT_GREFS(segs),
1738 GFP_NOIO);
1739 if ((info->shadow[i].grants_used == NULL) ||
b7649158 1740 (info->shadow[i].sg == NULL) ||
402b27f9
RPM
1741 (info->max_indirect_segments &&
1742 (info->shadow[i].indirect_grants == NULL)))
1743 goto out_of_memory;
b7649158 1744 sg_init_table(info->shadow[i].sg, segs);
402b27f9
RPM
1745 }
1746
1747
1748 return 0;
1749
1750out_of_memory:
86839c56 1751 for (i = 0; i < BLK_RING_SIZE(info); i++) {
402b27f9
RPM
1752 kfree(info->shadow[i].grants_used);
1753 info->shadow[i].grants_used = NULL;
b7649158
RPM
1754 kfree(info->shadow[i].sg);
1755 info->shadow[i].sg = NULL;
402b27f9
RPM
1756 kfree(info->shadow[i].indirect_grants);
1757 info->shadow[i].indirect_grants = NULL;
1758 }
bfe11d6d
RPM
1759 if (!list_empty(&info->indirect_pages)) {
1760 struct page *indirect_page, *n;
1761 list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
1762 list_del(&indirect_page->lru);
1763 __free_page(indirect_page);
1764 }
1765 }
402b27f9
RPM
1766 return -ENOMEM;
1767}
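
/*
 * Editor's note: with definitions along the lines of
 *   SEGS_PER_INDIRECT_FRAME = PAGE_SIZE / sizeof(struct blkif_request_segment)
 *   INDIRECT_GREFS(segs)    = DIV_ROUND_UP(segs, SEGS_PER_INDIRECT_FRAME)
 * a 4 KiB page holds 512 of the 8-byte segment entries, so e.g. segs == 64
 * needs a single indirect frame, and fill_grant_buffer() above pre-allocates
 * (64 + 1) * BLK_RING_SIZE(info) grants in total.
 */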
1768
d50babbe
BL
1769/*
1770 * Gather all backend feature-*
1771 */
1772static int blkfront_gather_backend_features(struct blkfront_info *info)
1773{
1774 int err;
1775 int barrier, flush, discard, persistent;
1776 unsigned int indirect_segments;
1777
1778 info->feature_flush = 0;
1779
1780 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1781 "feature-barrier", "%d", &barrier,
1782 NULL);
1783
1784 /*
1785 * If there's no "feature-barrier" defined, then it means
1786 * we're dealing with a very old backend which writes
1787 * synchronously; nothing to do.
1788 *
1789 * If there are barriers, then we use flush.
1790 */
1791 if (!err && barrier)
1792 info->feature_flush = REQ_FLUSH | REQ_FUA;
1793 /*
1794 * And if there is "feature-flush-cache", use that in
1795 * preference to barriers.
1796 */
1797 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1798 "feature-flush-cache", "%d", &flush,
1799 NULL);
1800
1801 if (!err && flush)
1802 info->feature_flush = REQ_FLUSH;
1803
1804 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1805 "feature-discard", "%d", &discard,
1806 NULL);
1807
1808 if (!err && discard)
1809 blkfront_setup_discard(info);
1810
1811 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
 1812 "feature-persistent", "%d", &persistent,
1813 NULL);
1814 if (err)
1815 info->feature_persistent = 0;
1816 else
1817 info->feature_persistent = persistent;
1818
1819 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1820 "feature-max-indirect-segments", "%u", &indirect_segments,
1821 NULL);
1822 if (err)
1823 info->max_indirect_segments = 0;
1824 else
1825 info->max_indirect_segments = min(indirect_segments,
1826 xen_blkif_max_segments);
1827
1828 return blkfront_setup_indirect(info);
1829}
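
/*
 * Editor's note: net effect of the flush/barrier negotiation above:
 *   feature-barrier    feature-flush-cache    info->feature_flush
 *   absent or 0        absent or 0            0 (old synchronous backend)
 *   1                  absent or 0            REQ_FLUSH | REQ_FUA
 *   don't care         1                      REQ_FLUSH
 */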
1830
9f27ee59
JF
1831/*
1832 * Invoked when the backend is finally 'ready' (and has produced
1833 * the details about the physical device - #sectors, size, etc).
1834 */
1835static void blkfront_connect(struct blkfront_info *info)
1836{
1837 unsigned long long sectors;
1838 unsigned long sector_size;
7c4d7d71 1839 unsigned int physical_sector_size;
9f27ee59
JF
1840 unsigned int binfo;
1841 int err;
1842
1fa73be6
S
1843 switch (info->connected) {
1844 case BLKIF_STATE_CONNECTED:
1845 /*
1846 * Potentially, the back-end may be signalling
1847 * a capacity change; update the capacity.
1848 */
1849 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1850 "sectors", "%Lu", &sectors);
1851 if (XENBUS_EXIST_ERR(err))
1852 return;
1853 printk(KERN_INFO "Setting capacity to %Lu\n",
1854 sectors);
1855 set_capacity(info->gd, sectors);
2def141e 1856 revalidate_disk(info->gd);
1fa73be6 1857
402b27f9 1858 return;
1fa73be6 1859 case BLKIF_STATE_SUSPENDED:
402b27f9
RPM
1860 /*
1861 * If we are recovering from suspension, we need to wait
1862 * for the backend to announce its features before
1863 * reconnecting; at a minimum we need to know whether the backend
1864 * supports indirect descriptors, and how many.
1865 */
1866 blkif_recover(info);
9f27ee59
JF
1867 return;
1868
b4dddb49
JF
1869 default:
1870 break;
1fa73be6 1871 }
9f27ee59
JF
1872
1873 dev_dbg(&info->xbdev->dev, "%s:%s.\n",
1874 __func__, info->xbdev->otherend);
1875
1876 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1877 "sectors", "%llu", &sectors,
1878 "info", "%u", &binfo,
1879 "sector-size", "%lu", &sector_size,
1880 NULL);
1881 if (err) {
1882 xenbus_dev_fatal(info->xbdev, err,
1883 "reading backend fields at %s",
1884 info->xbdev->otherend);
1885 return;
1886 }
1887
7c4d7d71
SB
1888 /*
1889 * physical-sector-size is a newer field, so old backends may not
1890 * provide this. Assume physical sector size to be the same as
1891 * sector_size in that case.
1892 */
1893 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1894 "physical-sector-size", "%u", &physical_sector_size);
1895 if (err != 1)
1896 physical_sector_size = sector_size;
1897
d50babbe 1898 err = blkfront_gather_backend_features(info);
402b27f9
RPM
1899 if (err) {
1900 xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
1901 info->xbdev->otherend);
1902 return;
1903 }
1904
7c4d7d71
SB
1905 err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
1906 physical_sector_size);
9f27ee59
JF
1907 if (err) {
1908 xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
1909 info->xbdev->otherend);
1910 return;
1911 }
1912
1913 xenbus_switch_state(info->xbdev, XenbusStateConnected);
1914
1915 /* Kick pending requests. */
3467811e 1916 spin_lock_irq(&info->io_lock);
9f27ee59
JF
1917 info->connected = BLKIF_STATE_CONNECTED;
1918 kick_pending_request_queues(info);
3467811e 1919 spin_unlock_irq(&info->io_lock);
9f27ee59
JF
1920
1921 add_disk(info->gd);
1d78d705
CL
1922
1923 info->is_ready = 1;
9f27ee59
JF
1924}
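
/*
 * Editor's note (illustrative): for a 10 GiB disk with 512-byte logical
 * and 4 KiB physical sectors, the backend fields read above would be
 * roughly sectors = "20971520", info = "0", sector-size = "512" and
 * physical-sector-size = "4096".
 */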
1925
9f27ee59
JF
1926/**
1927 * Callback received when the backend's state changes.
1928 */
203fd61f 1929static void blkback_changed(struct xenbus_device *dev,
9f27ee59
JF
1930 enum xenbus_state backend_state)
1931{
a1b4b12b 1932 struct blkfront_info *info = dev_get_drvdata(&dev->dev);
9f27ee59 1933
203fd61f 1934 dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);
9f27ee59
JF
1935
1936 switch (backend_state) {
9f27ee59 1937 case XenbusStateInitWait:
a9b54bb9
BL
1938 if (dev->state != XenbusStateInitialising)
1939 break;
8ab0144a
BL
1940 if (talk_to_blkback(dev, info)) {
1941 kfree(info);
1942 dev_set_drvdata(&dev->dev, NULL);
1943 break;
1944 }
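		/* fall through (the grouped states below are no-ops) */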
1945 case XenbusStateInitialising:
9f27ee59 1946 case XenbusStateInitialised:
b78c9512
NI
1947 case XenbusStateReconfiguring:
1948 case XenbusStateReconfigured:
9f27ee59 1949 case XenbusStateUnknown:
9f27ee59
JF
1950 break;
1951
1952 case XenbusStateConnected:
1953 blkfront_connect(info);
1954 break;
1955
36613717
DV
1956 case XenbusStateClosed:
1957 if (dev->state == XenbusStateClosed)
1958 break;
1959 /* Missed the backend's Closing state -- fallthrough */
9f27ee59 1960 case XenbusStateClosing:
b70f5fa0 1961 blkfront_closing(info);
9f27ee59
JF
1962 break;
1963 }
1964}
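
/*
 * Editor's sketch (simplified, assuming a well-behaved backend) of the
 * handshake the switch above implements:
 *   backend InitWait  -> talk_to_blkback() sets up the ring/event channel
 *   backend Connected -> blkfront_connect() reads features, adds the disk
 *   backend Closing   -> blkfront_closing() releases the disk or defers
 *   backend Closed    -> treated as a missed Closing unless already Closed
 */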
1965
fa1bd359 1966static int blkfront_remove(struct xenbus_device *xbdev)
9f27ee59 1967{
fa1bd359
DS
1968 struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
1969 struct block_device *bdev = NULL;
1970 struct gendisk *disk;
9f27ee59 1971
fa1bd359 1972 dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
9f27ee59
JF
1973
1974 blkif_free(info, 0);
1975
fa1bd359
DS
1976 mutex_lock(&info->mutex);
1977
1978 disk = info->gd;
1979 if (disk)
1980 bdev = bdget_disk(disk, 0);
1981
1982 info->xbdev = NULL;
1983 mutex_unlock(&info->mutex);
1984
1985 if (!bdev) {
1986 kfree(info);
1987 return 0;
1988 }
1989
1990 /*
1991 * The xbdev was removed before we reached the Closed
1992 * state. See if it's safe to remove the disk. If the bdev
1993 * isn't closed yet, we let release take care of it.
1994 */
1995
1996 mutex_lock(&bdev->bd_mutex);
1997 info = disk->private_data;
1998
d54142c7
DS
1999 dev_warn(disk_to_dev(disk),
2000 "%s was hot-unplugged, %d stale handles\n",
2001 xbdev->nodename, bdev->bd_openers);
2002
7b32d104 2003 if (info && !bdev->bd_openers) {
fa1bd359
DS
2004 xlvbd_release_gendisk(info);
2005 disk->private_data = NULL;
0e345826 2006 kfree(info);
fa1bd359
DS
2007 }
2008
2009 mutex_unlock(&bdev->bd_mutex);
2010 bdput(bdev);
9f27ee59
JF
2011
2012 return 0;
2013}
2014
1d78d705
CL
2015static int blkfront_is_ready(struct xenbus_device *dev)
2016{
a1b4b12b 2017 struct blkfront_info *info = dev_get_drvdata(&dev->dev);
1d78d705 2018
5d7ed20e 2019 return info->is_ready && info->xbdev;
1d78d705
CL
2020}
2021
a63c848b 2022static int blkif_open(struct block_device *bdev, fmode_t mode)
9f27ee59 2023{
13961743
DS
2024 struct gendisk *disk = bdev->bd_disk;
2025 struct blkfront_info *info;
2026 int err = 0;
6e9624b8 2027
2a48fc0a 2028 mutex_lock(&blkfront_mutex);
6e9624b8 2029
13961743
DS
2030 info = disk->private_data;
2031 if (!info) {
2032 /* xbdev gone */
2033 err = -ERESTARTSYS;
2034 goto out;
2035 }
2036
2037 mutex_lock(&info->mutex);
2038
2039 if (!info->gd)
2040 /* xbdev is closed */
2041 err = -ERESTARTSYS;
2042
2043 mutex_unlock(&info->mutex);
2044
13961743 2045out:
2a48fc0a 2046 mutex_unlock(&blkfront_mutex);
13961743 2047 return err;
9f27ee59
JF
2048}
2049
db2a144b 2050static void blkif_release(struct gendisk *disk, fmode_t mode)
9f27ee59 2051{
a63c848b 2052 struct blkfront_info *info = disk->private_data;
7fd152f4
DS
2053 struct block_device *bdev;
2054 struct xenbus_device *xbdev;
2055
2a48fc0a 2056 mutex_lock(&blkfront_mutex);
7fd152f4
DS
2057
2058 bdev = bdget_disk(disk, 0);
7fd152f4 2059
2f089cb8
FP
2060 if (!bdev) {
2061 WARN(1, "Block device %s yanked out from us!\n", disk->disk_name);
2062 goto out_mutex;
2063 }
acfca3c6
DS
2064 if (bdev->bd_openers)
2065 goto out;
2066
7fd152f4
DS
2067 /*
2068 * Check if we have been instructed to close. We will have
2069 * deferred this request because the bdev was still open.
2070 */
2071
2072 mutex_lock(&info->mutex);
2073 xbdev = info->xbdev;
2074
2075 if (xbdev && xbdev->state == XenbusStateClosing) {
2076 /* pending switch to state closed */
d54142c7 2077 dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
7fd152f4
DS
2078 xlvbd_release_gendisk(info);
2079 xenbus_frontend_closed(info->xbdev);
2080 }
2081
2082 mutex_unlock(&info->mutex);
2083
2084 if (!xbdev) {
2085 /* sudden device removal */
d54142c7 2086 dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
7fd152f4
DS
2087 xlvbd_release_gendisk(info);
2088 disk->private_data = NULL;
2089 kfree(info);
9f27ee59 2090 }
7fd152f4 2091
a4cc14ec 2092out:
dad5cf65 2093 bdput(bdev);
2f089cb8 2094out_mutex:
2a48fc0a 2095 mutex_unlock(&blkfront_mutex);
9f27ee59
JF
2096}
2097
83d5cde4 2098static const struct block_device_operations xlvbd_block_fops =
9f27ee59
JF
2099{
2100 .owner = THIS_MODULE,
a63c848b
AV
2101 .open = blkif_open,
2102 .release = blkif_release,
597592d9 2103 .getgeo = blkif_getgeo,
8a6cfeb6 2104 .ioctl = blkif_ioctl,
9f27ee59
JF
2105};
2106
2107
ec9c42ec 2108static const struct xenbus_device_id blkfront_ids[] = {
9f27ee59
JF
2109 { "vbd" },
2110 { "" }
2111};
2112
95afae48
DV
2113static struct xenbus_driver blkfront_driver = {
2114 .ids = blkfront_ids,
9f27ee59
JF
2115 .probe = blkfront_probe,
2116 .remove = blkfront_remove,
2117 .resume = blkfront_resume,
203fd61f 2118 .otherend_changed = blkback_changed,
1d78d705 2119 .is_ready = blkfront_is_ready,
95afae48 2120};
9f27ee59
JF
2121
2122static int __init xlblk_init(void)
2123{
469738e6
LE
2124 int ret;
2125
6e833587 2126 if (!xen_domain())
9f27ee59
JF
2127 return -ENODEV;
2128
86839c56
BL
2129 if (xen_blkif_max_ring_order > XENBUS_MAX_RING_PAGE_ORDER) {
2130 pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
2131 xen_blkif_max_ring_order, XENBUS_MAX_RING_PAGE_ORDER);
2132 xen_blkif_max_ring_order = 0;
2133 }
2134
51c71a3b 2135 if (!xen_has_pv_disk_devices())
b9136d20
IM
2136 return -ENODEV;
2137
9f27ee59
JF
2138 if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
2139 printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
2140 XENVBD_MAJOR, DEV_NAME);
2141 return -ENODEV;
2142 }
2143
73db144b 2144 ret = xenbus_register_frontend(&blkfront_driver);
469738e6
LE
2145 if (ret) {
2146 unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
2147 return ret;
2148 }
2149
2150 return 0;
9f27ee59
JF
2151}
2152module_init(xlblk_init);
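
/*
 * Editor's note (assumption: the module parameter is registered elsewhere
 * in this file as max_ring_page_order): the clamp in xlblk_init() guards a
 * load-time setting such as
 *   modprobe xen-blkfront max_ring_page_order=4
 * and resets out-of-range values to 0, i.e. a single ring page.
 */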
2153
2154
5a60d0cd 2155static void __exit xlblk_exit(void)
9f27ee59 2156{
8605067f
JB
2157 xenbus_unregister_driver(&blkfront_driver);
2158 unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
2159 kfree(minors);
9f27ee59
JF
2160}
2161module_exit(xlblk_exit);
2162
2163MODULE_DESCRIPTION("Xen virtual block device frontend");
2164MODULE_LICENSE("GPL");
2165MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
d2f0c52b 2166MODULE_ALIAS("xen:vbd");
4f93f09b 2167MODULE_ALIAS("xenblk");