/*  Xenbus code for blkif backend
    Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
    Copyright (C) 2005 XenSource Ltd

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

*/

#define pr_fmt(fmt) "xen-blkback: " fmt

#include <stdarg.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include "common.h"

/* On the XenBus the max length of 'ring-ref%u'. */
#define RINGREF_NAME_LEN (20)

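/*
 * Sizing note: "ring-ref" is 8 characters and a 32-bit %u prints at most
 * 10 digits, so 8 + 10 + 1 (trailing NUL) = 19 bytes fit in the 20
 * reserved above.
 */
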
struct backend_info {
	struct xenbus_device *dev;
	struct xen_blkif *blkif;
	struct xenbus_watch backend_watch;
	unsigned major;
	unsigned minor;
	char *mode;
};

static struct kmem_cache *xen_blkif_cachep;
static void connect(struct backend_info *);
static int connect_ring(struct backend_info *);
static void backend_changed(struct xenbus_watch *, const char *,
			    const char *);
static void xen_blkif_free(struct xen_blkif *blkif);
static void xen_vbd_free(struct xen_vbd *vbd);

struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be)
{
	return be->dev;
}

/*
 * The last request could free the device from softirq context and
 * xen_blkif_free() can sleep.
 */
static void xen_blkif_deferred_free(struct work_struct *work)
{
	struct xen_blkif *blkif;

	blkif = container_of(work, struct xen_blkif, free_work);
	xen_blkif_free(blkif);
}

static int blkback_name(struct xen_blkif *blkif, char *buf)
{
	char *devpath, *devname;
	struct xenbus_device *dev = blkif->be->dev;

	devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
	if (IS_ERR(devpath))
		return PTR_ERR(devpath);

	devname = strstr(devpath, "/dev/");
	if (devname != NULL)
		devname += strlen("/dev/");
	else
		devname = devpath;

	snprintf(buf, TASK_COMM_LEN, "%d.%s", blkif->domid, devname);
	kfree(devpath);

	return 0;
}

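/*
 * Example (illustrative values): for a backend serving domain 5 whose
 * "dev" node reads "/dev/sda1", blkback_name() yields "5.sda1", which
 * xen_update_blkif_status() below uses to name the per-ring kthreads
 * ("5.sda1-0", "5.sda1-1", ...).
 */
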
static void xen_update_blkif_status(struct xen_blkif *blkif)
{
	int err;
	char name[TASK_COMM_LEN];
	struct xen_blkif_ring *ring;
	int i;

	/* Not ready to connect? */
	if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev)
		return;

	/* Already connected? */
	if (blkif->be->dev->state == XenbusStateConnected)
		return;

	/* Attempt to connect: exit if we fail to. */
	connect(blkif->be);
	if (blkif->be->dev->state != XenbusStateConnected)
		return;

	err = blkback_name(blkif, name);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
		return;
	}

	err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "block flush");
		return;
	}
	invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);

	for (i = 0; i < blkif->nr_rings; i++) {
		ring = &blkif->rings[i];
		ring->xenblkd = kthread_run(xen_blkif_schedule, ring, "%s-%d", name, i);
		if (IS_ERR(ring->xenblkd)) {
			err = PTR_ERR(ring->xenblkd);
			ring->xenblkd = NULL;
			xenbus_dev_fatal(blkif->be->dev, err,
					 "start %s-%d xenblkd", name, i);
			goto out;
		}
	}
	return;

out:
	while (--i >= 0) {
		ring = &blkif->rings[i];
		kthread_stop(ring->xenblkd);
	}
	return;
}

static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
{
	unsigned int r;

	blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring),
			       GFP_KERNEL);
	if (!blkif->rings)
		return -ENOMEM;

	for (r = 0; r < blkif->nr_rings; r++) {
		struct xen_blkif_ring *ring = &blkif->rings[r];

		spin_lock_init(&ring->blk_ring_lock);
		init_waitqueue_head(&ring->wq);
		INIT_LIST_HEAD(&ring->pending_free);
		INIT_LIST_HEAD(&ring->persistent_purge_list);
		INIT_WORK(&ring->persistent_purge_work, xen_blkbk_unmap_purged_grants);
		spin_lock_init(&ring->free_pages_lock);
		INIT_LIST_HEAD(&ring->free_pages);

		spin_lock_init(&ring->pending_free_lock);
		init_waitqueue_head(&ring->pending_free_wq);
		init_waitqueue_head(&ring->shutdown_wq);
		ring->blkif = blkif;
		ring->st_print = jiffies;
		ring->active = true;
	}

	return 0;
}

static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
	struct xen_blkif *blkif;

	BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

	blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
	if (!blkif)
		return ERR_PTR(-ENOMEM);

	blkif->domid = domid;
	atomic_set(&blkif->refcnt, 1);
	init_completion(&blkif->drain_complete);
	INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);

	return blkif;
}

static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
			 unsigned int nr_grefs, unsigned int evtchn)
{
	int err;
	struct xen_blkif *blkif = ring->blkif;

	/* Already connected through? */
	if (ring->irq)
		return 0;

	err = xenbus_map_ring_valloc(blkif->be->dev, gref, nr_grefs,
				     &ring->blk_ring);
	if (err < 0)
		return err;

	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
	{
		struct blkif_sring *sring;
		sring = (struct blkif_sring *)ring->blk_ring;
		BACK_RING_INIT(&ring->blk_rings.native, sring,
			       XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	case BLKIF_PROTOCOL_X86_32:
	{
		struct blkif_x86_32_sring *sring_x86_32;
		sring_x86_32 = (struct blkif_x86_32_sring *)ring->blk_ring;
		BACK_RING_INIT(&ring->blk_rings.x86_32, sring_x86_32,
			       XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	case BLKIF_PROTOCOL_X86_64:
	{
		struct blkif_x86_64_sring *sring_x86_64;
		sring_x86_64 = (struct blkif_x86_64_sring *)ring->blk_ring;
		BACK_RING_INIT(&ring->blk_rings.x86_64, sring_x86_64,
			       XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	default:
		BUG();
	}

	err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
						    xen_blkif_be_int, 0,
						    "blkif-backend", ring);
	if (err < 0) {
		xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
		ring->blk_rings.common.sring = NULL;
		return err;
	}
	ring->irq = err;

	return 0;
}

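/*
 * Ring geometry, for orientation: the shared ring spans nr_grefs
 * contiguous grant pages (so a ring-page-order of 2 means 4 pages),
 * and the backend sizes its pending-request pool as
 * nr_grefs * XEN_BLKIF_REQS_PER_PAGE; see read_per_ring_refs() below.
 */
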
static int xen_blkif_disconnect(struct xen_blkif *blkif)
{
	struct pending_req *req, *n;
	unsigned int j, r;
	bool busy = false;

	for (r = 0; r < blkif->nr_rings; r++) {
		struct xen_blkif_ring *ring = &blkif->rings[r];
		unsigned int i = 0;

		if (!ring->active)
			continue;

		if (ring->xenblkd) {
			kthread_stop(ring->xenblkd);
			wake_up(&ring->shutdown_wq);
		}

		/* The above kthread_stop() guarantees that at this point we
		 * don't have any discard_io or other_io requests. So, checking
		 * for inflight IO is enough.
		 */
		if (atomic_read(&ring->inflight) > 0) {
			busy = true;
			continue;
		}

		if (ring->irq) {
			unbind_from_irqhandler(ring->irq, ring);
			ring->irq = 0;
		}

		if (ring->blk_rings.common.sring) {
			xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
			ring->blk_rings.common.sring = NULL;
		}

		/* Remove all persistent grants and the cache of ballooned pages. */
		xen_blkbk_free_caches(ring);

		/* Check that there is no request in use */
		list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
			list_del(&req->free_list);

			for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
				kfree(req->segments[j]);

			for (j = 0; j < MAX_INDIRECT_PAGES; j++)
				kfree(req->indirect_pages[j]);

			kfree(req);
			i++;
		}

		BUG_ON(atomic_read(&ring->persistent_gnt_in_use) != 0);
		BUG_ON(!list_empty(&ring->persistent_purge_list));
		BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
		BUG_ON(!list_empty(&ring->free_pages));
		BUG_ON(ring->free_pages_num != 0);
		BUG_ON(ring->persistent_gnt_c != 0);
		WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
		ring->active = false;
	}
	if (busy)
		return -EBUSY;

	blkif->nr_ring_pages = 0;
	/*
	 * blkif->rings was allocated in connect_ring, so we should free it
	 * here.
	 */
	kfree(blkif->rings);
	blkif->rings = NULL;
	blkif->nr_rings = 0;

	return 0;
}

static void xen_blkif_free(struct xen_blkif *blkif)
{
	WARN_ON(xen_blkif_disconnect(blkif));
	xen_vbd_free(&blkif->vbd);
	kfree(blkif->be->mode);
	kfree(blkif->be);

	/* Make sure everything is drained before shutting down */
	kmem_cache_free(xen_blkif_cachep, blkif);
}

int __init xen_blkif_interface_init(void)
{
	xen_blkif_cachep = kmem_cache_create("blkif_cache",
					     sizeof(struct xen_blkif),
					     0, 0, NULL);
	if (!xen_blkif_cachep)
		return -ENOMEM;

	return 0;
}

/*
 * sysfs interface for VBD I/O requests
 */

#define VBD_SHOW_ALLRING(name, format)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
		struct xen_blkif *blkif = be->blkif;			\
		unsigned int i;						\
		unsigned long long result = 0;				\
									\
		if (!blkif->rings)					\
			goto out;					\
									\
		for (i = 0; i < blkif->nr_rings; i++) {			\
			struct xen_blkif_ring *ring = &blkif->rings[i];	\
									\
			result += ring->st_##name;			\
		}							\
									\
out:									\
		return sprintf(buf, format, result);			\
	}								\
	static DEVICE_ATTR(name, 0444, show_##name, NULL)

VBD_SHOW_ALLRING(oo_req, "%llu\n");
VBD_SHOW_ALLRING(rd_req, "%llu\n");
VBD_SHOW_ALLRING(wr_req, "%llu\n");
VBD_SHOW_ALLRING(f_req, "%llu\n");
VBD_SHOW_ALLRING(ds_req, "%llu\n");
VBD_SHOW_ALLRING(rd_sect, "%llu\n");
VBD_SHOW_ALLRING(wr_sect, "%llu\n");

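/*
 * Each VBD_SHOW_ALLRING() use above expands to a show_<name>() helper
 * that sums the per-ring st_<name> counter across all rings, plus a
 * read-only (0444) device attribute; the attributes are grouped under
 * the "statistics" sysfs directory below.
 */
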
static struct attribute *xen_vbdstat_attrs[] = {
	&dev_attr_oo_req.attr,
	&dev_attr_rd_req.attr,
	&dev_attr_wr_req.attr,
	&dev_attr_f_req.attr,
	&dev_attr_ds_req.attr,
	&dev_attr_rd_sect.attr,
	&dev_attr_wr_sect.attr,
	NULL
};

static const struct attribute_group xen_vbdstat_group = {
	.name = "statistics",
	.attrs = xen_vbdstat_attrs,
};

#define VBD_SHOW(name, format, args...)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
									\
		return sprintf(buf, format, ##args);			\
	}								\
	static DEVICE_ATTR(name, 0444, show_##name, NULL)

VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
VBD_SHOW(mode, "%s\n", be->mode);

static int xenvbd_sysfs_addif(struct xenbus_device *dev)
{
	int error;

	error = device_create_file(&dev->dev, &dev_attr_physical_device);
	if (error)
		goto fail1;

	error = device_create_file(&dev->dev, &dev_attr_mode);
	if (error)
		goto fail2;

	error = sysfs_create_group(&dev->dev.kobj, &xen_vbdstat_group);
	if (error)
		goto fail3;

	return 0;

fail3:	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
fail2:	device_remove_file(&dev->dev, &dev_attr_mode);
fail1:	device_remove_file(&dev->dev, &dev_attr_physical_device);
	return error;
}

static void xenvbd_sysfs_delif(struct xenbus_device *dev)
{
	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
	device_remove_file(&dev->dev, &dev_attr_mode);
	device_remove_file(&dev->dev, &dev_attr_physical_device);
}

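/*
 * The resulting sysfs layout (backend device names are illustrative,
 * typically vbd-<domid>-<handle>) is e.g.
 * /sys/bus/xen-backend/devices/vbd-1-51713/physical_device,
 * .../mode and .../statistics/{oo_req,rd_req,...}.
 */
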
static void xen_vbd_free(struct xen_vbd *vbd)
{
	if (vbd->bdev)
		blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE);
	vbd->bdev = NULL;
}

static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
			  unsigned major, unsigned minor, int readonly,
			  int cdrom)
{
	struct xen_vbd *vbd;
	struct block_device *bdev;
	struct request_queue *q;

	vbd = &blkif->vbd;
	vbd->handle   = handle;
	vbd->readonly = readonly;
	vbd->type     = 0;

	vbd->pdevice  = MKDEV(major, minor);

	bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
				 FMODE_READ : FMODE_WRITE, NULL);

	if (IS_ERR(bdev)) {
		pr_warn("xen_vbd_create: device %08x could not be opened\n",
			vbd->pdevice);
		return -ENOENT;
	}

	vbd->bdev = bdev;
	if (vbd->bdev->bd_disk == NULL) {
		pr_warn("xen_vbd_create: device %08x doesn't exist\n",
			vbd->pdevice);
		xen_vbd_free(vbd);
		return -ENOENT;
	}
	vbd->size = vbd_sz(vbd);

	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
		vbd->type |= VDISK_CDROM;
	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
		vbd->type |= VDISK_REMOVABLE;

	q = bdev_get_queue(bdev);
	if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		vbd->flush_support = true;

	if (q && blk_queue_secure_erase(q))
		vbd->discard_secure = true;

	pr_debug("Successful creation of handle=%04x (dom=%u)\n",
		 handle, blkif->domid);
	return 0;
}

static int xen_blkbk_remove(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	if (be->major || be->minor)
		xenvbd_sysfs_delif(dev);

	if (be->backend_watch.node) {
		unregister_xenbus_watch(&be->backend_watch);
		kfree(be->backend_watch.node);
		be->backend_watch.node = NULL;
	}

	dev_set_drvdata(&dev->dev, NULL);

	if (be->blkif) {
		xen_blkif_disconnect(be->blkif);

		/* Put the reference we set in xen_blkif_alloc(). */
		xen_blkif_put(be->blkif);
	}

	return 0;
}

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-flush-cache (%d)", err);

	return err;
}

static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xen_blkif *blkif = be->blkif;
	int err;
	int state = 0;
	struct block_device *bdev = be->blkif->vbd.bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
		return;

	if (blk_queue_discard(q)) {
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-granularity", "%u",
				    q->limits.discard_granularity);
		if (err) {
			dev_warn(&dev->dev, "writing discard-granularity (%d)", err);
			return;
		}
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-alignment", "%u",
				    q->limits.discard_alignment);
		if (err) {
			dev_warn(&dev->dev, "writing discard-alignment (%d)", err);
			return;
		}
		state = 1;
		/* Optional. */
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-secure", "%d",
				    blkif->vbd.discard_secure);
		if (err) {
			dev_warn(&dev->dev, "writing discard-secure (%d)", err);
			return;
		}
	}
	err = xenbus_printf(xbt, dev->nodename, "feature-discard",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-discard (%d)", err);
}

int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-barrier (%d)", err);

	return err;
}

/*
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures, and watch the store waiting for the hotplug scripts to tell us
 * the device's physical major and minor numbers.  Switch to InitWait.
 */
static int xen_blkbk_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	int err;
	struct backend_info *be = kzalloc(sizeof(struct backend_info),
					  GFP_KERNEL);

	/* match the pr_debug in xen_blkbk_remove */
	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	if (!be) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating backend structure");
		return -ENOMEM;
	}
	be->dev = dev;
	dev_set_drvdata(&dev->dev, be);

	be->blkif = xen_blkif_alloc(dev->otherend_id);
	if (IS_ERR(be->blkif)) {
		err = PTR_ERR(be->blkif);
		be->blkif = NULL;
		xenbus_dev_fatal(dev, err, "creating block interface");
		goto fail;
	}

	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "feature-max-indirect-segments", "%u",
			    MAX_INDIRECT_SEGMENTS);
	if (err)
		dev_warn(&dev->dev,
			 "writing %s/feature-max-indirect-segments (%d)",
			 dev->nodename, err);

	/* Multi-queue: advertise how many queues are supported by us. */
	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "multi-queue-max-queues", "%u", xenblk_max_queues);
	if (err)
		pr_warn("Error writing multi-queue-max-queues\n");

	/* setup back pointer */
	be->blkif->be = be;

	err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed,
				   "%s/%s", dev->nodename, "physical-device");
	if (err)
		goto fail;

	err = xenbus_printf(XBT_NIL, dev->nodename, "max-ring-page-order", "%u",
			    xen_blkif_max_ring_order);
	if (err)
		pr_warn("%s write out 'max-ring-page-order' failed\n", __func__);

	err = xenbus_switch_state(dev, XenbusStateInitWait);
	if (err)
		goto fail;

	return 0;

fail:
	pr_warn("%s failed\n", __func__);
	xen_blkbk_remove(dev);
	return err;
}

/*
 * Callback received when the hotplug scripts have placed the physical-device
 * node.  Read it and the mode node, and create a vbd.  If the frontend is
 * ready, connect.
 */
static void backend_changed(struct xenbus_watch *watch,
			    const char *path, const char *token)
{
	int err;
	unsigned major;
	unsigned minor;
	struct backend_info *be
		= container_of(watch, struct backend_info, backend_watch);
	struct xenbus_device *dev = be->dev;
	int cdrom = 0;
	unsigned long handle;
	char *device_type;

	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
			   &major, &minor);
	if (XENBUS_EXIST_ERR(err)) {
		/*
		 * Since this watch will fire once immediately after it is
		 * registered, we expect this.  Ignore it, and wait for the
		 * hotplug scripts.
		 */
		return;
	}
	if (err != 2) {
		xenbus_dev_fatal(dev, err, "reading physical-device");
		return;
	}

	if (be->major | be->minor) {
		if (be->major != major || be->minor != minor)
			pr_warn("changing physical device (from %x:%x to %x:%x) not supported.\n",
				be->major, be->minor, major, minor);
		return;
	}

	be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
	if (IS_ERR(be->mode)) {
		err = PTR_ERR(be->mode);
		be->mode = NULL;
		xenbus_dev_fatal(dev, err, "reading mode");
		return;
	}

	device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL);
	if (!IS_ERR(device_type)) {
		cdrom = strcmp(device_type, "cdrom") == 0;
		kfree(device_type);
	}

	/* Front end dir is a number, which is used as the handle. */
	err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
	if (err) {
		kfree(be->mode);
		be->mode = NULL;
		return;
	}

	be->major = major;
	be->minor = minor;

	err = xen_vbd_create(be->blkif, handle, major, minor,
			     !strchr(be->mode, 'w'), cdrom);

	if (err)
		xenbus_dev_fatal(dev, err, "creating vbd structure");
	else {
		err = xenvbd_sysfs_addif(dev);
		if (err) {
			xen_vbd_free(&be->blkif->vbd);
			xenbus_dev_fatal(dev, err, "creating sysfs entries");
		}
	}

	if (err) {
		kfree(be->mode);
		be->mode = NULL;
		be->major = 0;
		be->minor = 0;
	} else {
		/* We're potentially connected now */
		xen_update_blkif_status(be->blkif);
	}
}
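
/*
 * Example of the nodes consumed above (values are illustrative): the
 * hotplug scripts write e.g. physical-device = "ca:1" (hex major:minor)
 * and mode = "w" under the backend directory, while the handle comes
 * from the trailing component of the frontend path, e.g.
 * .../device/vbd/51713 -> handle 51713.
 */
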
/*
 * Callback received when the frontend's state changes.
 */
static void frontend_changed(struct xenbus_device *dev,
			     enum xenbus_state frontend_state)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);
	int err;

	pr_debug("%s %p %s\n", __func__, dev, xenbus_strstate(frontend_state));

	switch (frontend_state) {
	case XenbusStateInitialising:
		if (dev->state == XenbusStateClosed) {
			pr_info("%s: prepare for reconnect\n", dev->nodename);
			xenbus_switch_state(dev, XenbusStateInitWait);
		}
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		/*
		 * Ensure we connect even when two watches fire in
		 * close succession and we miss the intermediate value
		 * of frontend_state.
		 */
		if (dev->state == XenbusStateConnected)
			break;

		/*
		 * Enforce precondition before potential leak point.
		 * xen_blkif_disconnect() is idempotent.
		 */
		err = xen_blkif_disconnect(be->blkif);
		if (err) {
			xenbus_dev_fatal(dev, err, "pending I/O");
			break;
		}

		err = connect_ring(be);
		if (err) {
			/*
			 * Clean up so that memory resources can be used by
			 * other devices. connect_ring already reported the
			 * error.
			 */
			xen_blkif_disconnect(be->blkif);
			break;
		}
		xen_update_blkif_status(be->blkif);
		break;

	case XenbusStateClosing:
		xenbus_switch_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		xen_blkif_disconnect(be->blkif);
		xenbus_switch_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		/* fall through */
		/* if not online */
	case XenbusStateUnknown:
		/* implies xen_blkif_disconnect() via xen_blkbk_remove() */
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}
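
/*
 * A sketch of the usual handshake driven by the handler above: the
 * backend sits in InitWait (set in xen_blkbk_probe()); when the frontend
 * reaches Initialised/Connected we disconnect any stale rings, map the
 * new ones via connect_ring(), and connect() switches us to Connected.
 * Closing/Closed tear the rings down again.
 */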

/* ** Connection ** */

/*
 * Write the physical details regarding the block device to the store, and
 * switch to Connected state.
 */
static void connect(struct backend_info *be)
{
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = be->dev;

	pr_debug("%s %s\n", __func__, dev->otherend);

	/* Supply the information about the device the frontend needs */
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		return;
	}

	/* If we can't advertise it is OK. */
	xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);

	xen_blkbk_discard(xbt, be);

	xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);

	err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", 1);
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
				 dev->nodename);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(&be->blkif->vbd));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sectors",
				 dev->nodename);
		goto abort;
	}

	/* FIXME: use a typename instead */
	err = xenbus_printf(xbt, dev->nodename, "info", "%u",
			    be->blkif->vbd.type |
			    (be->blkif->vbd.readonly ? VDISK_READONLY : 0));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/info",
				 dev->nodename);
		goto abort;
	}
	err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
			    (unsigned long)
			    bdev_logical_block_size(be->blkif->vbd.bdev));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sector-size",
				 dev->nodename);
		goto abort;
	}
	err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u",
			    bdev_physical_block_size(be->blkif->vbd.bdev));
	if (err)
		xenbus_dev_error(dev, err, "writing %s/physical-sector-size",
				 dev->nodename);

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		xenbus_dev_fatal(dev, err, "ending transaction");

	err = xenbus_switch_state(dev, XenbusStateConnected);
	if (err)
		xenbus_dev_fatal(dev, err, "%s: switching to Connected state",
				 dev->nodename);

	return;
 abort:
	xenbus_transaction_end(xbt, 1);
}

/*
 * Each ring may span multiple pages, depending on "ring-page-order".
 */
static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
{
	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
	struct pending_req *req, *n;
	int err, i, j;
	struct xen_blkif *blkif = ring->blkif;
	struct xenbus_device *dev = blkif->be->dev;
	unsigned int ring_page_order, nr_grefs, evtchn;

	err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
			   &evtchn);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/event-channel", dir);
		return err;
	}

	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
			   &ring_page_order);
	if (err != 1) {
		err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u", &ring_ref[0]);
		if (err != 1) {
			err = -EINVAL;
			xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
			return err;
		}
		nr_grefs = 1;
	} else {
		unsigned int i;

		if (ring_page_order > xen_blkif_max_ring_order) {
			err = -EINVAL;
			xenbus_dev_fatal(dev, err, "%s/request %d ring page order exceed max:%d",
					 dir, ring_page_order,
					 xen_blkif_max_ring_order);
			return err;
		}

		nr_grefs = 1 << ring_page_order;
		for (i = 0; i < nr_grefs; i++) {
			char ring_ref_name[RINGREF_NAME_LEN];

			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
			err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
					   "%u", &ring_ref[i]);
			if (err != 1) {
				err = -EINVAL;
				xenbus_dev_fatal(dev, err, "reading %s/%s",
						 dir, ring_ref_name);
				return err;
			}
		}
	}
	blkif->nr_ring_pages = nr_grefs;

	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req)
			goto fail;
		list_add_tail(&req->free_list, &ring->pending_free);
		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
			req->segments[j] = kzalloc(sizeof(*req->segments[0]), GFP_KERNEL);
			if (!req->segments[j])
				goto fail;
		}
		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
			req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
							 GFP_KERNEL);
			if (!req->indirect_pages[j])
				goto fail;
		}
	}

	/* Map the shared frame, irq etc. */
	err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
		return err;
	}

	return 0;

fail:
	list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
		list_del(&req->free_list);
		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
			if (!req->segments[j])
				break;
			kfree(req->segments[j]);
		}
		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
			if (!req->indirect_pages[j])
				break;
			kfree(req->indirect_pages[j]);
		}
		kfree(req);
	}
	return -ENOMEM;
}

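/*
 * Example of the per-queue frontend layout consumed by connect_ring()
 * and read_per_ring_refs() (values are illustrative), for two queues
 * and no "ring-page-order":
 *	.../multi-queue-num-queues = "2"
 *	.../queue-0/ring-ref = "8"	.../queue-0/event-channel = "15"
 *	.../queue-1/ring-ref = "9"	.../queue-1/event-channel = "16"
 * With a single queue the nodes live directly in dev->otherend, and
 * with a nonzero ring-page-order each queue has "ring-ref0" ...
 * "ring-refN" instead of a lone "ring-ref".
 */
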
static int connect_ring(struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	unsigned int pers_grants;
	char protocol[64] = "";
	int err, i;
	char *xspath;
	size_t xspathsize;
	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
	unsigned int requested_num_queues = 0;

	pr_debug("%s %s\n", __func__, dev->otherend);

	be->blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
	err = xenbus_scanf(XBT_NIL, dev->otherend, "protocol",
			   "%63s", protocol);
	if (err <= 0)
		strcpy(protocol, "unspecified, assuming default");
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
		be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
	else {
		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
		return -ENOSYS;
	}
	pers_grants = xenbus_read_unsigned(dev->otherend, "feature-persistent",
					   0);
	be->blkif->vbd.feature_gnt_persistent = pers_grants;
	be->blkif->vbd.overflow_max_grants = 0;

	/*
	 * Read the number of hardware queues from frontend.
	 */
	requested_num_queues = xenbus_read_unsigned(dev->otherend,
						    "multi-queue-num-queues",
						    1);
	if (requested_num_queues > xenblk_max_queues
	    || requested_num_queues == 0) {
		/* Buggy or malicious guest. */
		xenbus_dev_fatal(dev, err,
				 "guest requested %u queues, exceeding the maximum of %u.",
				 requested_num_queues, xenblk_max_queues);
		return -ENOSYS;
	}
	be->blkif->nr_rings = requested_num_queues;
	if (xen_blkif_alloc_rings(be->blkif))
		return -ENOMEM;

	pr_info("%s: using %d queues, protocol %d (%s) %s\n", dev->nodename,
		be->blkif->nr_rings, be->blkif->blk_protocol, protocol,
		pers_grants ? "persistent grants" : "");

	if (be->blkif->nr_rings == 1)
		return read_per_ring_refs(&be->blkif->rings[0], dev->otherend);
	else {
		xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
		xspath = kmalloc(xspathsize, GFP_KERNEL);
		if (!xspath) {
			xenbus_dev_fatal(dev, -ENOMEM, "reading ring references");
			return -ENOMEM;
		}

		for (i = 0; i < be->blkif->nr_rings; i++) {
			memset(xspath, 0, xspathsize);
			snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, i);
			err = read_per_ring_refs(&be->blkif->rings[i], xspath);
			if (err) {
				kfree(xspath);
				return err;
			}
		}
		kfree(xspath);
	}
	return 0;
}

static const struct xenbus_device_id xen_blkbk_ids[] = {
	{ "vbd" },
	{ "" }
};

static struct xenbus_driver xen_blkbk_driver = {
	.ids = xen_blkbk_ids,
	.probe = xen_blkbk_probe,
	.remove = xen_blkbk_remove,
	.otherend_changed = frontend_changed
};

int xen_blkif_xenbus_init(void)
{
	return xenbus_register_backend(&xen_blkbk_driver);
}