]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - drivers/xen/xenbus/xenbus_client.c
Merge branch 'modsplit-Oct31_2011' of git://git.kernel.org/pub/scm/linux/kernel/git...
[mirror_ubuntu-artful-kernel.git] / drivers / xen / xenbus / xenbus_client.c
1 /******************************************************************************
2 * Client-facing interface for the Xenbus driver. In other words, the
3 * interface between the Xenbus and the device-specific code, be it the
4 * frontend or the backend of that driver.
5 *
6 * Copyright (C) 2005 XenSource Ltd
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/vmalloc.h>
36 #include <linux/export.h>
37 #include <asm/xen/hypervisor.h>
38 #include <xen/interface/xen.h>
39 #include <xen/interface/event_channel.h>
40 #include <xen/events.h>
41 #include <xen/grant_table.h>
42 #include <xen/xenbus.h>
43
44 const char *xenbus_strstate(enum xenbus_state state)
45 {
46 static const char *const name[] = {
47 [ XenbusStateUnknown ] = "Unknown",
48 [ XenbusStateInitialising ] = "Initialising",
49 [ XenbusStateInitWait ] = "InitWait",
50 [ XenbusStateInitialised ] = "Initialised",
51 [ XenbusStateConnected ] = "Connected",
52 [ XenbusStateClosing ] = "Closing",
53 [ XenbusStateClosed ] = "Closed",
54 [XenbusStateReconfiguring] = "Reconfiguring",
55 [XenbusStateReconfigured] = "Reconfigured",
56 };
57 return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
58 }
59 EXPORT_SYMBOL_GPL(xenbus_strstate);
60
61 /**
62 * xenbus_watch_path - register a watch
63 * @dev: xenbus device
64 * @path: path to watch
65 * @watch: watch to register
66 * @callback: callback to register
67 *
68 * Register a @watch on the given path, using the given xenbus_watch structure
69 * for storage, and the given @callback function as the callback. Return 0 on
70 * success, or -errno on error. On success, the given @path will be saved as
71 * @watch->node, and remains the caller's to free. On error, @watch->node will
72 * be NULL, the device will switch to %XenbusStateClosing, and the error will
73 * be saved in the store.
74 */
75 int xenbus_watch_path(struct xenbus_device *dev, const char *path,
76 struct xenbus_watch *watch,
77 void (*callback)(struct xenbus_watch *,
78 const char **, unsigned int))
79 {
80 int err;
81
82 watch->node = path;
83 watch->callback = callback;
84
85 err = register_xenbus_watch(watch);
86
87 if (err) {
88 watch->node = NULL;
89 watch->callback = NULL;
90 xenbus_dev_fatal(dev, err, "adding watch on %s", path);
91 }
92
93 return err;
94 }
95 EXPORT_SYMBOL_GPL(xenbus_watch_path);
96
97
98 /**
99 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
100 * @dev: xenbus device
101 * @watch: watch to register
102 * @callback: callback to register
103 * @pathfmt: format of path to watch
104 *
105 * Register a watch on the given @path, using the given xenbus_watch
106 * structure for storage, and the given @callback function as the callback.
107 * Return 0 on success, or -errno on error. On success, the watched path
108 * (@path/@path2) will be saved as @watch->node, and becomes the caller's to
109 * kfree(). On error, watch->node will be NULL, so the caller has nothing to
110 * free, the device will switch to %XenbusStateClosing, and the error will be
111 * saved in the store.
112 */
113 int xenbus_watch_pathfmt(struct xenbus_device *dev,
114 struct xenbus_watch *watch,
115 void (*callback)(struct xenbus_watch *,
116 const char **, unsigned int),
117 const char *pathfmt, ...)
118 {
119 int err;
120 va_list ap;
121 char *path;
122
123 va_start(ap, pathfmt);
124 path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
125 va_end(ap);
126
127 if (!path) {
128 xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
129 return -ENOMEM;
130 }
131 err = xenbus_watch_path(dev, path, watch, callback);
132
133 if (err)
134 kfree(path);
135 return err;
136 }
137 EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
138
139 static void xenbus_switch_fatal(struct xenbus_device *, int, int,
140 const char *, ...);
141
static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/* We check whether the state is currently set to the given value, and
	   if not, then the state is set.  We don't want to unconditionally
	   write the given state, because we don't want to fire watches
	   unnecessarily.  Furthermore, if the node has gone, we don't write
	   to it, as the device will be tearing down, and we don't want to
	   resurrect that directory.

	   Note that, because of this cached value of our state, this
	   function will not take a caller's Xenstore transaction
	   (something it was trying to in the past) because dev->state
	   would not get reset if the transaction was aborted.
	 */

	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	/* Already advertised at this state; avoid a spurious watch fire. */
	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		/* depth is forwarded so xenbus_switch_fatal() can avoid
		 * recursing back into us indefinitely. */
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	/* If the state node has vanished the device is being torn down;
	 * abort rather than resurrect the directory. */
	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		/* -EAGAIN on a committed attempt means the transaction
		 * raced with another store update; retry from the top. */
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;	/* cache the new state on success */

	return 0;
}
197
198 /**
199 * xenbus_switch_state
200 * @dev: xenbus device
201 * @state: new state
202 *
203 * Advertise in the store a change of the given driver to the given new_state.
204 * Return 0 on success, or -errno on error. On error, the device will switch
205 * to XenbusStateClosing, and the error will be saved in the store.
206 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	/* Depth 0: this is a top-level (non-recursive) state switch. */
	return __xenbus_switch_state(dev, state, 0);
}

EXPORT_SYMBOL_GPL(xenbus_switch_state);
213
int xenbus_frontend_closed(struct xenbus_device *dev)
{
	/* Advertise Closed in the store first, then wake whoever is
	 * waiting on dev->down — presumably the teardown path; confirm
	 * at the callers. */
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);
221
222 /**
223 * Return the path to the error node for the given device, or NULL on failure.
224 * If the value returned is non-NULL, then it is the caller's to kfree.
225 */
static char *error_path(struct xenbus_device *dev)
{
	/* Error nodes live under "error/<nodename>" in the store. */
	return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
}
230
231
232 static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
233 const char *fmt, va_list ap)
234 {
235 int ret;
236 unsigned int len;
237 char *printf_buffer = NULL;
238 char *path_buffer = NULL;
239
240 #define PRINTF_BUFFER_SIZE 4096
241 printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
242 if (printf_buffer == NULL)
243 goto fail;
244
245 len = sprintf(printf_buffer, "%i ", -err);
246 ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);
247
248 BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);
249
250 dev_err(&dev->dev, "%s\n", printf_buffer);
251
252 path_buffer = error_path(dev);
253
254 if (path_buffer == NULL) {
255 dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
256 dev->nodename, printf_buffer);
257 goto fail;
258 }
259
260 if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
261 dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
262 dev->nodename, printf_buffer);
263 goto fail;
264 }
265
266 fail:
267 kfree(printf_buffer);
268 kfree(path_buffer);
269 }
270
271
272 /**
273 * xenbus_dev_error
274 * @dev: xenbus device
275 * @err: error to report
276 * @fmt: error message format
277 *
278 * Report the given negative errno into the store, along with the given
279 * formatted message.
280 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	/* Varargs front end for xenbus_va_dev_error(). */
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);
290
291 /**
292 * xenbus_dev_fatal
293 * @dev: xenbus device
294 * @err: error to report
295 * @fmt: error message format
296 *
297 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
298 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
299 * closedown of this driver and its peer.
300 */
301
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	/* Same reporting as xenbus_dev_error() ... */
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	/* ... but a fatal error also starts an orderly shutdown by
	 * advertising Closing to the peer. */
	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
313
314 /**
315 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
316 * avoiding recursion within xenbus_switch_state.
317 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	/* Only switch to Closing from the top level (depth == 0); the
	 * inner call passes depth == 1 so a failure while switching to
	 * Closing cannot recurse indefinitely. */
	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}
330
331 /**
332 * xenbus_grant_ring
333 * @dev: xenbus device
334 * @ring_mfn: mfn of ring to grant
335
336 * Grant access to the given @ring_mfn to the peer of the given device. Return
337 * 0 on success, or -errno on error. On error, the device will switch to
338 * XenbusStateClosing, and the error will be saved in the store.
339 */
340 int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
341 {
342 int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
343 if (err < 0)
344 xenbus_dev_fatal(dev, err, "granting access to ring page");
345 return err;
346 }
347 EXPORT_SYMBOL_GPL(xenbus_grant_ring);
348
349
350 /**
351 * Allocate an event channel for the given xenbus_device, assigning the newly
352 * created local port to *port. Return 0 on success, or -errno on error. On
353 * error, the device will switch to XenbusStateClosing, and the error will be
354 * saved in the store.
355 */
356 int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
357 {
358 struct evtchn_alloc_unbound alloc_unbound;
359 int err;
360
361 alloc_unbound.dom = DOMID_SELF;
362 alloc_unbound.remote_dom = dev->otherend_id;
363
364 err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
365 &alloc_unbound);
366 if (err)
367 xenbus_dev_fatal(dev, err, "allocating event channel");
368 else
369 *port = alloc_unbound.port;
370
371 return err;
372 }
373 EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
374
375
376 /**
377 * Bind to an existing interdomain event channel in another domain. Returns 0
378 * on success and stores the local port in *port. On error, returns -errno,
379 * switches the device to XenbusStateClosing, and saves the error in XenStore.
380 */
381 int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
382 {
383 struct evtchn_bind_interdomain bind_interdomain;
384 int err;
385
386 bind_interdomain.remote_dom = dev->otherend_id;
387 bind_interdomain.remote_port = remote_port;
388
389 err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
390 &bind_interdomain);
391 if (err)
392 xenbus_dev_fatal(dev, err,
393 "binding to event channel %d from domain %d",
394 remote_port, dev->otherend_id);
395 else
396 *port = bind_interdomain.local_port;
397
398 return err;
399 }
400 EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
401
402
403 /**
404 * Free an existing event channel. Returns 0 on success or -errno on error.
405 */
406 int xenbus_free_evtchn(struct xenbus_device *dev, int port)
407 {
408 struct evtchn_close close;
409 int err;
410
411 close.port = port;
412
413 err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
414 if (err)
415 xenbus_dev_error(dev, err, "freeing event channel %d", port);
416
417 return err;
418 }
419 EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
420
421
422 /**
423 * xenbus_map_ring_valloc
424 * @dev: xenbus device
425 * @gnt_ref: grant reference
426 * @vaddr: pointer to address to be filled out by mapping
427 *
428 * Based on Rusty Russell's skeleton driver's map_page.
429 * Map a page of memory into this domain from another domain's grant table.
430 * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
431 * page to that address, and sets *vaddr to that address.
432 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
433 * or -ENOMEM on error. If an error is returned, device will switch to
434 * XenbusStateClosing and the error message will be saved in XenStore.
435 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
	struct gnttab_map_grant_ref op = {
		.flags = GNTMAP_host_map,
		.ref   = gnt_ref,
		.dom   = dev->otherend_id,
	};
	struct vm_struct *area;

	/* Ensure callers see NULL on every failure path. */
	*vaddr = NULL;

	area = alloc_vm_area(PAGE_SIZE);
	if (!area)
		return -ENOMEM;

	op.host_addr = (unsigned long)area->addr;

	/* The hypercall itself must not fail; the per-entry mapping
	 * result is reported separately in op.status. */
	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		free_vm_area(area);
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
		return op.status;
	}

	/* Stuff the handle in an unused field: phys_addr is not otherwise
	 * used for this vm_area, and xenbus_unmap_ring_vfree() reads the
	 * grant handle back out of it at unmap time. */
	area->phys_addr = (unsigned long)op.handle;

	*vaddr = area->addr;
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
471
472
473 /**
474 * xenbus_map_ring
475 * @dev: xenbus device
476 * @gnt_ref: grant reference
477 * @handle: pointer to grant handle to be filled
478 * @vaddr: address to be mapped to
479 *
480 * Map a page of memory into this domain from another domain's grant table.
481 * xenbus_map_ring does not allocate the virtual address space (you must do
482 * this yourself!). It only maps in the page to the specified address.
483 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
484 * or -ENOMEM on error. If an error is returned, device will switch to
485 * XenbusStateClosing and the error message will be saved in XenStore.
486 */
487 int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
488 grant_handle_t *handle, void *vaddr)
489 {
490 struct gnttab_map_grant_ref op = {
491 .host_addr = (unsigned long)vaddr,
492 .flags = GNTMAP_host_map,
493 .ref = gnt_ref,
494 .dom = dev->otherend_id,
495 };
496
497 if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
498 BUG();
499
500 if (op.status != GNTST_okay) {
501 xenbus_dev_fatal(dev, op.status,
502 "mapping in shared page %d from domain %d",
503 gnt_ref, dev->otherend_id);
504 } else
505 *handle = op.handle;
506
507 return op.status;
508 }
509 EXPORT_SYMBOL_GPL(xenbus_map_ring);
510
511
512 /**
513 * xenbus_unmap_ring_vfree
514 * @dev: xenbus device
515 * @vaddr: addr to unmap
516 *
517 * Based on Rusty Russell's skeleton driver's unmap_page.
518 * Unmap a page of memory in this domain that was imported from another domain.
519 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
520 * xenbus_map_ring_valloc (it will free the virtual address space).
521 * Returns 0 on success and returns GNTST_* on error
522 * (see xen/include/interface/grant_table.h).
523 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	struct vm_struct *area;
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
	};

	/* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
	 * method so that we don't have to muck with vmalloc internals here.
	 * We could force the user to hang on to their struct vm_struct from
	 * xenbus_map_ring_valloc, but these 6 lines considerably simplify
	 * this API.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area != NULL; area = area->next) {
		if (area->addr == vaddr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!area) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	/* xenbus_map_ring_valloc() stashed the grant handle in the
	 * otherwise-unused phys_addr field; recover it for the unmap. */
	op.handle = (grant_handle_t)area->phys_addr;

	/* The hypercall itself must not fail; the per-entry result is
	 * reported separately in op.status. */
	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	/* Only release the virtual address space once the grant mapping
	 * is really gone; on failure keep the area so the mapping is not
	 * leaked into a recycled address range. */
	if (op.status == GNTST_okay)
		free_vm_area(area);
	else
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 (int16_t)area->phys_addr, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
565
566
567 /**
568 * xenbus_unmap_ring
569 * @dev: xenbus device
570 * @handle: grant handle
571 * @vaddr: addr to unmap
572 *
573 * Unmap a page of memory in this domain that was imported from another domain.
574 * Returns 0 on success and returns GNTST_* on error
575 * (see xen/include/interface/grant_table.h).
576 */
577 int xenbus_unmap_ring(struct xenbus_device *dev,
578 grant_handle_t handle, void *vaddr)
579 {
580 struct gnttab_unmap_grant_ref op = {
581 .host_addr = (unsigned long)vaddr,
582 .handle = handle,
583 };
584
585 if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
586 BUG();
587
588 if (op.status != GNTST_okay)
589 xenbus_dev_error(dev, op.status,
590 "unmapping page at handle %d error %d",
591 handle, op.status);
592
593 return op.status;
594 }
595 EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
596
597
598 /**
599 * xenbus_read_driver_state
600 * @path: path for driver
601 *
602 * Return the state of the driver rooted at the given store path, or
603 * XenbusStateUnknown if no state can be read.
604 */
605 enum xenbus_state xenbus_read_driver_state(const char *path)
606 {
607 enum xenbus_state result;
608 int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
609 if (err)
610 result = XenbusStateUnknown;
611
612 return result;
613 }
614 EXPORT_SYMBOL_GPL(xenbus_read_driver_state);