/*
 * QEMU Xen emulation: Shared/overlay pages support
 *
 * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "qemu/module.h"
#include "qemu/main-loop.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qom/object.h"
#include "migration/vmstate.h"

#include "hw/sysbus.h"
#include "hw/xen/xen.h"
#include "hw/xen/xen_backend_ops.h"
#include "xen_overlay.h"
#include "xen_evtchn.h"
#include "xen_xenstore.h"

#include "sysemu/kvm.h"
#include "sysemu/kvm_xen.h"

#include "xenstore_impl.h"

#include "hw/xen/interface/io/xs_wire.h"
#include "hw/xen/interface/event_channel.h"
#include "hw/xen/interface/grant_table.h"
#define TYPE_XEN_XENSTORE "xen-xenstore"
OBJECT_DECLARE_SIMPLE_TYPE(XenXenstoreState, XEN_XENSTORE)

#define ENTRIES_PER_FRAME_V1 (XEN_PAGE_SIZE / sizeof(grant_entry_v1_t))
#define ENTRIES_PER_FRAME_V2 (XEN_PAGE_SIZE / sizeof(grant_entry_v2_t))

#define XENSTORE_HEADER_SIZE ((unsigned int)sizeof(struct xsd_sockmsg))
struct XenXenstoreState {
    /*< private >*/
    SysBusDevice busdev;
    /*< public >*/

    XenstoreImplState *impl;
    GList *watch_events; /* for the guest */

    MemoryRegion xenstore_page;
    struct xenstore_domain_interface *xs;
    uint8_t req_data[XENSTORE_HEADER_SIZE + XENSTORE_PAYLOAD_MAX];
    uint8_t rsp_data[XENSTORE_HEADER_SIZE + XENSTORE_PAYLOAD_MAX];
    uint32_t req_offset;
    uint32_t rsp_offset;
    bool rsp_pending;
    bool fatal_error;

    evtchn_port_t guest_port;
    evtchn_port_t be_port;
    struct xenevtchn_handle *eh;

    uint8_t *impl_state;
    uint32_t impl_state_size;

    struct xengntdev_handle *gt;
    void *granted_xs;
};

struct XenXenstoreState *xen_xenstore_singleton;
static void xen_xenstore_event(void *opaque);
static void fire_watch_cb(void *opaque, const char *path, const char *token);

static struct xenstore_backend_ops emu_xenstore_backend_ops;
static void G_GNUC_PRINTF (4, 5) relpath_printf(XenXenstoreState *s,
                                                GList *perms,
                                                const char *relpath,
                                                const char *fmt, ...)
{
    gchar *abspath;
    gchar *value;
    va_list args;
    GByteArray *data;
    int err;

    abspath = g_strdup_printf("/local/domain/%u/%s", xen_domid, relpath);
    va_start(args, fmt);
    value = g_strdup_vprintf(fmt, args);
    va_end(args);

    data = g_byte_array_new_take((void *)value, strlen(value));

    err = xs_impl_write(s->impl, DOMID_QEMU, XBT_NULL, abspath, data);
    assert(!err);

    g_byte_array_unref(data);

    err = xs_impl_set_perms(s->impl, DOMID_QEMU, XBT_NULL, abspath, perms);
    assert(!err);

    g_free(abspath);
}
static void xen_xenstore_realize(DeviceState *dev, Error **errp)
{
    XenXenstoreState *s = XEN_XENSTORE(dev);
    GList *perms;

    if (xen_mode != XEN_EMULATE) {
        error_setg(errp, "Xen xenstore support is for Xen emulation");
        return;
    }

    memory_region_init_ram(&s->xenstore_page, OBJECT(dev), "xen:xenstore_page",
                           XEN_PAGE_SIZE, &error_abort);
    memory_region_set_enabled(&s->xenstore_page, true);
    s->xs = memory_region_get_ram_ptr(&s->xenstore_page);
    memset(s->xs, 0, XEN_PAGE_SIZE);

    /* We can't map it this early as KVM isn't ready */
    xen_xenstore_singleton = s;

    s->eh = xen_be_evtchn_open();
    if (!s->eh) {
        error_setg(errp, "Xenstore evtchn port init failed");
        return;
    }
    aio_set_fd_handler(qemu_get_aio_context(), xen_be_evtchn_fd(s->eh),
                       xen_xenstore_event, NULL, NULL, NULL, s);

    s->impl = xs_impl_create(xen_domid);

    /* Populate the default nodes */

    /* Nodes owned by 'dom0' but readable by the guest */
    perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, DOMID_QEMU));
    perms = g_list_append(perms, xs_perm_as_string(XS_PERM_READ, xen_domid));

    relpath_printf(s, perms, "", "%s", "");

    relpath_printf(s, perms, "domid", "%u", xen_domid);

    relpath_printf(s, perms, "control/platform-feature-xs_reset_watches", "%u", 1);
    relpath_printf(s, perms, "control/platform-feature-multiprocessor-suspend", "%u", 1);

    relpath_printf(s, perms, "platform/acpi", "%u", 1);
    relpath_printf(s, perms, "platform/acpi_s3", "%u", 1);
    relpath_printf(s, perms, "platform/acpi_s4", "%u", 1);
    relpath_printf(s, perms, "platform/acpi_laptop_slate", "%u", 0);

    g_list_free_full(perms, g_free);

    /* Nodes owned by the guest */
    perms = g_list_append(NULL, xs_perm_as_string(XS_PERM_NONE, xen_domid));

    relpath_printf(s, perms, "attr", "%s", "");

    relpath_printf(s, perms, "control/shutdown", "%s", "");
    relpath_printf(s, perms, "control/feature-poweroff", "%u", 1);
    relpath_printf(s, perms, "control/feature-reboot", "%u", 1);
    relpath_printf(s, perms, "control/feature-suspend", "%u", 1);
    relpath_printf(s, perms, "control/feature-s3", "%u", 1);
    relpath_printf(s, perms, "control/feature-s4", "%u", 1);

    relpath_printf(s, perms, "data", "%s", "");
    relpath_printf(s, perms, "device", "%s", "");
    relpath_printf(s, perms, "drivers", "%s", "");
    relpath_printf(s, perms, "error", "%s", "");
    relpath_printf(s, perms, "feature", "%s", "");

    g_list_free_full(perms, g_free);

    xen_xenstore_ops = &emu_xenstore_backend_ops;
}
static bool xen_xenstore_is_needed(void *opaque)
{
    return xen_mode == XEN_EMULATE;
}
static int xen_xenstore_pre_save(void *opaque)
{
    XenXenstoreState *s = opaque;
    GByteArray *save;

    s->guest_port = xen_be_evtchn_get_guest_port(s->eh);

    g_free(s->impl_state);
    save = xs_impl_serialize(s->impl);
    s->impl_state = save->data;
    s->impl_state_size = save->len;
    g_byte_array_free(save, false);

    return 0;
}
static int xen_xenstore_post_load(void *opaque, int ver)
{
    XenXenstoreState *s = opaque;
    GByteArray *save;
    int ret;

    /*
     * As qemu/dom0, rebind to the guest's port. The Windows drivers may
     * unbind the XenStore evtchn and rebind to it, having obtained the
     * "remote" port through EVTCHNOP_status. In the case that migration
     * occurs while it's unbound, the "remote" port needs to be the same
     * as before so that the guest can find it, but should remain unbound.
     */
    int be_port = xen_be_evtchn_bind_interdomain(s->eh, xen_domid,
                                                 s->guest_port);
    if (be_port < 0) {
        return be_port;
    }
    s->be_port = be_port;

    save = g_byte_array_new_take(s->impl_state, s->impl_state_size);
    s->impl_state = NULL;
    s->impl_state_size = 0;

    ret = xs_impl_deserialize(s->impl, save, xen_domid, fire_watch_cb, s);
    return ret;
}
static const VMStateDescription xen_xenstore_vmstate = {
    .name = "xen_xenstore",
    .unmigratable = 1, /* The PV back ends don't migrate yet */
    .minimum_version_id = 1,
    .needed = xen_xenstore_is_needed,
    .pre_save = xen_xenstore_pre_save,
    .post_load = xen_xenstore_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(req_data, XenXenstoreState,
                            sizeof_field(XenXenstoreState, req_data)),
        VMSTATE_UINT8_ARRAY(rsp_data, XenXenstoreState,
                            sizeof_field(XenXenstoreState, rsp_data)),
        VMSTATE_UINT32(req_offset, XenXenstoreState),
        VMSTATE_UINT32(rsp_offset, XenXenstoreState),
        VMSTATE_BOOL(rsp_pending, XenXenstoreState),
        VMSTATE_UINT32(guest_port, XenXenstoreState),
        VMSTATE_BOOL(fatal_error, XenXenstoreState),
        VMSTATE_UINT32(impl_state_size, XenXenstoreState),
        VMSTATE_VARRAY_UINT32_ALLOC(impl_state, XenXenstoreState,
                                    impl_state_size, 0,
                                    vmstate_info_uint8, uint8_t),
        VMSTATE_END_OF_LIST()
    }
};
static void xen_xenstore_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = xen_xenstore_realize;
    dc->vmsd = &xen_xenstore_vmstate;
}
static const TypeInfo xen_xenstore_info = {
    .name = TYPE_XEN_XENSTORE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XenXenstoreState),
    .class_init = xen_xenstore_class_init,
};
void xen_xenstore_create(void)
{
    DeviceState *dev = sysbus_create_simple(TYPE_XEN_XENSTORE, -1, NULL);

    xen_xenstore_singleton = XEN_XENSTORE(dev);

    /*
     * Defer the init (xen_xenstore_reset()) until KVM is set up and the
     * overlay page can be mapped.
     */
}

static void xen_xenstore_register_types(void)
{
    type_register_static(&xen_xenstore_info);
}

type_init(xen_xenstore_register_types)
uint16_t xen_xenstore_get_port(void)
{
    XenXenstoreState *s = xen_xenstore_singleton;

    return s->guest_port;
}
static bool req_pending(XenXenstoreState *s)
{
    struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;

    return s->req_offset == XENSTORE_HEADER_SIZE + req->len;
}

static void reset_req(XenXenstoreState *s)
{
    memset(s->req_data, 0, sizeof(s->req_data));
    s->req_offset = 0;
}

static void reset_rsp(XenXenstoreState *s)
{
    s->rsp_pending = false;

    memset(s->rsp_data, 0, sizeof(s->rsp_data));
    s->rsp_offset = 0;
}
static void xs_error(XenXenstoreState *s, unsigned int id,
                     xs_transaction_t tx_id, int errnum)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    const char *errstr = NULL;

    for (unsigned int i = 0; i < ARRAY_SIZE(xsd_errors); i++) {
        struct xsd_errors *xsd_error = &xsd_errors[i];

        if (xsd_error->errnum == errnum) {
            errstr = xsd_error->errstring;
            break;
        }
    }
    assert(errstr);

    trace_xenstore_error(id, tx_id, errstr);

    rsp->type = XS_ERROR;
    rsp->req_id = id;
    rsp->tx_id = tx_id;
    rsp->len = (uint32_t)strlen(errstr) + 1;

    memcpy(&rsp[1], errstr, rsp->len);
}
static void xs_ok(XenXenstoreState *s, unsigned int type, unsigned int req_id,
                  xs_transaction_t tx_id)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    const char *okstr = "OK";

    rsp->type = type;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = (uint32_t)strlen(okstr) + 1;

    memcpy(&rsp[1], okstr, rsp->len);
}
/*
 * The correct request and response formats are documented in xen.git:
 * docs/misc/xenstore.txt. A summary is given below for convenience.
 * The '|' symbol represents a NUL character.
 *
 * ---------- Database read, write and permissions operations ----------
 *
 * READ                    <path>|                 <value|>
 * WRITE                   <path>|<value|>
 *         Store and read the octet string <value> at <path>.
 *         WRITE creates any missing parent paths, with empty values.
 *
 * MKDIR                   <path>|
 *         Ensures that the <path> exists, if necessary by creating
 *         it and any missing parents with empty values. If <path>
 *         or any parent already exists, its value is left unchanged.
 *
 * RM                      <path>|
 *         Ensures that the <path> does not exist, by deleting
 *         it and all of its children. It is not an error if <path> does
 *         not exist, but it _is_ an error if <path>'s immediate parent
 *         does not exist either.
 *
 * DIRECTORY               <path>|                 <child-leaf-name>|*
 *         Gives a list of the immediate children of <path>, as only the
 *         leafnames. The resulting children are each named
 *         <path>/<child-leaf-name>.
 *
 * DIRECTORY_PART          <path>|<offset>         <gencnt>|<child-leaf-name>|*
 *         Same as DIRECTORY, but to be used for children lists longer than
 *         XENSTORE_PAYLOAD_MAX. Input are <path> and the byte offset into
 *         the list of children to return. Return values are the generation
 *         count <gencnt> of the node (to be used to ensure the node hasn't
 *         changed between two reads: <gencnt> being the same for multiple
 *         reads guarantees the node hasn't changed) and the list of children
 *         starting at the specified <offset> of the complete list.
 *
 * GET_PERMS               <path>|                 <perm-as-string>|+
 * SET_PERMS               <path>|<perm-as-string>|+?
 *         <perm-as-string> is one of the following
 *                 w<domid>        write only
 *                 r<domid>        read only
 *                 b<domid>        both read and write
 *                 n<domid>        no access
 *         See https://wiki.xen.org/wiki/XenBus section
 *         `Permissions' for details of the permissions system.
 *         It is possible to set permissions for the special watch paths
 *         "@introduceDomain" and "@releaseDomain" to enable receiving those
 *         watches in unprivileged domains.
 *
 * ---------- Watches ----------
 *
 * WATCH                   <wpath>|<token>|?
 *
 *         When a <path> is modified (including path creation, removal,
 *         contents change or permissions change) this generates an event
 *         on the changed <path>. Changes made in transactions cause an
 *         event only if and when committed. Each occurring event is
 *         matched against all the watches currently set up, and each
 *         matching watch results in a WATCH_EVENT message (see below).
 *
 *         The event's path matches the watch's <wpath> if it is a child
 *         of <wpath>.
 *
 *         <wpath> can be a <path> to watch or @<wspecial>. In the
 *         latter case <wspecial> may have any syntax but it matches
 *         (according to the rules above) only the following special
 *         events which are invented by xenstored:
 *             @introduceDomain    occurs on INTRODUCE
 *             @releaseDomain      occurs on any domain crash or
 *                                 shutdown, and also on RELEASE
 *                                 and domain destruction
 *         <wspecial> events are sent to privileged callers or explicitly
 *         via SET_PERMS enabled domains only.
 *
 *         When a watch is first set up it is triggered once straight
 *         away, with <path> equal to <wpath>. Watches may be triggered
 *         spuriously. The tx_id in a WATCH request is ignored.
 *
 *         Watches are supposed to be restricted by the permissions
 *         system but in practice the implementation is imperfect.
 *         Applications should not rely on being sent a notification for
 *         paths that they cannot read; however, an application may rely
 *         on being sent a watch when a path which it _is_ able to read
 *         is deleted even if that leaves only a nonexistent unreadable
 *         parent. A notification may be omitted if a node's permissions
 *         are changed so as to make it unreadable, in which case future
 *         notifications may be suppressed (and if the node is later made
 *         readable, some notifications may have been lost).
 *
 * WATCH_EVENT             <epath>|<token>|
 *         Unsolicited `reply' generated for matching modification events
 *         as described above. req_id and tx_id are both 0.
 *
 *         <epath> is the event's path, ie the actual path that was
 *         modified; however if the event was the recursive removal of a
 *         parent of <wpath>, <epath> is just
 *         <wpath> (rather than the actual path which was removed). So
 *         <epath> is a child of <wpath>, regardless.
 *
 *         Iff <wpath> for the watch was specified as a relative pathname,
 *         the <epath> path will also be relative (with the same base,
 *         obviously).
 *
 * UNWATCH                 <wpath>|<token>|?
 *
 * RESET_WATCHES           |
 *         Reset all watches and transactions of the caller.
 *
 * ---------- Transactions ----------
 *
 * TRANSACTION_START       |                       <transid>|
 *         <transid> is an opaque uint32_t allocated by xenstored
 *         represented as unsigned decimal. After this, transaction may
 *         be referenced by using <transid> (as 32-bit binary) in the
 *         tx_id request header field. When transaction is started whole
 *         db is copied; reads and writes happen on the copy.
 *         It is not legal to send non-0 tx_id in TRANSACTION_START.
 *
 * TRANSACTION_END         T|
 * TRANSACTION_END         F|
 *         tx_id must refer to existing transaction. After this
 *         request the tx_id is no longer valid and may be reused by
 *         xenstore. If F, the transaction is discarded. If T,
 *         it is committed: if there were any other intervening writes
 *         then our END gets EAGAIN.
 *
 *         The plan is that in the future only intervening `conflicting'
 *         writes cause EAGAIN, meaning only writes or other commits
 *         which changed paths which were read or written in the
 *         transaction at hand.
 */
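/*
 * Illustrative sketch, not part of the protocol summary above: every message
 * on the ring is a struct xsd_sockmsg header immediately followed by 'len'
 * payload bytes. Assuming a guest outside any transaction wanting to read a
 * (hypothetical) node "/local/domain/1/name", the request it writes to the
 * ring would look roughly like this:
 *
 *     struct xsd_sockmsg hdr = {
 *         .type   = XS_READ,
 *         .req_id = 1,                              // echoed back in the reply
 *         .tx_id  = 0,                              // no transaction
 *         .len    = sizeof("/local/domain/1/name"), // payload, including NUL
 *     };
 *     // ...header bytes, then the NUL-terminated path as the payload
 *
 * The reply carries the same req_id and tx_id, with either the node's value
 * or an XS_ERROR string as its payload.
 */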
static void xs_read(XenXenstoreState *s, unsigned int req_id,
                    xs_transaction_t tx_id, uint8_t *req_data, unsigned int len)
{
    const char *path = (const char *)req_data;
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    uint8_t *rsp_data = (uint8_t *)&rsp[1];
    g_autoptr(GByteArray) data = g_byte_array_new();
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_read(tx_id, path);
    err = xs_impl_read(s->impl, xen_domid, tx_id, path, data);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_READ;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = 0;

    len = data->len;
    if (len > XENSTORE_PAYLOAD_MAX) {
        xs_error(s, req_id, tx_id, E2BIG);
        return;
    }

    memcpy(&rsp_data[rsp->len], data->data, len);
    rsp->len += len;
}
static void xs_write(XenXenstoreState *s, unsigned int req_id,
                     xs_transaction_t tx_id, uint8_t *req_data,
                     unsigned int len)
{
    g_autoptr(GByteArray) data = g_byte_array_new();
    const char *path;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    path = (const char *)req_data;

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    g_byte_array_append(data, req_data, len);

    trace_xenstore_write(tx_id, path);
    err = xs_impl_write(s->impl, xen_domid, tx_id, path, data);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_WRITE, req_id, tx_id);
}
static void xs_mkdir(XenXenstoreState *s, unsigned int req_id,
                     xs_transaction_t tx_id, uint8_t *req_data,
                     unsigned int len)
{
    g_autoptr(GByteArray) data = g_byte_array_new();
    const char *path;
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    path = (const char *)req_data;

    trace_xenstore_mkdir(tx_id, path);
    err = xs_impl_read(s->impl, xen_domid, tx_id, path, data);
    if (err == ENOENT) {
        err = xs_impl_write(s->impl, xen_domid, tx_id, path, data);
    }

    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_MKDIR, req_id, tx_id);
}
static void xs_append_strings(XenXenstoreState *s, struct xsd_sockmsg *rsp,
                              GList *strings, unsigned int start, bool truncate)
{
    uint8_t *rsp_data = (uint8_t *)&rsp[1];
    GList *l;

    for (l = strings; l; l = l->next) {
        size_t len = strlen(l->data) + 1; /* Including the NUL termination */
        char *str = l->data;

        if (rsp->len + len > XENSTORE_PAYLOAD_MAX) {
            if (truncate) {
                len = XENSTORE_PAYLOAD_MAX - rsp->len;
                if (!len) {
                    return;
                }
            } else {
                xs_error(s, rsp->req_id, rsp->tx_id, E2BIG);
                return;
            }
        }

        if (start) {
            if (start >= len) {
                start -= len;
                continue;
            }

            str += start;
            len -= start;
            start = 0;
        }

        memcpy(&rsp_data[rsp->len], str, len);
        rsp->len += len;
    }

    /* XS_DIRECTORY_PART wants an extra NUL to indicate the end */
    if (truncate && rsp->len < XENSTORE_PAYLOAD_MAX) {
        rsp_data[rsp->len++] = '\0';
    }
}
static void xs_directory(XenXenstoreState *s, unsigned int req_id,
                         xs_transaction_t tx_id, uint8_t *req_data,
                         unsigned int len)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    GList *items = NULL;
    const char *path;
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    path = (const char *)req_data;

    trace_xenstore_directory(tx_id, path);
    err = xs_impl_directory(s->impl, xen_domid, tx_id, path, NULL, &items);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_DIRECTORY;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = 0;

    xs_append_strings(s, rsp, items, 0, false);

    g_list_free_full(items, g_free);
}
static void xs_directory_part(XenXenstoreState *s, unsigned int req_id,
                              xs_transaction_t tx_id, uint8_t *req_data,
                              unsigned int len)
{
    const char *offset_str, *path = (const char *)req_data;
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    char *rsp_data = (char *)&rsp[1];
    uint64_t gencnt = 0;
    unsigned int offset;
    GList *items = NULL;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    offset_str = (const char *)req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    if (len) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    if (qemu_strtoui(offset_str, NULL, 10, &offset) < 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_directory_part(tx_id, path, offset);
    err = xs_impl_directory(s->impl, xen_domid, tx_id, path, &gencnt, &items);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_DIRECTORY_PART;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = snprintf(rsp_data, XENSTORE_PAYLOAD_MAX, "%" PRIu64, gencnt) + 1;

    xs_append_strings(s, rsp, items, offset, true);

    g_list_free_full(items, g_free);
}
static void xs_transaction_start(XenXenstoreState *s, unsigned int req_id,
                                 xs_transaction_t tx_id, uint8_t *req_data,
                                 unsigned int len)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    char *rsp_data = (char *)&rsp[1];
    int err;

    if (len != 1 || req_data[0] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    rsp->type = XS_TRANSACTION_START;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = 0;

    err = xs_impl_transaction_start(s->impl, xen_domid, &tx_id);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    trace_xenstore_transaction_start(tx_id);

    rsp->len = snprintf(rsp_data, XENSTORE_PAYLOAD_MAX, "%u", tx_id);
    assert(rsp->len < XENSTORE_PAYLOAD_MAX);
    rsp->len++;
}
static void xs_transaction_end(XenXenstoreState *s, unsigned int req_id,
                               xs_transaction_t tx_id, uint8_t *req_data,
                               unsigned int len)
{
    bool commit;
    int err;

    if (len != 2 || req_data[1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    switch (req_data[0]) {
    case 'T':
        commit = true;
        break;
    case 'F':
        commit = false;
        break;
    default:
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_transaction_end(tx_id, commit);
    err = xs_impl_transaction_end(s->impl, xen_domid, tx_id, commit);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_TRANSACTION_END, req_id, tx_id);
}
static void xs_rm(XenXenstoreState *s, unsigned int req_id,
                  xs_transaction_t tx_id, uint8_t *req_data, unsigned int len)
{
    const char *path = (const char *)req_data;
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_rm(tx_id, path);
    err = xs_impl_rm(s->impl, xen_domid, tx_id, path);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_RM, req_id, tx_id);
}
static void xs_get_perms(XenXenstoreState *s, unsigned int req_id,
                         xs_transaction_t tx_id, uint8_t *req_data,
                         unsigned int len)
{
    const char *path = (const char *)req_data;
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    GList *perms = NULL;
    int err;

    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_get_perms(tx_id, path);
    err = xs_impl_get_perms(s->impl, xen_domid, tx_id, path, &perms);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    rsp->type = XS_GET_PERMS;
    rsp->req_id = req_id;
    rsp->tx_id = tx_id;
    rsp->len = 0;

    xs_append_strings(s, rsp, perms, 0, false);

    g_list_free_full(perms, g_free);
}
static void xs_set_perms(XenXenstoreState *s, unsigned int req_id,
                         xs_transaction_t tx_id, uint8_t *req_data,
                         unsigned int len)
{
    const char *path = (const char *)req_data;
    const char *perm;
    GList *perms = NULL;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    perm = (const char *)req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            perms = g_list_append(perms, (gpointer)perm);
            perm = (const char *)req_data;
        }
    }

    /*
     * Note that there may be trailing garbage at the end of the buffer.
     * This is explicitly permitted by the '?' at the end of the definition:
     *
     *    SET_PERMS         <path>|<perm-as-string>|+?
     */

    trace_xenstore_set_perms(tx_id, path);
    err = xs_impl_set_perms(s->impl, xen_domid, tx_id, path, perms);
    g_list_free(perms);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_SET_PERMS, req_id, tx_id);
}
static void xs_watch(XenXenstoreState *s, unsigned int req_id,
                     xs_transaction_t tx_id, uint8_t *req_data,
                     unsigned int len)
{
    const char *token, *path = (const char *)req_data;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    token = (const char *)req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    /*
     * Note that there may be trailing garbage at the end of the buffer.
     * This is explicitly permitted by the '?' at the end of the definition:
     *
     *    WATCH             <wpath>|<token>|?
     */

    trace_xenstore_watch(path, token);
    err = xs_impl_watch(s->impl, xen_domid, path, token, fire_watch_cb, s);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_WATCH, req_id, tx_id);
}
static void xs_unwatch(XenXenstoreState *s, unsigned int req_id,
                       xs_transaction_t tx_id, uint8_t *req_data,
                       unsigned int len)
{
    const char *token, *path = (const char *)req_data;
    int err;

    if (len == 0) {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    token = (const char *)req_data;
    while (len--) {
        if (*req_data++ == '\0') {
            break;
        }
        if (len == 0) {
            xs_error(s, req_id, tx_id, EINVAL);
            return;
        }
    }

    trace_xenstore_unwatch(path, token);
    err = xs_impl_unwatch(s->impl, xen_domid, path, token, fire_watch_cb, s);
    if (err) {
        xs_error(s, req_id, tx_id, err);
        return;
    }

    xs_ok(s, XS_UNWATCH, req_id, tx_id);
}
static void xs_reset_watches(XenXenstoreState *s, unsigned int req_id,
                             xs_transaction_t tx_id, uint8_t *req_data,
                             unsigned int len)
{
    if (len == 0 || req_data[len - 1] != '\0') {
        xs_error(s, req_id, tx_id, EINVAL);
        return;
    }

    trace_xenstore_reset_watches();
    xs_impl_reset_watches(s->impl, xen_domid);

    xs_ok(s, XS_RESET_WATCHES, req_id, tx_id);
}
static void xs_priv(XenXenstoreState *s, unsigned int req_id,
                    xs_transaction_t tx_id, uint8_t *data,
                    unsigned int len)
{
    xs_error(s, req_id, tx_id, EACCES);
}

static void xs_unimpl(XenXenstoreState *s, unsigned int req_id,
                      xs_transaction_t tx_id, uint8_t *data,
                      unsigned int len)
{
    xs_error(s, req_id, tx_id, ENOSYS);
}
typedef void (*xs_impl)(XenXenstoreState *s, unsigned int req_id,
                        xs_transaction_t tx_id, uint8_t *data,
                        unsigned int len);

struct xsd_req {
    const char *name;
    xs_impl fn;
};

#define XSD_REQ(_type, _fn)                     \
    [_type] = { .name = #_type, .fn = _fn }

struct xsd_req xsd_reqs[] = {
    XSD_REQ(XS_READ, xs_read),
    XSD_REQ(XS_WRITE, xs_write),
    XSD_REQ(XS_MKDIR, xs_mkdir),
    XSD_REQ(XS_DIRECTORY, xs_directory),
    XSD_REQ(XS_DIRECTORY_PART, xs_directory_part),
    XSD_REQ(XS_TRANSACTION_START, xs_transaction_start),
    XSD_REQ(XS_TRANSACTION_END, xs_transaction_end),
    XSD_REQ(XS_RM, xs_rm),
    XSD_REQ(XS_GET_PERMS, xs_get_perms),
    XSD_REQ(XS_SET_PERMS, xs_set_perms),
    XSD_REQ(XS_WATCH, xs_watch),
    XSD_REQ(XS_UNWATCH, xs_unwatch),
    XSD_REQ(XS_CONTROL, xs_priv),
    XSD_REQ(XS_INTRODUCE, xs_priv),
    XSD_REQ(XS_RELEASE, xs_priv),
    XSD_REQ(XS_IS_DOMAIN_INTRODUCED, xs_priv),
    XSD_REQ(XS_RESUME, xs_priv),
    XSD_REQ(XS_SET_TARGET, xs_priv),
    XSD_REQ(XS_RESET_WATCHES, xs_reset_watches),
};
static void process_req(XenXenstoreState *s)
{
    struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
    xs_impl handler = NULL;

    assert(req_pending(s));
    assert(!s->rsp_pending);

    if (req->type < ARRAY_SIZE(xsd_reqs)) {
        handler = xsd_reqs[req->type].fn;
    }
    if (!handler) {
        handler = &xs_unimpl;
    }

    handler(s, req->req_id, req->tx_id, (uint8_t *)&req[1], req->len);

    s->rsp_pending = true;
    reset_req(s);
}
static unsigned int copy_from_ring(XenXenstoreState *s, uint8_t *ptr,
                                   unsigned int len)
{
    XENSTORE_RING_IDX prod = qatomic_read(&s->xs->req_prod);
    XENSTORE_RING_IDX cons = qatomic_read(&s->xs->req_cons);
    unsigned int copied = 0;

    /* Ensure the ring contents don't cross the req_prod access. */
    smp_rmb();

    while (len) {
        unsigned int avail = prod - cons;
        unsigned int offset = MASK_XENSTORE_IDX(cons);
        unsigned int copylen = avail;

        if (avail > XENSTORE_RING_SIZE) {
            error_report("XenStore ring handling error");
            s->fatal_error = true;
            break;
        } else if (avail == 0) {
            break;
        }

        if (copylen > len) {
            copylen = len;
        }
        if (copylen > XENSTORE_RING_SIZE - offset) {
            copylen = XENSTORE_RING_SIZE - offset;
        }

        memcpy(ptr, &s->xs->req[offset], copylen);
        ptr += copylen;

        len -= copylen;
        copied += copylen;

        cons += copylen;
    }

    /*
     * Not sure this ever mattered except on Alpha, but this barrier
     * is to ensure that the update to req_cons is globally visible
     * only after we have consumed all the data from the ring, and we
     * don't end up seeing data written to the ring *after* the other
     * end sees the update and writes more to the ring. Xen's own
     * xenstored has the same barrier here (although with no comment
     * at all, obviously, because it's Xen code).
     */
    smp_mb();

    qatomic_set(&s->xs->req_cons, cons);

    return copied;
}
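/*
 * A minimal sketch of the index arithmetic used above (not from the original
 * source): req_prod and req_cons are free-running 32-bit counters, so the
 * bytes available to read are simply their difference, and the physical
 * position in the ring is the counter masked to the ring size.
 *
 *     XENSTORE_RING_IDX prod = 0x10005, cons = 0xfffe;
 *     unsigned int avail  = prod - cons;              // 7 bytes to consume
 *     unsigned int offset = MASK_XENSTORE_IDX(cons);  // where they start
 *
 * Unsigned wrap-around keeps the subtraction correct even when the counters
 * overflow, which is why avail > XENSTORE_RING_SIZE can only mean the ring
 * state is corrupt.
 */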
static unsigned int copy_to_ring(XenXenstoreState *s, uint8_t *ptr,
                                 unsigned int len)
{
    XENSTORE_RING_IDX cons = qatomic_read(&s->xs->rsp_cons);
    XENSTORE_RING_IDX prod = qatomic_read(&s->xs->rsp_prod);
    unsigned int copied = 0;

    /*
     * This matches the barrier in copy_to_ring() (or the guest's
     * equivalent) between writing the data to the ring and updating
     * rsp_prod. It protects against the pathological case (which
     * again I think never happened except on Alpha) where our
     * subsequent writes to the ring could *cross* the read of
     * rsp_cons and the guest could see the new data when it was
     * intending to read the old.
     */
    smp_mb();

    while (len) {
        unsigned int avail = cons + XENSTORE_RING_SIZE - prod;
        unsigned int offset = MASK_XENSTORE_IDX(prod);
        unsigned int copylen = len;

        if (avail > XENSTORE_RING_SIZE) {
            error_report("XenStore ring handling error");
            s->fatal_error = true;
            break;
        } else if (avail == 0) {
            break;
        }

        if (copylen > avail) {
            copylen = avail;
        }
        if (copylen > XENSTORE_RING_SIZE - offset) {
            copylen = XENSTORE_RING_SIZE - offset;
        }

        memcpy(&s->xs->rsp[offset], ptr, copylen);
        ptr += copylen;

        len -= copylen;
        copied += copylen;

        prod += copylen;
    }

    /* Ensure the ring contents are seen before rsp_prod update. */
    smp_wmb();

    qatomic_set(&s->xs->rsp_prod, prod);

    return copied;
}
static unsigned int get_req(XenXenstoreState *s)
{
    unsigned int copied = 0;

    if (s->fatal_error) {
        return 0;
    }

    assert(!req_pending(s));

    if (s->req_offset < XENSTORE_HEADER_SIZE) {
        void *ptr = s->req_data + s->req_offset;
        unsigned int len = XENSTORE_HEADER_SIZE;
        unsigned int copylen = copy_from_ring(s, ptr, len);

        copied += copylen;
        s->req_offset += copylen;
    }

    if (s->req_offset >= XENSTORE_HEADER_SIZE) {
        struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;

        if (req->len > (uint32_t)XENSTORE_PAYLOAD_MAX) {
            error_report("Illegal XenStore request");
            s->fatal_error = true;
            return 0;
        }

        void *ptr = s->req_data + s->req_offset;
        unsigned int len = XENSTORE_HEADER_SIZE + req->len - s->req_offset;
        unsigned int copylen = copy_from_ring(s, ptr, len);

        copied += copylen;
        s->req_offset += copylen;
    }

    return copied;
}
static unsigned int put_rsp(XenXenstoreState *s)
{
    if (s->fatal_error) {
        return 0;
    }

    assert(s->rsp_pending);

    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    assert(s->rsp_offset < XENSTORE_HEADER_SIZE + rsp->len);

    void *ptr = s->rsp_data + s->rsp_offset;
    unsigned int len = XENSTORE_HEADER_SIZE + rsp->len - s->rsp_offset;
    unsigned int copylen = copy_to_ring(s, ptr, len);

    s->rsp_offset += copylen;

    /* Have we produced a complete response? */
    if (s->rsp_offset == XENSTORE_HEADER_SIZE + rsp->len) {
        reset_rsp(s);
    }

    return copylen;
}
static void deliver_watch(XenXenstoreState *s, const char *path,
                          const char *token)
{
    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
    uint8_t *rsp_data = (uint8_t *)&rsp[1];
    unsigned int len;

    assert(!s->rsp_pending);

    trace_xenstore_watch_event(path, token);

    rsp->type = XS_WATCH_EVENT;
    rsp->req_id = 0;
    rsp->tx_id = 0;
    rsp->len = 0;

    len = strlen(path);

    /* XENSTORE_ABS/REL_PATH_MAX should ensure there can be no overflow */
    assert(rsp->len + len < XENSTORE_PAYLOAD_MAX);

    memcpy(&rsp_data[rsp->len], path, len);
    rsp->len += len;
    rsp_data[rsp->len] = '\0';
    rsp->len++;

    len = strlen(token);
    /*
     * It is possible for the guest to have chosen a token that will
     * not fit (along with the path) into a watch event. We have no
     * choice but to drop the event if this is the case.
     */
    if (rsp->len + len >= XENSTORE_PAYLOAD_MAX) {
        return;
    }

    memcpy(&rsp_data[rsp->len], token, len);
    rsp->len += len;
    rsp_data[rsp->len] = '\0';
    rsp->len++;

    s->rsp_pending = true;
}
struct watch_event {
    char *path;
    char *token;
};

static void free_watch_event(struct watch_event *ev)
{
    if (ev) {
        g_free(ev->path);
        g_free(ev->token);
        g_free(ev);
    }
}

static void queue_watch(XenXenstoreState *s, const char *path,
                        const char *token)
{
    struct watch_event *ev = g_new0(struct watch_event, 1);

    ev->path = g_strdup(path);
    ev->token = g_strdup(token);

    s->watch_events = g_list_append(s->watch_events, ev);
}
static void fire_watch_cb(void *opaque, const char *path, const char *token)
{
    XenXenstoreState *s = opaque;

    assert(qemu_mutex_iothread_locked());

    /*
     * If there's a response pending, we obviously can't scribble over
     * it. But if there's a request pending, it has dibs on the buffer
     * too.
     *
     * In the common case of a watch firing due to backend activity
     * when the ring was otherwise idle, we should be able to copy the
     * strings directly into the rsp_data and thence the actual ring,
     * without needing to perform any allocations and queue them.
     */
    if (s->rsp_pending || req_pending(s)) {
        queue_watch(s, path, token);
    } else {
        deliver_watch(s, path, token);
        /*
         * If the message was queued because there was already ring activity,
         * no need to wake the guest. But if not, we need to send the evtchn.
         */
        xen_be_evtchn_notify(s->eh, s->be_port);
    }
}
static void process_watch_events(XenXenstoreState *s)
{
    struct watch_event *ev = s->watch_events->data;

    deliver_watch(s, ev->path, ev->token);

    s->watch_events = g_list_remove(s->watch_events, ev);
    free_watch_event(ev);
}
static void xen_xenstore_event(void *opaque)
{
    XenXenstoreState *s = opaque;
    evtchn_port_t port = xen_be_evtchn_pending(s->eh);
    unsigned int copied_to, copied_from;
    bool processed, notify = false;

    if (port != s->be_port) {
        return;
    }

    /* We know this is a no-op. */
    xen_be_evtchn_unmask(s->eh, port);

    do {
        copied_to = copied_from = 0;
        processed = false;

        if (!s->rsp_pending && s->watch_events) {
            process_watch_events(s);
        }

        if (s->rsp_pending) {
            copied_to = put_rsp(s);
        }

        if (!req_pending(s)) {
            copied_from = get_req(s);
        }

        if (req_pending(s) && !s->rsp_pending && !s->watch_events) {
            process_req(s);
            processed = true;
        }

        notify |= copied_to || copied_from;
    } while (copied_to || copied_from || processed);

    if (notify) {
        xen_be_evtchn_notify(s->eh, s->be_port);
    }
}
static void alloc_guest_port(XenXenstoreState *s)
{
    struct evtchn_alloc_unbound alloc = {
        .dom = DOMID_SELF,
        .remote_dom = DOMID_QEMU,
    };

    if (!xen_evtchn_alloc_unbound_op(&alloc)) {
        s->guest_port = alloc.port;
    }
}
int xen_xenstore_reset(void)
{
    XenXenstoreState *s = xen_xenstore_singleton;
    int err;

    s->req_offset = s->rsp_offset = 0;
    s->rsp_pending = false;

    if (!memory_region_is_mapped(&s->xenstore_page)) {
        uint64_t gpa = XEN_SPECIAL_PFN(XENSTORE) << TARGET_PAGE_BITS;
        xen_overlay_do_map_page(&s->xenstore_page, gpa);
    }

    alloc_guest_port(s);

    /*
     * As qemu/dom0, bind to the guest's port. For incoming migration, this
     * will be unbound as the guest's evtchn table is overwritten. We then
     * rebind to the correct guest port in xen_xenstore_post_load().
     */
    err = xen_be_evtchn_bind_interdomain(s->eh, xen_domid, s->guest_port);
    if (err < 0) {
        return err;
    }
    s->be_port = err;

    /*
     * We don't actually access the guest's page through the grant, because
     * this isn't real Xen, and we can just use the page we gave it in the
     * first place. Map the grant anyway, mostly for cosmetic purposes so
     * it *looks* like it's in use in the guest-visible grant table.
     */
    s->gt = qemu_xen_gnttab_open();
    uint32_t xs_gntref = GNTTAB_RESERVED_XENSTORE;
    s->granted_xs = qemu_xen_gnttab_map_refs(s->gt, 1, xen_domid, &xs_gntref,
                                             PROT_READ | PROT_WRITE);

    return 0;
}
struct qemu_xs_handle {
    XenstoreImplState *impl;
    GList *watches;
    QEMUBH *watch_bh;
};

struct qemu_xs_watch {
    struct qemu_xs_handle *h;
    char *path;
    xs_watch_fn fn;
    void *opaque;
    GList *events;
};
static char *xs_be_get_domain_path(struct qemu_xs_handle *h, unsigned int domid)
{
    return g_strdup_printf("/local/domain/%u", domid);
}
static char **xs_be_directory(struct qemu_xs_handle *h, xs_transaction_t t,
                              const char *path, unsigned int *num)
{
    GList *items = NULL, *l;
    unsigned int i = 0;
    char **items_ret;
    int err;

    err = xs_impl_directory(h->impl, DOMID_QEMU, t, path, NULL, &items);
    if (err) {
        errno = err;
        return NULL;
    }

    items_ret = g_new0(char *, g_list_length(items) + 1);
    *num = 0;
    for (l = items; l; l = l->next) {
        items_ret[i++] = l->data;
        (*num)++;
    }
    g_list_free(items);
    return items_ret;
}
static void *xs_be_read(struct qemu_xs_handle *h, xs_transaction_t t,
                        const char *path, unsigned int *len)
{
    GByteArray *data = g_byte_array_new();
    bool free_segment = false;
    int err;

    err = xs_impl_read(h->impl, DOMID_QEMU, t, path, data);
    if (err) {
        free_segment = true;
        errno = err;
    } else {
        if (len) {
            *len = data->len;
        }
        /* The xen-bus-helper code expects to get NUL terminated string! */
        g_byte_array_append(data, (void *)"", 1);
    }

    return g_byte_array_free(data, free_segment);
}
static bool xs_be_write(struct qemu_xs_handle *h, xs_transaction_t t,
                        const char *path, const void *data, unsigned int len)
{
    GByteArray *gdata = g_byte_array_new();
    int err;

    g_byte_array_append(gdata, data, len);
    err = xs_impl_write(h->impl, DOMID_QEMU, t, path, gdata);
    g_byte_array_unref(gdata);
    if (err) {
        errno = err;
        return false;
    }
    return true;
}
static bool xs_be_create(struct qemu_xs_handle *h, xs_transaction_t t,
                         unsigned int owner, unsigned int domid,
                         unsigned int perms, const char *path)
{
    g_autoptr(GByteArray) data = g_byte_array_new();
    GList *perms_list = NULL;
    int err;

    /* mkdir does this */
    err = xs_impl_read(h->impl, DOMID_QEMU, t, path, data);
    if (err == ENOENT) {
        err = xs_impl_write(h->impl, DOMID_QEMU, t, path, data);
    }
    if (err) {
        errno = err;
        return false;
    }

    perms_list = g_list_append(perms_list,
                               xs_perm_as_string(XS_PERM_NONE, owner));
    perms_list = g_list_append(perms_list,
                               xs_perm_as_string(perms, domid));

    err = xs_impl_set_perms(h->impl, DOMID_QEMU, t, path, perms_list);
    g_list_free_full(perms_list, g_free);
    if (err) {
        errno = err;
        return false;
    }
    return true;
}
static bool xs_be_destroy(struct qemu_xs_handle *h, xs_transaction_t t,
                          const char *path)
{
    int err = xs_impl_rm(h->impl, DOMID_QEMU, t, path);
    if (err) {
        errno = err;
        return false;
    }
    return true;
}
static void be_watch_bh(void *_h)
{
    struct qemu_xs_handle *h = _h;
    GList *l;

    for (l = h->watches; l; l = l->next) {
        struct qemu_xs_watch *w = l->data;

        while (w->events) {
            struct watch_event *ev = w->events->data;

            w->fn(w->opaque, ev->path);

            w->events = g_list_remove(w->events, ev);
            free_watch_event(ev);
        }
    }
}
static void xs_be_watch_cb(void *opaque, const char *path, const char *token)
{
    struct watch_event *ev = g_new0(struct watch_event, 1);
    struct qemu_xs_watch *w = opaque;

    /* We don't care about the token */
    ev->path = g_strdup(path);
    w->events = g_list_append(w->events, ev);

    qemu_bh_schedule(w->h->watch_bh);
}
static struct qemu_xs_watch *xs_be_watch(struct qemu_xs_handle *h,
                                         const char *path, xs_watch_fn fn,
                                         void *opaque)
{
    struct qemu_xs_watch *w = g_new0(struct qemu_xs_watch, 1);
    int err;

    w->h = h;
    w->fn = fn;
    w->opaque = opaque;

    err = xs_impl_watch(h->impl, DOMID_QEMU, path, NULL, xs_be_watch_cb, w);
    if (err) {
        errno = err;
        g_free(w);
        return NULL;
    }

    w->path = g_strdup(path);
    h->watches = g_list_append(h->watches, w);
    return w;
}
static void xs_be_unwatch(struct qemu_xs_handle *h, struct qemu_xs_watch *w)
{
    xs_impl_unwatch(h->impl, DOMID_QEMU, w->path, NULL, xs_be_watch_cb, w);

    h->watches = g_list_remove(h->watches, w);
    g_list_free_full(w->events, (GDestroyNotify)free_watch_event);
    g_free(w->path);
    g_free(w);
}
static xs_transaction_t xs_be_transaction_start(struct qemu_xs_handle *h)
{
    unsigned int new_tx = XBT_NULL;
    int err = xs_impl_transaction_start(h->impl, DOMID_QEMU, &new_tx);
    if (err) {
        errno = err;
        return XBT_NULL;
    }
    return new_tx;
}
static bool xs_be_transaction_end(struct qemu_xs_handle *h, xs_transaction_t t,
                                  bool abort)
{
    int err = xs_impl_transaction_end(h->impl, DOMID_QEMU, t, !abort);
    if (err) {
        errno = err;
        return false;
    }
    return true;
}
static struct qemu_xs_handle *xs_be_open(void)
{
    XenXenstoreState *s = xen_xenstore_singleton;
    struct qemu_xs_handle *h;

    if (!s || !s->impl) {
        errno = ENOSYS;
        return NULL;
    }

    h = g_new0(struct qemu_xs_handle, 1);
    h->impl = s->impl;

    h->watch_bh = aio_bh_new(qemu_get_aio_context(), be_watch_bh, h);

    return h;
}
static void xs_be_close(struct qemu_xs_handle *h)
{
    while (h->watches) {
        struct qemu_xs_watch *w = h->watches->data;
        xs_be_unwatch(h, w);
    }

    qemu_bh_delete(h->watch_bh);
    g_free(h);
}
static struct xenstore_backend_ops emu_xenstore_backend_ops = {
    .open = xs_be_open,
    .close = xs_be_close,
    .get_domain_path = xs_be_get_domain_path,
    .directory = xs_be_directory,
    .read = xs_be_read,
    .write = xs_be_write,
    .create = xs_be_create,
    .destroy = xs_be_destroy,
    .watch = xs_be_watch,
    .unwatch = xs_be_unwatch,
    .transaction_start = xs_be_transaction_start,
    .transaction_end = xs_be_transaction_end,
};