//\r
// Xen interface version used by Tianocore\r
//\r
-#define __XEN_INTERFACE_VERSION__ 0x00040400\r
+#define __XEN_INTERFACE_VERSION__ 0x00040400\r
\r
#include "xen-compat.h"\r
\r
-#if defined(MDE_CPU_IA32) || defined(MDE_CPU_X64)\r
-#include "arch-x86/xen.h"\r
-#elif defined(__arm__) || defined (__aarch64__)\r
-#include "arch-arm/xen.h"\r
+#if defined (MDE_CPU_IA32) || defined (MDE_CPU_X64)\r
+ #include "arch-x86/xen.h"\r
+#elif defined (__arm__) || defined (__aarch64__)\r
+ #include "arch-arm/xen.h"\r
#else\r
-#error "Unsupported architecture"\r
+ #error "Unsupported architecture"\r
#endif\r
\r
#ifndef __ASSEMBLY__\r
/* Guest handles for primitive C types. */\r
-DEFINE_XEN_GUEST_HANDLE(CHAR8);\r
-__DEFINE_XEN_GUEST_HANDLE(uchar, UINT8);\r
-DEFINE_XEN_GUEST_HANDLE(INT32);\r
-__DEFINE_XEN_GUEST_HANDLE(uint, UINT32);\r
-#if __XEN_INTERFACE_VERSION__ < 0x00040300\r
-DEFINE_XEN_GUEST_HANDLE(INTN);\r
-__DEFINE_XEN_GUEST_HANDLE(ulong, UINTN);\r
-#endif\r
-DEFINE_XEN_GUEST_HANDLE(VOID);\r
-\r
-DEFINE_XEN_GUEST_HANDLE(UINT64);\r
-DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);\r
-DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);\r
+DEFINE_XEN_GUEST_HANDLE (CHAR8);\r
+__DEFINE_XEN_GUEST_HANDLE (uchar, UINT8);\r
+DEFINE_XEN_GUEST_HANDLE (INT32);\r
+__DEFINE_XEN_GUEST_HANDLE (uint, UINT32);\r
+ #if __XEN_INTERFACE_VERSION__ < 0x00040300\r
+DEFINE_XEN_GUEST_HANDLE (INTN);\r
+__DEFINE_XEN_GUEST_HANDLE (ulong, UINTN);\r
+ #endif\r
+DEFINE_XEN_GUEST_HANDLE (VOID);\r
+\r
+DEFINE_XEN_GUEST_HANDLE (UINT64);\r
+DEFINE_XEN_GUEST_HANDLE (xen_pfn_t);\r
+DEFINE_XEN_GUEST_HANDLE (xen_ulong_t);\r
#endif\r
\r
/*\r
* ` enum hypercall_num { // __HYPERVISOR_* => HYPERVISOR_*()\r
*/\r
\r
-#define __HYPERVISOR_set_trap_table 0\r
-#define __HYPERVISOR_mmu_update 1\r
-#define __HYPERVISOR_set_gdt 2\r
-#define __HYPERVISOR_stack_switch 3\r
-#define __HYPERVISOR_set_callbacks 4\r
-#define __HYPERVISOR_fpu_taskswitch 5\r
-#define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */\r
-#define __HYPERVISOR_platform_op 7\r
-#define __HYPERVISOR_set_debugreg 8\r
-#define __HYPERVISOR_get_debugreg 9\r
-#define __HYPERVISOR_update_descriptor 10\r
-#define __HYPERVISOR_memory_op 12\r
-#define __HYPERVISOR_multicall 13\r
-#define __HYPERVISOR_update_va_mapping 14\r
-#define __HYPERVISOR_set_timer_op 15\r
-#define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */\r
-#define __HYPERVISOR_xen_version 17\r
-#define __HYPERVISOR_console_io 18\r
-#define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */\r
-#define __HYPERVISOR_grant_table_op 20\r
-#define __HYPERVISOR_vm_assist 21\r
-#define __HYPERVISOR_update_va_mapping_otherdomain 22\r
-#define __HYPERVISOR_iret 23 /* x86 only */\r
-#define __HYPERVISOR_vcpu_op 24\r
-#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */\r
-#define __HYPERVISOR_mmuext_op 26\r
-#define __HYPERVISOR_xsm_op 27\r
-#define __HYPERVISOR_nmi_op 28\r
-#define __HYPERVISOR_sched_op 29\r
-#define __HYPERVISOR_callback_op 30\r
-#define __HYPERVISOR_xenoprof_op 31\r
-#define __HYPERVISOR_event_channel_op 32\r
-#define __HYPERVISOR_physdev_op 33\r
-#define __HYPERVISOR_hvm_op 34\r
-#define __HYPERVISOR_sysctl 35\r
-#define __HYPERVISOR_domctl 36\r
-#define __HYPERVISOR_kexec_op 37\r
-#define __HYPERVISOR_tmem_op 38\r
-#define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */\r
+#define __HYPERVISOR_set_trap_table 0\r
+#define __HYPERVISOR_mmu_update 1\r
+#define __HYPERVISOR_set_gdt 2\r
+#define __HYPERVISOR_stack_switch 3\r
+#define __HYPERVISOR_set_callbacks 4\r
+#define __HYPERVISOR_fpu_taskswitch 5\r
+#define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */
+#define __HYPERVISOR_platform_op 7\r
+#define __HYPERVISOR_set_debugreg 8\r
+#define __HYPERVISOR_get_debugreg 9\r
+#define __HYPERVISOR_update_descriptor 10\r
+#define __HYPERVISOR_memory_op 12\r
+#define __HYPERVISOR_multicall 13\r
+#define __HYPERVISOR_update_va_mapping 14\r
+#define __HYPERVISOR_set_timer_op 15\r
+#define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */
+#define __HYPERVISOR_xen_version 17\r
+#define __HYPERVISOR_console_io 18\r
+#define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */
+#define __HYPERVISOR_grant_table_op 20\r
+#define __HYPERVISOR_vm_assist 21\r
+#define __HYPERVISOR_update_va_mapping_otherdomain 22\r
+#define __HYPERVISOR_iret 23 /* x86 only */
+#define __HYPERVISOR_vcpu_op 24\r
+#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
+#define __HYPERVISOR_mmuext_op 26\r
+#define __HYPERVISOR_xsm_op 27\r
+#define __HYPERVISOR_nmi_op 28\r
+#define __HYPERVISOR_sched_op 29\r
+#define __HYPERVISOR_callback_op 30\r
+#define __HYPERVISOR_xenoprof_op 31\r
+#define __HYPERVISOR_event_channel_op 32\r
+#define __HYPERVISOR_physdev_op 33\r
+#define __HYPERVISOR_hvm_op 34\r
+#define __HYPERVISOR_sysctl 35\r
+#define __HYPERVISOR_domctl 36\r
+#define __HYPERVISOR_kexec_op 37\r
+#define __HYPERVISOR_tmem_op 38\r
+#define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */
\r
/* Architecture-specific hypercall definitions. */\r
-#define __HYPERVISOR_arch_0 48\r
-#define __HYPERVISOR_arch_1 49\r
-#define __HYPERVISOR_arch_2 50\r
-#define __HYPERVISOR_arch_3 51\r
-#define __HYPERVISOR_arch_4 52\r
-#define __HYPERVISOR_arch_5 53\r
-#define __HYPERVISOR_arch_6 54\r
-#define __HYPERVISOR_arch_7 55\r
+#define __HYPERVISOR_arch_0 48\r
+#define __HYPERVISOR_arch_1 49\r
+#define __HYPERVISOR_arch_2 50\r
+#define __HYPERVISOR_arch_3 51\r
+#define __HYPERVISOR_arch_4 52\r
+#define __HYPERVISOR_arch_5 53\r
+#define __HYPERVISOR_arch_6 54\r
+#define __HYPERVISOR_arch_7 55\r
\r
/* ` } */\r
\r
\r
/* New sched_op hypercall introduced in 0x00030101. */\r
#if __XEN_INTERFACE_VERSION__ < 0x00030101\r
-#undef __HYPERVISOR_sched_op\r
-#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat\r
+ #undef __HYPERVISOR_sched_op\r
+#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat\r
#endif\r
\r
/* New event-channel and physdev hypercalls introduced in 0x00030202. */\r
#if __XEN_INTERFACE_VERSION__ < 0x00030202\r
-#undef __HYPERVISOR_event_channel_op\r
-#define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat\r
-#undef __HYPERVISOR_physdev_op\r
-#define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat\r
+ #undef __HYPERVISOR_event_channel_op\r
+#define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat\r
+ #undef __HYPERVISOR_physdev_op\r
+#define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat\r
#endif\r
\r
/* New platform_op hypercall introduced in 0x00030204. */\r
#if __XEN_INTERFACE_VERSION__ < 0x00030204\r
-#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op\r
+#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op\r
#endif\r
\r
#ifndef __ASSEMBLY__\r
typedef UINT16 domid_t;\r
\r
/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */\r
-#define DOMID_FIRST_RESERVED (0x7FF0U)\r
+#define DOMID_FIRST_RESERVED (0x7FF0U)\r
\r
/* DOMID_SELF is used in certain contexts to refer to oneself. */\r
-#define DOMID_SELF (0x7FF0U)\r
+#define DOMID_SELF (0x7FF0U)\r
\r
/*\r
* DOMID_IO is used to restrict page-table updates to mapping I/O memory.\r
* This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can\r
* be specified by any calling domain.\r
*/\r
-#define DOMID_IO (0x7FF1U)\r
+#define DOMID_IO (0x7FF1U)\r
\r
/*\r
* DOMID_XEN is used to allow privileged domains to map restricted parts of\r
#define DOMID_COW (0x7FF3U)\r
\r
/* DOMID_INVALID is used to identify pages with unknown owner. */\r
-#define DOMID_INVALID (0x7FF4U)\r
+#define DOMID_INVALID (0x7FF4U)\r
\r
/* Idle domain. */\r
-#define DOMID_IDLE (0x7FFFU)\r
+#define DOMID_IDLE (0x7FFFU)\r
+\r
+ #if __XEN_INTERFACE_VERSION__ < 0x00040400\r
\r
-#if __XEN_INTERFACE_VERSION__ < 0x00040400\r
/*\r
* Event channel endpoints per domain (when using the 2-level ABI):\r
* 1024 if a INTN is 32 bits; 4096 if a INTN is 64 bits.\r
*/\r
-#define NR_EVENT_CHANNELS EVTCHN_2L_NR_CHANNELS\r
-#endif\r
+#define NR_EVENT_CHANNELS EVTCHN_2L_NR_CHANNELS\r
+ #endif\r
\r
struct vcpu_time_info {\r
- /*\r
- * Updates to the following values are preceded and followed by an\r
- * increment of 'version'. The guest can therefore detect updates by\r
- * looking for changes to 'version'. If the least-significant bit of\r
- * the version number is set then an update is in progress and the guest\r
- * must wait to read a consistent set of values.\r
- * The correct way to interact with the version number is similar to\r
- * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry.\r
- */\r
- UINT32 Version;\r
- UINT32 pad0;\r
- UINT64 TscTimestamp; /* TSC at last update of time vals. */\r
- UINT64 SystemTime; /* Time, in nanosecs, since boot. */\r
- /*\r
- * Current system time:\r
- * system_time +\r
- * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32)\r
- * CPU frequency (Hz):\r
- * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift\r
- */\r
- UINT32 TscToSystemMultiplier;\r
- INT8 TscShift;\r
- INT8 pad1[3];\r
+ /*\r
+ * Updates to the following values are preceded and followed by an\r
+ * increment of 'version'. The guest can therefore detect updates by\r
+ * looking for changes to 'version'. If the least-significant bit of\r
+ * the version number is set then an update is in progress and the guest\r
+ * must wait to read a consistent set of values.\r
+ * The correct way to interact with the version number is similar to\r
+ * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry.\r
+ */\r
+ UINT32 Version;\r
+ UINT32 pad0;\r
+ UINT64 TscTimestamp; /* TSC at last update of time vals. */\r
+ UINT64 SystemTime; /* Time, in nanosecs, since boot. */\r
+\r
+ /*\r
+ * Current system time:\r
+ * system_time +\r
+ * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32)\r
+ * CPU frequency (Hz):\r
+ * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift\r
+ */\r
+ UINT32 TscToSystemMultiplier;\r
+ INT8 TscShift;\r
+ INT8 pad1[3];\r
}; /* 32 bytes */\r
+\r
typedef struct vcpu_time_info XEN_VCPU_TIME_INFO;\r
\r
struct vcpu_info {\r
- /*\r
- * 'evtchn_upcall_pending' is written non-zero by Xen to indicate\r
- * a pending notification for a particular VCPU. It is then cleared\r
- * by the guest OS /before/ checking for pending work, thus avoiding\r
- * a set-and-check race. Note that the mask is only accessed by Xen\r
- * on the CPU that is currently hosting the VCPU. This means that the\r
- * pending and mask flags can be updated by the guest without special\r
- * synchronisation (i.e., no need for the x86 LOCK prefix).\r
- * This may seem suboptimal because if the pending flag is set by\r
- * a different CPU then an IPI may be scheduled even when the mask\r
- * is set. However, note:\r
- * 1. The task of 'interrupt holdoff' is covered by the per-event-\r
- * channel mask bits. A 'noisy' event that is continually being\r
- * triggered can be masked at source at this very precise\r
- * granularity.\r
- * 2. The main purpose of the per-VCPU mask is therefore to restrict\r
- * reentrant execution: whether for concurrency control, or to\r
- * prevent unbounded stack usage. Whatever the purpose, we expect\r
- * that the mask will be asserted only for short periods at a time,\r
- * and so the likelihood of a 'spurious' IPI is suitably small.\r
- * The mask is read before making an event upcall to the guest: a\r
- * non-zero mask therefore guarantees that the VCPU will not receive\r
- * an upcall activation. The mask is cleared when the VCPU requests\r
- * to block: this avoids wakeup-waiting races.\r
- */\r
- UINT8 evtchn_upcall_pending;\r
-#ifdef XEN_HAVE_PV_UPCALL_MASK\r
- UINT8 evtchn_upcall_mask;\r
-#else /* XEN_HAVE_PV_UPCALL_MASK */\r
- UINT8 pad0;\r
-#endif /* XEN_HAVE_PV_UPCALL_MASK */\r
- xen_ulong_t evtchn_pending_sel;\r
- struct arch_vcpu_info arch;\r
- struct vcpu_time_info Time;\r
+ /*\r
+ * 'evtchn_upcall_pending' is written non-zero by Xen to indicate\r
+ * a pending notification for a particular VCPU. It is then cleared\r
+ * by the guest OS /before/ checking for pending work, thus avoiding\r
+ * a set-and-check race. Note that the mask is only accessed by Xen\r
+ * on the CPU that is currently hosting the VCPU. This means that the\r
+ * pending and mask flags can be updated by the guest without special\r
+ * synchronisation (i.e., no need for the x86 LOCK prefix).\r
+ * This may seem suboptimal because if the pending flag is set by\r
+ * a different CPU then an IPI may be scheduled even when the mask\r
+ * is set. However, note:\r
+ * 1. The task of 'interrupt holdoff' is covered by the per-event-\r
+ * channel mask bits. A 'noisy' event that is continually being\r
+ * triggered can be masked at source at this very precise\r
+ * granularity.\r
+ * 2. The main purpose of the per-VCPU mask is therefore to restrict\r
+ * reentrant execution: whether for concurrency control, or to\r
+ * prevent unbounded stack usage. Whatever the purpose, we expect\r
+ * that the mask will be asserted only for short periods at a time,\r
+ * and so the likelihood of a 'spurious' IPI is suitably small.\r
+ * The mask is read before making an event upcall to the guest: a\r
+ * non-zero mask therefore guarantees that the VCPU will not receive\r
+ * an upcall activation. The mask is cleared when the VCPU requests\r
+ * to block: this avoids wakeup-waiting races.\r
+ */\r
+ UINT8 evtchn_upcall_pending;\r
+ #ifdef XEN_HAVE_PV_UPCALL_MASK\r
+ UINT8 evtchn_upcall_mask;\r
+ #else /* XEN_HAVE_PV_UPCALL_MASK */\r
+ UINT8 pad0;\r
+ #endif /* XEN_HAVE_PV_UPCALL_MASK */\r
+ xen_ulong_t evtchn_pending_sel;\r
+ struct arch_vcpu_info arch;\r
+ struct vcpu_time_info Time;\r
}; /* 64 bytes (x86) */\r
-#ifndef __XEN__\r
+\r
+ #ifndef __XEN__\r
typedef struct vcpu_info vcpu_info_t;\r
-#endif\r
+ #endif\r
\r
/*\r
* `incontents 200 startofday_shared Start-of-day shared data structure\r
* of this structure remaining constant.\r
*/\r
struct shared_info {\r
- struct vcpu_info VcpuInfo[XEN_LEGACY_MAX_VCPUS];\r
-\r
- /*\r
- * A domain can create "event channels" on which it can send and receive\r
- * asynchronous event notifications. There are three classes of event that\r
- * are delivered by this mechanism:\r
- * 1. Bi-directional inter- and intra-domain connections. Domains must\r
- * arrange out-of-band to set up a connection (usually by allocating\r
- * an unbound 'listener' port and avertising that via a storage service\r
- * such as xenstore).\r
- * 2. Physical interrupts. A domain with suitable hardware-access\r
- * privileges can bind an event-channel port to a physical interrupt\r
- * source.\r
- * 3. Virtual interrupts ('events'). A domain can bind an event-channel\r
- * port to a virtual interrupt source, such as the virtual-timer\r
- * device or the emergency console.\r
- *\r
- * Event channels are addressed by a "port index". Each channel is\r
- * associated with two bits of information:\r
- * 1. PENDING -- notifies the domain that there is a pending notification\r
- * to be processed. This bit is cleared by the guest.\r
- * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING\r
- * will cause an asynchronous upcall to be scheduled. This bit is only\r
- * updated by the guest. It is read-only within Xen. If a channel\r
- * becomes pending while the channel is masked then the 'edge' is lost\r
- * (i.e., when the channel is unmasked, the guest must manually handle\r
- * pending notifications as no upcall will be scheduled by Xen).\r
- *\r
- * To expedite scanning of pending notifications, any 0->1 pending\r
- * transition on an unmasked channel causes a corresponding bit in a\r
- * per-vcpu selector word to be set. Each bit in the selector covers a\r
- * 'C INTN' in the PENDING bitfield array.\r
- */\r
- xen_ulong_t evtchn_pending[sizeof(xen_ulong_t) * 8];\r
- xen_ulong_t evtchn_mask[sizeof(xen_ulong_t) * 8];\r
-\r
- /*\r
- * Wallclock time: updated only by control software. Guests should base\r
- * their gettimeofday() syscall on this wallclock-base value.\r
- */\r
- UINT32 wc_version; /* Version counter: see vcpu_time_info_t. */\r
- UINT32 wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */\r
- UINT32 wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */\r
-\r
- struct arch_shared_info arch;\r
-\r
+ struct vcpu_info VcpuInfo[XEN_LEGACY_MAX_VCPUS];\r
+\r
+ /*\r
+ * A domain can create "event channels" on which it can send and receive\r
+ * asynchronous event notifications. There are three classes of event that\r
+ * are delivered by this mechanism:\r
+ * 1. Bi-directional inter- and intra-domain connections. Domains must\r
+ * arrange out-of-band to set up a connection (usually by allocating\r
+ * an unbound 'listener' port and advertising that via a storage service
+ * such as xenstore).\r
+ * 2. Physical interrupts. A domain with suitable hardware-access\r
+ * privileges can bind an event-channel port to a physical interrupt\r
+ * source.\r
+ * 3. Virtual interrupts ('events'). A domain can bind an event-channel\r
+ * port to a virtual interrupt source, such as the virtual-timer\r
+ * device or the emergency console.\r
+ *\r
+ * Event channels are addressed by a "port index". Each channel is\r
+ * associated with two bits of information:\r
+ * 1. PENDING -- notifies the domain that there is a pending notification\r
+ * to be processed. This bit is cleared by the guest.\r
+ * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING\r
+ * will cause an asynchronous upcall to be scheduled. This bit is only\r
+ * updated by the guest. It is read-only within Xen. If a channel\r
+ * becomes pending while the channel is masked then the 'edge' is lost\r
+ * (i.e., when the channel is unmasked, the guest must manually handle\r
+ * pending notifications as no upcall will be scheduled by Xen).\r
+ *\r
+ * To expedite scanning of pending notifications, any 0->1 pending\r
+ * transition on an unmasked channel causes a corresponding bit in a\r
+ * per-vcpu selector word to be set. Each bit in the selector covers a\r
+ * 'C INTN' in the PENDING bitfield array.\r
+ */\r
+ xen_ulong_t evtchn_pending[sizeof (xen_ulong_t) * 8];\r
+ xen_ulong_t evtchn_mask[sizeof (xen_ulong_t) * 8];\r
+\r
+ /*\r
+ * Wallclock time: updated only by control software. Guests should base\r
+ * their gettimeofday() syscall on this wallclock-base value.\r
+ */\r
+ UINT32 wc_version; /* Version counter: see vcpu_time_info_t. */\r
+ UINT32 wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */\r
+ UINT32 wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */\r
+\r
+ struct arch_shared_info arch;\r
};\r
-#ifndef __XEN__\r
+\r
+ #ifndef __XEN__\r
typedef struct shared_info shared_info_t;\r
typedef struct shared_info XEN_SHARED_INFO;\r
-#endif\r
+ #endif\r
\r
/* Turn a plain number into a C UINTN constant. */\r
-#define __mk_unsigned_long(x) x ## UL\r
-#define mk_unsigned_long(x) __mk_unsigned_long(x)\r
+#define __mk_unsigned_long(x) x ## UL\r
+#define mk_unsigned_long(x) __mk_unsigned_long(x)\r
\r
-__DEFINE_XEN_GUEST_HANDLE(uint8, UINT8);\r
-__DEFINE_XEN_GUEST_HANDLE(uint16, UINT16);\r
-__DEFINE_XEN_GUEST_HANDLE(uint32, UINT32);\r
-__DEFINE_XEN_GUEST_HANDLE(uint64, UINT64);\r
+__DEFINE_XEN_GUEST_HANDLE (uint8, UINT8);\r
+__DEFINE_XEN_GUEST_HANDLE (uint16, UINT16);\r
+__DEFINE_XEN_GUEST_HANDLE (uint32, UINT32);\r
+__DEFINE_XEN_GUEST_HANDLE (uint64, UINT64);\r
\r
#else /* __ASSEMBLY__ */\r
\r
/* In assembly code we cannot use C numeric constant suffixes. */\r
-#define mk_unsigned_long(x) x\r
+#define mk_unsigned_long(x) x\r
\r
#endif /* !__ASSEMBLY__ */\r
\r