/******************************************************************************
 * xen.h
 *
 * Guest OS interface to Xen.
 *
 * SPDX-License-Identifier: MIT
 *
 * Copyright (c) 2004, K A Fraser
 */
10 | \r | |
11 | #ifndef __XEN_PUBLIC_XEN_H__\r | |
12 | #define __XEN_PUBLIC_XEN_H__\r | |
13 | \r | |
0ac10d1d AB |
14 | //\r |
15 | // Xen interface version used by Tianocore\r | |
16 | //\r | |
17 | #define __XEN_INTERFACE_VERSION__ 0x00040400\r | |
18 | \r | |
6b621f95 AP |
19 | #include "xen-compat.h"\r |
20 | \r | |
8f148aee | 21 | #if defined(MDE_CPU_IA32) || defined(MDE_CPU_X64)\r |
6b621f95 AP |
22 | #include "arch-x86/xen.h"\r |
23 | #elif defined(__arm__) || defined (__aarch64__)\r | |
b94c3ac9 | 24 | #include "arch-arm/xen.h"\r |
6b621f95 AP |
25 | #else\r |
26 | #error "Unsupported architecture"\r | |
27 | #endif\r | |
28 | \r | |
29 | #ifndef __ASSEMBLY__\r | |
30 | /* Guest handles for primitive C types. */\r | |
31 | DEFINE_XEN_GUEST_HANDLE(CHAR8);\r | |
32 | __DEFINE_XEN_GUEST_HANDLE(uchar, UINT8);\r | |
33 | DEFINE_XEN_GUEST_HANDLE(INT32);\r | |
34 | __DEFINE_XEN_GUEST_HANDLE(uint, UINT32);\r | |
35 | #if __XEN_INTERFACE_VERSION__ < 0x00040300\r | |
36 | DEFINE_XEN_GUEST_HANDLE(INTN);\r | |
37 | __DEFINE_XEN_GUEST_HANDLE(ulong, UINTN);\r | |
38 | #endif\r | |
39 | DEFINE_XEN_GUEST_HANDLE(VOID);\r | |
40 | \r | |
41 | DEFINE_XEN_GUEST_HANDLE(UINT64);\r | |
42 | DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);\r | |
43 | DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);\r | |
44 | #endif\r | |
45 | \r | |
46 | /*\r | |
47 | * HYPERCALLS\r | |
48 | */\r | |
49 | \r | |
50 | /* `incontents 100 hcalls List of hypercalls\r | |
51 | * ` enum hypercall_num { // __HYPERVISOR_* => HYPERVISOR_*()\r | |
52 | */\r | |
53 | \r | |
54 | #define __HYPERVISOR_set_trap_table 0\r | |
55 | #define __HYPERVISOR_mmu_update 1\r | |
56 | #define __HYPERVISOR_set_gdt 2\r | |
57 | #define __HYPERVISOR_stack_switch 3\r | |
58 | #define __HYPERVISOR_set_callbacks 4\r | |
59 | #define __HYPERVISOR_fpu_taskswitch 5\r | |
60 | #define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */\r | |
61 | #define __HYPERVISOR_platform_op 7\r | |
62 | #define __HYPERVISOR_set_debugreg 8\r | |
63 | #define __HYPERVISOR_get_debugreg 9\r | |
64 | #define __HYPERVISOR_update_descriptor 10\r | |
65 | #define __HYPERVISOR_memory_op 12\r | |
66 | #define __HYPERVISOR_multicall 13\r | |
67 | #define __HYPERVISOR_update_va_mapping 14\r | |
68 | #define __HYPERVISOR_set_timer_op 15\r | |
69 | #define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */\r | |
70 | #define __HYPERVISOR_xen_version 17\r | |
71 | #define __HYPERVISOR_console_io 18\r | |
72 | #define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */\r | |
73 | #define __HYPERVISOR_grant_table_op 20\r | |
74 | #define __HYPERVISOR_vm_assist 21\r | |
75 | #define __HYPERVISOR_update_va_mapping_otherdomain 22\r | |
76 | #define __HYPERVISOR_iret 23 /* x86 only */\r | |
77 | #define __HYPERVISOR_vcpu_op 24\r | |
78 | #define __HYPERVISOR_set_segment_base 25 /* x86/64 only */\r | |
79 | #define __HYPERVISOR_mmuext_op 26\r | |
80 | #define __HYPERVISOR_xsm_op 27\r | |
81 | #define __HYPERVISOR_nmi_op 28\r | |
82 | #define __HYPERVISOR_sched_op 29\r | |
83 | #define __HYPERVISOR_callback_op 30\r | |
84 | #define __HYPERVISOR_xenoprof_op 31\r | |
85 | #define __HYPERVISOR_event_channel_op 32\r | |
86 | #define __HYPERVISOR_physdev_op 33\r | |
87 | #define __HYPERVISOR_hvm_op 34\r | |
88 | #define __HYPERVISOR_sysctl 35\r | |
89 | #define __HYPERVISOR_domctl 36\r | |
90 | #define __HYPERVISOR_kexec_op 37\r | |
91 | #define __HYPERVISOR_tmem_op 38\r | |
92 | #define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */\r | |
93 | \r | |
94 | /* Architecture-specific hypercall definitions. */\r | |
95 | #define __HYPERVISOR_arch_0 48\r | |
96 | #define __HYPERVISOR_arch_1 49\r | |
97 | #define __HYPERVISOR_arch_2 50\r | |
98 | #define __HYPERVISOR_arch_3 51\r | |
99 | #define __HYPERVISOR_arch_4 52\r | |
100 | #define __HYPERVISOR_arch_5 53\r | |
101 | #define __HYPERVISOR_arch_6 54\r | |
102 | #define __HYPERVISOR_arch_7 55\r | |
103 | \r | |
104 | /* ` } */\r | |
105 | \r | |
106 | /*\r | |
107 | * HYPERCALL COMPATIBILITY.\r | |
108 | */\r | |
109 | \r | |
110 | /* New sched_op hypercall introduced in 0x00030101. */\r | |
111 | #if __XEN_INTERFACE_VERSION__ < 0x00030101\r | |
112 | #undef __HYPERVISOR_sched_op\r | |
113 | #define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat\r | |
114 | #endif\r | |
115 | \r | |
116 | /* New event-channel and physdev hypercalls introduced in 0x00030202. */\r | |
117 | #if __XEN_INTERFACE_VERSION__ < 0x00030202\r | |
118 | #undef __HYPERVISOR_event_channel_op\r | |
119 | #define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat\r | |
120 | #undef __HYPERVISOR_physdev_op\r | |
121 | #define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat\r | |
122 | #endif\r | |
123 | \r | |
124 | /* New platform_op hypercall introduced in 0x00030204. */\r | |
125 | #if __XEN_INTERFACE_VERSION__ < 0x00030204\r | |
126 | #define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op\r | |
127 | #endif\r | |
128 | \r | |
129 | #ifndef __ASSEMBLY__\r | |
130 | \r | |
131 | typedef UINT16 domid_t;\r | |
132 | \r | |
133 | /* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */\r | |
134 | #define DOMID_FIRST_RESERVED (0x7FF0U)\r | |
135 | \r | |
136 | /* DOMID_SELF is used in certain contexts to refer to oneself. */\r | |
137 | #define DOMID_SELF (0x7FF0U)\r | |
138 | \r | |
139 | /*\r | |
140 | * DOMID_IO is used to restrict page-table updates to mapping I/O memory.\r | |
141 | * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO\r | |
142 | * is useful to ensure that no mappings to the OS's own heap are accidentally\r | |
143 | * installed. (e.g., in Linux this could cause havoc as reference counts\r | |
144 | * aren't adjusted on the I/O-mapping code path).\r | |
145 | * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can\r | |
146 | * be specified by any calling domain.\r | |
147 | */\r | |
148 | #define DOMID_IO (0x7FF1U)\r | |
149 | \r | |
150 | /*\r | |
151 | * DOMID_XEN is used to allow privileged domains to map restricted parts of\r | |
152 | * Xen's heap space (e.g., the machine_to_phys table).\r | |
153 | * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if\r | |
154 | * the caller is privileged.\r | |
155 | */\r | |
156 | #define DOMID_XEN (0x7FF2U)\r | |
157 | \r | |
158 | /*\r | |
159 | * DOMID_COW is used as the owner of sharable pages */\r | |
160 | #define DOMID_COW (0x7FF3U)\r | |
161 | \r | |
162 | /* DOMID_INVALID is used to identify pages with unknown owner. */\r | |
163 | #define DOMID_INVALID (0x7FF4U)\r | |
164 | \r | |
165 | /* Idle domain. */\r | |
166 | #define DOMID_IDLE (0x7FFFU)\r | |
167 | \r | |
168 | #if __XEN_INTERFACE_VERSION__ < 0x00040400\r | |
169 | /*\r | |
170 | * Event channel endpoints per domain (when using the 2-level ABI):\r | |
171 | * 1024 if a INTN is 32 bits; 4096 if a INTN is 64 bits.\r | |
172 | */\r | |
173 | #define NR_EVENT_CHANNELS EVTCHN_2L_NR_CHANNELS\r | |
174 | #endif\r | |
175 | \r | |
176 | struct vcpu_time_info {\r | |
177 | /*\r | |
178 | * Updates to the following values are preceded and followed by an\r | |
179 | * increment of 'version'. The guest can therefore detect updates by\r | |
180 | * looking for changes to 'version'. If the least-significant bit of\r | |
181 | * the version number is set then an update is in progress and the guest\r | |
182 | * must wait to read a consistent set of values.\r | |
183 | * The correct way to interact with the version number is similar to\r | |
184 | * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry.\r | |
185 | */\r | |
186 | UINT32 version;\r | |
187 | UINT32 pad0;\r | |
188 | UINT64 tsc_timestamp; /* TSC at last update of time vals. */\r | |
189 | UINT64 system_time; /* Time, in nanosecs, since boot. */\r | |
190 | /*\r | |
191 | * Current system time:\r | |
192 | * system_time +\r | |
193 | * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32)\r | |
194 | * CPU frequency (Hz):\r | |
195 | * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift\r | |
196 | */\r | |
197 | UINT32 tsc_to_system_mul;\r | |
198 | INT8 tsc_shift;\r | |
199 | INT8 pad1[3];\r | |
200 | }; /* 32 bytes */\r | |
201 | typedef struct vcpu_time_info vcpu_time_info_t;\r | |
202 | \r | |
203 | struct vcpu_info {\r | |
204 | /*\r | |
205 | * 'evtchn_upcall_pending' is written non-zero by Xen to indicate\r | |
206 | * a pending notification for a particular VCPU. It is then cleared \r | |
207 | * by the guest OS /before/ checking for pending work, thus avoiding\r | |
208 | * a set-and-check race. Note that the mask is only accessed by Xen\r | |
209 | * on the CPU that is currently hosting the VCPU. This means that the\r | |
210 | * pending and mask flags can be updated by the guest without special\r | |
211 | * synchronisation (i.e., no need for the x86 LOCK prefix).\r | |
212 | * This may seem suboptimal because if the pending flag is set by\r | |
213 | * a different CPU then an IPI may be scheduled even when the mask\r | |
214 | * is set. However, note:\r | |
215 | * 1. The task of 'interrupt holdoff' is covered by the per-event-\r | |
216 | * channel mask bits. A 'noisy' event that is continually being\r | |
217 | * triggered can be masked at source at this very precise\r | |
218 | * granularity.\r | |
219 | * 2. The main purpose of the per-VCPU mask is therefore to restrict\r | |
220 | * reentrant execution: whether for concurrency control, or to\r | |
221 | * prevent unbounded stack usage. Whatever the purpose, we expect\r | |
222 | * that the mask will be asserted only for short periods at a time,\r | |
223 | * and so the likelihood of a 'spurious' IPI is suitably small.\r | |
224 | * The mask is read before making an event upcall to the guest: a\r | |
225 | * non-zero mask therefore guarantees that the VCPU will not receive\r | |
226 | * an upcall activation. The mask is cleared when the VCPU requests\r | |
227 | * to block: this avoids wakeup-waiting races.\r | |
228 | */\r | |
229 | UINT8 evtchn_upcall_pending;\r | |
230 | #ifdef XEN_HAVE_PV_UPCALL_MASK\r | |
231 | UINT8 evtchn_upcall_mask;\r | |
232 | #else /* XEN_HAVE_PV_UPCALL_MASK */\r | |
233 | UINT8 pad0;\r | |
234 | #endif /* XEN_HAVE_PV_UPCALL_MASK */\r | |
235 | xen_ulong_t evtchn_pending_sel;\r | |
236 | struct arch_vcpu_info arch;\r | |
237 | struct vcpu_time_info time;\r | |
238 | }; /* 64 bytes (x86) */\r | |
239 | #ifndef __XEN__\r | |
240 | typedef struct vcpu_info vcpu_info_t;\r | |
241 | #endif\r | |
242 | \r | |
243 | /*\r | |
244 | * `incontents 200 startofday_shared Start-of-day shared data structure\r | |
245 | * Xen/kernel shared data -- pointer provided in start_info.\r | |
246 | *\r | |
247 | * This structure is defined to be both smaller than a page, and the\r | |
248 | * only data on the shared page, but may vary in actual size even within\r | |
249 | * compatible Xen versions; guests should not rely on the size\r | |
250 | * of this structure remaining constant.\r | |
251 | */\r | |
252 | struct shared_info {\r | |
253 | struct vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS];\r | |
254 | \r | |
255 | /*\r | |
256 | * A domain can create "event channels" on which it can send and receive\r | |
257 | * asynchronous event notifications. There are three classes of event that\r | |
258 | * are delivered by this mechanism:\r | |
259 | * 1. Bi-directional inter- and intra-domain connections. Domains must\r | |
260 | * arrange out-of-band to set up a connection (usually by allocating\r | |
261 | * an unbound 'listener' port and avertising that via a storage service\r | |
262 | * such as xenstore).\r | |
263 | * 2. Physical interrupts. A domain with suitable hardware-access\r | |
264 | * privileges can bind an event-channel port to a physical interrupt\r | |
265 | * source.\r | |
266 | * 3. Virtual interrupts ('events'). A domain can bind an event-channel\r | |
267 | * port to a virtual interrupt source, such as the virtual-timer\r | |
268 | * device or the emergency console.\r | |
269 | * \r | |
270 | * Event channels are addressed by a "port index". Each channel is\r | |
271 | * associated with two bits of information:\r | |
272 | * 1. PENDING -- notifies the domain that there is a pending notification\r | |
273 | * to be processed. This bit is cleared by the guest.\r | |
274 | * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING\r | |
275 | * will cause an asynchronous upcall to be scheduled. This bit is only\r | |
276 | * updated by the guest. It is read-only within Xen. If a channel\r | |
277 | * becomes pending while the channel is masked then the 'edge' is lost\r | |
278 | * (i.e., when the channel is unmasked, the guest must manually handle\r | |
279 | * pending notifications as no upcall will be scheduled by Xen).\r | |
280 | * \r | |
281 | * To expedite scanning of pending notifications, any 0->1 pending\r | |
282 | * transition on an unmasked channel causes a corresponding bit in a\r | |
283 | * per-vcpu selector word to be set. Each bit in the selector covers a\r | |
284 | * 'C INTN' in the PENDING bitfield array.\r | |
285 | */\r | |
286 | xen_ulong_t evtchn_pending[sizeof(xen_ulong_t) * 8];\r | |
287 | xen_ulong_t evtchn_mask[sizeof(xen_ulong_t) * 8];\r | |
288 | \r | |
289 | /*\r | |
290 | * Wallclock time: updated only by control software. Guests should base\r | |
291 | * their gettimeofday() syscall on this wallclock-base value.\r | |
292 | */\r | |
293 | UINT32 wc_version; /* Version counter: see vcpu_time_info_t. */\r | |
294 | UINT32 wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */\r | |
295 | UINT32 wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */\r | |
296 | \r | |
297 | struct arch_shared_info arch;\r | |
298 | \r | |
299 | };\r | |
300 | #ifndef __XEN__\r | |
301 | typedef struct shared_info shared_info_t;\r | |
302 | #endif\r | |
303 | \r | |
304 | /* Turn a plain number into a C UINTN constant. */\r | |
305 | #define __mk_unsigned_long(x) x ## UL\r | |
306 | #define mk_unsigned_long(x) __mk_unsigned_long(x)\r | |
307 | \r | |
308 | __DEFINE_XEN_GUEST_HANDLE(uint8, UINT8);\r | |
309 | __DEFINE_XEN_GUEST_HANDLE(uint16, UINT16);\r | |
310 | __DEFINE_XEN_GUEST_HANDLE(uint32, UINT32);\r | |
311 | __DEFINE_XEN_GUEST_HANDLE(uint64, UINT64);\r | |
312 | \r | |
313 | #else /* __ASSEMBLY__ */\r | |
314 | \r | |
315 | /* In assembly code we cannot use C numeric constant suffixes. */\r | |
316 | #define mk_unsigned_long(x) x\r | |
317 | \r | |
318 | #endif /* !__ASSEMBLY__ */\r | |
319 | \r | |
320 | #endif /* __XEN_PUBLIC_XEN_H__ */\r | |

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */