]>
Commit | Line | Data |
---|---|---|
6b621f95 AP |
1 | /******************************************************************************\r |
2 | * xen.h\r | |
3 | * \r | |
4 | * Guest OS interface to Xen.\r | |
5 | * \r | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a copy\r | |
7 | * of this software and associated documentation files (the "Software"), to\r | |
8 | * deal in the Software without restriction, including without limitation the\r | |
9 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\r | |
10 | * sell copies of the Software, and to permit persons to whom the Software is\r | |
11 | * furnished to do so, subject to the following conditions:\r | |
12 | *\r | |
13 | * The above copyright notice and this permission notice shall be included in\r | |
14 | * all copies or substantial portions of the Software.\r | |
15 | *\r | |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r | |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r | |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r | |
19 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r | |
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\r | |
21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\r | |
22 | * DEALINGS IN THE SOFTWARE.\r | |
23 | *\r | |
24 | * Copyright (c) 2004, K A Fraser\r | |
25 | */\r | |
26 | \r | |
27 | #ifndef __XEN_PUBLIC_XEN_H__\r | |
28 | #define __XEN_PUBLIC_XEN_H__\r | |
29 | \r | |
0ac10d1d AB |
30 | //\r |
31 | // Xen interface version used by Tianocore\r | |
32 | //\r | |
33 | #define __XEN_INTERFACE_VERSION__ 0x00040400\r | |
34 | \r | |
6b621f95 AP |
35 | #include "xen-compat.h"\r |
36 | \r | |
8f148aee | 37 | #if defined(MDE_CPU_IA32) || defined(MDE_CPU_X64)\r |
6b621f95 AP |
38 | #include "arch-x86/xen.h"\r |
39 | #elif defined(__arm__) || defined (__aarch64__)\r | |
b94c3ac9 | 40 | #include "arch-arm/xen.h"\r |
6b621f95 AP |
41 | #else\r |
42 | #error "Unsupported architecture"\r | |
43 | #endif\r | |
44 | \r | |
#ifndef __ASSEMBLY__
/*
 * Guest handles for primitive C types.
 *
 * A guest handle wraps a guest pointer so that hypercall argument
 * structures have a fixed layout regardless of guest pointer width.
 * This EDK2 port substitutes UEFI base types (CHAR8, UINT32, ...) for
 * the original C types; the second argument of the two-argument form
 * is the concrete type, the first is the legacy Xen name.
 */
DEFINE_XEN_GUEST_HANDLE(CHAR8);
__DEFINE_XEN_GUEST_HANDLE(uchar, UINT8);
DEFINE_XEN_GUEST_HANDLE(INT32);
__DEFINE_XEN_GUEST_HANDLE(uint, UINT32);
#if __XEN_INTERFACE_VERSION__ < 0x00040300
/* 'long'-sized handles are only part of interfaces older than Xen 4.3. */
DEFINE_XEN_GUEST_HANDLE(INTN);
__DEFINE_XEN_GUEST_HANDLE(ulong, UINTN);
#endif
DEFINE_XEN_GUEST_HANDLE(VOID);

DEFINE_XEN_GUEST_HANDLE(UINT64);
DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
#endif
61 | \r | |
/*
 * HYPERCALLS
 */

/* `incontents 100 hcalls List of hypercalls
 * ` enum hypercall_num { // __HYPERVISOR_* => HYPERVISOR_*()
 */

/*
 * Hypercall numbers: each value indexes the hypervisor's hypercall
 * table and is part of the guest/hypervisor ABI -- the values below
 * must not be changed.  Number 11 is absent here; presumably retired
 * upstream -- confirm against the matching Xen release headers.
 */
#define __HYPERVISOR_set_trap_table                0
#define __HYPERVISOR_mmu_update                    1
#define __HYPERVISOR_set_gdt                       2
#define __HYPERVISOR_stack_switch                  3
#define __HYPERVISOR_set_callbacks                 4
#define __HYPERVISOR_fpu_taskswitch                5
#define __HYPERVISOR_sched_op_compat               6 /* compat since 0x00030101 */
#define __HYPERVISOR_platform_op                   7
#define __HYPERVISOR_set_debugreg                  8
#define __HYPERVISOR_get_debugreg                  9
#define __HYPERVISOR_update_descriptor            10
#define __HYPERVISOR_memory_op                    12
#define __HYPERVISOR_multicall                    13
#define __HYPERVISOR_update_va_mapping            14
#define __HYPERVISOR_set_timer_op                 15
#define __HYPERVISOR_event_channel_op_compat      16 /* compat since 0x00030202 */
#define __HYPERVISOR_xen_version                  17
#define __HYPERVISOR_console_io                   18
#define __HYPERVISOR_physdev_op_compat            19 /* compat since 0x00030202 */
#define __HYPERVISOR_grant_table_op               20
#define __HYPERVISOR_vm_assist                    21
#define __HYPERVISOR_update_va_mapping_otherdomain 22
#define __HYPERVISOR_iret                         23 /* x86 only */
#define __HYPERVISOR_vcpu_op                      24
#define __HYPERVISOR_set_segment_base             25 /* x86/64 only */
#define __HYPERVISOR_mmuext_op                    26
#define __HYPERVISOR_xsm_op                       27
#define __HYPERVISOR_nmi_op                       28
#define __HYPERVISOR_sched_op                     29
#define __HYPERVISOR_callback_op                  30
#define __HYPERVISOR_xenoprof_op                  31
#define __HYPERVISOR_event_channel_op             32
#define __HYPERVISOR_physdev_op                   33
#define __HYPERVISOR_hvm_op                       34
#define __HYPERVISOR_sysctl                       35
#define __HYPERVISOR_domctl                       36
#define __HYPERVISOR_kexec_op                     37
#define __HYPERVISOR_tmem_op                      38
#define __HYPERVISOR_xc_reserved_op               39 /* reserved for XenClient */

/* Architecture-specific hypercall definitions. */
#define __HYPERVISOR_arch_0                       48
#define __HYPERVISOR_arch_1                       49
#define __HYPERVISOR_arch_2                       50
#define __HYPERVISOR_arch_3                       51
#define __HYPERVISOR_arch_4                       52
#define __HYPERVISOR_arch_5                       53
#define __HYPERVISOR_arch_6                       54
#define __HYPERVISOR_arch_7                       55

/* ` } */
121 | \r | |
/*
 * HYPERCALL COMPATIBILITY.
 *
 * When compiling against an old __XEN_INTERFACE_VERSION__, remap the
 * modern hypercall names onto the legacy numbers so callers written
 * for the old interface keep working unchanged.
 */

/* New sched_op hypercall introduced in 0x00030101. */
#if __XEN_INTERFACE_VERSION__ < 0x00030101
#undef __HYPERVISOR_sched_op
#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat
#endif

/* New event-channel and physdev hypercalls introduced in 0x00030202. */
#if __XEN_INTERFACE_VERSION__ < 0x00030202
#undef __HYPERVISOR_event_channel_op
#define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat
#undef __HYPERVISOR_physdev_op
#define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat
#endif

/* New platform_op hypercall introduced in 0x00030204. */
#if __XEN_INTERFACE_VERSION__ < 0x00030204
#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op
#endif
144 | \r | |
145 | #ifndef __ASSEMBLY__\r | |
146 | \r | |
/* Domain identifier: 16-bit, with the top of the range reserved below. */
typedef UINT16 domid_t;

/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
#define DOMID_FIRST_RESERVED (0x7FF0U)

/*
 * DOMID_SELF is used in certain contexts to refer to oneself.
 * Note it deliberately aliases DOMID_FIRST_RESERVED, matching upstream Xen.
 */
#define DOMID_SELF           (0x7FF0U)

/*
 * DOMID_IO is used to restrict page-table updates to mapping I/O memory.
 * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO
 * is useful to ensure that no mappings to the OS's own heap are accidentally
 * installed. (e.g., in Linux this could cause havoc as reference counts
 * aren't adjusted on the I/O-mapping code path).
 * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can
 * be specified by any calling domain.
 */
#define DOMID_IO             (0x7FF1U)

/*
 * DOMID_XEN is used to allow privileged domains to map restricted parts of
 * Xen's heap space (e.g., the machine_to_phys table).
 * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if
 * the caller is privileged.
 */
#define DOMID_XEN            (0x7FF2U)

/*
 * DOMID_COW is used as the owner of sharable pages */
#define DOMID_COW            (0x7FF3U)

/* DOMID_INVALID is used to identify pages with unknown owner. */
#define DOMID_INVALID        (0x7FF4U)

/* Idle domain. */
#define DOMID_IDLE           (0x7FFFU)

#if __XEN_INTERFACE_VERSION__ < 0x00040400
/*
 * Event channel endpoints per domain (when using the 2-level ABI):
 * 1024 if a INTN is 32 bits; 4096 if a INTN is 64 bits.
 * Removed from the public interface in Xen 4.4 in favour of
 * EVTCHN_2L_NR_CHANNELS.
 */
#define NR_EVENT_CHANNELS EVTCHN_2L_NR_CHANNELS
#endif
191 | \r | |
/*
 * Per-VCPU time information, published by the hypervisor in shared
 * memory.  This layout is guest/hypervisor ABI: field order, widths
 * and padding must not change (the struct is 32 bytes).
 */
struct vcpu_time_info {
    /*
     * Updates to the following values are preceded and followed by an
     * increment of 'version'. The guest can therefore detect updates by
     * looking for changes to 'version'. If the least-significant bit of
     * the version number is set then an update is in progress and the guest
     * must wait to read a consistent set of values.
     * The correct way to interact with the version number is similar to
     * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry.
     */
    UINT32 version;
    UINT32 pad0;                 /* Explicit padding to 8-byte align the TSC. */
    UINT64 tsc_timestamp;        /* TSC at last update of time vals. */
    UINT64 system_time;          /* Time, in nanosecs, since boot. */
    /*
     * Current system time:
     *   system_time +
     *   ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32)
     * CPU frequency (Hz):
     *   ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
     */
    UINT32 tsc_to_system_mul;    /* TSC -> nanoseconds multiplier (32.32). */
    INT8   tsc_shift;            /* TSC pre-shift; may be negative. */
    INT8   pad1[3];
}; /* 32 bytes */
typedef struct vcpu_time_info vcpu_time_info_t;
218 | \r | |
/*
 * Per-VCPU state shared between the guest and Xen: event-channel
 * upcall flags plus the time info above.  Shared-memory ABI -- the
 * layout must not change (64 bytes on x86).
 */
struct vcpu_info {
    /*
     * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
     * a pending notification for a particular VCPU. It is then cleared
     * by the guest OS /before/ checking for pending work, thus avoiding
     * a set-and-check race. Note that the mask is only accessed by Xen
     * on the CPU that is currently hosting the VCPU. This means that the
     * pending and mask flags can be updated by the guest without special
     * synchronisation (i.e., no need for the x86 LOCK prefix).
     * This may seem suboptimal because if the pending flag is set by
     * a different CPU then an IPI may be scheduled even when the mask
     * is set. However, note:
     *  1. The task of 'interrupt holdoff' is covered by the per-event-
     *     channel mask bits. A 'noisy' event that is continually being
     *     triggered can be masked at source at this very precise
     *     granularity.
     *  2. The main purpose of the per-VCPU mask is therefore to restrict
     *     reentrant execution: whether for concurrency control, or to
     *     prevent unbounded stack usage. Whatever the purpose, we expect
     *     that the mask will be asserted only for short periods at a time,
     *     and so the likelihood of a 'spurious' IPI is suitably small.
     * The mask is read before making an event upcall to the guest: a
     * non-zero mask therefore guarantees that the VCPU will not receive
     * an upcall activation. The mask is cleared when the VCPU requests
     * to block: this avoids wakeup-waiting races.
     */
    UINT8 evtchn_upcall_pending;
#ifdef XEN_HAVE_PV_UPCALL_MASK
    UINT8 evtchn_upcall_mask;
#else /* XEN_HAVE_PV_UPCALL_MASK */
    /* Padding keeps the layout identical when the PV mask is absent. */
    UINT8 pad0;
#endif /* XEN_HAVE_PV_UPCALL_MASK */
    xen_ulong_t evtchn_pending_sel;   /* One bit per evtchn_pending[] word. */
    struct arch_vcpu_info arch;
    struct vcpu_time_info time;
}; /* 64 bytes (x86) */
#ifndef __XEN__
typedef struct vcpu_info vcpu_info_t;
#endif
258 | \r | |
/*
 * `incontents 200 startofday_shared Start-of-day shared data structure
 * Xen/kernel shared data -- pointer provided in start_info.
 *
 * This structure is defined to be both smaller than a page, and the
 * only data on the shared page, but may vary in actual size even within
 * compatible Xen versions; guests should not rely on the size
 * of this structure remaining constant.
 */
struct shared_info {
    struct vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS];

    /*
     * A domain can create "event channels" on which it can send and receive
     * asynchronous event notifications. There are three classes of event that
     * are delivered by this mechanism:
     *  1. Bi-directional inter- and intra-domain connections. Domains must
     *     arrange out-of-band to set up a connection (usually by allocating
     *     an unbound 'listener' port and advertising that via a storage service
     *     such as xenstore).
     *  2. Physical interrupts. A domain with suitable hardware-access
     *     privileges can bind an event-channel port to a physical interrupt
     *     source.
     *  3. Virtual interrupts ('events'). A domain can bind an event-channel
     *     port to a virtual interrupt source, such as the virtual-timer
     *     device or the emergency console.
     *
     * Event channels are addressed by a "port index". Each channel is
     * associated with two bits of information:
     *  1. PENDING -- notifies the domain that there is a pending notification
     *     to be processed. This bit is cleared by the guest.
     *  2. MASK -- if this bit is clear then a 0->1 transition of PENDING
     *     will cause an asynchronous upcall to be scheduled. This bit is only
     *     updated by the guest. It is read-only within Xen. If a channel
     *     becomes pending while the channel is masked then the 'edge' is lost
     *     (i.e., when the channel is unmasked, the guest must manually handle
     *     pending notifications as no upcall will be scheduled by Xen).
     *
     * To expedite scanning of pending notifications, any 0->1 pending
     * transition on an unmasked channel causes a corresponding bit in a
     * per-vcpu selector word to be set. Each bit in the selector covers a
     * 'C INTN' in the PENDING bitfield array.
     */
    /* sizeof(xen_ulong_t)*8 words of sizeof(xen_ulong_t)*8 bits each. */
    xen_ulong_t evtchn_pending[sizeof(xen_ulong_t) * 8];
    xen_ulong_t evtchn_mask[sizeof(xen_ulong_t) * 8];

    /*
     * Wallclock time: updated only by control software. Guests should base
     * their gettimeofday() syscall on this wallclock-base value.
     * Versioned with the same seqlock-style protocol as vcpu_time_info.
     */
    UINT32 wc_version;      /* Version counter: see vcpu_time_info_t. */
    UINT32 wc_sec;          /* Secs  00:00:00 UTC, Jan 1, 1970. */
    UINT32 wc_nsec;         /* Nsecs 00:00:00 UTC, Jan 1, 1970. */

    struct arch_shared_info arch;

};
#ifndef __XEN__
typedef struct shared_info shared_info_t;
#endif
319 | \r | |
/*
 * Turn a plain number into a C UINTN constant by appending the 'UL'
 * suffix.  The two-level expansion lets the argument itself be a macro.
 */
#define __mk_unsigned_long(x) x ## UL
#define mk_unsigned_long(x) __mk_unsigned_long(x)

/* Guest handles for the explicitly-sized integer types. */
__DEFINE_XEN_GUEST_HANDLE(uint8,  UINT8);
__DEFINE_XEN_GUEST_HANDLE(uint16, UINT16);
__DEFINE_XEN_GUEST_HANDLE(uint32, UINT32);
__DEFINE_XEN_GUEST_HANDLE(uint64, UINT64);

#else /* __ASSEMBLY__ */

/* In assembly code we cannot use C numeric constant suffixes. */
#define mk_unsigned_long(x) x

#endif /* !__ASSEMBLY__ */

#endif /* __XEN_PUBLIC_XEN_H__ */
337 | \r | |
338 | /*\r | |
339 | * Local variables:\r | |
340 | * mode: C\r | |
341 | * c-file-style: "BSD"\r | |
342 | * c-basic-offset: 4\r | |
343 | * tab-width: 4\r | |
344 | * indent-tabs-mode: nil\r | |
345 | * End:\r | |
346 | */\r |