/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#ifndef _GVT_H_
#define _GVT_H_

#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
#include "gtt.h"
#include "display.h"
#include "edid.h"
#include "execlist.h"
#include "scheduler.h"
#include "sched_policy.h"
#include "render.h"
#include "cmd_parser.h"

#define GVT_MAX_VGPU 8

enum {
	INTEL_GVT_HYPERVISOR_XEN = 0,
	INTEL_GVT_HYPERVISOR_KVM,
};

struct intel_gvt_host {
	bool initialized;
	int hypervisor_type;
	struct intel_gvt_mpt *mpt;
};

extern struct intel_gvt_host intel_gvt_host;

/* Describe per-platform limitations. */
struct intel_gvt_device_info {
	u32 max_support_vgpus;
	u32 cfg_space_size;
	u32 mmio_size;
	u32 mmio_bar;
	unsigned long msi_cap_offset;
	u32 gtt_start_offset;
	u32 gtt_entry_size;
	u32 gtt_entry_size_shift;
	int gmadr_bytes_in_cmd;
	u32 max_surface_size;
};

/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
	u64 aperture_sz;
	u64 hidden_sz;
	struct drm_mm_node low_gm_node;
	struct drm_mm_node high_gm_node;
};

#define INTEL_GVT_MAX_NUM_FENCES 32

/* Fences owned by a vGPU */
struct intel_vgpu_fence {
	struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
	u32 base;
	u32 size;
};

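/*
 * vGPU MMIO register state: vreg backs what the guest reads and writes,
 * while sreg holds the shadowed copies tracked on the host side.
 */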
struct intel_vgpu_mmio {
	void *vreg;
	void *sreg;
	bool disable_warn_untrack;
};

#define INTEL_GVT_MAX_CFG_SPACE_SZ 256
#define INTEL_GVT_MAX_BAR_NUM 4

struct intel_vgpu_pci_bar {
	u64 size;
	bool tracked;
};

struct intel_vgpu_cfg_space {
	unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ];
	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
};

#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)

#define INTEL_GVT_MAX_PIPE 4

struct intel_vgpu_irq {
	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
	DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
		       INTEL_GVT_EVENT_MAX);
};

struct intel_vgpu_opregion {
	void *va;
	u32 gfn[INTEL_GVT_OPREGION_PAGES];
	struct page *pages[INTEL_GVT_OPREGION_PAGES];
};

#define vgpu_opregion(vgpu) (&(vgpu->opregion))

#define INTEL_GVT_MAX_PORT 5

struct intel_vgpu_display {
	struct intel_vgpu_i2c_edid i2c_edid;
	struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
	struct intel_vgpu_sbi sbi;
};

struct vgpu_sched_ctl {
	int weight;
};

struct intel_vgpu {
	struct intel_gvt *gvt;
	int id;
	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
	bool active;
	bool pv_notified;
	bool failsafe;
	bool resetting;
	void *sched_data;
	struct vgpu_sched_ctl sched_ctl;

	struct intel_vgpu_fence fence;
	struct intel_vgpu_gm gm;
	struct intel_vgpu_cfg_space cfg_space;
	struct intel_vgpu_mmio mmio;
	struct intel_vgpu_irq irq;
	struct intel_vgpu_gtt gtt;
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
	struct list_head workload_q_head[I915_NUM_ENGINES];
	struct kmem_cache *workloads;
	atomic_t running_workload_num;
	ktime_t last_ctx_submit_time;
	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
	struct i915_gem_context *shadow_ctx;

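	/* KVMGT-only state: VFIO/mdev bookkeeping for this vGPU */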
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
	struct {
		struct mdev_device *mdev;
		struct vfio_region *region;
		int num_regions;
		struct eventfd_ctx *intx_trigger;
		struct eventfd_ctx *msi_trigger;
		struct rb_root cache;
		struct mutex cache_lock;
		struct notifier_block iommu_notifier;
		struct notifier_block group_notifier;
		struct kvm *kvm;
		struct work_struct release_work;
		atomic_t released;
	} vdev;
#endif
};

struct intel_gvt_gm {
	unsigned long vgpu_allocated_low_gm_size;
	unsigned long vgpu_allocated_high_gm_size;
};

struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;
};

#define INTEL_GVT_MMIO_HASH_BITS 9

struct intel_gvt_mmio {
	u32 *mmio_attribute;
	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
};

struct intel_gvt_firmware {
	void *cfg_space;
	void *mmio;
	bool firmware_loaded;
};

struct intel_gvt_opregion {
	void *opregion_va;
	u32 opregion_pa;
};

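/*
 * A vGPU type names a creatable vGPU configuration: how much low/high GM,
 * how many fence registers, the scheduling weight, and the virtual EDID
 * resolution an instance gets. avail_instance tracks how many more
 * instances of the type can still be created from remaining resources.
 */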
#define NR_MAX_INTEL_VGPU_TYPES 20
struct intel_vgpu_type {
	char name[16];
	unsigned int avail_instance;
	unsigned int low_gm_size;
	unsigned int high_gm_size;
	unsigned int fence;
	unsigned int weight;
	enum intel_vgpu_edid resolution;
};

struct intel_gvt {
	struct mutex lock;
	struct drm_i915_private *dev_priv;
	struct idr vgpu_idr; /* vGPU IDR pool */

	struct intel_gvt_device_info device_info;
	struct intel_gvt_gm gm;
	struct intel_gvt_fence fence;
	struct intel_gvt_mmio mmio;
	struct intel_gvt_firmware firmware;
	struct intel_gvt_irq irq;
	struct intel_gvt_gtt gtt;
	struct intel_gvt_opregion opregion;
	struct intel_gvt_workload_scheduler scheduler;
	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
	struct intel_vgpu_type *types;
	unsigned int num_types;

	struct task_struct *service_thread;
	wait_queue_head_t service_thread_wq;
	unsigned long service_request;
};

static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
{
	return i915->gvt;
}

enum {
	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
	INTEL_GVT_REQUEST_SCHED = 1,
};

static inline void intel_gvt_request_service(struct intel_gvt *gvt,
		int service)
{
	set_bit(service, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);
}
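
/*
 * Usage sketch (illustrative): a timer or emulation path that wants the
 * service thread to run the scheduler would post a request like
 *
 *	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_SCHED);
 *
 * and the service thread then handles the request bit outside the
 * caller's context.
 */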

void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);

/* Unit conversions and host-reserved aperture/GM/fence resources */
#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)

#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4

/* Aperture/GM space definitions for GVT device */
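/*
 * Layout note: the global graphics memory space is the CPU-visible
 * aperture ("low" GM) immediately followed by the hidden ("high") GM,
 * so the hidden range starts where the aperture ends (see the
 * gvt_hidden_gmadr_* macros below).
 */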
#define gvt_aperture_sz(gvt)		(gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt)	(gvt->dev_priv->ggtt.mappable_base)

#define gvt_ggtt_gm_sz(gvt)		(gvt->dev_priv->ggtt.base.total)
#define gvt_ggtt_sz(gvt) \
	((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt)		(gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
				     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
				    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
				   + gvt_hidden_sz(gvt) - 1)

#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)

/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu)	((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu)	((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu)		((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu)		((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
	(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)

struct intel_vgpu_creation_params {
	__u64 handle;
	__u64 low_gm_sz;  /* in MB */
	__u64 high_gm_sz; /* in MB */
	__u64 fence_sz;
	__u64 resolution;
	__s32 primary;
	__u64 vgpu_id;

	__u32 weight;
};

int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
			      struct intel_vgpu_creation_params *param);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
			    u32 fence, u64 value);

/* Macros for easily accessing vGPU virtual/shadow registers */
#define vgpu_vreg(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg8(vgpu, reg) \
	(*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg16(vgpu, reg) \
	(*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg64(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg8(vgpu, reg) \
	(*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg16(vgpu, reg) \
	(*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg64(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
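
/*
 * Usage sketch (illustrative, "offset" is a hypothetical register):
 * read-modify-write of a 32-bit virtual register via the accessors above:
 *
 *	u32 val = vgpu_vreg(vgpu, offset);
 *	vgpu_vreg(vgpu, offset) = val | BIT(0);
 */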

#define for_each_active_vgpu(gvt, vgpu, id) \
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
		for_each_if(vgpu->active)

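/*
 * Background note: guests size a BAR by writing all 1s to it and reading
 * back the result. The low four bits of a memory BAR are read-only
 * attribute bits, which is why the "low" path below preserves bits 3:0.
 */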
static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
	u32 offset, u32 val, bool low)
{
	u32 *pval;

	/* BAR offset should be 32-bit aligned */
	offset = rounddown(offset, 4);
	pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);

	if (low) {
		/*
		 * only update bits 31 - 4,
		 * leave bits 3 - 0 unchanged.
		 */
		*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
	} else {
		*pval = val;
	}
}

int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);

struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
					 struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);

/* Helpers for validating GM addresses */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
	((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
	((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
	((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
	  (vgpu_gmadr_is_hidden(vgpu, gmadr))))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
	((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
	 (gmadr <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
	((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
	 (gmadr <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
	(gvt_gmadr_is_aperture(gvt, gmadr) || \
	 gvt_gmadr_is_hidden(gvt, gmadr))

bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index);

void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
			       bool primary);
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);

int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
				void *p_data, unsigned int bytes);

int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
				 void *p_data, unsigned int bytes);

void intel_gvt_clean_opregion(struct intel_gvt *gvt);
int intel_gvt_init_opregion(struct intel_gvt *gvt);

void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);

struct intel_gvt_ops {
	int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
				 unsigned int);
	int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
				 unsigned int);
	int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
				  unsigned int);
	struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
					  struct intel_vgpu_type *);
	void (*vgpu_destroy)(struct intel_vgpu *);
	void (*vgpu_reset)(struct intel_vgpu *);
};

enum {
	GVT_FAILSAFE_UNSUPPORTED_GUEST,
	GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
};

#include "mpt.h"

#endif