* All generic sub-operations\r
*\r
* HYPERVISOR_physdev_op\r
- * No sub-operations are currenty supported\r
+ * No sub-operations are currently supported\r
*\r
* HYPERVISOR_sysctl\r
* All generic sub-operations, with the exception of:\r
* at Documentation/devicetree/bindings/arm/xen.txt.\r
*/\r
\r
-#define XEN_HYPERCALL_TAG 0xEA1\r
+#define XEN_HYPERCALL_TAG 0xEA1\r
\r
-#define uint64_aligned_t UINT64 __attribute__((aligned(8)))\r
+#define uint64_aligned_t UINT64 __attribute__((aligned(8)))\r
\r
#ifndef __ASSEMBLY__\r
#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \\r
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \\r
___DEFINE_XEN_GUEST_HANDLE(name, type); \\r
___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)\r
-#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)\r
-#define __XEN_GUEST_HANDLE(name) __guest_handle_64_ ## name\r
-#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)\r
+#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)\r
+#define __XEN_GUEST_HANDLE(name) __guest_handle_64_ ## name\r
+#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)\r
/* this is going to be changed on 64 bit */\r
-#define XEN_GUEST_HANDLE_PARAM(name) __guest_handle_ ## name\r
+#define XEN_GUEST_HANDLE_PARAM(name) __guest_handle_ ## name\r
#define set_xen_guest_handle_raw(hnd, val) \\r
do { \\r
typeof(&(hnd)) _sxghr_tmp = &(hnd); \\r
_sxghr_tmp->q = 0; \\r
_sxghr_tmp->p = val; \\r
} while ( 0 )\r
-#ifdef __XEN_TOOLS__\r
+ #ifdef __XEN_TOOLS__\r
#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)\r
-#endif\r
-#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)\r
+ #endif\r
+#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)\r
\r
-#if defined(__GNUC__) && !defined(__STRICT_ANSI__)\r
+ #if defined (__GNUC__) && !defined (__STRICT_ANSI__)\r
/* Anonymous union includes both 32- and 64-bit names (e.g., r0/x0). */\r
-# define __DECL_REG(n64, n32) union { \\r
+#define __DECL_REG(n64, n32) union { \\r
UINT64 n64; \\r
UINT32 n32; \\r
}\r
-#else\r
+ #else\r
/* Non-gcc sources must always use the proper 64-bit name (e.g., x0). */\r
-#define __DECL_REG(n64, n32) UINT64 n64\r
-#endif\r
-\r
-struct vcpu_guest_core_regs\r
-{\r
- /* Aarch64 Aarch32 */\r
- __DECL_REG(x0, r0_usr);\r
- __DECL_REG(x1, r1_usr);\r
- __DECL_REG(x2, r2_usr);\r
- __DECL_REG(x3, r3_usr);\r
- __DECL_REG(x4, r4_usr);\r
- __DECL_REG(x5, r5_usr);\r
- __DECL_REG(x6, r6_usr);\r
- __DECL_REG(x7, r7_usr);\r
- __DECL_REG(x8, r8_usr);\r
- __DECL_REG(x9, r9_usr);\r
- __DECL_REG(x10, r10_usr);\r
- __DECL_REG(x11, r11_usr);\r
- __DECL_REG(x12, r12_usr);\r
-\r
- __DECL_REG(x13, sp_usr);\r
- __DECL_REG(x14, lr_usr);\r
-\r
- __DECL_REG(x15, __unused_sp_hyp);\r
-\r
- __DECL_REG(x16, lr_irq);\r
- __DECL_REG(x17, sp_irq);\r
-\r
- __DECL_REG(x18, lr_svc);\r
- __DECL_REG(x19, sp_svc);\r
-\r
- __DECL_REG(x20, lr_abt);\r
- __DECL_REG(x21, sp_abt);\r
-\r
- __DECL_REG(x22, lr_und);\r
- __DECL_REG(x23, sp_und);\r
-\r
- __DECL_REG(x24, r8_fiq);\r
- __DECL_REG(x25, r9_fiq);\r
- __DECL_REG(x26, r10_fiq);\r
- __DECL_REG(x27, r11_fiq);\r
- __DECL_REG(x28, r12_fiq);\r
-\r
- __DECL_REG(x29, sp_fiq);\r
- __DECL_REG(x30, lr_fiq);\r
-\r
- /* Return address and mode */\r
- __DECL_REG(pc64, pc32); /* ELR_EL2 */\r
- UINT32 cpsr; /* SPSR_EL2 */\r
-\r
- union {\r
- UINT32 spsr_el1; /* AArch64 */\r
- UINT32 spsr_svc; /* AArch32 */\r
- };\r
-\r
- /* AArch32 guests only */\r
- UINT32 spsr_fiq, spsr_irq, spsr_und, spsr_abt;\r
-\r
- /* AArch64 guests only */\r
- UINT64 sp_el0;\r
- UINT64 sp_el1, elr_el1;\r
+#define __DECL_REG(n64, n32) UINT64 n64\r
+ #endif\r
+\r
+struct vcpu_guest_core_regs {\r
+ /* Aarch64 Aarch32 */\r
+ __DECL_REG (x0, r0_usr);\r
+ __DECL_REG (x1, r1_usr);\r
+ __DECL_REG (x2, r2_usr);\r
+ __DECL_REG (x3, r3_usr);\r
+ __DECL_REG (x4, r4_usr);\r
+ __DECL_REG (x5, r5_usr);\r
+ __DECL_REG (x6, r6_usr);\r
+ __DECL_REG (x7, r7_usr);\r
+ __DECL_REG (x8, r8_usr);\r
+ __DECL_REG (x9, r9_usr);\r
+ __DECL_REG (x10, r10_usr);\r
+ __DECL_REG (x11, r11_usr);\r
+ __DECL_REG (x12, r12_usr);\r
+\r
+ __DECL_REG (x13, sp_usr);\r
+ __DECL_REG (x14, lr_usr);\r
+\r
+ __DECL_REG (x15, __unused_sp_hyp);\r
+\r
+ __DECL_REG (x16, lr_irq);\r
+ __DECL_REG (x17, sp_irq);\r
+\r
+ __DECL_REG (x18, lr_svc);\r
+ __DECL_REG (x19, sp_svc);\r
+\r
+ __DECL_REG (x20, lr_abt);\r
+ __DECL_REG (x21, sp_abt);\r
+\r
+ __DECL_REG (x22, lr_und);\r
+ __DECL_REG (x23, sp_und);\r
+\r
+ __DECL_REG (x24, r8_fiq);\r
+ __DECL_REG (x25, r9_fiq);\r
+ __DECL_REG (x26, r10_fiq);\r
+ __DECL_REG (x27, r11_fiq);\r
+ __DECL_REG (x28, r12_fiq);\r
+\r
+ __DECL_REG (x29, sp_fiq);\r
+ __DECL_REG (x30, lr_fiq);\r
+\r
+ /* Return address and mode */\r
+ __DECL_REG (pc64, pc32); /* ELR_EL2 */\r
+ UINT32 cpsr; /* SPSR_EL2 */\r
+\r
+ union {\r
+ UINT32 spsr_el1; /* AArch64 */\r
+ UINT32 spsr_svc; /* AArch32 */\r
+ };\r
+\r
+ /* AArch32 guests only */\r
+ UINT32 spsr_fiq, spsr_irq, spsr_und, spsr_abt;\r
+\r
+ /* AArch64 guests only */\r
+ UINT64 sp_el0;\r
+ UINT64 sp_el1, elr_el1;\r
};\r
+\r
typedef struct vcpu_guest_core_regs vcpu_guest_core_regs_t;\r
-DEFINE_XEN_GUEST_HANDLE(vcpu_guest_core_regs_t);\r
+DEFINE_XEN_GUEST_HANDLE (vcpu_guest_core_regs_t);\r
\r
-#undef __DECL_REG\r
+ #undef __DECL_REG\r
\r
typedef UINT64 xen_pfn_t;\r
-#define PRI_xen_pfn PRIx64\r
+#define PRI_xen_pfn PRIx64\r
\r
/* Maximum number of virtual CPUs in legacy multi-processor guests. */\r
/* Only one. All other VCPUS must use VCPUOP_register_vcpu_info */\r
-#define XEN_LEGACY_MAX_VCPUS 1\r
+#define XEN_LEGACY_MAX_VCPUS 1\r
\r
typedef UINT64 xen_ulong_t;\r
-#define PRI_xen_ulong PRIx64\r
+#define PRI_xen_ulong PRIx64\r
\r
-#if defined(__XEN__) || defined(__XEN_TOOLS__)\r
+ #if defined (__XEN__) || defined (__XEN_TOOLS__)\r
struct vcpu_guest_context {\r
-#define _VGCF_online 0\r
-#define VGCF_online (1<<_VGCF_online)\r
- UINT32 flags; /* VGCF_* */\r
+ #define _VGCF_online 0\r
+ #define VGCF_online (1<<_VGCF_online)\r
+ UINT32 flags; /* VGCF_* */\r
\r
- struct vcpu_guest_core_regs user_regs; /* Core CPU registers */\r
+ struct vcpu_guest_core_regs user_regs; /* Core CPU registers */\r
\r
- UINT32 sctlr;\r
- UINT64 ttbcr, ttbr0, ttbr1;\r
+ UINT32 sctlr;\r
+ UINT64 ttbcr, ttbr0, ttbr1;\r
};\r
+\r
typedef struct vcpu_guest_context vcpu_guest_context_t;\r
-DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);\r
-#endif\r
+DEFINE_XEN_GUEST_HANDLE (vcpu_guest_context_t);\r
+ #endif\r
\r
struct arch_vcpu_info {\r
};\r
+\r
typedef struct arch_vcpu_info arch_vcpu_info_t;\r
\r
struct arch_shared_info {\r
};\r
+\r
typedef struct arch_shared_info arch_shared_info_t;\r
-typedef UINT64 xen_callback_t;\r
+typedef UINT64 xen_callback_t;\r
\r
#endif\r
\r
-#if defined(__XEN__) || defined(__XEN_TOOLS__)\r
+#if defined (__XEN__) || defined (__XEN_TOOLS__)\r
\r
/* PSR bits (CPSR, SPSR)*/\r
\r
#define PSR_JAZELLE (1<<24) /* Jazelle Mode */\r
\r
/* 32 bit modes */\r
-#define PSR_MODE_USR 0x10\r
-#define PSR_MODE_FIQ 0x11\r
-#define PSR_MODE_IRQ 0x12\r
-#define PSR_MODE_SVC 0x13\r
-#define PSR_MODE_MON 0x16\r
-#define PSR_MODE_ABT 0x17\r
-#define PSR_MODE_HYP 0x1a\r
-#define PSR_MODE_UND 0x1b\r
-#define PSR_MODE_SYS 0x1f\r
+#define PSR_MODE_USR 0x10\r
+#define PSR_MODE_FIQ 0x11\r
+#define PSR_MODE_IRQ 0x12\r
+#define PSR_MODE_SVC 0x13\r
+#define PSR_MODE_MON 0x16\r
+#define PSR_MODE_ABT 0x17\r
+#define PSR_MODE_HYP 0x1a\r
+#define PSR_MODE_UND 0x1b\r
+#define PSR_MODE_SYS 0x1f\r
\r
/* 64 bit modes */\r
-#define PSR_MODE_BIT 0x10 /* Set iff AArch32 */\r
-#define PSR_MODE_EL3h 0x0d\r
-#define PSR_MODE_EL3t 0x0c\r
-#define PSR_MODE_EL2h 0x09\r
-#define PSR_MODE_EL2t 0x08\r
-#define PSR_MODE_EL1h 0x05\r
-#define PSR_MODE_EL1t 0x04\r
-#define PSR_MODE_EL0t 0x00\r
+#define PSR_MODE_BIT    0x10 /* Set iff AArch32 */
+#define PSR_MODE_EL3h 0x0d\r
+#define PSR_MODE_EL3t 0x0c\r
+#define PSR_MODE_EL2h 0x09\r
+#define PSR_MODE_EL2t 0x08\r
+#define PSR_MODE_EL1h 0x05\r
+#define PSR_MODE_EL1t 0x04\r
+#define PSR_MODE_EL0t 0x00\r
\r
#define PSR_GUEST32_INIT (PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_SVC)\r
-#define PSR_GUEST64_INIT (PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_EL1h)\r
+#define PSR_GUEST64_INIT (PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_EL1h)\r
\r
-#define SCTLR_GUEST_INIT 0x00c50078\r
+#define SCTLR_GUEST_INIT 0x00c50078\r
\r
/*\r
* Virtual machine platform (memory layout, interrupts)\r
*/\r
\r
/* vGIC v2 mappings */\r
-#define GUEST_GICD_BASE 0x03001000ULL\r
-#define GUEST_GICD_SIZE 0x00001000ULL\r
-#define GUEST_GICC_BASE 0x03002000ULL\r
-#define GUEST_GICC_SIZE 0x00000100ULL\r
+#define GUEST_GICD_BASE 0x03001000ULL\r
+#define GUEST_GICD_SIZE 0x00001000ULL\r
+#define GUEST_GICC_BASE 0x03002000ULL\r
+#define GUEST_GICC_SIZE 0x00000100ULL\r
\r
/* vGIC v3 mappings */\r
-#define GUEST_GICV3_GICD_BASE 0x03001000ULL\r
-#define GUEST_GICV3_GICD_SIZE 0x00010000ULL\r
+#define GUEST_GICV3_GICD_BASE 0x03001000ULL\r
+#define GUEST_GICV3_GICD_SIZE 0x00010000ULL\r
\r
#define GUEST_GICV3_RDIST_STRIDE 0x20000ULL\r
#define GUEST_GICV3_RDIST_REGIONS 1\r
\r
-#define GUEST_GICV3_GICR0_BASE 0x03020000ULL /* vCPU0 - vCPU7 */\r
-#define GUEST_GICV3_GICR0_SIZE 0x00100000ULL\r
+#define GUEST_GICV3_GICR0_BASE 0x03020000ULL /* vCPU0 - vCPU7 */\r
+#define GUEST_GICV3_GICR0_SIZE 0x00100000ULL\r
\r
/* 16MB == 4096 pages reserved for guest to use as a region to map its\r
* grant table in.\r
*/\r
-#define GUEST_GNTTAB_BASE 0x38000000ULL\r
-#define GUEST_GNTTAB_SIZE 0x01000000ULL\r
+#define GUEST_GNTTAB_BASE 0x38000000ULL\r
+#define GUEST_GNTTAB_SIZE 0x01000000ULL\r
\r
#define GUEST_MAGIC_BASE 0x39000000ULL\r
#define GUEST_MAGIC_SIZE 0x01000000ULL\r
\r
-#define GUEST_RAM_BANKS 2\r
+#define GUEST_RAM_BANKS 2\r
\r
-#define GUEST_RAM0_BASE 0x40000000ULL /* 3GB of low RAM @ 1GB */\r
-#define GUEST_RAM0_SIZE 0xc0000000ULL\r
+#define GUEST_RAM0_BASE 0x40000000ULL /* 3GB of low RAM @ 1GB */\r
+#define GUEST_RAM0_SIZE 0xc0000000ULL\r
\r
-#define GUEST_RAM1_BASE 0x0200000000ULL /* 1016GB of RAM @ 8GB */\r
-#define GUEST_RAM1_SIZE 0xfe00000000ULL\r
+#define GUEST_RAM1_BASE 0x0200000000ULL /* 1016GB of RAM @ 8GB */\r
+#define GUEST_RAM1_SIZE 0xfe00000000ULL\r
\r
-#define GUEST_RAM_BASE GUEST_RAM0_BASE /* Lowest RAM address */\r
+#define GUEST_RAM_BASE GUEST_RAM0_BASE /* Lowest RAM address */\r
/* Largest amount of actual RAM, not including holes */\r
-#define GUEST_RAM_MAX (GUEST_RAM0_SIZE + GUEST_RAM1_SIZE)\r
+#define GUEST_RAM_MAX (GUEST_RAM0_SIZE + GUEST_RAM1_SIZE)\r
/* Suitable for e.g. const uint64_t ramfoo[] = GUEST_RAM_BANK_FOOS; */\r
-#define GUEST_RAM_BANK_BASES { GUEST_RAM0_BASE, GUEST_RAM1_BASE }\r
-#define GUEST_RAM_BANK_SIZES { GUEST_RAM0_SIZE, GUEST_RAM1_SIZE }\r
+#define GUEST_RAM_BANK_BASES { GUEST_RAM0_BASE, GUEST_RAM1_BASE }\r
+#define GUEST_RAM_BANK_SIZES { GUEST_RAM0_SIZE, GUEST_RAM1_SIZE }\r
\r
/* Interrupts */\r
-#define GUEST_TIMER_VIRT_PPI 27\r
-#define GUEST_TIMER_PHYS_S_PPI 29\r
-#define GUEST_TIMER_PHYS_NS_PPI 30\r
-#define GUEST_EVTCHN_PPI 31\r
+#define GUEST_TIMER_VIRT_PPI 27\r
+#define GUEST_TIMER_PHYS_S_PPI 29\r
+#define GUEST_TIMER_PHYS_NS_PPI 30\r
+#define GUEST_EVTCHN_PPI 31\r
\r
/* PSCI functions */\r
-#define PSCI_cpu_suspend 0\r
-#define PSCI_cpu_off 1\r
-#define PSCI_cpu_on 2\r
-#define PSCI_migrate 3\r
+#define PSCI_cpu_suspend 0\r
+#define PSCI_cpu_off 1\r
+#define PSCI_cpu_on 2\r
+#define PSCI_migrate 3\r
\r
#endif\r
\r