/*
 * i386 memory mapping
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
13
b6a0aa05 14#include "qemu/osdep.h"
fae001f5 15#include "cpu.h"
9c17d615 16#include "sysemu/memory_mapping.h"
fae001f5
WC
17
18/* PAE Paging or IA-32e Paging */
fdfba1a2
EI
19static void walk_pte(MemoryMappingList *list, AddressSpace *as,
20 hwaddr pte_start_addr,
fae001f5
WC
21 int32_t a20_mask, target_ulong start_line_addr)
22{
a8170e5e 23 hwaddr pte_addr, start_paddr;
fae001f5
WC
24 uint64_t pte;
25 target_ulong start_vaddr;
26 int i;
27
28 for (i = 0; i < 512; i++) {
29 pte_addr = (pte_start_addr + i * 8) & a20_mask;
42874d3a 30 pte = address_space_ldq(as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL);
fae001f5
WC
31 if (!(pte & PG_PRESENT_MASK)) {
32 /* not present */
33 continue;
34 }
35
36 start_paddr = (pte & ~0xfff) & ~(0x1ULL << 63);
37 if (cpu_physical_memory_is_io(start_paddr)) {
38 /* I/O region */
39 continue;
40 }
41
bff63471 42 start_vaddr = start_line_addr | ((i & 0x1ff) << 12);
fae001f5
WC
43 memory_mapping_list_add_merge_sorted(list, start_paddr,
44 start_vaddr, 1 << 12);
45 }
46}
47
48/* 32-bit Paging */
fdfba1a2 49static void walk_pte2(MemoryMappingList *list, AddressSpace *as,
a8170e5e 50 hwaddr pte_start_addr, int32_t a20_mask,
fae001f5
WC
51 target_ulong start_line_addr)
52{
a8170e5e 53 hwaddr pte_addr, start_paddr;
fae001f5
WC
54 uint32_t pte;
55 target_ulong start_vaddr;
56 int i;
57
58 for (i = 0; i < 1024; i++) {
59 pte_addr = (pte_start_addr + i * 4) & a20_mask;
42874d3a 60 pte = address_space_ldl(as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL);
fae001f5
WC
61 if (!(pte & PG_PRESENT_MASK)) {
62 /* not present */
63 continue;
64 }
65
66 start_paddr = pte & ~0xfff;
67 if (cpu_physical_memory_is_io(start_paddr)) {
68 /* I/O region */
69 continue;
70 }
71
72 start_vaddr = start_line_addr | ((i & 0x3ff) << 12);
73 memory_mapping_list_add_merge_sorted(list, start_paddr,
74 start_vaddr, 1 << 12);
75 }
76}
77
78/* PAE Paging or IA-32e Paging */
00fdef65 79#define PLM4_ADDR_MASK 0xffffffffff000ULL /* selects bits 51:12 */
fbc2ed95 80
fdfba1a2
EI
81static void walk_pde(MemoryMappingList *list, AddressSpace *as,
82 hwaddr pde_start_addr,
fae001f5
WC
83 int32_t a20_mask, target_ulong start_line_addr)
84{
a8170e5e 85 hwaddr pde_addr, pte_start_addr, start_paddr;
fae001f5
WC
86 uint64_t pde;
87 target_ulong line_addr, start_vaddr;
88 int i;
89
90 for (i = 0; i < 512; i++) {
91 pde_addr = (pde_start_addr + i * 8) & a20_mask;
42874d3a 92 pde = address_space_ldq(as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL);
fae001f5
WC
93 if (!(pde & PG_PRESENT_MASK)) {
94 /* not present */
95 continue;
96 }
97
98 line_addr = start_line_addr | ((i & 0x1ff) << 21);
99 if (pde & PG_PSE_MASK) {
100 /* 2 MB page */
101 start_paddr = (pde & ~0x1fffff) & ~(0x1ULL << 63);
102 if (cpu_physical_memory_is_io(start_paddr)) {
103 /* I/O region */
104 continue;
105 }
106 start_vaddr = line_addr;
107 memory_mapping_list_add_merge_sorted(list, start_paddr,
108 start_vaddr, 1 << 21);
109 continue;
110 }
111
fbc2ed95 112 pte_start_addr = (pde & PLM4_ADDR_MASK) & a20_mask;
fdfba1a2 113 walk_pte(list, as, pte_start_addr, a20_mask, line_addr);
fae001f5
WC
114 }
115}
116
117/* 32-bit Paging */
fdfba1a2 118static void walk_pde2(MemoryMappingList *list, AddressSpace *as,
a8170e5e 119 hwaddr pde_start_addr, int32_t a20_mask,
fae001f5
WC
120 bool pse)
121{
6ad53bdf 122 hwaddr pde_addr, pte_start_addr, start_paddr, high_paddr;
fae001f5
WC
123 uint32_t pde;
124 target_ulong line_addr, start_vaddr;
125 int i;
126
127 for (i = 0; i < 1024; i++) {
128 pde_addr = (pde_start_addr + i * 4) & a20_mask;
42874d3a 129 pde = address_space_ldl(as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL);
fae001f5
WC
130 if (!(pde & PG_PRESENT_MASK)) {
131 /* not present */
132 continue;
133 }
134
135 line_addr = (((unsigned int)i & 0x3ff) << 22);
136 if ((pde & PG_PSE_MASK) && pse) {
6ad53bdf
WC
137 /*
138 * 4 MB page:
139 * bits 39:32 are bits 20:13 of the PDE
140 * bit3 31:22 are bits 31:22 of the PDE
141 */
142 high_paddr = ((hwaddr)(pde & 0x1fe000) << 19);
143 start_paddr = (pde & ~0x3fffff) | high_paddr;
fae001f5
WC
144 if (cpu_physical_memory_is_io(start_paddr)) {
145 /* I/O region */
146 continue;
147 }
148 start_vaddr = line_addr;
149 memory_mapping_list_add_merge_sorted(list, start_paddr,
150 start_vaddr, 1 << 22);
151 continue;
152 }
153
154 pte_start_addr = (pde & ~0xfff) & a20_mask;
fdfba1a2 155 walk_pte2(list, as, pte_start_addr, a20_mask, line_addr);
fae001f5
WC
156 }
157}
158
159/* PAE Paging */
fdfba1a2 160static void walk_pdpe2(MemoryMappingList *list, AddressSpace *as,
a8170e5e 161 hwaddr pdpe_start_addr, int32_t a20_mask)
fae001f5 162{
a8170e5e 163 hwaddr pdpe_addr, pde_start_addr;
fae001f5
WC
164 uint64_t pdpe;
165 target_ulong line_addr;
166 int i;
167
168 for (i = 0; i < 4; i++) {
169 pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
42874d3a 170 pdpe = address_space_ldq(as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL);
fae001f5
WC
171 if (!(pdpe & PG_PRESENT_MASK)) {
172 /* not present */
173 continue;
174 }
175
176 line_addr = (((unsigned int)i & 0x3) << 30);
177 pde_start_addr = (pdpe & ~0xfff) & a20_mask;
fdfba1a2 178 walk_pde(list, as, pde_start_addr, a20_mask, line_addr);
fae001f5
WC
179 }
180}
181
182#ifdef TARGET_X86_64
183/* IA-32e Paging */
fdfba1a2 184static void walk_pdpe(MemoryMappingList *list, AddressSpace *as,
a8170e5e 185 hwaddr pdpe_start_addr, int32_t a20_mask,
fae001f5
WC
186 target_ulong start_line_addr)
187{
a8170e5e 188 hwaddr pdpe_addr, pde_start_addr, start_paddr;
fae001f5
WC
189 uint64_t pdpe;
190 target_ulong line_addr, start_vaddr;
191 int i;
192
193 for (i = 0; i < 512; i++) {
194 pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
42874d3a 195 pdpe = address_space_ldq(as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL);
fae001f5
WC
196 if (!(pdpe & PG_PRESENT_MASK)) {
197 /* not present */
198 continue;
199 }
200
201 line_addr = start_line_addr | ((i & 0x1ffULL) << 30);
202 if (pdpe & PG_PSE_MASK) {
203 /* 1 GB page */
204 start_paddr = (pdpe & ~0x3fffffff) & ~(0x1ULL << 63);
205 if (cpu_physical_memory_is_io(start_paddr)) {
206 /* I/O region */
207 continue;
208 }
209 start_vaddr = line_addr;
210 memory_mapping_list_add_merge_sorted(list, start_paddr,
211 start_vaddr, 1 << 30);
212 continue;
213 }
214
fbc2ed95 215 pde_start_addr = (pdpe & PLM4_ADDR_MASK) & a20_mask;
fdfba1a2 216 walk_pde(list, as, pde_start_addr, a20_mask, line_addr);
fae001f5
WC
217 }
218}
219
220/* IA-32e Paging */
fdfba1a2 221static void walk_pml4e(MemoryMappingList *list, AddressSpace *as,
6c7c3c21
KS
222 hwaddr pml4e_start_addr, int32_t a20_mask,
223 target_ulong start_line_addr)
fae001f5 224{
a8170e5e 225 hwaddr pml4e_addr, pdpe_start_addr;
fae001f5
WC
226 uint64_t pml4e;
227 target_ulong line_addr;
228 int i;
229
230 for (i = 0; i < 512; i++) {
231 pml4e_addr = (pml4e_start_addr + i * 8) & a20_mask;
42874d3a
PM
232 pml4e = address_space_ldq(as, pml4e_addr, MEMTXATTRS_UNSPECIFIED,
233 NULL);
fae001f5
WC
234 if (!(pml4e & PG_PRESENT_MASK)) {
235 /* not present */
236 continue;
237 }
238
6c7c3c21 239 line_addr = start_line_addr | ((i & 0x1ffULL) << 39);
fbc2ed95 240 pdpe_start_addr = (pml4e & PLM4_ADDR_MASK) & a20_mask;
fdfba1a2 241 walk_pdpe(list, as, pdpe_start_addr, a20_mask, line_addr);
fae001f5
WC
242 }
243}
6c7c3c21
KS
244
245static void walk_pml5e(MemoryMappingList *list, AddressSpace *as,
246 hwaddr pml5e_start_addr, int32_t a20_mask)
247{
248 hwaddr pml5e_addr, pml4e_start_addr;
249 uint64_t pml5e;
250 target_ulong line_addr;
251 int i;
252
253 for (i = 0; i < 512; i++) {
254 pml5e_addr = (pml5e_start_addr + i * 8) & a20_mask;
255 pml5e = address_space_ldq(as, pml5e_addr, MEMTXATTRS_UNSPECIFIED,
256 NULL);
257 if (!(pml5e & PG_PRESENT_MASK)) {
258 /* not present */
259 continue;
260 }
261
262 line_addr = (0x7fULL << 57) | ((i & 0x1ffULL) << 48);
263 pml4e_start_addr = (pml5e & PLM4_ADDR_MASK) & a20_mask;
264 walk_pml4e(list, as, pml4e_start_addr, a20_mask, line_addr);
265 }
266}
fae001f5
WC
267#endif
268
a23bbfda
AF
269void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list,
270 Error **errp)
fae001f5 271{
a23bbfda
AF
272 X86CPU *cpu = X86_CPU(cs);
273 CPUX86State *env = &cpu->env;
c8bc83a4 274 int32_t a20_mask;
a23bbfda
AF
275
276 if (!cpu_paging_enabled(cs)) {
fae001f5 277 /* paging is disabled */
a23bbfda 278 return;
fae001f5
WC
279 }
280
c8bc83a4 281 a20_mask = x86_get_a20_mask(env);
fae001f5
WC
282 if (env->cr[4] & CR4_PAE_MASK) {
283#ifdef TARGET_X86_64
284 if (env->hflags & HF_LMA_MASK) {
6c7c3c21
KS
285 if (env->cr[4] & CR4_LA57_MASK) {
286 hwaddr pml5e_addr;
287
c8bc83a4
PB
288 pml5e_addr = (env->cr[3] & PLM4_ADDR_MASK) & a20_mask;
289 walk_pml5e(list, cs->as, pml5e_addr, a20_mask);
6c7c3c21
KS
290 } else {
291 hwaddr pml4e_addr;
fae001f5 292
c8bc83a4
PB
293 pml4e_addr = (env->cr[3] & PLM4_ADDR_MASK) & a20_mask;
294 walk_pml4e(list, cs->as, pml4e_addr, a20_mask,
6c7c3c21
KS
295 0xffffULL << 48);
296 }
fae001f5
WC
297 } else
298#endif
299 {
a8170e5e 300 hwaddr pdpe_addr;
fae001f5 301
c8bc83a4
PB
302 pdpe_addr = (env->cr[3] & ~0x1f) & a20_mask;
303 walk_pdpe2(list, cs->as, pdpe_addr, a20_mask);
fae001f5
WC
304 }
305 } else {
a8170e5e 306 hwaddr pde_addr;
fae001f5
WC
307 bool pse;
308
c8bc83a4 309 pde_addr = (env->cr[3] & ~0xfff) & a20_mask;
fae001f5 310 pse = !!(env->cr[4] & CR4_PSE_MASK);
c8bc83a4 311 walk_pde2(list, cs->as, pde_addr, a20_mask, pse);
fae001f5 312 }
fae001f5 313}
31a2207a 314