]> git.proxmox.com Git - qemu.git/blob - target-i386/arch_memory_mapping.c
Merge remote-tracking branch 'qmp/queue/qmp' into staging
[qemu.git] / target-i386 / arch_memory_mapping.c
1 /*
2 * i386 memory mapping
3 *
4 * Copyright Fujitsu, Corp. 2011, 2012
5 *
6 * Authors:
7 * Wen Congyang <wency@cn.fujitsu.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 */
13
14 #include "cpu.h"
15 #include "cpu-all.h"
16
17 /* PAE Paging or IA-32e Paging */
18 static void walk_pte(MemoryMappingList *list, target_phys_addr_t pte_start_addr,
19 int32_t a20_mask, target_ulong start_line_addr)
20 {
21 target_phys_addr_t pte_addr, start_paddr;
22 uint64_t pte;
23 target_ulong start_vaddr;
24 int i;
25
26 for (i = 0; i < 512; i++) {
27 pte_addr = (pte_start_addr + i * 8) & a20_mask;
28 pte = ldq_phys(pte_addr);
29 if (!(pte & PG_PRESENT_MASK)) {
30 /* not present */
31 continue;
32 }
33
34 start_paddr = (pte & ~0xfff) & ~(0x1ULL << 63);
35 if (cpu_physical_memory_is_io(start_paddr)) {
36 /* I/O region */
37 continue;
38 }
39
40 start_vaddr = start_line_addr | ((i & 0x1fff) << 12);
41 memory_mapping_list_add_merge_sorted(list, start_paddr,
42 start_vaddr, 1 << 12);
43 }
44 }
45
46 /* 32-bit Paging */
47 static void walk_pte2(MemoryMappingList *list,
48 target_phys_addr_t pte_start_addr, int32_t a20_mask,
49 target_ulong start_line_addr)
50 {
51 target_phys_addr_t pte_addr, start_paddr;
52 uint32_t pte;
53 target_ulong start_vaddr;
54 int i;
55
56 for (i = 0; i < 1024; i++) {
57 pte_addr = (pte_start_addr + i * 4) & a20_mask;
58 pte = ldl_phys(pte_addr);
59 if (!(pte & PG_PRESENT_MASK)) {
60 /* not present */
61 continue;
62 }
63
64 start_paddr = pte & ~0xfff;
65 if (cpu_physical_memory_is_io(start_paddr)) {
66 /* I/O region */
67 continue;
68 }
69
70 start_vaddr = start_line_addr | ((i & 0x3ff) << 12);
71 memory_mapping_list_add_merge_sorted(list, start_paddr,
72 start_vaddr, 1 << 12);
73 }
74 }
75
76 /* PAE Paging or IA-32e Paging */
77 static void walk_pde(MemoryMappingList *list, target_phys_addr_t pde_start_addr,
78 int32_t a20_mask, target_ulong start_line_addr)
79 {
80 target_phys_addr_t pde_addr, pte_start_addr, start_paddr;
81 uint64_t pde;
82 target_ulong line_addr, start_vaddr;
83 int i;
84
85 for (i = 0; i < 512; i++) {
86 pde_addr = (pde_start_addr + i * 8) & a20_mask;
87 pde = ldq_phys(pde_addr);
88 if (!(pde & PG_PRESENT_MASK)) {
89 /* not present */
90 continue;
91 }
92
93 line_addr = start_line_addr | ((i & 0x1ff) << 21);
94 if (pde & PG_PSE_MASK) {
95 /* 2 MB page */
96 start_paddr = (pde & ~0x1fffff) & ~(0x1ULL << 63);
97 if (cpu_physical_memory_is_io(start_paddr)) {
98 /* I/O region */
99 continue;
100 }
101 start_vaddr = line_addr;
102 memory_mapping_list_add_merge_sorted(list, start_paddr,
103 start_vaddr, 1 << 21);
104 continue;
105 }
106
107 pte_start_addr = (pde & ~0xfff) & a20_mask;
108 walk_pte(list, pte_start_addr, a20_mask, line_addr);
109 }
110 }
111
112 /* 32-bit Paging */
113 static void walk_pde2(MemoryMappingList *list,
114 target_phys_addr_t pde_start_addr, int32_t a20_mask,
115 bool pse)
116 {
117 target_phys_addr_t pde_addr, pte_start_addr, start_paddr;
118 uint32_t pde;
119 target_ulong line_addr, start_vaddr;
120 int i;
121
122 for (i = 0; i < 1024; i++) {
123 pde_addr = (pde_start_addr + i * 4) & a20_mask;
124 pde = ldl_phys(pde_addr);
125 if (!(pde & PG_PRESENT_MASK)) {
126 /* not present */
127 continue;
128 }
129
130 line_addr = (((unsigned int)i & 0x3ff) << 22);
131 if ((pde & PG_PSE_MASK) && pse) {
132 /* 4 MB page */
133 start_paddr = (pde & ~0x3fffff) | ((pde & 0x1fe000) << 19);
134 if (cpu_physical_memory_is_io(start_paddr)) {
135 /* I/O region */
136 continue;
137 }
138 start_vaddr = line_addr;
139 memory_mapping_list_add_merge_sorted(list, start_paddr,
140 start_vaddr, 1 << 22);
141 continue;
142 }
143
144 pte_start_addr = (pde & ~0xfff) & a20_mask;
145 walk_pte2(list, pte_start_addr, a20_mask, line_addr);
146 }
147 }
148
149 /* PAE Paging */
150 static void walk_pdpe2(MemoryMappingList *list,
151 target_phys_addr_t pdpe_start_addr, int32_t a20_mask)
152 {
153 target_phys_addr_t pdpe_addr, pde_start_addr;
154 uint64_t pdpe;
155 target_ulong line_addr;
156 int i;
157
158 for (i = 0; i < 4; i++) {
159 pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
160 pdpe = ldq_phys(pdpe_addr);
161 if (!(pdpe & PG_PRESENT_MASK)) {
162 /* not present */
163 continue;
164 }
165
166 line_addr = (((unsigned int)i & 0x3) << 30);
167 pde_start_addr = (pdpe & ~0xfff) & a20_mask;
168 walk_pde(list, pde_start_addr, a20_mask, line_addr);
169 }
170 }
171
172 #ifdef TARGET_X86_64
173 /* IA-32e Paging */
174 static void walk_pdpe(MemoryMappingList *list,
175 target_phys_addr_t pdpe_start_addr, int32_t a20_mask,
176 target_ulong start_line_addr)
177 {
178 target_phys_addr_t pdpe_addr, pde_start_addr, start_paddr;
179 uint64_t pdpe;
180 target_ulong line_addr, start_vaddr;
181 int i;
182
183 for (i = 0; i < 512; i++) {
184 pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
185 pdpe = ldq_phys(pdpe_addr);
186 if (!(pdpe & PG_PRESENT_MASK)) {
187 /* not present */
188 continue;
189 }
190
191 line_addr = start_line_addr | ((i & 0x1ffULL) << 30);
192 if (pdpe & PG_PSE_MASK) {
193 /* 1 GB page */
194 start_paddr = (pdpe & ~0x3fffffff) & ~(0x1ULL << 63);
195 if (cpu_physical_memory_is_io(start_paddr)) {
196 /* I/O region */
197 continue;
198 }
199 start_vaddr = line_addr;
200 memory_mapping_list_add_merge_sorted(list, start_paddr,
201 start_vaddr, 1 << 30);
202 continue;
203 }
204
205 pde_start_addr = (pdpe & ~0xfff) & a20_mask;
206 walk_pde(list, pde_start_addr, a20_mask, line_addr);
207 }
208 }
209
210 /* IA-32e Paging */
211 static void walk_pml4e(MemoryMappingList *list,
212 target_phys_addr_t pml4e_start_addr, int32_t a20_mask)
213 {
214 target_phys_addr_t pml4e_addr, pdpe_start_addr;
215 uint64_t pml4e;
216 target_ulong line_addr;
217 int i;
218
219 for (i = 0; i < 512; i++) {
220 pml4e_addr = (pml4e_start_addr + i * 8) & a20_mask;
221 pml4e = ldq_phys(pml4e_addr);
222 if (!(pml4e & PG_PRESENT_MASK)) {
223 /* not present */
224 continue;
225 }
226
227 line_addr = ((i & 0x1ffULL) << 39) | (0xffffULL << 48);
228 pdpe_start_addr = (pml4e & ~0xfff) & a20_mask;
229 walk_pdpe(list, pdpe_start_addr, a20_mask, line_addr);
230 }
231 }
232 #endif
233
/*
 * Populate @list with the guest's virtual-to-physical mappings by
 * walking the page tables rooted at CR3, choosing the walker that
 * matches the active paging mode.  Returns 0; when paging is disabled
 * there is nothing to walk and @list is left untouched.
 */
int cpu_get_memory_mapping(MemoryMappingList *list, CPUArchState *env)
{
    if (!cpu_paging_enabled(env)) {
        /* paging is disabled */
        return 0;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* IA-32e (long) mode: 4-level walk starting at the PML4 */
            target_phys_addr_t pml4e_addr;

            pml4e_addr = (env->cr[3] & ~0xfff) & env->a20_mask;
            walk_pml4e(list, pml4e_addr, env->a20_mask);
        } else
#endif
        {
            /* PAE mode: CR3 holds a 32-byte-aligned PDPT pointer,
             * hence the ~0x1f mask instead of ~0xfff. */
            target_phys_addr_t pdpe_addr;

            pdpe_addr = (env->cr[3] & ~0x1f) & env->a20_mask;
            walk_pdpe2(list, pdpe_addr, env->a20_mask);
        }
    } else {
        /* legacy 32-bit paging; 4 MB pages only when CR4.PSE is set */
        target_phys_addr_t pde_addr;
        bool pse;

        pde_addr = (env->cr[3] & ~0xfff) & env->a20_mask;
        pse = !!(env->cr[4] & CR4_PSE_MASK);
        walk_pde2(list, pde_addr, env->a20_mask, pse);
    }

    return 0;
}
267
268 bool cpu_paging_enabled(CPUArchState *env)
269 {
270 return env->cr[0] & CR0_PG_MASK;
271 }