arch/x86/xen/setup.c

/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include "xen-ops.h"
#include "vdso.h"
#include "p2m.h"
#include "mmu.h"

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
#define REMAP_SIZE	(P2M_PER_PAGE - 3)
static struct {
	unsigned long	next_area_mfn;
	unsigned long	target_pfn;
	unsigned long	size;
	unsigned long	mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;

/*
 * The maximum amount of extra memory compared to the base size. The
 * main scaling factor is the size of struct page. At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)

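/*
 * Record a block of "extra" memory: start a new entry in xen_extra_mem[]
 * or, if the block is contiguous with an existing entry, extend that
 * entry. The range is memblock_reserve()d so it is not treated as
 * usable RAM until it is explicitly handed to the allocator.
 */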
static void __init xen_add_extra_mem(u64 start, u64 size)
{
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].size == 0) {
			xen_extra_mem[i].start = start;
			xen_extra_mem[i].size = size;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
			xen_extra_mem[i].size += size;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(start, size);
}

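/*
 * Remove a range from the extra memory regions: trim the matching entry
 * at its start or end, or split it in two if the range lies in the
 * middle. The removed range is handed back to memblock.
 */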
static void __init xen_del_extra_mem(u64 start, u64 size)
{
	int i;
	u64 start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start;
		size_r = xen_extra_mem[i].size;

		/* Start of region. */
		if (start_r == start) {
			BUG_ON(size > size_r);
			xen_extra_mem[i].start += size;
			xen_extra_mem[i].size -= size;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start + size) {
			BUG_ON(size > size_r);
			xen_extra_mem[i].size -= size;
			break;
		}
		/* Mid of region. */
		if (start > start_r && start < start_r + size_r) {
			BUG_ON(start + size > start_r + size_r);
			xen_extra_mem[i].size = start - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start + size, start_r + size_r -
					  (start + size));
			break;
		}
	}
	memblock_free(start, size);
}

/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
	int i;
	phys_addr_t addr = PFN_PHYS(pfn);

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (addr >= xen_extra_mem[i].start &&
		    addr < xen_extra_mem[i].start + xen_extra_mem[i].size)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}

/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
	unsigned long pfn, pfn_s, pfn_e;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].size)
			continue;
		pfn_s = PFN_DOWN(xen_extra_mem[i].start);
		pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(
	const struct e820entry *list, size_t map_size,
	unsigned long *min_pfn)
{
	const struct e820entry *entry;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after this */
		if (e_pfn < *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/* If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}

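/*
 * Hand a single machine frame back to the hypervisor via
 * XENMEM_decrease_reservation. Returns the number of extents released
 * (1 on success).
 */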
static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
	unsigned long end_pfn, unsigned long nr_pages, unsigned long *released)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		if (ret == 1) {
			(*released)++;
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
		} else
			break;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		BUG();
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}

	/* Update kernel mapping, but not for highmem. */
	if (pfn >= PFN_UP(__pa(high_memory - 1)))
		return;

	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned int i, chunk;

	WARN_ON(size == 0);

	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));

	mfn_save = virt_to_mfn(buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}

/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	const struct e820entry *list, size_t map_size, unsigned long start_pfn,
	unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
	unsigned long *released, unsigned long *remapped)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(list, map_size,
						      &remap_pfn);
		if (!remap_range_size) {
			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
				cur_pfn + left, nr_pages, released);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
		*remapped += size;
	}

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	return remap_pfn;
}

static void __init xen_set_identity_and_remap(
	const struct e820entry *list, size_t map_size, unsigned long nr_pages,
	unsigned long *released, unsigned long *remapped)
{
	phys_addr_t start = 0;
	unsigned long last_pfn = nr_pages;
	const struct e820entry *entry;
	unsigned long num_released = 0;
	unsigned long num_remapped = 0;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * remap the memory in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping. This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0, entry = list; i < map_size; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_RAM || i == map_size - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				last_pfn = xen_set_identity_and_remap_chunk(
						list, map_size, start_pfn,
						end_pfn, nr_pages, last_pfn,
						&num_released, &num_remapped);
			start = end;
		}
	}

	*released = num_released;
	*remapped = num_remapped;

	pr_info("Released %ld page(s)\n", num_released);
}

/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn remap to which pfn) is contained in the
 * to be remapped memory itself in a linked list anchored at xen_remap_mfn.
 * This scheme allows the different chunks to be remapped in arbitrary
 * order while the resulting mapping will be independent of the order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn(buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			mfn = xen_remap_buf.mfns[i];
			xen_update_mem_tables(pfn, mfn);
			remapped++;
			pfn++;
		}
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}

		mfn = xen_remap_mfn;
		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);
}

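/*
 * Return an upper bound on the number of pages this domain may use,
 * never exceeding MAX_DOMAIN_PAGES.
 */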
static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages = MAX_DOMAIN_PAGES;
	domid_t domid = DOMID_SELF;
	int ret;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, MAX_DOMAIN_PAGES);
}

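/*
 * Add a region to the kernel e820 map, shrinking RAM regions inward to
 * whole-page boundaries so no partial page is reported as RAM.
 */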
static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
{
	u64 end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((u64)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}

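/*
 * Treat E820_UNUSABLE entries as RAM in dom0's machine memory map; see
 * the comment in xen_memory_setup() for why this is done.
 */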
void xen_ignore_unusable(struct e820entry *list, size_t map_size)
{
	struct e820entry *entry;
	unsigned int i;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		if (entry->type == E820_UNUSABLE)
			entry->type = E820_RAM;
	}
}

/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	unsigned long remapped_pages;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);

	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable(map, memmap.nr_entries);

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
				   &xen_released_pages, &remapped_pages);

	extra_pages += xen_released_pages;
	extra_pages += remapped_pages;

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size. On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);
	i = 0;
	while (i < memmap.nr_entries) {
		u64 addr = map[i].addr;
		u64 size = map[i].size;
		u32 type = map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				size = min(size, mem_end - addr);
			} else if (extra_pages) {
				size = min(size, (u64)extra_pages * PAGE_SIZE);
				extra_pages -= size / PAGE_SIZE;
				xen_add_extra_mem(addr, size);
				xen_max_p2m_pfn = PFN_DOWN(addr + size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, size, type);

		map[i].addr += size;
		map[i].size -= size;
		if (map[i].size == 0)
			i++;
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 *
	 * PFNs above MAX_P2M_PFN are considered identity mapped as
	 * well.
	 */
	set_phys_range_identity(map[i-1].addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 * We tried to make the memblock_reserve more selective so
	 * that it would be clear what region is reserved. Sadly we ran
	 * into the problem wherein on a 64-bit hypervisor with a 32-bit
	 * initial domain, the pt_base has the cr3 value which is not
	 * necessarily where the pagetable starts! As Jan put it: "
	 * Actually, the adjustment turns out to be correct: The page
	 * tables for a 32-on-64 dom0 get allocated in the order "first L1",
	 * "first L2", "first L3", so the offset to the page table base is
	 * indeed 2. When reading xen/include/public/xen.h's comment
	 * very strictly, this is not a violation (since there nothing is said
	 * that the first thing in the page table space is pointed to by
	 * pt_base; I admit that this seems to be implied though, namely
	 * do I think that it is implied that the page table space is the
	 * range [pt_base, pt_base + nr_pt_frames), whereas that
	 * range here indeed is [pt_base - 2, pt_base - 2 + nr_pt_frames),
	 * which - without a priori knowledge - the kernel would have
	 * difficulty to figure out)." - so let's just fall back to the
	 * easy way and reserve the whole region.
	 */
	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	return "Xen";
}

/*
 * Machine specific memory setup for auto-translated guests.
 */
char * __init xen_auto_xlated_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	struct xen_memory_map memmap;
	int i;
	int rc;

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
	if (rc < 0)
		panic("No memory map (%d)\n", rc);

	sanitize_e820_map(map, ARRAY_SIZE(map), &memmap.nr_entries);

	for (i = 0; i < memmap.nr_entries; i++)
		e820_add_region(map[i].addr, map[i].size, map[i].type);

	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	return "Xen";
}

/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	/*
	 * This could be called before selected_vdso32 is initialized, so
	 * just fiddle with both possible images. vdso_image_32_syscall
	 * can't be selected, since it only exists on 64-bit systems.
	 */
	u32 *mask;
	mask = vdso_image_32_int80.data +
		vdso_image_32_int80.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
	mask = vdso_image_32_sysenter.data +
		vdso_image_32_sysenter.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}

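/*
 * Register an entry point of the given type with the hypervisor; events
 * are masked while the callback runs (CALLBACKF_mask_events).
 */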
static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

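/*
 * Hook up the SYSENTER entry point if the CPU supports it; if Xen
 * refuses the callback, clear the feature bit so the fast path is not
 * used.
 */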
void xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}

void xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}

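/*
 * PV-specific setup: enable the vm-assist features the PV MMU relies
 * on, register the event and failsafe callbacks, then hook up the fast
 * system call entry points.
 */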
void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
	fiddle_vdso();
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}