/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "vdso.h"
#include "p2m.h"
#include "mmu.h"

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/* E820 map used during setting up memory. */
static struct e820entry xen_e820_map[E820MAX] __initdata;
static u32 xen_e820_map_entries __initdata;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
#define REMAP_SIZE (P2M_PER_PAGE - 3)
static struct {
	unsigned long next_area_mfn;
	unsigned long target_pfn;
	unsigned long size;
	unsigned long mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
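
/*
 * Note: REMAP_SIZE is P2M_PER_PAGE minus 3 because the three bookkeeping
 * fields above (next_area_mfn, target_pfn, size) occupy the first three
 * unsigned longs of the buffer page, leaving room for exactly REMAP_SIZE
 * saved MFNs per page. xen_remap_mfn anchors the resulting linked list of
 * remap pages.
 */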

/*
 * The maximum amount of extra memory compared to the base size. The
 * main scaling factor is the size of struct page. At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO (10)
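
/*
 * Illustration: with a 1 GiB initial allocation, at most 10 GiB of extra
 * memory ranges will be claimed. The clamp in xen_memory_setup() below
 * enforces this via min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
 * extra_pages).
 */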

static void __init xen_add_extra_mem(phys_addr_t start, phys_addr_t size)
{
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].size == 0) {
			xen_extra_mem[i].start = start;
			xen_extra_mem[i].size = size;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
			xen_extra_mem[i].size += size;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(start, size);
}

static void __init xen_del_extra_mem(phys_addr_t start, phys_addr_t size)
{
	int i;
	phys_addr_t start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start;
		size_r = xen_extra_mem[i].size;

		/* Start of region. */
		if (start_r == start) {
			BUG_ON(size > size_r);
			xen_extra_mem[i].start += size;
			xen_extra_mem[i].size -= size;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start + size) {
			BUG_ON(size > size_r);
			xen_extra_mem[i].size -= size;
			break;
		}
		/* Mid of region. */
		if (start > start_r && start < start_r + size_r) {
			BUG_ON(start + size > start_r + size_r);
			xen_extra_mem[i].size = start - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start + size, start_r + size_r -
					  (start + size));
			break;
		}
	}
	memblock_free(start, size);
}

/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
	int i;
	phys_addr_t addr = PFN_PHYS(pfn);

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (addr >= xen_extra_mem[i].start &&
		    addr < xen_extra_mem[i].start + xen_extra_mem[i].size)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}

/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
	unsigned long pfn, pfn_s, pfn_e;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].size)
			continue;
		pfn_s = PFN_DOWN(xen_extra_mem[i].start);
		pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
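/*
 * Example (illustrative): for a single RAM entry spanning pfns 0x100-0x800,
 * a *min_pfn of 0x200 returns 0x600 and leaves *min_pfn unchanged, while a
 * *min_pfn of 0x50 returns 0x700 and updates *min_pfn to 0x100.
 */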
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
	const struct e820entry *entry = xen_e820_map;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after this */
		if (e_pfn < *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/* If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}
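/*
 * Hand a single page (extent_order 0) back to the hypervisor. The hypercall
 * returns the number of extents released, so 1 indicates success.
 */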
static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
			unsigned long end_pfn, unsigned long nr_pages)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		if (ret == 1) {
			xen_released_pages++;
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
		} else
			break;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		BUG();
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}

	/* Update kernel mapping, but not for highmem. */
	if (pfn >= PFN_UP(__pa(high_memory - 1)))
		return;

	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
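/*
 * Layout of one remap page (illustrative):
 *
 *   word 0                   next_area_mfn - link to the previous remap page
 *   word 1                   target_pfn    - first pfn this chunk remaps to
 *   word 2                   size          - number of saved MFNs (<= REMAP_SIZE)
 *   words 3..P2M_PER_PAGE-1  the saved MFNs of the chunk
 */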
static void __init xen_do_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned int i, chunk;

	WARN_ON(size == 0);

	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));

	mfn_save = virt_to_mfn(buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}

/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 * 1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 * 2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pfn)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(&remap_pfn);
		if (!remap_range_size) {
			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
						cur_pfn + left, nr_pages);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
	}

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	return remap_pfn;
}

static void __init xen_set_identity_and_remap(unsigned long nr_pages)
{
	phys_addr_t start = 0;
	unsigned long last_pfn = nr_pages;
	const struct e820entry *entry = xen_e820_map;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * remap the memory in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping. This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_RAM || i == xen_e820_map_entries - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				last_pfn = xen_set_identity_and_remap_chunk(
						start_pfn, end_pfn, nr_pages,
						last_pfn);
			start = end;
		}
	}

	pr_info("Released %ld page(s)\n", xen_released_pages);
}

/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn is remapped to which pfn) is contained in
 * the memory to be remapped itself, in a linked list anchored at
 * xen_remap_mfn. This scheme allows the different chunks to be remapped in
 * arbitrary order while keeping the resulting mapping independent of that
 * order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn(buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			mfn = xen_remap_buf.mfns[i];
			xen_update_mem_tables(pfn, mfn);
			remapped++;
			pfn++;
		}
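		/*
		 * Chunks may arrive in any order; coalesce ranges that turn
		 * out to be adjacent and only drop a completed contiguous
		 * range from the extra memory accounting in one go.
		 */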
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}

		mfn = xen_remap_mfn;
		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);
}

static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages = MAX_DOMAIN_PAGES;
	domid_t domid = DOMID_SELF;
	int ret;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, MAX_DOMAIN_PAGES);
}

static void __init xen_align_and_add_e820_region(phys_addr_t start,
						 phys_addr_t size, int type)
{
	phys_addr_t end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((phys_addr_t)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}

static void __init xen_ignore_unusable(void)
{
	struct e820entry *entry = xen_e820_map;
	unsigned int i;

	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		if (entry->type == E820_UNUSABLE)
			entry->type = E820_RAM;
	}
}
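/*
 * Count the pages below max_pfn that are covered by non-RAM E820 entries;
 * those pages have to be remapped above max_pfn, so this is the amount of
 * extra memory needed to receive them.
 */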
static unsigned long __init xen_count_remap_pages(unsigned long max_pfn)
{
	unsigned long extra = 0;
	const struct e820entry *entry = xen_e820_map;
	int i;

	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		unsigned long start_pfn = PFN_DOWN(entry->addr);
		unsigned long end_pfn = PFN_UP(entry->addr + entry->size);

		if (start_pfn >= max_pfn)
			break;
		if (entry->type == E820_RAM)
			continue;
		if (end_pfn >= max_pfn)
			end_pfn = max_pfn;
		extra += end_pfn - start_pfn;
	}

	return extra;
}
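/*
 * Check whether [start, start + size) is fully covered by a single RAM
 * entry of the E820 map; return true if it is not, i.e. if placing
 * preallocated data there would conflict with the target map.
 */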
bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
	struct e820entry *entry;
	unsigned mapcnt;
	phys_addr_t end;

	if (!size)
		return false;

	end = start + size;
	entry = xen_e820_map;

	for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++) {
		if (entry->type == E820_RAM && entry->addr <= start &&
		    (entry->addr + entry->size) >= end)
			return false;

		entry++;
	}

	return true;
}

/*
 * Find a free area in physical memory that is not yet reserved and is
 * compliant with the E820 map.
 * Used to relocate pre-allocated areas like the initrd or the p2m list
 * which are in conflict with the E820 map to be used.
 * In case no area is found, return 0. Otherwise return the physical address
 * of the area, which is already reserved for convenience.
 */
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
	unsigned mapcnt;
	phys_addr_t addr, start;
	struct e820entry *entry = xen_e820_map;

	for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++, entry++) {
		if (entry->type != E820_RAM || entry->size < size)
			continue;
		start = entry->addr;
		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
			if (!memblock_is_reserved(addr))
				continue;
			start = addr + PAGE_SIZE;
			if (start + size > entry->addr + entry->size)
				break;
		}
		if (addr >= start + size) {
			memblock_reserve(start, size);
			return start;
		}
	}

	return 0;
}

/*
 * Like memcpy, but with physical addresses for dest and src.
 */
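/*
 * The copy proceeds in chunks limited by the early fixmap window
 * (NR_FIX_BTMAPS pages), mapping source and destination with
 * early_memremap() around their respective page offsets.
 */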
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
				   phys_addr_t n)
{
	phys_addr_t dest_off, src_off, dest_len, src_len, len;
	void *from, *to;

	while (n) {
		dest_off = dest & ~PAGE_MASK;
		src_off = src & ~PAGE_MASK;
		dest_len = n;
		if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
			dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
		src_len = n;
		if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
			src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
		len = min(dest_len, src_len);
		to = early_memremap(dest - dest_off, dest_len + dest_off);
		from = early_memremap(src - src_off, src_len + src_off);
		memcpy(to, from, len);
		early_memunmap(to, dest_len + dest_off);
		early_memunmap(from, src_len + src_off);
		n -= len;
		dest += len;
		src += len;
	}
}

/*
 * Reserve Xen mfn_list.
 */
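/*
 * As the code below distinguishes: the mfn_list is either a virtual address
 * in the initially mapped kernel area (then nr_pages entries starting at its
 * physical address are reserved) or, for a linear p2m list, the physical
 * frames first_p2m_pfn..first_p2m_pfn + nr_p2m_frames supplied by the
 * hypervisor.
 */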
static void __init xen_reserve_xen_mfnlist(void)
{
	phys_addr_t start, size;

	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
		start = __pa(xen_start_info->mfn_list);
		size = PFN_ALIGN(xen_start_info->nr_pages *
				 sizeof(unsigned long));
	} else {
		start = PFN_PHYS(xen_start_info->first_p2m_pfn);
		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
	}

	if (!xen_is_e820_reserved(start, size)) {
		memblock_reserve(start, size);
		return;
	}

#ifdef CONFIG_X86_32
	/*
	 * Relocating the p2m on a 32 bit system to an arbitrary virtual
	 * address is not supported, so just give up.
	 */
	xen_raw_console_write("Xen hypervisor allocated p2m list conflicts with E820 map\n");
	BUG();
#else
	xen_relocate_p2m();
#endif
}

/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	unsigned long max_pfn = xen_start_info->nr_pages;
	phys_addr_t mem_end, addr, size, chunk_size;
	u32 type;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, xen_e820_map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		xen_e820_map[0].addr = 0ULL;
		xen_e820_map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		xen_e820_map[0].size += 8ULL << 20;
		xen_e820_map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);
	xen_e820_map_entries = memmap.nr_entries;

	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable();

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(xen_e820_map, xen_e820_map_entries,
			  &xen_e820_map_entries);

	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/* How many extra pages do we need due to remapping? */
	extra_pages += xen_count_remap_pages(max_pfn);

	/*
	 * Clamp the amount of extra memory to EXTRA_MEM_RATIO times the
	 * base size. On non-highmem systems, the base size is the full
	 * initial memory allocation; on highmem it is limited to the max
	 * size of lowmem, so that it doesn't get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);
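
	/*
	 * Walk the map and re-add each entry to the kernel's e820, splitting
	 * RAM entries where needed: the part below mem_end stays RAM, up to
	 * extra_pages worth above it becomes extra memory, and any remainder
	 * is marked unusable.
	 */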
	i = 0;
	addr = xen_e820_map[0].addr;
	size = xen_e820_map[0].size;
	while (i < xen_e820_map_entries) {
		chunk_size = size;
		type = xen_e820_map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				chunk_size = min(size, mem_end - addr);
			} else if (extra_pages) {
				chunk_size = min(size, PFN_PHYS(extra_pages));
				extra_pages -= PFN_DOWN(chunk_size);
				xen_add_extra_mem(addr, chunk_size);
				xen_max_p2m_pfn = PFN_DOWN(addr + chunk_size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, chunk_size, type);

		addr += chunk_size;
		size -= chunk_size;
		if (size == 0) {
			i++;
			if (i < xen_e820_map_entries) {
				addr = xen_e820_map[i].addr;
				size = xen_e820_map[i].size;
			}
		}
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 *
	 * PFNs above MAX_P2M_PFN are considered identity mapped as
	 * well.
	 */
	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	/*
	 * Check whether the kernel itself conflicts with the target E820 map.
	 * Failing now is better than running into weird problems later due
	 * to relocating (and even reusing) pages with kernel text or data.
	 */
	if (xen_is_e820_reserved(__pa_symbol(_text),
			__pa_symbol(__bss_stop) - __pa_symbol(_text))) {
		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
		BUG();
	}

	/*
	 * Check for a conflict of the hypervisor supplied page tables with
	 * the target E820 map.
	 */
	xen_pt_check_e820();

	xen_reserve_xen_mfnlist();

	/* Check for a conflict of the initrd with the target E820 map. */
	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
				 boot_params.hdr.ramdisk_size)) {
		phys_addr_t new_area, start, size;

		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
		if (!new_area) {
			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
			BUG();
		}

		start = boot_params.hdr.ramdisk_image;
		size = boot_params.hdr.ramdisk_size;
		xen_phys_memcpy(new_area, start, size);
		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
			start, start + size, new_area, new_area + size);
		memblock_free(start, size);
		boot_params.hdr.ramdisk_image = new_area;
		boot_params.ext_ramdisk_image = new_area >> 32;
	}

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_set_identity_and_remap(max_pfn);

	return "Xen";
}

/*
 * Machine specific memory setup for auto-translated guests.
 */
char * __init xen_auto_xlated_memory_setup(void)
{
	struct xen_memory_map memmap;
	int i;
	int rc;

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, xen_e820_map);

	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
	if (rc < 0)
		panic("No memory map (%d)\n", rc);

	xen_e820_map_entries = memmap.nr_entries;

	sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
			  &xen_e820_map_entries);

	for (i = 0; i < xen_e820_map_entries; i++)
		e820_add_region(xen_e820_map[i].addr, xen_e820_map[i].size,
				xen_e820_map[i].type);

	/* Remove p2m info, it is not needed. */
	xen_start_info->mfn_list = 0;
	xen_start_info->first_p2m_pfn = 0;
	xen_start_info->nr_p2m_frames = 0;

	return "Xen";
}

/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	/*
	 * This could be called before selected_vdso32 is initialized, so
	 * just fiddle with both possible images. vdso_image_32_syscall
	 * can't be selected, since it only exists on 64-bit systems.
	 */
	u32 *mask;
	mask = vdso_image_32_int80.data +
		vdso_image_32_int80.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
	mask = vdso_image_32_sysenter.data +
		vdso_image_32_sysenter.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}

static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

void xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}

void xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}

void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
	fiddle_vdso();
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}