// SPDX-License-Identifier: GPL-2.0
/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>
#include <linux/memory_hotplug.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/idtentry.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/hvc-console.h>

#include "xen-ops.h"
#include "mmu.h"

#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/* E820 map used while setting up memory. */
static struct e820_table xen_e820_table __initdata;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
#define REMAP_SIZE (P2M_PER_PAGE - 3)
static struct {
        unsigned long next_area_mfn;
        unsigned long target_pfn;
        unsigned long size;
        unsigned long mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;

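/*
 * Layout sketch (editorial note, assuming x86-64 with 4 KiB pages and
 * 8-byte longs): P2M_PER_PAGE = PAGE_SIZE / sizeof(unsigned long) = 512,
 * so REMAP_SIZE = 509 and the three remaining slots hold next_area_mfn,
 * target_pfn and size above, making xen_remap_buf exactly one page.
 */
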
/*
 * The maximum amount of extra memory compared to the base size. The
 * main scaling factor is the size of struct page. At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO (10)

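/*
 * Worked example (editorial, assuming a 64-byte struct page and 4 KiB
 * pages): a 1 GiB base allows up to 10 GiB of extra memory; describing
 * those 10 GiB (2,621,440 pages) costs about 160 MiB of struct page
 * storage in the base, i.e. roughly a sixth of it.
 */
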
static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);

static void __init xen_parse_512gb(void)
{
        bool val = false;
        char *arg;

        arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
        if (!arg)
                return;

        arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
        if (!arg)
                val = true;
        else if (strtobool(arg + strlen("xen_512gb_limit="), &val))
                return;

        xen_512gb_limit = val;
}

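/*
 * Accepted forms (editorial sketch): a bare "xen_512gb_limit" on the
 * guest command line enables the limit, while "xen_512gb_limit=<val>"
 * is parsed by strtobool(), so e.g. "=0"/"=n" disables and "=1"/"=y"
 * enables it; an unparsable value leaves the Kconfig default in place.
 */
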
static void __init xen_add_extra_mem(unsigned long start_pfn,
                                     unsigned long n_pfns)
{
        int i;

        /*
         * No need to check for zero size: that should happen only rarely
         * and would merely write a new entry that is regarded as unused
         * because of its zero size.
         */
        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                /* Add new region. */
                if (xen_extra_mem[i].n_pfns == 0) {
                        xen_extra_mem[i].start_pfn = start_pfn;
                        xen_extra_mem[i].n_pfns = n_pfns;
                        break;
                }
                /* Append to existing region. */
                if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
                    start_pfn) {
                        xen_extra_mem[i].n_pfns += n_pfns;
                        break;
                }
        }
        if (i == XEN_EXTRA_MEM_MAX_REGIONS)
                printk(KERN_WARNING "Warning: not enough extra memory regions\n");

        memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}

static void __init xen_del_extra_mem(unsigned long start_pfn,
                                     unsigned long n_pfns)
{
        int i;
        unsigned long start_r, size_r;

        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                start_r = xen_extra_mem[i].start_pfn;
                size_r = xen_extra_mem[i].n_pfns;

                /* Start of region. */
                if (start_r == start_pfn) {
                        BUG_ON(n_pfns > size_r);
                        xen_extra_mem[i].start_pfn += n_pfns;
                        xen_extra_mem[i].n_pfns -= n_pfns;
                        break;
                }
                /* End of region. */
                if (start_r + size_r == start_pfn + n_pfns) {
                        BUG_ON(n_pfns > size_r);
                        xen_extra_mem[i].n_pfns -= n_pfns;
                        break;
                }
                /* Middle of region: split it. */
                if (start_pfn > start_r && start_pfn < start_r + size_r) {
                        BUG_ON(start_pfn + n_pfns > start_r + size_r);
                        xen_extra_mem[i].n_pfns = start_pfn - start_r;
                        /* Calling memblock_reserve() again is okay. */
                        xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
                                          (start_pfn + n_pfns));
                        break;
                }
        }
        memblock_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}

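/*
 * Worked example (editorial): deleting pfns [0x1100, 0x1200) from an
 * extra-mem region [0x1000, 0x2000) hits the "middle" case above; the
 * region shrinks to [0x1000, 0x1100) and xen_add_extra_mem() re-adds
 * the tail [0x1200, 0x2000) as a separate region.
 */
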
/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
        int i;

        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                if (pfn >= xen_extra_mem[i].start_pfn &&
                    pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
                        return INVALID_P2M_ENTRY;
        }

        return IDENTITY_FRAME(pfn);
}

/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
        unsigned long pfn, pfn_s, pfn_e;
        int i;

        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                if (!xen_extra_mem[i].n_pfns)
                        continue;
                pfn_s = xen_extra_mem[i].start_pfn;
                pfn_e = pfn_s + xen_extra_mem[i].n_pfns;
                for (pfn = pfn_s; pfn < pfn_e; pfn++)
                        set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
        }
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
        const struct e820_entry *entry = xen_e820_table.entries;
        unsigned int i;
        unsigned long done = 0;

        for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
                unsigned long s_pfn;
                unsigned long e_pfn;

                if (entry->type != E820_TYPE_RAM)
                        continue;

                e_pfn = PFN_DOWN(entry->addr + entry->size);

                /* We only care about E820 after this */
                if (e_pfn <= *min_pfn)
                        continue;

                s_pfn = PFN_UP(entry->addr);

                /*
                 * If min_pfn falls within the E820 entry, we want to start
                 * at the min_pfn PFN.
                 */
                if (s_pfn <= *min_pfn) {
                        done = e_pfn - *min_pfn;
                } else {
                        done = e_pfn - s_pfn;
                        *min_pfn = s_pfn;
                }
                break;
        }

        return done;
}

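/*
 * Example (editorial): with RAM ranges covering pfns [0x1000, 0x2000)
 * and [0x3000, 0x4000) and *min_pfn == 0x2800, the first entry ends
 * below *min_pfn and is skipped; *min_pfn becomes 0x3000 and 0x1000
 * pages are returned.
 */
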
static int __init xen_free_mfn(unsigned long mfn)
{
        struct xen_memory_reservation reservation = {
                .address_bits = 0,
                .extent_order = 0,
                .domid = DOMID_SELF
        };

        set_xen_guest_handle(reservation.extent_start, &mfn);
        reservation.nr_extents = 1;

        return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

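/*
 * Note (editorial): XENMEM_decrease_reservation returns the number of
 * extents actually released, so with nr_extents == 1 a return value of
 * 1 means the single frame was handed back to Xen; the caller below
 * checks for exactly that.
 */
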
/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
        unsigned long end_pfn, unsigned long nr_pages)
{
        unsigned long pfn, end;
        int ret;

        WARN_ON(start_pfn > end_pfn);

        /* Release pages first. */
        end = min(end_pfn, nr_pages);
        for (pfn = start_pfn; pfn < end; pfn++) {
                unsigned long mfn = pfn_to_mfn(pfn);

                /* Make sure pfn exists to start with */
                if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
                        continue;

                ret = xen_free_mfn(mfn);
                WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

                if (ret == 1) {
                        xen_released_pages++;
                        if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
                                break;
                } else
                        break;
        }

        set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
        struct mmu_update update = {
                .ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
                .val = pfn
        };

        /* Update p2m */
        if (!set_phys_to_machine(pfn, mfn)) {
                WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
                     pfn, mfn);
                BUG();
        }

        /* Update m2p */
        if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
                WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
                     mfn, pfn);
                BUG();
        }

        /* Update kernel mapping, but not for highmem. */
        if (pfn >= PFN_UP(__pa(high_memory - 1)))
                return;

        if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
                                         mfn_pte(mfn, PAGE_KERNEL), 0)) {
                WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
                     mfn, pfn);
                BUG();
        }
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
        unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
        unsigned long buf = (unsigned long)&xen_remap_buf;
        unsigned long mfn_save, mfn;
        unsigned long ident_pfn_iter, remap_pfn_iter;
        unsigned long ident_end_pfn = start_pfn + size;
        unsigned long left = size;
        unsigned int i, chunk;

        WARN_ON(size == 0);

        mfn_save = virt_to_mfn(buf);

        for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
             ident_pfn_iter < ident_end_pfn;
             ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
                chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

                /* Map first pfn to xen_remap_buf */
                mfn = pfn_to_mfn(ident_pfn_iter);
                set_pte_mfn(buf, mfn, PAGE_KERNEL);

                /* Save mapping information in page */
                xen_remap_buf.next_area_mfn = xen_remap_mfn;
                xen_remap_buf.target_pfn = remap_pfn_iter;
                xen_remap_buf.size = chunk;
                for (i = 0; i < chunk; i++)
                        xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

                /* Put remap buf into list. */
                xen_remap_mfn = mfn;

                /* Set identity map */
                set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

                left -= chunk;
        }

        /* Restore old xen_remap_buf mapping */
        set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}

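/*
 * Flow sketch (editorial): each REMAP_SIZE-sized sub-chunk stores its
 * remap metadata in the first page of the sub-chunk itself, temporarily
 * mapped through xen_remap_buf, and is linked into a chain via
 * next_area_mfn with xen_remap_mfn as the head. xen_remap_memory()
 * later walks that chain, most recently added chunk first, once the
 * allocators are up.
 */
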
/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
        unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
        unsigned long remap_pfn)
{
        unsigned long pfn;
        unsigned long i = 0;
        unsigned long n = end_pfn - start_pfn;

        if (remap_pfn == 0)
                remap_pfn = nr_pages;

        while (i < n) {
                unsigned long cur_pfn = start_pfn + i;
                unsigned long left = n - i;
                unsigned long size = left;
                unsigned long remap_range_size;

                /* Do not remap pages beyond the current allocation */
                if (cur_pfn >= nr_pages) {
                        /* Identity map remaining pages */
                        set_phys_range_identity(cur_pfn, cur_pfn + size);
                        break;
                }
                if (cur_pfn + size > nr_pages)
                        size = nr_pages - cur_pfn;

                remap_range_size = xen_find_pfn_range(&remap_pfn);
                if (!remap_range_size) {
                        pr_warn("Unable to find available pfn range, not remapping identity pages\n");
                        xen_set_identity_and_release_chunk(cur_pfn,
                                                cur_pfn + left, nr_pages);
                        break;
                }
                /* Adjust size to fit in current e820 RAM region */
                if (size > remap_range_size)
                        size = remap_range_size;

                xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

                /* Update variables to reflect new mappings. */
                i += size;
                remap_pfn += size;
        }

        /*
         * If the PFNs are currently mapped, the VA mapping also needs
         * to be updated to be 1:1.
         */
        for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
                (void)HYPERVISOR_update_va_mapping(
                        (unsigned long)__va(pfn << PAGE_SHIFT),
                        mfn_pte(pfn, PAGE_KERNEL_IO), 0);

        return remap_pfn;
}

static unsigned long __init xen_count_remap_pages(
        unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
        unsigned long remap_pages)
{
        if (start_pfn >= nr_pages)
                return remap_pages;

        return remap_pages + min(end_pfn, nr_pages) - start_pfn;
}

static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
        unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
                              unsigned long nr_pages, unsigned long last_val))
{
        phys_addr_t start = 0;
        unsigned long ret_val = 0;
        const struct e820_entry *entry = xen_e820_table.entries;
        int i;

        /*
         * Combine non-RAM regions and gaps until a RAM region (or the
         * end of the map) is reached, then call the provided function
         * to perform its duty on the non-RAM region.
         *
         * The combined non-RAM regions are rounded to a whole number
         * of pages so any partial pages are accessible via the 1:1
         * mapping. This is needed for some BIOSes that put (for
         * example) the DMI tables in a reserved region that begins on
         * a non-page boundary.
         */
        for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
                phys_addr_t end = entry->addr + entry->size;

                if (entry->type == E820_TYPE_RAM ||
                    i == xen_e820_table.nr_entries - 1) {
                        unsigned long start_pfn = PFN_DOWN(start);
                        unsigned long end_pfn = PFN_UP(end);

                        if (entry->type == E820_TYPE_RAM)
                                end_pfn = PFN_UP(entry->addr);

                        if (start_pfn < end_pfn)
                                ret_val = func(start_pfn, end_pfn, nr_pages,
                                               ret_val);
                        start = end;
                }
        }

        return ret_val;
}
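
/*
 * Usage sketch (editorial): xen_memory_setup() runs this walk twice,
 * first with xen_count_remap_pages() to size the job, then with
 * xen_set_identity_and_remap_chunk() to actually remap. last_val
 * carries the running page count on the first pass and the next remap
 * target pfn on the second.
 */
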
/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn is remapped to which pfn) is contained
 * in the memory to be remapped itself, in a linked list anchored at
 * xen_remap_mfn. This scheme allows the different chunks to be remapped in
 * arbitrary order; the resulting mapping is independent of that order.
 */
void __init xen_remap_memory(void)
{
        unsigned long buf = (unsigned long)&xen_remap_buf;
        unsigned long mfn_save, pfn;
        unsigned long remapped = 0;
        unsigned int i;
        unsigned long pfn_s = ~0UL;
        unsigned long len = 0;

        mfn_save = virt_to_mfn(buf);

        while (xen_remap_mfn != INVALID_P2M_ENTRY) {
                /* Map the remap information */
                set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

                BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

                pfn = xen_remap_buf.target_pfn;
                for (i = 0; i < xen_remap_buf.size; i++) {
                        xen_update_mem_tables(pfn, xen_remap_buf.mfns[i]);
                        remapped++;
                        pfn++;
                }
                if (pfn_s == ~0UL || pfn == pfn_s) {
                        pfn_s = xen_remap_buf.target_pfn;
                        len += xen_remap_buf.size;
                } else if (pfn_s + len == xen_remap_buf.target_pfn) {
                        len += xen_remap_buf.size;
                } else {
                        xen_del_extra_mem(pfn_s, len);
                        pfn_s = xen_remap_buf.target_pfn;
                        len = xen_remap_buf.size;
                }
                xen_remap_mfn = xen_remap_buf.next_area_mfn;
        }

        if (pfn_s != ~0UL && len)
                xen_del_extra_mem(pfn_s, len);

        set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

        pr_info("Remapped %ld page(s)\n", remapped);
}

static unsigned long __init xen_get_pages_limit(void)
{
        unsigned long limit;

        limit = MAXMEM / PAGE_SIZE;
        if (!xen_initial_domain() && xen_512gb_limit)
                limit = GB(512) / PAGE_SIZE;

        return limit;
}

static unsigned long __init xen_get_max_pages(void)
{
        unsigned long max_pages, limit;
        domid_t domid = DOMID_SELF;
        long ret;

        limit = xen_get_pages_limit();
        max_pages = limit;

        /*
         * For the initial domain we use the maximum reservation as
         * the maximum page.
         *
         * For guest domains the current maximum reservation reflects
         * the current maximum rather than the static maximum. In this
         * case the e820 map provided to us will cover the static
         * maximum region.
         */
        if (xen_initial_domain()) {
                ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
                if (ret > 0)
                        max_pages = ret;
        }

        return min(max_pages, limit);
}

static void __init xen_align_and_add_e820_region(phys_addr_t start,
                                                 phys_addr_t size, int type)
{
        phys_addr_t end = start + size;

        /* Align RAM regions to page boundaries. */
        if (type == E820_TYPE_RAM) {
                start = PAGE_ALIGN(start);
                end &= ~((phys_addr_t)PAGE_SIZE - 1);
#ifdef CONFIG_MEMORY_HOTPLUG
                /*
                 * Don't allow adding memory not in E820 map while booting the
                 * system. Once the balloon driver is up it will remove that
                 * restriction again.
                 */
                max_mem_size = end;
#endif
        }

        e820__range_add(start, end - start, type);
}

static void __init xen_ignore_unusable(void)
{
        struct e820_entry *entry = xen_e820_table.entries;
        unsigned int i;

        for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
                if (entry->type == E820_TYPE_UNUSABLE)
                        entry->type = E820_TYPE_RAM;
        }
}

bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
        struct e820_entry *entry;
        unsigned mapcnt;
        phys_addr_t end;

        if (!size)
                return false;

        end = start + size;
        entry = xen_e820_table.entries;

        for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
                if (entry->type == E820_TYPE_RAM && entry->addr <= start &&
                    (entry->addr + entry->size) >= end)
                        return false;

                entry++;
        }

        return true;
}

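/*
 * Note (editorial): despite the name, this returns true whenever the
 * range is not fully covered by a single E820_TYPE_RAM entry, i.e. it
 * also flags ranges that overlap non-RAM regions or span a hole as
 * "reserved".
 */
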
/*
 * Find a free area in physical memory that is not yet reserved and is
 * compliant with the E820 map.
 * Used to relocate pre-allocated areas, such as the initrd or the p2m
 * list, which conflict with the E820 map that is about to be used.
 * In case no area is found, return 0. Otherwise return the physical address
 * of the area, which is already reserved for convenience.
 */
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
        unsigned mapcnt;
        phys_addr_t addr, start;
        struct e820_entry *entry = xen_e820_table.entries;

        for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++, entry++) {
                if (entry->type != E820_TYPE_RAM || entry->size < size)
                        continue;
                start = entry->addr;
                for (addr = start; addr < start + size; addr += PAGE_SIZE) {
                        if (!memblock_is_reserved(addr))
                                continue;
                        start = addr + PAGE_SIZE;
                        if (start + size > entry->addr + entry->size)
                                break;
                }
                if (addr >= start + size) {
                        memblock_reserve(start, size);
                        return start;
                }
        }

        return 0;
}

/*
 * Like memcpy, but with physical addresses for dest and src.
 */
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
                                   phys_addr_t n)
{
        phys_addr_t dest_off, src_off, dest_len, src_len, len;
        void *from, *to;

        while (n) {
                dest_off = dest & ~PAGE_MASK;
                src_off = src & ~PAGE_MASK;
                dest_len = n;
                if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
                        dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
                src_len = n;
                if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
                        src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
                len = min(dest_len, src_len);
                to = early_memremap(dest - dest_off, dest_len + dest_off);
                from = early_memremap(src - src_off, src_len + src_off);
                memcpy(to, from, len);
                early_memunmap(to, dest_len + dest_off);
                early_memunmap(from, src_len + src_off);
                n -= len;
                dest += len;
                src += len;
        }
}

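/*
 * Note (editorial): each loop iteration copies at most the window that
 * early_memremap() can map at once (NR_FIX_BTMAPS fixmap pages, minus
 * the sub-page offsets), so arbitrarily large and misaligned regions
 * are handled in page-bounded pieces.
 */
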
/*
 * Reserve Xen mfn_list.
 */
static void __init xen_reserve_xen_mfnlist(void)
{
        phys_addr_t start, size;

        if (xen_start_info->mfn_list >= __START_KERNEL_map) {
                start = __pa(xen_start_info->mfn_list);
                size = PFN_ALIGN(xen_start_info->nr_pages *
                                 sizeof(unsigned long));
        } else {
                start = PFN_PHYS(xen_start_info->first_p2m_pfn);
                size = PFN_PHYS(xen_start_info->nr_p2m_frames);
        }

        memblock_reserve(start, size);
        if (!xen_is_e820_reserved(start, size))
                return;

        xen_relocate_p2m();
        memblock_free(start, size);
}

/**
 * xen_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
        unsigned long max_pfn, pfn_s, n_pfns;
        phys_addr_t mem_end, addr, size, chunk_size;
        u32 type;
        int rc;
        struct xen_memory_map memmap;
        unsigned long max_pages;
        unsigned long extra_pages = 0;
        int i;
        int op;

        xen_parse_512gb();
        max_pfn = xen_get_pages_limit();
        max_pfn = min(max_pfn, xen_start_info->nr_pages);
        mem_end = PFN_PHYS(max_pfn);

        memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
        set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);

#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
        xen_saved_max_mem_size = max_mem_size;
#endif

        op = xen_initial_domain() ?
                XENMEM_machine_memory_map :
                XENMEM_memory_map;
        rc = HYPERVISOR_memory_op(op, &memmap);
        if (rc == -ENOSYS) {
                BUG_ON(xen_initial_domain());
                memmap.nr_entries = 1;
                xen_e820_table.entries[0].addr = 0ULL;
                xen_e820_table.entries[0].size = mem_end;
                /* 8MB slack (to balance backend allocations). */
                xen_e820_table.entries[0].size += 8ULL << 20;
                xen_e820_table.entries[0].type = E820_TYPE_RAM;
                rc = 0;
        }
        BUG_ON(rc);
        BUG_ON(memmap.nr_entries == 0);
        xen_e820_table.nr_entries = memmap.nr_entries;

        /*
         * Xen won't allow a 1:1 mapping to be created to UNUSABLE
         * regions, so if we're using the machine memory map leave the
         * region as RAM as it is in the pseudo-physical map.
         *
         * UNUSABLE regions in domUs are not handled and will need
         * a patch in the future.
         */
        if (xen_initial_domain())
                xen_ignore_unusable();

        /* Make sure the Xen-supplied memory map is well-ordered. */
        e820__update_table(&xen_e820_table);

        max_pages = xen_get_max_pages();

        /* How many extra pages do we need due to remapping? */
        max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);

        if (max_pages > max_pfn)
                extra_pages += max_pages - max_pfn;

        /*
         * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
         * factor of the base size.
         *
         * Make sure we have no memory above max_pages, as this area
         * isn't handled by the p2m management.
         */
        extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
                           extra_pages, max_pages - max_pfn);
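        /*
         * Illustrative numbers (editorial): with max_pfn = 262144
         * (1 GiB of 4 KiB pages) and max_pages allowing 4 GiB, the
         * three clamps are 2621440 (10x the base), the remap-derived
         * extra_pages, and 786432 (max_pages - max_pfn); the smallest
         * of them wins.
         */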
        i = 0;
        addr = xen_e820_table.entries[0].addr;
        size = xen_e820_table.entries[0].size;
        while (i < xen_e820_table.nr_entries) {
                bool discard = false;

                chunk_size = size;
                type = xen_e820_table.entries[i].type;

                if (type == E820_TYPE_RAM) {
                        if (addr < mem_end) {
                                chunk_size = min(size, mem_end - addr);
                        } else if (extra_pages) {
                                chunk_size = min(size, PFN_PHYS(extra_pages));
                                pfn_s = PFN_UP(addr);
                                n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
                                extra_pages -= n_pfns;
                                xen_add_extra_mem(pfn_s, n_pfns);
                                xen_max_p2m_pfn = pfn_s + n_pfns;
                        } else
                                discard = true;
                }

                if (!discard)
                        xen_align_and_add_e820_region(addr, chunk_size, type);

                addr += chunk_size;
                size -= chunk_size;
                if (size == 0) {
                        i++;
                        if (i < xen_e820_table.nr_entries) {
                                addr = xen_e820_table.entries[i].addr;
                                size = xen_e820_table.entries[i].size;
                        }
                }
        }

        /*
         * Set the rest as identity mapped, in case PCI BARs are
         * located here.
         */
        set_phys_range_identity(addr / PAGE_SIZE, ~0ul);

        /*
         * In domU, the ISA region is normal, usable memory, but we
         * reserve ISA memory anyway because too many things poke
         * about in there.
         */
        e820__range_add(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
                        E820_TYPE_RESERVED);

        e820__update_table(e820_table);

        /*
         * Check whether the kernel itself conflicts with the target E820 map.
         * Failing now is better than running into weird problems later due
         * to relocating (and even reusing) pages with kernel text or data.
         */
        if (xen_is_e820_reserved(__pa_symbol(_text),
                                 __pa_symbol(__bss_stop) - __pa_symbol(_text))) {
                xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
                BUG();
        }

        /*
         * Check for a conflict of the hypervisor supplied page tables with
         * the target E820 map.
         */
        xen_pt_check_e820();

        xen_reserve_xen_mfnlist();

        /* Check for a conflict of the initrd with the target E820 map. */
        if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
                                 boot_params.hdr.ramdisk_size)) {
                phys_addr_t new_area, start, size;

                new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
                if (!new_area) {
                        xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
                        BUG();
                }

                start = boot_params.hdr.ramdisk_image;
                size = boot_params.hdr.ramdisk_size;
                xen_phys_memcpy(new_area, start, size);
                pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
                        start, start + size, new_area, new_area + size);
                memblock_free(start, size);
                boot_params.hdr.ramdisk_image = new_area;
                boot_params.ext_ramdisk_image = new_area >> 32;
        }

        /*
         * Set identity map on non-RAM pages and prepare remapping the
         * underlying RAM.
         */
        xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);

        pr_info("Released %ld page(s)\n", xen_released_pages);

        return "Xen";
}

static int register_callback(unsigned type, const void *func)
{
        struct callback_register callback = {
                .type = type,
                .address = XEN_CALLBACK(__KERNEL_CS, func),
                .flags = CALLBACKF_mask_events,
        };

        return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

void xen_enable_sysenter(void)
{
        int ret;
        unsigned sysenter_feature;

        sysenter_feature = X86_FEATURE_SYSENTER32;

        if (!boot_cpu_has(sysenter_feature))
                return;

        ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
        if (ret != 0)
                setup_clear_cpu_cap(sysenter_feature);
}

void xen_enable_syscall(void)
{
        int ret;

        ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
        if (ret != 0) {
                printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
                /*
                 * Pretty fatal; 64-bit userspace has no other
                 * mechanism for syscalls.
                 */
        }

        if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
                ret = register_callback(CALLBACKTYPE_syscall32,
                                        xen_syscall32_target);
                if (ret != 0)
                        setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
        }
}

static void __init xen_pvmmu_arch_setup(void)
{
        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

        HYPERVISOR_vm_assist(VMASST_CMD_enable,
                             VMASST_TYPE_pae_extended_cr3);

        if (register_callback(CALLBACKTYPE_event,
                              xen_asm_exc_xen_hypervisor_callback) ||
            register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
                BUG();

        xen_enable_sysenter();
        xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
        xen_panic_handler_init();
        xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
        if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
                printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
                disable_acpi();
        }
#endif

        memcpy(boot_command_line, xen_start_info->cmd_line,
               MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
               COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

        /* Set up idle, making sure it calls safe_halt() pvop */
        disable_cpuidle();
        disable_cpufreq();
        WARN_ON(xen_set_default_idle());
#ifdef CONFIG_NUMA
        numa_off = 1;
#endif
}