/**@file
  Memory Detection for Virtual Machines.

  Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
  SPDX-License-Identifier: BSD-2-Clause-Patent

  Module Name:

    MemDetect.c

**/

//
// The package level header files this module uses
//
#include <IndustryStandard/E820.h>
#include <IndustryStandard/I440FxPiix4.h>
#include <IndustryStandard/Q35MchIch9.h>
#include <IndustryStandard/CloudHv.h>
#include <IndustryStandard/Xen/arch-x86/hvm/start_info.h>
#include <PiPei.h>
#include <Register/Intel/SmramSaveStateMap.h>

//
// The Library classes this module consumes
//
#include <Library/BaseLib.h>
#include <Library/BaseMemoryLib.h>
#include <Library/DebugLib.h>
#include <Library/HobLib.h>
#include <Library/IoLib.h>
#include <Library/MemEncryptSevLib.h>
#include <Library/PcdLib.h>
#include <Library/PciLib.h>
#include <Library/PeimEntryPoint.h>
#include <Library/ResourcePublicationLib.h>
#include <Library/MtrrLib.h>
#include <Library/QemuFwCfgLib.h>
#include <Library/QemuFwCfgSimpleParserLib.h>

#include "Platform.h"

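/**
  Initialize mPlatformInfoHob.Q35TsegMbytes.

  Query QEMU for an extended TSEG on the Q35 board; if one is offered, adopt
  its size, otherwise fall back to PcdQ35TsegMbytes.
**/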
VOID
Q35TsegMbytesInitialization (
  VOID
  )
{
  UINT16         ExtendedTsegMbytes;
  RETURN_STATUS  PcdStatus;

  ASSERT (mPlatformInfoHob.HostBridgeDevId == INTEL_Q35_MCH_DEVICE_ID);

  //
  // Check if QEMU offers an extended TSEG.
  //
  // This can be seen from writing MCH_EXT_TSEG_MB_QUERY to the MCH_EXT_TSEG_MB
  // register, and reading back the register.
  //
  // On a QEMU machine type that does not offer an extended TSEG, the initial
  // write overwrites whatever value a malicious guest OS may have placed in
  // the (unimplemented) register, before entering S3 or rebooting.
  // Subsequently, the read returns MCH_EXT_TSEG_MB_QUERY unchanged.
  //
  // On a QEMU machine type that offers an extended TSEG, the initial write
  // triggers an update to the register. Subsequently, the value read back
  // (which is guaranteed to differ from MCH_EXT_TSEG_MB_QUERY) tells us the
  // number of megabytes.
  //
  PciWrite16 (DRAMC_REGISTER_Q35 (MCH_EXT_TSEG_MB), MCH_EXT_TSEG_MB_QUERY);
  ExtendedTsegMbytes = PciRead16 (DRAMC_REGISTER_Q35 (MCH_EXT_TSEG_MB));
  if (ExtendedTsegMbytes == MCH_EXT_TSEG_MB_QUERY) {
    mPlatformInfoHob.Q35TsegMbytes = PcdGet16 (PcdQ35TsegMbytes);
    return;
  }

  DEBUG ((
    DEBUG_INFO,
    "%a: QEMU offers an extended TSEG (%d MB)\n",
    __FUNCTION__,
    ExtendedTsegMbytes
    ));
  PcdStatus = PcdSet16S (PcdQ35TsegMbytes, ExtendedTsegMbytes);
  ASSERT_RETURN_ERROR (PcdStatus);
  mPlatformInfoHob.Q35TsegMbytes = ExtendedTsegMbytes;
}

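/**
  Initialize mPlatformInfoHob.Q35SmramAtDefaultSmbase.

  Unless a CSM is enabled, query the Q35 board for SMRAM at the default
  SMBASE, and mirror the result into PcdQ35SmramAtDefaultSmbase.
**/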
VOID
Q35SmramAtDefaultSmbaseInitialization (
  VOID
  )
{
  RETURN_STATUS  PcdStatus;

  ASSERT (mPlatformInfoHob.HostBridgeDevId == INTEL_Q35_MCH_DEVICE_ID);

  mPlatformInfoHob.Q35SmramAtDefaultSmbase = FALSE;
  if (FeaturePcdGet (PcdCsmEnable)) {
    DEBUG ((
      DEBUG_INFO,
      "%a: SMRAM at default SMBASE not checked due to CSM\n",
      __FUNCTION__
      ));
  } else {
    UINTN  CtlReg;
    UINT8  CtlRegVal;

    CtlReg = DRAMC_REGISTER_Q35 (MCH_DEFAULT_SMBASE_CTL);
    PciWrite8 (CtlReg, MCH_DEFAULT_SMBASE_QUERY);
    CtlRegVal = PciRead8 (CtlReg);
    mPlatformInfoHob.Q35SmramAtDefaultSmbase = (BOOLEAN)(CtlRegVal ==
                                                         MCH_DEFAULT_SMBASE_IN_RAM);
    DEBUG ((
      DEBUG_INFO,
      "%a: SMRAM at default SMBASE %a\n",
      __FUNCTION__,
      mPlatformInfoHob.Q35SmramAtDefaultSmbase ? "found" : "not found"
      ));
  }

  PcdStatus = PcdSetBoolS (
                PcdQ35SmramAtDefaultSmbase,
                mPlatformInfoHob.Q35SmramAtDefaultSmbase
                );
  ASSERT_RETURN_ERROR (PcdStatus);
}

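/**
  Initialize PlatformInfoHob->Uc32Base (and Uc32Size, where applicable): the
  32-bit MMIO area below 4GB that QemuInitializeRam() marks as uncacheable
  through variable MTRRs.

  @param[in, out]  PlatformInfoHob  Platform info HOB being built.
**/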
VOID
QemuUc32BaseInitialization (
  IN OUT EFI_HOB_PLATFORM_INFO  *PlatformInfoHob
  )
{
  UINT32  LowerMemorySize;

  if (PlatformInfoHob->HostBridgeDevId == 0xffff /* microvm */) {
    return;
  }

  if (PlatformInfoHob->HostBridgeDevId == INTEL_Q35_MCH_DEVICE_ID) {
    //
    // On q35, the 32-bit area that we'll mark as UC, through variable MTRRs,
    // starts at PcdPciExpressBaseAddress. The platform DSC is responsible for
    // setting PcdPciExpressBaseAddress such that describing the
    // [PcdPciExpressBaseAddress, 4GB) range requires a very small number of
    // variable MTRRs (preferably 1 or 2).
    //
    ASSERT (FixedPcdGet64 (PcdPciExpressBaseAddress) <= MAX_UINT32);
    PlatformInfoHob->Uc32Base = (UINT32)FixedPcdGet64 (PcdPciExpressBaseAddress);
    return;
  }

  if (PlatformInfoHob->HostBridgeDevId == CLOUDHV_DEVICE_ID) {
    PlatformInfoHob->Uc32Size = CLOUDHV_MMIO_HOLE_SIZE;
    PlatformInfoHob->Uc32Base = CLOUDHV_MMIO_HOLE_ADDRESS;
    return;
  }

  ASSERT (PlatformInfoHob->HostBridgeDevId == INTEL_82441_DEVICE_ID);
  //
  // On i440fx, start with the [LowerMemorySize, 4GB) range. Make sure one
  // variable MTRR suffices by truncating the size to a whole power of two,
  // while keeping the end affixed to 4GB. This will round the base up.
  //
  LowerMemorySize           = GetSystemMemorySizeBelow4gb (PlatformInfoHob);
  PlatformInfoHob->Uc32Size = GetPowerOfTwo32 ((UINT32)(SIZE_4GB - LowerMemorySize));
  PlatformInfoHob->Uc32Base = (UINT32)(SIZE_4GB - PlatformInfoHob->Uc32Size);
  //
  // Assuming that LowerMemorySize is at least 1 byte, Uc32Size is at most 2GB.
  // Therefore PlatformInfoHob->Uc32Base is at least 2GB.
  //
  ASSERT (PlatformInfoHob->Uc32Base >= BASE_2GB);

  if (PlatformInfoHob->Uc32Base != LowerMemorySize) {
    DEBUG ((
      DEBUG_VERBOSE,
      "%a: rounded UC32 base from 0x%x up to 0x%x, for "
      "an UC32 size of 0x%x\n",
      __FUNCTION__,
      LowerMemorySize,
      PlatformInfoHob->Uc32Base,
      PlatformInfoHob->Uc32Size
      ));
  }
}

/**
  Iterate over the RAM entries in QEMU's fw_cfg E820 RAM map, and either
  produce memory resource descriptor HOBs for the RAM entries that start at or
  above 4GB, or determine the highest RAM addresses below and above 4GB.

  @param[in]  AddHighHob  If TRUE, produce a memory resource descriptor HOB
                          for each (page-aligned) RAM entry that starts at or
                          above 4GB.

  @param[out] LowMemory   If non-NULL, receives the highest exclusive RAM
                          address below 4GB, or 0 if the E820 map contains no
                          RAM entry that ends below 4GB.

  @param[out] MaxAddress  If non-NULL, receives the highest exclusive RAM
                          address. If QEMU's fw_cfg E820 RAM map contains no
                          RAM entry that ends above 4GB, then MaxAddress is
                          exactly 4GB on output.

  @retval EFI_SUCCESS         The fw_cfg E820 RAM map was found and processed.

  @retval EFI_PROTOCOL_ERROR  The RAM map was found, but its size wasn't a
                              whole multiple of sizeof(EFI_E820_ENTRY64). No
                              RAM entry was processed.

  @return                     Error codes from QemuFwCfgFindFile(). No RAM
                              entry was processed.
**/
STATIC
EFI_STATUS
ScanOrAdd64BitE820Ram (
  IN BOOLEAN  AddHighHob,
  OUT UINT64  *LowMemory OPTIONAL,
  OUT UINT64  *MaxAddress OPTIONAL
  )
{
  EFI_STATUS            Status;
  FIRMWARE_CONFIG_ITEM  FwCfgItem;
  UINTN                 FwCfgSize;
  EFI_E820_ENTRY64      E820Entry;
  UINTN                 Processed;

  Status = QemuFwCfgFindFile ("etc/e820", &FwCfgItem, &FwCfgSize);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  if (FwCfgSize % sizeof E820Entry != 0) {
    return EFI_PROTOCOL_ERROR;
  }

  if (LowMemory != NULL) {
    *LowMemory = 0;
  }

  if (MaxAddress != NULL) {
    *MaxAddress = BASE_4GB;
  }

  QemuFwCfgSelectItem (FwCfgItem);
  for (Processed = 0; Processed < FwCfgSize; Processed += sizeof E820Entry) {
    QemuFwCfgReadBytes (sizeof E820Entry, &E820Entry);
    DEBUG ((
      DEBUG_VERBOSE,
      "%a: Base=0x%Lx Length=0x%Lx Type=%u\n",
      __FUNCTION__,
      E820Entry.BaseAddr,
      E820Entry.Length,
      E820Entry.Type
      ));
    if (E820Entry.Type == EfiAcpiAddressRangeMemory) {
      if (AddHighHob && (E820Entry.BaseAddr >= BASE_4GB)) {
        UINT64  Base;
        UINT64  End;

        //
        // Round up the start address, and round down the end address.
        //
        Base = ALIGN_VALUE (E820Entry.BaseAddr, (UINT64)EFI_PAGE_SIZE);
        End  = (E820Entry.BaseAddr + E820Entry.Length) &
               ~(UINT64)EFI_PAGE_MASK;
        if (Base < End) {
          PlatformAddMemoryRangeHob (Base, End);
          DEBUG ((
            DEBUG_VERBOSE,
            "%a: PlatformAddMemoryRangeHob [0x%Lx, 0x%Lx)\n",
            __FUNCTION__,
            Base,
            End
            ));
        }
      }

      if (MaxAddress || LowMemory) {
        UINT64  Candidate;

        Candidate = E820Entry.BaseAddr + E820Entry.Length;
        if (MaxAddress && (Candidate > *MaxAddress)) {
          *MaxAddress = Candidate;
          DEBUG ((
            DEBUG_VERBOSE,
            "%a: MaxAddress=0x%Lx\n",
            __FUNCTION__,
            *MaxAddress
            ));
        }

        if (LowMemory && (Candidate > *LowMemory) && (Candidate < BASE_4GB)) {
          *LowMemory = Candidate;
          DEBUG ((
            DEBUG_VERBOSE,
            "%a: LowMemory=0x%Lx\n",
            __FUNCTION__,
            *LowMemory
            ));
        }
      }
    }
  }

  return EFI_SUCCESS;
}

/**
  Return the PVH memmap.

  @param[out] Entries  Pointer to the PVH memmap.
  @param[out] Count    Number of memmap entries.

  @retval EFI_SUCCESS    The PVH memmap was found and returned.
  @retval EFI_NOT_FOUND  The PVH start-of-day structure pointer is not set.
**/
EFI_STATUS
GetPvhMemmapEntries (
  struct hvm_memmap_table_entry  **Entries,
  UINT32                         *Count
  )
{
  UINT32                 *PVHResetVectorData;
  struct hvm_start_info  *pvh_start_info;

  PVHResetVectorData = (VOID *)(UINTN)PcdGet32 (PcdXenPvhStartOfDayStructPtr);
  if (PVHResetVectorData == NULL) {
    return EFI_NOT_FOUND;
  }

  pvh_start_info = (struct hvm_start_info *)(UINTN)PVHResetVectorData[0];

  *Entries = (struct hvm_memmap_table_entry *)(UINTN)pvh_start_info->memmap_paddr;
  *Count   = pvh_start_info->memmap_entries;

  return EFI_SUCCESS;
}

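/**
  Return the highest exclusive RAM end address from the PVH memmap.

  @param[in] Below4gb  If TRUE, only consider RAM entries that end at or below
                       4GB; otherwise, only consider RAM entries that end at
                       or above 4GB.

  @return The highest qualifying exclusive RAM end address, or 0 if there is
          no qualifying RAM entry.
**/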
STATIC
UINT64
GetHighestSystemMemoryAddressFromPvhMemmap (
  BOOLEAN  Below4gb
  )
{
  struct hvm_memmap_table_entry  *Memmap;
  UINT32                         MemmapEntriesCount;
  struct hvm_memmap_table_entry  *Entry;
  EFI_STATUS                     Status;
  UINT32                         Loop;
  UINT64                         HighestAddress;
  UINT64                         EntryEnd;

  HighestAddress = 0;

  Status = GetPvhMemmapEntries (&Memmap, &MemmapEntriesCount);
  ASSERT_EFI_ERROR (Status);

  for (Loop = 0; Loop < MemmapEntriesCount; Loop++) {
    Entry    = Memmap + Loop;
    EntryEnd = Entry->addr + Entry->size;

    if ((Entry->type == XEN_HVM_MEMMAP_TYPE_RAM) &&
        (EntryEnd > HighestAddress))
    {
      if (Below4gb && (EntryEnd <= BASE_4GB)) {
        HighestAddress = EntryEnd;
      } else if (!Below4gb && (EntryEnd >= BASE_4GB)) {
        HighestAddress = EntryEnd;
      }
    }
  }

  return HighestAddress;
}

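/**
  Return the size of system memory below 4GB, in bytes.

  The value is taken from the PVH memmap (on Cloud Hypervisor), from QEMU's
  fw_cfg E820 map when available, or from CMOS registers 0x34/0x35 otherwise.

  @param[in] PlatformInfoHob  Platform info HOB.

  @return The size of system memory below 4GB, in bytes.
**/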
UINT32
GetSystemMemorySizeBelow4gb (
  IN EFI_HOB_PLATFORM_INFO  *PlatformInfoHob
  )
{
  EFI_STATUS  Status;
  UINT64      LowerMemorySize = 0;
  UINT8       Cmos0x34;
  UINT8       Cmos0x35;

  if (PlatformInfoHob->HostBridgeDevId == CLOUDHV_DEVICE_ID) {
    // Get the information from PVH memmap
    return (UINT32)GetHighestSystemMemoryAddressFromPvhMemmap (TRUE);
  }

  Status = ScanOrAdd64BitE820Ram (FALSE, &LowerMemorySize, NULL);
  if ((Status == EFI_SUCCESS) && (LowerMemorySize > 0)) {
    return (UINT32)LowerMemorySize;
  }

  //
  // CMOS 0x34/0x35 specifies the system memory above 16 MB.
  // * CMOS(0x35) is the high byte
  // * CMOS(0x34) is the low byte
  // * The size is specified in 64kb chunks
  // * Since this is memory above 16MB, the 16MB must be added
  //   into the calculation to get the total memory size.
  //

  Cmos0x34 = (UINT8)PlatformCmosRead8 (0x34);
  Cmos0x35 = (UINT8)PlatformCmosRead8 (0x35);

  return (UINT32)(((UINTN)((Cmos0x35 << 8) + Cmos0x34) << 16) + SIZE_16MB);
}

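/**
  Return the size of system memory above 4GB, in bytes, as reported by CMOS
  registers 0x5b-0x5d.

  @return The size of system memory above 4GB, in bytes.
**/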
STATIC
UINT64
GetSystemMemorySizeAbove4gb (
  VOID
  )
{
  UINT32  Size;
  UINTN   CmosIndex;

  //
  // CMOS 0x5b-0x5d specifies the system memory above 4GB.
  // * CMOS(0x5d) is the most significant size byte
  // * CMOS(0x5c) is the middle size byte
  // * CMOS(0x5b) is the least significant size byte
  // * The size is specified in 64kb chunks
  //

  Size = 0;
  for (CmosIndex = 0x5d; CmosIndex >= 0x5b; CmosIndex--) {
    Size = (UINT32)(Size << 8) + (UINT32)PlatformCmosRead8 (CmosIndex);
  }

  return LShiftU64 (Size, 16);
}

/**
  Return the highest address that DXE could possibly use, plus one.
**/
STATIC
UINT64
GetFirstNonAddress (
  IN OUT EFI_HOB_PLATFORM_INFO  *PlatformInfoHob
  )
{
  UINT64                FirstNonAddress;
  UINT32                FwCfgPciMmio64Mb;
  EFI_STATUS            Status;
  FIRMWARE_CONFIG_ITEM  FwCfgItem;
  UINTN                 FwCfgSize;
  UINT64                HotPlugMemoryEnd;
  RETURN_STATUS         PcdStatus;

  //
  // set FirstNonAddress to suppress incorrect compiler/analyzer warnings
  //
  FirstNonAddress = 0;

  //
  // If QEMU presents an E820 map, then get the highest exclusive >=4GB RAM
  // address from it. This can express an address >= 4GB+1TB.
  //
  // Otherwise, get the flat size of the memory above 4GB from the CMOS (which
  // can only express a size smaller than 1TB), and add it to 4GB.
  //
  Status = ScanOrAdd64BitE820Ram (FALSE, NULL, &FirstNonAddress);
  if (EFI_ERROR (Status)) {
    FirstNonAddress = BASE_4GB + GetSystemMemorySizeAbove4gb ();
  }

  //
  // If DXE is 32-bit, then we're done; PciBusDxe will degrade 64-bit MMIO
  // resources to 32-bit anyway. See DegradeResource() in
  // "PciResourceSupport.c".
  //
 #ifdef MDE_CPU_IA32
  if (!FeaturePcdGet (PcdDxeIplSwitchToLongMode)) {
    return FirstNonAddress;
  }

 #endif

  //
  // Otherwise, in order to calculate the highest address plus one, we must
  // consider the 64-bit PCI host aperture too. Fetch the default size.
  //
  PlatformInfoHob->PcdPciMmio64Size = PcdGet64 (PcdPciMmio64Size);

  //
  // See if the user specified the number of megabytes for the 64-bit PCI host
  // aperture. Accept an aperture size up to 16TB.
  //
  // As signaled by the "X-" prefix, this knob is experimental, and might go
  // away at any time.
  //
  Status = QemuFwCfgParseUint32 (
             "opt/ovmf/X-PciMmio64Mb",
             FALSE,
             &FwCfgPciMmio64Mb
             );
  switch (Status) {
    case EFI_UNSUPPORTED:
    case EFI_NOT_FOUND:
      break;
    case EFI_SUCCESS:
      if (FwCfgPciMmio64Mb <= 0x1000000) {
        PlatformInfoHob->PcdPciMmio64Size = LShiftU64 (FwCfgPciMmio64Mb, 20);
        break;
      }

    //
    // fall through
    //
    default:
      DEBUG ((
        DEBUG_WARN,
        "%a: ignoring malformed 64-bit PCI host aperture size from fw_cfg\n",
        __FUNCTION__
        ));
      break;
  }

  if (PlatformInfoHob->PcdPciMmio64Size == 0) {
    if (PlatformInfoHob->BootMode != BOOT_ON_S3_RESUME) {
      DEBUG ((
        DEBUG_INFO,
        "%a: disabling 64-bit PCI host aperture\n",
        __FUNCTION__
        ));
      PcdStatus = PcdSet64S (PcdPciMmio64Size, 0);
      ASSERT_RETURN_ERROR (PcdStatus);
    }

    //
    // There's nothing more to do; the amount of memory above 4GB fully
    // determines the highest address plus one. The memory hotplug area (see
    // below) plays no role for the firmware in this case.
    //
    return FirstNonAddress;
  }

  //
  // The "etc/reserved-memory-end" fw_cfg file, when present, contains an
  // absolute, exclusive end address for the memory hotplug area. This area
  // starts right at the end of the memory above 4GB. The 64-bit PCI host
  // aperture must be placed above it.
  //
  Status = QemuFwCfgFindFile (
             "etc/reserved-memory-end",
             &FwCfgItem,
             &FwCfgSize
             );
  if (!EFI_ERROR (Status) && (FwCfgSize == sizeof HotPlugMemoryEnd)) {
    QemuFwCfgSelectItem (FwCfgItem);
    QemuFwCfgReadBytes (FwCfgSize, &HotPlugMemoryEnd);
    DEBUG ((
      DEBUG_VERBOSE,
      "%a: HotPlugMemoryEnd=0x%Lx\n",
      __FUNCTION__,
      HotPlugMemoryEnd
      ));

    ASSERT (HotPlugMemoryEnd >= FirstNonAddress);
    FirstNonAddress = HotPlugMemoryEnd;
  }

  //
  // SeaBIOS aligns both boundaries of the 64-bit PCI host aperture to 1GB, so
  // that the host can map it with 1GB hugepages. Follow suit.
  //
  PlatformInfoHob->PcdPciMmio64Base = ALIGN_VALUE (FirstNonAddress, (UINT64)SIZE_1GB);
  PlatformInfoHob->PcdPciMmio64Size = ALIGN_VALUE (PlatformInfoHob->PcdPciMmio64Size, (UINT64)SIZE_1GB);

  //
  // The 64-bit PCI host aperture should also be "naturally" aligned. The
  // alignment is determined by rounding the size of the aperture down to the
  // next smaller or equal power of two. That is, align the aperture by the
  // largest BAR size that can fit into it.
  //
  PlatformInfoHob->PcdPciMmio64Base = ALIGN_VALUE (PlatformInfoHob->PcdPciMmio64Base, GetPowerOfTwo64 (PlatformInfoHob->PcdPciMmio64Size));

  if (PlatformInfoHob->BootMode != BOOT_ON_S3_RESUME) {
    //
    // The core PciHostBridgeDxe driver will automatically add this range to
    // the GCD memory space map through our PciHostBridgeLib instance; here we
    // only need to set the PCDs.
    //
    PcdStatus = PcdSet64S (PcdPciMmio64Base, PlatformInfoHob->PcdPciMmio64Base);
    ASSERT_RETURN_ERROR (PcdStatus);
    PcdStatus = PcdSet64S (PcdPciMmio64Size, PlatformInfoHob->PcdPciMmio64Size);
    ASSERT_RETURN_ERROR (PcdStatus);

    DEBUG ((
      DEBUG_INFO,
      "%a: Pci64Base=0x%Lx Pci64Size=0x%Lx\n",
      __FUNCTION__,
      PlatformInfoHob->PcdPciMmio64Base,
      PlatformInfoHob->PcdPciMmio64Size
      ));
  }

  //
  // The useful address space ends with the 64-bit PCI host aperture.
  //
  FirstNonAddress = PlatformInfoHob->PcdPciMmio64Base + PlatformInfoHob->PcdPciMmio64Size;
  return FirstNonAddress;
}

/**
  Initialize the PhysMemAddressWidth field in PlatformInfoHob, based on guest
  RAM size.
**/
VOID
AddressWidthInitialization (
  IN OUT EFI_HOB_PLATFORM_INFO  *PlatformInfoHob
  )
{
  UINT64  FirstNonAddress;
  UINT8   PhysMemAddressWidth;

  //
  // As guest-physical memory size grows, the permanent PEI RAM requirements
  // are dominated by the identity-mapping page tables built by the DXE IPL.
  // The DXE IPL keys off of the physical address bits advertised in the CPU
  // HOB. To conserve memory, we calculate the minimum address width here.
  //
  FirstNonAddress     = GetFirstNonAddress (PlatformInfoHob);
  PhysMemAddressWidth = (UINT8)HighBitSet64 (FirstNonAddress);

  //
  // If FirstNonAddress is not an integral power of two, then we need an
  // additional bit.
  //
  if ((FirstNonAddress & (FirstNonAddress - 1)) != 0) {
    ++PhysMemAddressWidth;
  }

  //
  // The minimum address width is 36 (covers up to and excluding 64 GB, which
  // is the maximum for Ia32 + PAE). The theoretical architecture maximum for
  // X64 long mode is 52 bits, but the DXE IPL clamps that down to 48 bits. We
  // can simply assert that here, since 48 bits are good enough for 256 TB.
  //
  if (PhysMemAddressWidth <= 36) {
    PhysMemAddressWidth = 36;
  }

  ASSERT (PhysMemAddressWidth <= 48);

  PlatformInfoHob->FirstNonAddress     = FirstNonAddress;
  PlatformInfoHob->PhysMemAddressWidth = PhysMemAddressWidth;
}

/**
  Calculate the cap for the permanent PEI memory.
**/
STATIC
UINT32
GetPeiMemoryCap (
  VOID
  )
{
  BOOLEAN  Page1GSupport;
  UINT32   RegEax;
  UINT32   RegEdx;
  UINT32   Pml4Entries;
  UINT32   PdpEntries;
  UINTN    TotalPages;

  //
  // If DXE is 32-bit, then just return the traditional 64 MB cap.
  //
 #ifdef MDE_CPU_IA32
  if (!FeaturePcdGet (PcdDxeIplSwitchToLongMode)) {
    return SIZE_64MB;
  }

 #endif

  //
  // Dependent on physical address width, PEI memory allocations can be
  // dominated by the page tables built for 64-bit DXE. So we key the cap off
  // of those. The code below is based on CreateIdentityMappingPageTables() in
  // "MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c".
  //
  Page1GSupport = FALSE;
  if (PcdGetBool (PcdUse1GPageTable)) {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000001) {
      AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
      if ((RegEdx & BIT26) != 0) {
        Page1GSupport = TRUE;
      }
    }
  }

  if (mPlatformInfoHob.PhysMemAddressWidth <= 39) {
    Pml4Entries = 1;
    PdpEntries  = 1 << (mPlatformInfoHob.PhysMemAddressWidth - 30);
    ASSERT (PdpEntries <= 0x200);
  } else {
    Pml4Entries = 1 << (mPlatformInfoHob.PhysMemAddressWidth - 39);
    ASSERT (Pml4Entries <= 0x200);
    PdpEntries = 512;
  }

  TotalPages = Page1GSupport ? Pml4Entries + 1 :
               (PdpEntries + 1) * Pml4Entries + 1;
  ASSERT (TotalPages <= 0x40201);

  //
  // Add 64 MB for miscellaneous allocations. Note that for
  // PhysMemAddressWidth values close to 36, the cap will actually be
  // dominated by this increment.
  //
  return (UINT32)(EFI_PAGES_TO_SIZE (TotalPages) + SIZE_64MB);
}

/**
  Publish PEI core memory

  @retval EFI_SUCCESS  The PEIM initialized successfully.

**/
EFI_STATUS
PublishPeiMemory (
  VOID
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  MemoryBase;
  UINT64                MemorySize;
  UINT32                LowerMemorySize;
  UINT32                PeiMemoryCap;
  UINT32                S3AcpiReservedMemoryBase;
  UINT32                S3AcpiReservedMemorySize;

  LowerMemorySize = GetSystemMemorySizeBelow4gb (&mPlatformInfoHob);
  if (mPlatformInfoHob.SmmSmramRequire) {
    //
    // TSEG is chipped from the end of low RAM
    //
    LowerMemorySize -= mPlatformInfoHob.Q35TsegMbytes * SIZE_1MB;
  }

  S3AcpiReservedMemoryBase = 0;
  S3AcpiReservedMemorySize = 0;

  //
  // If S3 is supported, then the S3 permanent PEI memory is placed next,
  // downwards. Its size is primarily dictated by CpuMpPei. The formula below
  // is an approximation.
  //
  if (mPlatformInfoHob.S3Supported) {
    S3AcpiReservedMemorySize = SIZE_512KB +
                               mPlatformInfoHob.PcdCpuMaxLogicalProcessorNumber *
                               PcdGet32 (PcdCpuApStackSize);
    S3AcpiReservedMemoryBase = LowerMemorySize - S3AcpiReservedMemorySize;
    LowerMemorySize          = S3AcpiReservedMemoryBase;
  }

  mPlatformInfoHob.S3AcpiReservedMemoryBase = S3AcpiReservedMemoryBase;
  mPlatformInfoHob.S3AcpiReservedMemorySize = S3AcpiReservedMemorySize;

  if (mPlatformInfoHob.BootMode == BOOT_ON_S3_RESUME) {
    MemoryBase = S3AcpiReservedMemoryBase;
    MemorySize = S3AcpiReservedMemorySize;
  } else {
    PeiMemoryCap = GetPeiMemoryCap ();
    DEBUG ((
      DEBUG_INFO,
      "%a: PhysMemAddressWidth=%d PeiMemoryCap=%u KB\n",
      __FUNCTION__,
      mPlatformInfoHob.PhysMemAddressWidth,
      PeiMemoryCap >> 10
      ));

    //
    // Determine the range of memory to use during PEI
    //
    // Technically we could lay the permanent PEI RAM over SEC's temporary
    // decompression and scratch buffer even if "secure S3" is needed, since
    // their lifetimes don't overlap. However, PeiFvInitialization() will cover
    // RAM up to PcdOvmfDecompressionScratchEnd with an EfiACPIMemoryNVS memory
    // allocation HOB, and other allocations served from the permanent PEI RAM
    // shouldn't overlap with that HOB.
    //
    MemoryBase = mPlatformInfoHob.S3Supported && mPlatformInfoHob.SmmSmramRequire ?
                 PcdGet32 (PcdOvmfDecompressionScratchEnd) :
                 PcdGet32 (PcdOvmfDxeMemFvBase) + PcdGet32 (PcdOvmfDxeMemFvSize);
    MemorySize = LowerMemorySize - MemoryBase;
    if (MemorySize > PeiMemoryCap) {
      MemoryBase = LowerMemorySize - PeiMemoryCap;
      MemorySize = PeiMemoryCap;
    }
  }

  //
  // MEMFD_BASE_ADDRESS separates the SMRAM at the default SMBASE from the
  // normal boot permanent PEI RAM. Regarding the S3 boot path, the S3
  // permanent PEI RAM is located even higher.
  //
  if (mPlatformInfoHob.SmmSmramRequire && mPlatformInfoHob.Q35SmramAtDefaultSmbase) {
    ASSERT (SMM_DEFAULT_SMBASE + MCH_DEFAULT_SMBASE_SIZE <= MemoryBase);
  }

  //
  // Publish this memory to the PEI Core
  //
  Status = PublishSystemMemory (MemoryBase, MemorySize);
  ASSERT_EFI_ERROR (Status);

  return Status;
}

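/**
  Create memory HOBs for the RAM below 640KB, reserving the SMRAM at the
  default SMBASE when that SMRAM is in use.

  @param[in] PlatformInfoHob  Platform info HOB.
**/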
STATIC
VOID
QemuInitializeRamBelow1gb (
  IN EFI_HOB_PLATFORM_INFO  *PlatformInfoHob
  )
{
  if (PlatformInfoHob->SmmSmramRequire && PlatformInfoHob->Q35SmramAtDefaultSmbase) {
    PlatformAddMemoryRangeHob (0, SMM_DEFAULT_SMBASE);
    PlatformAddReservedMemoryBaseSizeHob (
      SMM_DEFAULT_SMBASE,
      MCH_DEFAULT_SMBASE_SIZE,
      TRUE /* Cacheable */
      );
    STATIC_ASSERT (
      SMM_DEFAULT_SMBASE + MCH_DEFAULT_SMBASE_SIZE < BASE_512KB + BASE_128KB,
      "end of SMRAM at default SMBASE ends at, or exceeds, 640KB"
      );
    PlatformAddMemoryRangeHob (
      SMM_DEFAULT_SMBASE + MCH_DEFAULT_SMBASE_SIZE,
      BASE_512KB + BASE_128KB
      );
  } else {
    PlatformAddMemoryRangeHob (0, BASE_512KB + BASE_128KB);
  }
}

/**
  Perform Memory Detection for QEMU / KVM

**/
STATIC
VOID
QemuInitializeRam (
  IN EFI_HOB_PLATFORM_INFO  *PlatformInfoHob
  )
{
  UINT64         LowerMemorySize;
  UINT64         UpperMemorySize;
  MTRR_SETTINGS  MtrrSettings;
  EFI_STATUS     Status;

  DEBUG ((DEBUG_INFO, "%a called\n", __FUNCTION__));

  //
  // Determine total memory size available
  //
  LowerMemorySize = GetSystemMemorySizeBelow4gb (PlatformInfoHob);

  if (PlatformInfoHob->BootMode == BOOT_ON_S3_RESUME) {
    //
    // Create the following memory HOB as an exception on the S3 boot path.
    //
    // Normally we'd create memory HOBs only on the normal boot path. However,
    // CpuMpPei specifically needs such a low-memory HOB on the S3 path as
    // well, for "borrowing" a subset of it temporarily, for the AP startup
    // vector.
    //
    // CpuMpPei saves the original contents of the borrowed area in permanent
    // PEI RAM, in a backup buffer allocated with the normal PEI services.
    // CpuMpPei restores the original contents ("returns" the borrowed area) at
    // End-of-PEI. End-of-PEI in turn is emitted by S3Resume2Pei before
    // transferring control to the OS's wakeup vector in the FACS.
    //
    // We expect any other PEIMs that "borrow" memory similarly to CpuMpPei to
    // restore the original contents. Furthermore, we expect all such PEIMs
    // (CpuMpPei included) to claim the borrowed areas by producing memory
    // allocation HOBs, and to honor preexistent memory allocation HOBs when
    // looking for an area to borrow.
    //
    QemuInitializeRamBelow1gb (PlatformInfoHob);
  } else {
    //
    // Create memory HOBs
    //
    QemuInitializeRamBelow1gb (PlatformInfoHob);

    if (PlatformInfoHob->SmmSmramRequire) {
      UINT32  TsegSize;

      TsegSize = PlatformInfoHob->Q35TsegMbytes * SIZE_1MB;
      PlatformAddMemoryRangeHob (BASE_1MB, LowerMemorySize - TsegSize);
      PlatformAddReservedMemoryBaseSizeHob (
        LowerMemorySize - TsegSize,
        TsegSize,
        TRUE
        );
    } else {
      PlatformAddMemoryRangeHob (BASE_1MB, LowerMemorySize);
    }

    //
    // If QEMU presents an E820 map, then create memory HOBs for the >=4GB RAM
    // entries. Otherwise, create a single memory HOB with the flat >=4GB
    // memory size read from the CMOS.
    //
    Status = ScanOrAdd64BitE820Ram (TRUE, NULL, NULL);
    if (EFI_ERROR (Status)) {
      UpperMemorySize = GetSystemMemorySizeAbove4gb ();
      if (UpperMemorySize != 0) {
        PlatformAddMemoryBaseSizeHob (BASE_4GB, UpperMemorySize);
      }
    }
  }

  //
  // We'd like to keep the following ranges uncached:
  // - [640 KB, 1 MB)
  // - [LowerMemorySize, 4 GB)
  //
  // Everything else should be WB. Unfortunately, programming the inverse (i.e.,
  // keeping the default UC, and configuring the complement set of the above as
  // WB) is not reliable in general, because the end of the upper RAM can have
  // practically any alignment, and we may not have enough variable MTRRs to
  // cover it exactly.
  //
  if (IsMtrrSupported () && (PlatformInfoHob->HostBridgeDevId != CLOUDHV_DEVICE_ID)) {
    MtrrGetAllMtrrs (&MtrrSettings);

    //
    // MTRRs disabled, fixed MTRRs disabled, default type is uncached
    //
    ASSERT ((MtrrSettings.MtrrDefType & BIT11) == 0);
    ASSERT ((MtrrSettings.MtrrDefType & BIT10) == 0);
    ASSERT ((MtrrSettings.MtrrDefType & 0xFF) == 0);

    //
    // flip default type to writeback
    //
    SetMem (&MtrrSettings.Fixed, sizeof MtrrSettings.Fixed, 0x06);
    ZeroMem (&MtrrSettings.Variables, sizeof MtrrSettings.Variables);
    MtrrSettings.MtrrDefType |= BIT11 | BIT10 | 6;
    MtrrSetAllMtrrs (&MtrrSettings);

    //
    // Set memory range from 640KB to 1MB to uncacheable
    //
    Status = MtrrSetMemoryAttribute (
               BASE_512KB + BASE_128KB,
               BASE_1MB - (BASE_512KB + BASE_128KB),
               CacheUncacheable
               );
    ASSERT_EFI_ERROR (Status);

    //
    // Set the memory range from the start of the 32-bit MMIO area (32-bit PCI
    // MMIO aperture on i440fx, PCIEXBAR on q35) to 4GB as uncacheable.
    //
    Status = MtrrSetMemoryAttribute (
               PlatformInfoHob->Uc32Base,
               SIZE_4GB - PlatformInfoHob->Uc32Base,
               CacheUncacheable
               );
    ASSERT_EFI_ERROR (Status);
  }
}

/**
  Publish system RAM and reserve memory regions

**/
VOID
InitializeRamRegions (
  IN EFI_HOB_PLATFORM_INFO  *PlatformInfoHob
  )
{
  QemuInitializeRam (PlatformInfoHob);

  SevInitializeRam ();

  if (PlatformInfoHob->S3Supported && (PlatformInfoHob->BootMode != BOOT_ON_S3_RESUME)) {
    //
    // This is the memory range that will be used for PEI on S3 resume
    //
    BuildMemoryAllocationHob (
      PlatformInfoHob->S3AcpiReservedMemoryBase,
      PlatformInfoHob->S3AcpiReservedMemorySize,
      EfiACPIMemoryNVS
      );

    //
    // Cover the initial RAM area used as stack and temporary PEI heap.
    //
    // This is reserved as ACPI NVS so it can be used on S3 resume.
    //
    BuildMemoryAllocationHob (
      PcdGet32 (PcdOvmfSecPeiTempRamBase),
      PcdGet32 (PcdOvmfSecPeiTempRamSize),
      EfiACPIMemoryNVS
      );

    //
    // SEC stores its table of GUIDed section handlers here.
    //
    BuildMemoryAllocationHob (
      PcdGet64 (PcdGuidedExtractHandlerTableAddress),
      PcdGet32 (PcdGuidedExtractHandlerTableSize),
      EfiACPIMemoryNVS
      );

 #ifdef MDE_CPU_X64
    //
    // Reserve the initial page tables built by the reset vector code.
    //
    // Since this memory range will be used by the Reset Vector on S3
    // resume, it must be reserved as ACPI NVS.
    //
    BuildMemoryAllocationHob (
      (EFI_PHYSICAL_ADDRESS)(UINTN)PcdGet32 (PcdOvmfSecPageTablesBase),
      (UINT64)(UINTN)PcdGet32 (PcdOvmfSecPageTablesSize),
      EfiACPIMemoryNVS
      );

    if (PlatformInfoHob->SevEsIsEnabled) {
      //
      // If SEV-ES is enabled, reserve the GHCB-related memory area. This
      // includes the extra page table used to break down the 2MB page
      // mapping into 4KB page entries where the GHCB resides and the
      // GHCB area itself.
      //
      // Since this memory range will be used by the Reset Vector on S3
      // resume, it must be reserved as ACPI NVS.
      //
      BuildMemoryAllocationHob (
        (EFI_PHYSICAL_ADDRESS)(UINTN)PcdGet32 (PcdOvmfSecGhcbPageTableBase),
        (UINT64)(UINTN)PcdGet32 (PcdOvmfSecGhcbPageTableSize),
        EfiACPIMemoryNVS
        );
      BuildMemoryAllocationHob (
        (EFI_PHYSICAL_ADDRESS)(UINTN)PcdGet32 (PcdOvmfSecGhcbBase),
        (UINT64)(UINTN)PcdGet32 (PcdOvmfSecGhcbSize),
        EfiACPIMemoryNVS
        );
      BuildMemoryAllocationHob (
        (EFI_PHYSICAL_ADDRESS)(UINTN)PcdGet32 (PcdOvmfSecGhcbBackupBase),
        (UINT64)(UINTN)PcdGet32 (PcdOvmfSecGhcbBackupSize),
        EfiACPIMemoryNVS
        );
    }

 #endif
  }

  if (PlatformInfoHob->BootMode != BOOT_ON_S3_RESUME) {
    if (!PlatformInfoHob->SmmSmramRequire) {
      //
      // Reserve the lock box storage area
      //
      // Since this memory range will be used on S3 resume, it must be
      // reserved as ACPI NVS.
      //
      // If S3 is unsupported, then various drivers might still write to the
      // LockBox area. We ought to prevent DXE from serving allocation requests
      // such that they would overlap the LockBox storage.
      //
      ZeroMem (
        (VOID *)(UINTN)PcdGet32 (PcdOvmfLockBoxStorageBase),
        (UINTN)PcdGet32 (PcdOvmfLockBoxStorageSize)
        );
      BuildMemoryAllocationHob (
        (EFI_PHYSICAL_ADDRESS)(UINTN)PcdGet32 (PcdOvmfLockBoxStorageBase),
        (UINT64)(UINTN)PcdGet32 (PcdOvmfLockBoxStorageSize),
        PlatformInfoHob->S3Supported ? EfiACPIMemoryNVS : EfiBootServicesData
        );
    }

    if (PlatformInfoHob->SmmSmramRequire) {
      UINT32  TsegSize;

      //
      // Make sure the TSEG area that we reported as a reserved memory resource
      // cannot be used for reserved memory allocations.
      //
      TsegSize = PlatformInfoHob->Q35TsegMbytes * SIZE_1MB;
      BuildMemoryAllocationHob (
        GetSystemMemorySizeBelow4gb (PlatformInfoHob) - TsegSize,
        TsegSize,
        EfiReservedMemoryType
        );
      //
      // Similarly, allocate away the (already reserved) SMRAM at the default
      // SMBASE, if it exists.
      //
      if (PlatformInfoHob->Q35SmramAtDefaultSmbase) {
        BuildMemoryAllocationHob (
          SMM_DEFAULT_SMBASE,
          MCH_DEFAULT_SMBASE_SIZE,
          EfiReservedMemoryType
          );
      }
    }

 #ifdef MDE_CPU_X64
    if (FixedPcdGet32 (PcdOvmfWorkAreaSize) != 0) {
      //
      // Reserve the work area.
      //
      // Since this memory range will be used by the Reset Vector on S3
      // resume, it must be reserved as ACPI NVS.
      //
      // If S3 is unsupported, then various drivers might still write to the
      // work area. We ought to prevent DXE from serving allocation requests
      // such that they would overlap the work area.
      //
      BuildMemoryAllocationHob (
        (EFI_PHYSICAL_ADDRESS)(UINTN)FixedPcdGet32 (PcdOvmfWorkAreaBase),
        (UINT64)(UINTN)FixedPcdGet32 (PcdOvmfWorkAreaSize),
        PlatformInfoHob->S3Supported ? EfiACPIMemoryNVS : EfiBootServicesData
        );
    }

 #endif
  }
}