]> git.proxmox.com Git - mirror_edk2.git/blame - UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
UefiCpuPkg/PiSmmCpu: Always set RW+P bit for page table by default
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / X64 / PageTbl.c
CommitLineData
427e3573
MK
1/** @file\r
2Page Fault (#PF) handler for X64 processors\r
3\r
4Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
5This program and the accompanying materials\r
6are licensed and made available under the terms and conditions of the BSD License\r
7which accompanies this distribution. The full text of the license may be found at\r
8http://opensource.org/licenses/bsd-license.php\r
9\r
10THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
11WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
12\r
13**/\r
14\r
15#include "PiSmmCpuDxeSmm.h"\r
16\r
//
// Number of 4-KByte pages reserved as the SMM page-table pool (in addition
// to the PML4 page); see Gen4GPageTable (PAGE_TABLE_PAGES + 1, ...) below.
//
#define PAGE_TABLE_PAGES 8
//
// Bias added to the access record of a just-accessed entry so it always
// compares greater than any entry that was merely aged (records are 0..7).
//
#define ACC_MAX_BIT BIT3
//
// Pool of free 4-KByte pages used by the page-fault handler to build
// page-table entries on demand.
//
LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
//
// Serializes SmiPFHandler across CPUs (page tables are shared state).
//
SPIN_LOCK mPFLock;
//
// Cached result of Is1GPageSupport (), queried once in SmmInitPageTable.
//
BOOLEAN m1GPageTableSupport = FALSE;
22\r
23/**\r
24 Check if 1-GByte pages is supported by processor or not.\r
25\r
26 @retval TRUE 1-GByte pages is supported.\r
27 @retval FALSE 1-GByte pages is not supported.\r
28\r
29**/\r
30BOOLEAN\r
31Is1GPageSupport (\r
32 VOID\r
33 )\r
34{\r
35 UINT32 RegEax;\r
36 UINT32 RegEdx;\r
37\r
38 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
39 if (RegEax >= 0x80000001) {\r
40 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);\r
41 if ((RegEdx & BIT26) != 0) {\r
42 return TRUE;\r
43 }\r
44 }\r
45 return FALSE;\r
46}\r
47\r
48/**\r
49 Set sub-entries number in entry.\r
50\r
51 @param[in, out] Entry Pointer to entry\r
52 @param[in] SubEntryNum Sub-entries number based on 0:\r
53 0 means there is 1 sub-entry under this entry\r
54 0x1ff means there is 512 sub-entries under this entry\r
55\r
56**/\r
57VOID\r
58SetSubEntriesNum (\r
59 IN OUT UINT64 *Entry,\r
60 IN UINT64 SubEntryNum\r
61 )\r
62{\r
63 //\r
64 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry\r
65 //\r
66 *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);\r
67}\r
68\r
69/**\r
70 Return sub-entries number in entry.\r
71\r
72 @param[in] Entry Pointer to entry\r
73\r
74 @return Sub-entries number based on 0:\r
75 0 means there is 1 sub-entry under this entry\r
76 0x1ff means there is 512 sub-entries under this entry\r
77**/\r
78UINT64\r
79GetSubEntriesNum (\r
80 IN UINT64 *Entry\r
81 )\r
82{\r
83 //\r
84 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry\r
85 //\r
86 return BitFieldRead64 (*Entry, 52, 60);\r
87}\r
88\r
/**
  Create PageTable for SMM use.

  Builds an identity-mapped 4-level page table covering the first 4 GB,
  seeds the free-page pool used by the page-fault handler, and installs
  the SMM page-fault handler.

  @return The address of PML4 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS          Pages;
  UINT64                        *PTEntry;
  LIST_ENTRY                    *FreePage;
  UINTN                         Index;
  UINTN                         PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR      *IdtEntry;

  //
  // Initialize spin lock that serializes the SMM page-fault handler
  //
  InitializeSpinLock (&mPFLock);

  m1GPageTableSupport = Is1GPageSupport ();
  //
  // Generate PAE page table for the first 4GB memory space.
  // Gen4GPageTable returns the address of the PDPT; the PML4 page and the
  // page-pool pages are allocated immediately below it (see PTEntry below).
  //
  Pages = Gen4GPageTable (PAGE_TABLE_PAGES + 1, FALSE);

  //
  // Set IA32_PG_PMNT bit to mask the 4 PDPT entries covering 0-4GB so
  // ReclaimPages never evicts the tables that map this always-needed range
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry: the single PML4 entry points at
  // the PDPT produced by Gen4GPageTable
  //
  PTEntry = (UINT64*)(UINTN)(Pages - EFI_PAGES_TO_SIZE (PAGE_TABLE_PAGES + 1));
  *PTEntry = Pages + PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));
  //
  // Set sub-entries number: 3 means 4 sub-entries (the 4 PDPT entries above)
  //
  SetSubEntriesNum (PTEntry, 3);

  //
  // Add remaining pages (between the PML4 page and the PDPT) to page pool
  // for the page-fault handler to allocate from
  //
  FreePage = (LIST_ENTRY*)(PTEntry + EFI_PAGE_SIZE / sizeof (*PTEntry));
  while ((UINTN)FreePage < Pages) {
    InsertTailList (&mPagePool, FreePage);
    FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0 = 0;
    IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1 = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4 (to set CR3).
  // NOTE(review): truncation to UINT32 presumably relies on the page table
  // being allocated below 4GB — confirm against Gen4GPageTable's allocator.
  //
  return (UINT32)(UINTN)PTEntry;
}
179\r
180/**\r
181 Set access record in entry.\r
182\r
183 @param[in, out] Entry Pointer to entry\r
184 @param[in] Acc Access record value\r
185\r
186**/\r
187VOID\r
188SetAccNum (\r
189 IN OUT UINT64 *Entry,\r
190 IN UINT64 Acc\r
191 )\r
192{\r
193 //\r
194 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry\r
195 //\r
196 *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);\r
197}\r
198\r
199/**\r
200 Return access record in entry.\r
201\r
202 @param[in] Entry Pointer to entry\r
203\r
204 @return Access record value.\r
205\r
206**/\r
207UINT64\r
208GetAccNum (\r
209 IN UINT64 *Entry\r
210 )\r
211{\r
212 //\r
213 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry\r
214 //\r
215 return BitFieldRead64 (*Entry, 9, 11);\r
216}\r
217\r
218/**\r
219 Return and update the access record in entry.\r
220\r
221 @param[in, out] Entry Pointer to entry\r
222\r
223 @return Access record value.\r
224\r
225**/\r
226UINT64\r
227GetAndUpdateAccNum (\r
228 IN OUT UINT64 *Entry\r
229 )\r
230{\r
231 UINT64 Acc;\r
232\r
233 Acc = GetAccNum (Entry);\r
234 if ((*Entry & IA32_PG_A) != 0) {\r
235 //\r
236 // If this entry has been accessed, clear access flag in Entry and update access record\r
237 // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others\r
238 //\r
239 *Entry &= ~(UINT64)(UINTN)IA32_PG_A;\r
240 SetAccNum (Entry, 0x7);\r
241 return (0x7 + ACC_MAX_BIT);\r
242 } else {\r
243 if (Acc != 0) {\r
244 //\r
245 // If the access record is not the smallest value 0, minus 1 and update the access record field\r
246 //\r
247 SetAccNum (Entry, Acc - 1);\r
248 }\r
249 }\r
250 return Acc;\r
251}\r
252\r
/**
  Reclaim free pages for PageFault handler.

  Search the whole entries tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed by this leaf entry into the
  page pool. And check its upper entries if need to be inserted into the page
  pool or not.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64                       *Pml4;
  UINT64                       *Pdpt;
  UINT64                       *Pdt;
  UINTN                        Pml4Index;
  UINTN                        PdptIndex;
  UINTN                        PdtIndex;
  UINTN                        MinPml4;
  UINTN                        MinPdpt;
  UINTN                        MinPdt;
  UINT64                       MinAcc;
  UINT64                       Acc;
  UINT64                       SubEntriesNum;
  BOOLEAN                      PML4EIgnore;
  BOOLEAN                      PDPTEIgnore;
  UINT64                       *ReleasePageAddress;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt = NULL;
  //
  // (UINTN)-1 / (UINT64)-1 act as "not found" sentinels for the minima.
  //
  MinAcc = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt = (UINTN)-1;
  Acc = 0;
  ReleasePageAddress = 0;

  //
  // First, find the leaf entry has the smallest access record value
  //
  Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML4 entry is not present or is masked (PMNT), skip it
      //
      continue;
    }
    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & gPhyMask);
    PML4EIgnore = FALSE;
    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
        //
        // If the PDPT entry is not present or is masked, skip it
        //
        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is masked, the whole PDPT page must stay
          // resident, so the containing PML4 entry is not a candidate either
          //
          PML4EIgnore = TRUE;
        }
        continue;
      }
      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
        //
        // It's not a 1-GByte page entry, so it points to a PD table;
        // the PML4 entry therefore cannot be reclaimed as a whole
        //
        PML4EIgnore = TRUE;
        Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & gPhyMask);
        PDPTEIgnore = FALSE;
        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PD entry is not present or is masked, skip it
            //
            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is masked, we will not check the containing
              // PDPT entry any more
              //
              PDPTEIgnore = TRUE;
            }
            continue;
          }
          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
            //
            // It's not a 2-MByte page entry, so it points to a 4-KByte page
            // table; track the entry with the smallest access record value
            //
            PDPTEIgnore = TRUE;
            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
            if (Acc < MinAcc) {
              //
              // If the PD entry has the smallest access record value,
              // save the Page address to be released
              //
              MinAcc = Acc;
              MinPml4 = Pml4Index;
              MinPdpt = PdptIndex;
              MinPdt = PdtIndex;
              ReleasePageAddress = Pdt + PdtIndex;
            }
          }
        }
        if (!PDPTEIgnore) {
          //
          // If this PDPT entry has no PDT entries pointing to 4-KByte page
          // tables, it only contains 2-MByte leaf pages, so the whole PD
          // table is a reclaim candidate
          //
          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
          if (Acc < MinAcc) {
            //
            // If the PDPT entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc = Acc;
            MinPml4 = Pml4Index;
            MinPdpt = PdptIndex;
            MinPdt = (UINTN)-1;
            ReleasePageAddress = Pdpt + PdptIndex;
          }
        }
      }
    }
    if (!PML4EIgnore) {
      //
      // If this PML4 entry has no PDPT entries pointing to PD tables,
      // it only contains 1-GByte leaf pages, so the whole PDPT table is
      // a reclaim candidate
      //
      Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
      if (Acc < MinAcc) {
        //
        // If the PML4 entry has the smallest access record value,
        // save the Page address to be released
        //
        MinAcc = Acc;
        MinPml4 = Pml4Index;
        MinPdpt = (UINTN)-1;
        MinPdt = (UINTN)-1;
        ReleasePageAddress = Pml4 + Pml4Index;
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed by this entry into page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check this entry's upper entries if need to be inserted into page pool
  // or not
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte Page Table was released, check its parent PDPT entry
      //
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & gPhyMask);
      SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
      if (SubEntriesNum == 0) {
        //
        // Release the empty Page Directory table if there was no more 4 KByte Page Table entry
        // clear the Page directory entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0) {
        //
        // Release the empty PDPT table if there was no more 1-GByte page /
        // PD entry under it; clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);
      break;
    }
    //
    // The PML4-level entry was the one released above; nothing further to do
    //
    break;
  }
}
467\r
468/**\r
469 Allocate free Page for PageFault handler use.\r
470\r
471 @return Page address.\r
472\r
473**/\r
474UINT64\r
475AllocPage (\r
476 VOID\r
477 )\r
478{\r
479 UINT64 RetVal;\r
480\r
481 if (IsListEmpty (&mPagePool)) {\r
482 //\r
483 // If page pool is empty, reclaim the used pages and insert one into page pool\r
484 //\r
485 ReclaimPages ();\r
486 }\r
487\r
488 //\r
489 // Get one free page and remove it from page pool\r
490 //\r
491 RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;\r
492 RemoveEntryList (mPagePool.ForwardLink);\r
493 //\r
494 // Clean this page and return\r
495 //\r
496 ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);\r
497 return RetVal;\r
498}\r
499\r
/**
  Page Fault handler for SMM use.

  Maps the faulting address (CR2) on demand: walks the 4-level page table
  from the PML4, allocating missing intermediate tables from the page pool,
  and installs identity-mapped leaf entries of the platform-requested (or
  default 2-MByte) page size.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64                        *PageTable;
  UINT64                        *Pml4;
  UINT64                        PFAddress;
  UINTN                         StartBit;
  UINTN                         EndBit;
  UINT64                        PTIndex;
  UINTN                         Index;
  SMM_PAGE_SIZE_TYPE            PageSize;
  UINTN                         NumOfPages;
  UINTN                         PageAttribute;
  EFI_STATUS                    Status;
  UINT64                        *UpperEntry;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If platform not support page table attribute, set default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  //
  // Clamp out-of-range platform answers to safe defaults
  //
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  //
  // EndBit selects the page-table level of the leaf entry: the walk below
  // stops when StartBit reaches it
  //
  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((EFI_D_ERROR, "1-GByte pages is not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable = Pml4;
    UpperEntry = NULL;
    //
    // Walk down from PML4 (index bits 39..47) one level (9 bits) at a time
    // until reaching the level that holds the leaf entry
    //
    for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address so its sub-entry count can be
        // incremented once the new leaf is installed below
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of entry is used to save access record,
      // initialize value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry has already existed, this issue may occur when the different
      // size page entries created under the same entry
      //
      DEBUG ((EFI_D_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((EFI_D_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry: identity-map the fault address, aligned down to
    // the chosen page size
    //
    PageTable[PTIndex] = (PFAddress & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}
634\r
/**
  The Page Fault handler wrapper for SMM use.

  @param InterruptType  Defines the type of interrupt or exception that
                        occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext  A pointer to the processor context when
                        the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
  IN EFI_EXCEPTION_TYPE   InterruptType,
  IN EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  UINTN             PFAddress;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  //
  // Page tables are shared by all CPUs; serialize all fault handling
  //
  AcquireSpinLock (&mPFLock);

  PFAddress = AsmReadCr2 ();

  //
  // If a page fault occurs in SMRAM range, it should be in a SMM stack guard page.
  //
  if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
      (PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DEBUG ((EFI_D_ERROR, "SMM stack overflow!\n"));
    CpuDeadLoop ();
  }

  //
  // If a page fault occurs OUTSIDE the SMRAM range and the fault was an
  // instruction fetch (IA32_PF_EC_ID), code is executing outside SMRAM
  // after SMM is locked — treat it as fatal
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DEBUG ((EFI_D_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
    }
  }

  //
  // Dispatch to the SMM Profile handler when profiling is enabled,
  // otherwise map the faulting address on demand
  //
  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

  ReleaseSpinLock (&mPFLock);
}