]> git.proxmox.com Git - mirror_edk2.git/blame - UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
UefiCpuPkg/PiSmmCpuDxeSmm: Using global semaphores in aligned buffer
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / X64 / PageTbl.c
CommitLineData
427e3573
MK
1/** @file\r
2Page Fault (#PF) handler for X64 processors\r
3\r
fe3a75bc 4Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
427e3573
MK
5This program and the accompanying materials\r
6are licensed and made available under the terms and conditions of the BSD License\r
7which accompanies this distribution. The full text of the license may be found at\r
8http://opensource.org/licenses/bsd-license.php\r
9\r
10THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
11WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
12\r
13**/\r
14\r
15#include "PiSmmCpuDxeSmm.h"\r
16\r
//
// Number of spare 4-KByte pages reserved for on-demand page-table creation
// by the page-fault handler.
//
#define PAGE_TABLE_PAGES            8

//
// Bias added to a freshly-accessed entry's access record so it always ranks
// higher than any aged (not-recently-accessed) entry. Records occupy 3 bits
// (values 0..7), so BIT3 is the first value above that range.
//
#define ACC_MAX_BIT                 BIT3

//
// Pool of free 4-KByte pages the page-fault handler draws from when it must
// build new page-table pages; replenished by ReclaimPages() when empty.
//
LIST_ENTRY                          mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);

//
// TRUE if the processor supports 1-GByte pages; set once in SmmInitPageTable().
//
BOOLEAN                             m1GPageTableSupport = FALSE;

22/**\r
23 Check if 1-GByte pages is supported by processor or not.\r
24\r
25 @retval TRUE 1-GByte pages is supported.\r
26 @retval FALSE 1-GByte pages is not supported.\r
27\r
28**/\r
29BOOLEAN\r
30Is1GPageSupport (\r
31 VOID\r
32 )\r
33{\r
34 UINT32 RegEax;\r
35 UINT32 RegEdx;\r
36\r
37 AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
38 if (RegEax >= 0x80000001) {\r
39 AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);\r
40 if ((RegEdx & BIT26) != 0) {\r
41 return TRUE;\r
42 }\r
43 }\r
44 return FALSE;\r
45}\r
46\r
47/**\r
48 Set sub-entries number in entry.\r
49\r
50 @param[in, out] Entry Pointer to entry\r
51 @param[in] SubEntryNum Sub-entries number based on 0:\r
52 0 means there is 1 sub-entry under this entry\r
53 0x1ff means there is 512 sub-entries under this entry\r
54\r
55**/\r
56VOID\r
57SetSubEntriesNum (\r
58 IN OUT UINT64 *Entry,\r
59 IN UINT64 SubEntryNum\r
60 )\r
61{\r
62 //\r
63 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry\r
64 //\r
65 *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);\r
66}\r
67\r
68/**\r
69 Return sub-entries number in entry.\r
70\r
71 @param[in] Entry Pointer to entry\r
72\r
73 @return Sub-entries number based on 0:\r
74 0 means there is 1 sub-entry under this entry\r
75 0x1ff means there is 512 sub-entries under this entry\r
76**/\r
77UINT64\r
78GetSubEntriesNum (\r
79 IN UINT64 *Entry\r
80 )\r
81{\r
82 //\r
83 // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry\r
84 //\r
85 return BitFieldRead64 (*Entry, 52, 60);\r
86}\r
87\r
/**
  Create PageTable for SMM use.

  Builds the identity-mapped paging structures SMM executes under: one PML4
  page followed by a 4-GByte mapping produced by Gen4GPageTable(), plus a
  pool of spare pages that SmiDefaultPFHandler() uses to map addresses on
  demand. Also installs the SMM page-fault handler (SMM Profile variant when
  PcdCpuSmmProfileEnable is set) and the stack-guard IDT setup when enabled.

  @return The address of PML4 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS              Pages;
  UINT64                            *PTEntry;
  LIST_ENTRY                        *FreePage;
  UINTN                             Index;
  UINTN                             PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR          *IdtEntry;

  //
  // Initialize spin lock used to serialize page-fault handling
  //
  InitializeSpinLock (mPFLock);

  m1GPageTableSupport = Is1GPageSupport ();
  //
  // Generate PAE page table for the first 4GB memory space.
  // Gen4GPageTable also allocates PAGE_TABLE_PAGES + 1 extra pages in front
  // of the returned PDPT: one for the PML4 and the rest for the free pool.
  //
  Pages = Gen4GPageTable (PAGE_TABLE_PAGES + 1, FALSE);

  //
  // Set IA32_PG_PMNT bit to mask this entry: the four PDPT entries covering
  // the first 4GB must never be reclaimed by ReclaimPages().
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Table-Level4 (PML4) entry: the PML4 page sits at the start of
  // the extra-page region, PAGE_TABLE_PAGES + 1 pages below the PDPT.
  //
  PTEntry = (UINT64*)(UINTN)(Pages - EFI_PAGES_TO_SIZE (PAGE_TABLE_PAGES + 1));
  *PTEntry = Pages + PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));
  //
  // Set sub-entries number (3 means 4 sub-entries: the four masked PDPTEs)
  //
  SetSubEntriesNum (PTEntry, 3);

  //
  // Add remaining pages to page pool, one LIST_ENTRY node per 4-KByte page
  //
  FreePage = (LIST_ENTRY*)(PTEntry + EFI_PAGE_SIZE / sizeof (*PTEntry));
  while ((UINTN)FreePage < Pages) {
    InsertTailList (&mPagePool, FreePage);
    FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Set own Page Fault entry instead of the default one, because SMM Profile
    // feature depends on IRET instruction to do Single Step
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow      = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0     = 0;
    IdtEntry->Bits.GateType       = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh     = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper    = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1     = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}
178\r
179/**\r
180 Set access record in entry.\r
181\r
182 @param[in, out] Entry Pointer to entry\r
183 @param[in] Acc Access record value\r
184\r
185**/\r
186VOID\r
187SetAccNum (\r
188 IN OUT UINT64 *Entry,\r
189 IN UINT64 Acc\r
190 )\r
191{\r
192 //\r
193 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry\r
194 //\r
195 *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);\r
196}\r
197\r
198/**\r
199 Return access record in entry.\r
200\r
201 @param[in] Entry Pointer to entry\r
202\r
203 @return Access record value.\r
204\r
205**/\r
206UINT64\r
207GetAccNum (\r
208 IN UINT64 *Entry\r
209 )\r
210{\r
211 //\r
212 // Access record is saved in BIT9 to BIT11 (reserved field) in Entry\r
213 //\r
214 return BitFieldRead64 (*Entry, 9, 11);\r
215}\r
216\r
217/**\r
218 Return and update the access record in entry.\r
219\r
220 @param[in, out] Entry Pointer to entry\r
221\r
222 @return Access record value.\r
223\r
224**/\r
225UINT64\r
226GetAndUpdateAccNum (\r
227 IN OUT UINT64 *Entry\r
228 )\r
229{\r
230 UINT64 Acc;\r
231\r
232 Acc = GetAccNum (Entry);\r
233 if ((*Entry & IA32_PG_A) != 0) {\r
234 //\r
235 // If this entry has been accessed, clear access flag in Entry and update access record\r
236 // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others\r
237 //\r
238 *Entry &= ~(UINT64)(UINTN)IA32_PG_A;\r
239 SetAccNum (Entry, 0x7);\r
240 return (0x7 + ACC_MAX_BIT);\r
241 } else {\r
242 if (Acc != 0) {\r
243 //\r
244 // If the access record is not the smallest value 0, minus 1 and update the access record field\r
245 //\r
246 SetAccNum (Entry, Acc - 1);\r
247 }\r
248 }\r
249 return Acc;\r
250}\r
251\r
/**
  Reclaim free pages for PageFault handler.

  Search the whole entries tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed by this leaf entry into the
  page pool. And check its upper entries if need to be inserted into the page
  pool or not.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64                       *Pml4;
  UINT64                       *Pdpt;
  UINT64                       *Pdt;
  UINTN                        Pml4Index;
  UINTN                        PdptIndex;
  UINTN                        PdtIndex;
  UINTN                        MinPml4;
  UINTN                        MinPdpt;
  UINTN                        MinPdt;
  UINT64                       MinAcc;
  UINT64                       Acc;
  UINT64                       SubEntriesNum;
  BOOLEAN                      PML4EIgnore;
  BOOLEAN                      PDPTEIgnore;
  UINT64                       *ReleasePageAddress;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt  = NULL;
  // (UINTN)-1 / (UINT64)-1 act as "not found" sentinels below.
  MinAcc  = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt  = (UINTN)-1;
  Acc     = 0;
  ReleasePageAddress = 0;

  //
  // First, find the leaf entry has the smallest access record value
  //
  Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML4 entry is not present or is masked, skip it
      //
      continue;
    }
    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & gPhyMask);
    PML4EIgnore = FALSE;
    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
        //
        // If the PDPT entry is not present or is masked, skip it
        //
        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is masked, we will ignore checking the PML4 entry
          // (a masked child keeps its parent alive)
          //
          PML4EIgnore = TRUE;
        }
        continue;
      }
      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
        //
        // It's not 1-GByte pages entry, it should be a PDPT entry,
        // we will not check PML4 entry more
        //
        PML4EIgnore = TRUE;
        Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & gPhyMask);
        PDPTEIgnore = FALSE;
        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PD entry is not present or is masked, skip it
            //
            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is masked, we will not check the PDPT entry
              // any more
              //
              PDPTEIgnore = TRUE;
            }
            continue;
          }
          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
            //
            // It's not 2 MByte page table entry, it should be PD entry
            // we will find the entry has the smallest access record value
            //
            PDPTEIgnore = TRUE;
            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
            if (Acc < MinAcc) {
              //
              // If the PD entry has the smallest access record value,
              // save the Page address to be released
              //
              MinAcc  = Acc;
              MinPml4 = Pml4Index;
              MinPdpt = PdptIndex;
              MinPdt  = PdtIndex;
              ReleasePageAddress = Pdt + PdtIndex;
            }
          }
        }
        if (!PDPTEIgnore) {
          //
          // If this PDPT entry has no PDT entries pointing to 4 KByte pages,
          // it should only have entries pointing to 2 MByte pages
          //
          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
          if (Acc < MinAcc) {
            //
            // If the PDPT entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc  = Acc;
            MinPml4 = Pml4Index;
            MinPdpt = PdptIndex;
            MinPdt  = (UINTN)-1;
            ReleasePageAddress = Pdpt + PdptIndex;
          }
        }
      }
    }
    if (!PML4EIgnore) {
      //
      // If the PML4 entry has no PDPT entry pointing to 2 MByte pages,
      // it should only have entries pointing to 1 GByte pages
      //
      Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
      if (Acc < MinAcc) {
        //
        // If the PML4 entry has the smallest access record value,
        // save the Page address to be released
        //
        MinAcc  = Acc;
        MinPml4 = Pml4Index;
        MinPdpt = (UINTN)-1;
        MinPdt  = (UINTN)-1;
        ReleasePageAddress = Pml4 + Pml4Index;
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed by this entry into page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check this entry's upper entries if need to be inserted into page pool
  // or not
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If 4 KByte Page Table is released, check the PDPT entry
      //
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & gPhyMask);
      SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
      if (SubEntriesNum == 0) {
        //
        // Release the empty Page Directory table if there was no more 4 KByte Page Table entry
        // clear the Page directory entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0) {
        //
        // Release the empty PML4 table if there was no more 1-GByte page entry;
        // clear the Page directory entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);
      break;
    }
    //
    // PML4 entry itself was released above; nothing further to update
    //
    break;
  }
}
466\r
467/**\r
468 Allocate free Page for PageFault handler use.\r
469\r
470 @return Page address.\r
471\r
472**/\r
473UINT64\r
474AllocPage (\r
475 VOID\r
476 )\r
477{\r
478 UINT64 RetVal;\r
479\r
480 if (IsListEmpty (&mPagePool)) {\r
481 //\r
482 // If page pool is empty, reclaim the used pages and insert one into page pool\r
483 //\r
484 ReclaimPages ();\r
485 }\r
486\r
487 //\r
488 // Get one free page and remove it from page pool\r
489 //\r
490 RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;\r
491 RemoveEntryList (mPagePool.ForwardLink);\r
492 //\r
493 // Clean this page and return\r
494 //\r
495 ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);\r
496 return RetVal;\r
497}\r
498\r
/**
  Page Fault handler for SMM use.

  Maps the faulting address (read from CR2) by walking the 4-level page
  tables from CR3 and creating any missing intermediate tables from the page
  pool, then filling leaf entries of the chosen size (4K/2M/1G). Page size
  and count may be overridden by GetPlatformPageTableAttribute(); otherwise
  a single 2-MByte page is mapped. Caller must hold mPFLock.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64                            *PageTable;
  UINT64                            *Pml4;
  UINT64                            PFAddress;
  UINTN                             StartBit;
  UINTN                             EndBit;
  UINT64                            PTIndex;
  UINTN                             Index;
  SMM_PAGE_SIZE_TYPE                PageSize;
  UINTN                             NumOfPages;
  UINTN                             PageAttribute;
  EFI_STATUS                        Status;
  UINT64                            *UpperEntry;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If platform not support page table attribute, set default SMM page attribute
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  // Clamp platform-supplied values to sane ranges: valid size type, and at
  // most 512 pages (one full table's worth of entries).
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  //
  // EndBit selects how deep the walk below goes: the lowest address bit
  // decoded by the leaf entry for the chosen page size.
  //
  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((EFI_D_ERROR, "1-GByte pages is not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    // Unreachable: PageSize was clamped to a valid value above.
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable = Pml4;
    UpperEntry = NULL;
    //
    // Walk from the PML4 level (bits 39..47) down to the level just above
    // the leaf, creating missing intermediate tables on the way.
    //
    for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address (its sub-entry count is bumped below
        // only when the entry pre-existed; a fresh entry starts implicitly
        // at count 0 when its first leaf is added)
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of entry is used to save access record,
      // initialize value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry has already existed, this issue may occur when the different
      // size page entries created under the same entry
      //
      DEBUG ((EFI_D_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((EFI_D_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry: faulting address rounded down to the page size,
    // plus size/NX attributes, accessed flag, and default RW/P bits.
    //
    PageTable[PTIndex] = (PFAddress & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}
633\r
/**
  The Page Fault handler wrapper for SMM use.

  Serializes all page-fault handling with mPFLock, dead-loops on SMM stack
  overflow (fault inside SMRAM with stack guard enabled) and on instruction
  fetches outside SMRAM, then dispatches to either the SMM Profile handler
  or SmiDefaultPFHandler().

  @param InterruptType    Defines the type of interrupt or exception that
                          occurred on the processor.This parameter is processor architecture specific.
  @param SystemContext    A pointer to the processor context when
                          the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
    IN EFI_EXCEPTION_TYPE   InterruptType,
    IN EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  UINTN             PFAddress;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  // Only one CPU may service a page fault at a time.
  AcquireSpinLock (mPFLock);

  PFAddress = AsmReadCr2 ();

  //
  // If a page fault occurs in SMRAM range, it should be in a SMM stack guard page.
  //
  if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
      (PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DEBUG ((EFI_D_ERROR, "SMM stack overflow!\n"));
    CpuDeadLoop ();
  }

  //
  // If a page fault occurs in SMM range
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    // IA32_PF_EC_ID set means the fault was an instruction fetch: code is
    // executing outside SMRAM after SMM is locked -- treat as fatal.
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DEBUG ((EFI_D_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

  ReleaseSpinLock (mPFLock);
}