/** @file\r
GCC inline implementation of BaseLib processor specific functions.\r
- \r
- Copyright (c) 2006 - 2010, Intel Corporation. All rights reserved.<BR>\r
+\r
+ Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>\r
Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
This program and the accompanying materials\r
are licensed and made available under the terms and conditions of the BSD License\r
which accompanies this distribution. The full text of the license may be found at\r
- http://opensource.org/licenses/bsd-license.php\r
+ http://opensource.org/licenses/bsd-license.php.\r
\r
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
)\r
{\r
// This is a little bit of overkill and it is more about the compiler that it is\r
- // actually processor synchronization. This is like the _ReadWriteBarrier \r
+ // actually processor synchronization. This is like the _ReadWriteBarrier\r
// Microsoft specific intrinsic\r
__asm__ __volatile__ ("":::"memory");\r
}\r
DisableInterrupts (\r
VOID\r
)\r
-{ \r
+{\r
__asm__ __volatile__ ("cli"::: "memory");\r
}\r
\r
)\r
{\r
UINT64 Data;\r
- \r
+\r
__asm__ __volatile__ (\r
"rdmsr"\r
: "=A" (Data) // %0\r
: "c" (Index) // %1\r
);\r
- \r
+\r
return Data;\r
}\r
\r
: "c" (Index),\r
"A" (Value)\r
);\r
- \r
+\r
return Value;\r
}\r
\r
)\r
{\r
UINTN Eflags;\r
- \r
+\r
__asm__ __volatile__ (\r
"pushfl \n\t"\r
"popl %0 "\r
: "=r" (Eflags)\r
);\r
- \r
+\r
return Eflags;\r
}\r
\r
)\r
{\r
UINTN Data;\r
- \r
+\r
__asm__ __volatile__ (\r
- "movl %%cr0,%0" \r
+ "movl %%cr0,%0"\r
: "=a" (Data)\r
);\r
- \r
+\r
return Data;\r
}\r
\r
)\r
{\r
UINTN Data;\r
- \r
+\r
__asm__ __volatile__ (\r
- "movl %%cr2, %0" \r
+ "movl %%cr2, %0"\r
: "=r" (Data)\r
);\r
- \r
+\r
return Data;\r
}\r
\r
)\r
{\r
UINTN Data;\r
- \r
+\r
__asm__ __volatile__ (\r
"movl %%cr3, %0"\r
: "=r" (Data)\r
);\r
- \r
+\r
return Data;\r
}\r
\r
)\r
{\r
UINTN Data;\r
- \r
+\r
__asm__ __volatile__ (\r
"movl %%cr4, %0"\r
: "=a" (Data)\r
);\r
- \r
+\r
return Data;\r
}\r
\r
)\r
{\r
UINTN Data;\r
- \r
+\r
__asm__ __volatile__ (\r
"movl %%dr0, %0"\r
: "=r" (Data)\r
);\r
- \r
+\r
return Data;\r
}\r
\r
)\r
{\r
UINTN Data;\r
- \r
+\r
__asm__ __volatile__ (\r
"movl %%dr1, %0"\r
: "=r" (Data)\r
);\r
- \r
+\r
return Data;\r
}\r
\r
)\r
{\r
UINTN Data;\r
- \r
+\r
__asm__ __volatile__ (\r
"movl %%dr2, %0"\r
: "=r" (Data)\r
);\r
- \r
+\r
return Data;\r
}\r
\r
)\r
{\r
UINTN Data;\r
- \r
+\r
__asm__ __volatile__ (\r
"movl %%dr3, %0"\r
: "=r" (Data)\r
);\r
- \r
+\r
return Data;\r
}\r
\r
)\r
{\r
UINTN Data;\r
- \r
+\r
__asm__ __volatile__ (\r
"movl %%dr4, %0"\r
: "=r" (Data)\r
);\r
- \r
+\r
return Data;\r
}\r
\r
)\r
{\r
UINTN Data;\r
- \r
+\r
__asm__ __volatile__ (\r
"movl %%dr5, %0"\r
: "=r" (Data)\r
);\r
- \r
+\r
return Data;\r
}\r
\r
)\r
{\r
UINTN Data;\r
- \r
+\r
__asm__ __volatile__ (\r
"movl %%dr6, %0"\r
: "=r" (Data)\r
);\r
- \r
+\r
return Data;\r
}\r
\r
)\r
{\r
UINTN Data;\r
- \r
+\r
__asm__ __volatile__ (\r
"movl %%dr7, %0"\r
: "=r" (Data)\r
);\r
- \r
+\r
return Data;\r
}\r
\r
)\r
{\r
UINT16 Data;\r
- \r
+\r
__asm__ __volatile__ (\r
"mov %%cs, %0"\r
:"=a" (Data)\r
);\r
- \r
+\r
return Data;\r
}\r
\r
)\r
{\r
UINT16 Data;\r
- \r
+\r
__asm__ __volatile__ (\r
"mov %%ds, %0"\r
:"=a" (Data)\r
);\r
- \r
+\r
return Data;\r
}\r
\r
)\r
{\r
UINT16 Data;\r
- \r
+\r
__asm__ __volatile__ (\r
"mov %%es, %0"\r
:"=a" (Data)\r
);\r
- \r
+\r
return Data;\r
}\r
\r
)\r
{\r
UINT16 Data;\r
- \r
+\r
__asm__ __volatile__ (\r
"mov %%fs, %0"\r
:"=a" (Data)\r
);\r
- \r
+\r
return Data;\r
}\r
\r
)\r
{\r
UINT16 Data;\r
- \r
+\r
__asm__ __volatile__ (\r
"mov %%gs, %0"\r
:"=a" (Data)\r
);\r
- \r
+\r
return Data;\r
}\r
\r
)\r
{\r
UINT16 Data;\r
- \r
+\r
__asm__ __volatile__ (\r
"mov %%ds, %0"\r
:"=a" (Data)\r
);\r
- \r
+\r
return Data;\r
}\r
\r
)\r
{\r
UINT16 Data;\r
- \r
+\r
__asm__ __volatile__ (\r
"str %0"\r
: "=a" (Data)\r
);\r
- \r
+\r
return Data;\r
}\r
\r
Reads and returns the current GDTR descriptor and returns it in Gdtr. This\r
function is only available on IA-32 and X64.\r
\r
- @param Gdtr Pointer to a GDTR descriptor.\r
+ @param Gdtr The pointer to a GDTR descriptor.\r
\r
**/\r
VOID\r
Writes and the current GDTR descriptor specified by Gdtr. This function is\r
only available on IA-32 and X64.\r
\r
- @param Gdtr Pointer to a GDTR descriptor.\r
+ @param Gdtr The pointer to a GDTR descriptor.\r
\r
**/\r
VOID\r
:\r
: "m" (*Gdtr)\r
);\r
- \r
+\r
}\r
\r
\r
Reads and returns the current IDTR descriptor and returns it in Idtr. This\r
function is only available on IA-32 and X64.\r
\r
- @param Idtr Pointer to a IDTR descriptor.\r
+  @param Idtr  The pointer to an IDTR descriptor.
\r
**/\r
VOID\r
Writes the current IDTR descriptor and returns it in Idtr. This function is\r
only available on IA-32 and X64.\r
\r
- @param Idtr Pointer to a IDTR descriptor.\r
+  @param Idtr  The pointer to an IDTR descriptor.
\r
**/\r
VOID\r
)\r
{\r
UINT16 Data;\r
- \r
+\r
__asm__ __volatile__ (\r
"sldt %0"\r
: "=g" (Data) // %0\r
);\r
- \r
+\r
return Data;\r
}\r
\r
Buffer. Buffer must be aligned on a 16-byte boundary. This function is only\r
available on IA-32 and X64.\r
\r
- @param Buffer Pointer to a buffer to save the floating point/SSE/SSE2 context.\r
+ @param Buffer The pointer to a buffer to save the floating point/SSE/SSE2 context.\r
\r
**/\r
VOID\r
"fxsave %0"\r
:\r
: "m" (*Buffer) // %0\r
- ); \r
+ );\r
}\r
\r
\r
by Buffer. Buffer must be aligned on a 16-byte boundary. This function is\r
only available on IA-32 and X64.\r
\r
- @param Buffer Pointer to a buffer to save the floating point/SSE/SSE2 context.\r
+ @param Buffer The pointer to a buffer to save the floating point/SSE/SSE2 context.\r
\r
**/\r
VOID\r
"pop %%edx \n\t"\r
: "=A" (Data) // %0\r
);\r
- \r
+\r
return Data;\r
}\r
\r
"pop %%edx \n\t"\r
: "=A" (Data) // %0\r
);\r
- \r
+\r
return Data;\r
}\r
\r
"pop %%edx \n\t"\r
: "=A" (Data) // %0\r
);\r
- \r
+\r
return Data;\r
}\r
\r
"pop %%edx \n\t"\r
: "=A" (Data) // %0\r
);\r
- \r
+\r
return Data;\r
}\r
\r
"pop %%edx \n\t"\r
: "=A" (Data) // %0\r
);\r
- \r
+\r
return Data;\r
}\r
\r
"pop %%edx \n\t"\r
: "=A" (Data) // %0\r
);\r
- \r
+\r
return Data;\r
}\r
\r
"pop %%edx \n\t"\r
: "=A" (Data) // %0\r
);\r
- \r
+\r
return Data;\r
}\r
\r
"pop %%edx \n\t"\r
: "=A" (Data) // %0\r
);\r
- \r
+\r
return Data;\r
}\r
\r
{\r
__asm__ __volatile__ (\r
"movq %0, %%mm0" // %0\r
- : \r
+ :\r
: "m" (Value)\r
);\r
}\r
{\r
__asm__ __volatile__ (\r
"movq %0, %%mm1" // %0\r
- : \r
+ :\r
: "m" (Value)\r
);\r
}\r
{\r
__asm__ __volatile__ (\r
"movq %0, %%mm2" // %0\r
- : \r
+ :\r
: "m" (Value)\r
);\r
}\r
{\r
__asm__ __volatile__ (\r
"movq %0, %%mm3" // %0\r
- : \r
+ :\r
: "m" (Value)\r
);\r
}\r
{\r
__asm__ __volatile__ (\r
"movq %0, %%mm4" // %0\r
- : \r
+ :\r
: "m" (Value)\r
);\r
}\r
{\r
__asm__ __volatile__ (\r
"movq %0, %%mm5" // %0\r
- : \r
+ :\r
: "m" (Value)\r
);\r
}\r
{\r
__asm__ __volatile__ (\r
"movq %0, %%mm6" // %0\r
- : \r
+ :\r
: "m" (Value)\r
);\r
}\r
{\r
__asm__ __volatile__ (\r
"movq %0, %%mm7" // %0\r
- : \r
+ :\r
: "m" (Value)\r
);\r
}\r
)\r
{\r
UINT64 Data;\r
- \r
+\r
__asm__ __volatile__ (\r
"rdtsc"\r
: "=A" (Data)\r
);\r
- \r
- return Data; \r
+\r
+ return Data;\r
}\r
\r
\r
)\r
{\r
UINT64 Data;\r
- \r
+\r
__asm__ __volatile__ (\r
"rdpmc"\r
: "=A" (Data)\r
: "c" (Index)\r
);\r
- \r
- return Data; \r
+\r
+ return Data;\r
}\r
\r
\r
)\r
{\r
__asm__ __volatile__ ("invd":::"memory");\r
- \r
+\r
}\r
\r
\r
IN VOID *LinearAddress\r
)\r
{\r
+ UINT32 RegEdx;\r
+\r
+ //\r
+ // If the CPU does not support CLFLUSH instruction,\r
+ // then promote flush range to flush entire cache.\r
+ //\r
+ AsmCpuid (0x01, NULL, NULL, NULL, &RegEdx);\r
+ if ((RegEdx & BIT19) == 0) {\r
+ __asm__ __volatile__ ("wbinvd":::"memory");\r
+ return LinearAddress;\r
+ }\r
+\r
+\r
__asm__ __volatile__ (\r
"clflush (%0)"\r
- : "+a" (LinearAddress) \r
- : \r
+ : "+a" (LinearAddress)\r
+ :\r
: "memory"\r
);\r
- \r
- return LinearAddress;\r
+\r
+ return LinearAddress;\r
}\r
\r
\r