1 /** @file
2 * IPRT - AMD64 and x86 Specific Assembly Functions.
3 */
4
5 /*
6 * Copyright (C) 2006-2016 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26 #ifndef ___iprt_asm_amd64_x86_h
27 #define ___iprt_asm_amd64_x86_h
28
29 #include <iprt/types.h>
30 #include <iprt/assert.h>
31 #if !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86)
32 # error "Not on AMD64 or x86"
33 #endif
34
35 #if defined(_MSC_VER) && RT_INLINE_ASM_USES_INTRIN
36 # pragma warning(push)
37 # pragma warning(disable:4668) /* Several incorrect __cplusplus uses. */
38 # pragma warning(disable:4255) /* Incorrect __slwpcb prototype. */
39 # include <intrin.h>
40 # pragma warning(pop)
41 /* Emit the intrinsics at all optimization levels. */
42 # pragma intrinsic(_ReadWriteBarrier)
43 # pragma intrinsic(__cpuid)
44 # pragma intrinsic(_enable)
45 # pragma intrinsic(_disable)
46 # pragma intrinsic(__rdtsc)
47 # pragma intrinsic(__readmsr)
48 # pragma intrinsic(__writemsr)
49 # pragma intrinsic(__outbyte)
50 # pragma intrinsic(__outbytestring)
51 # pragma intrinsic(__outword)
52 # pragma intrinsic(__outwordstring)
53 # pragma intrinsic(__outdword)
54 # pragma intrinsic(__outdwordstring)
55 # pragma intrinsic(__inbyte)
56 # pragma intrinsic(__inbytestring)
57 # pragma intrinsic(__inword)
58 # pragma intrinsic(__inwordstring)
59 # pragma intrinsic(__indword)
60 # pragma intrinsic(__indwordstring)
61 # pragma intrinsic(__invlpg)
62 # pragma intrinsic(__wbinvd)
63 # pragma intrinsic(__readcr0)
64 # pragma intrinsic(__readcr2)
65 # pragma intrinsic(__readcr3)
66 # pragma intrinsic(__readcr4)
67 # pragma intrinsic(__writecr0)
68 # pragma intrinsic(__writecr3)
69 # pragma intrinsic(__writecr4)
70 # pragma intrinsic(__readdr)
71 # pragma intrinsic(__writedr)
72 # ifdef RT_ARCH_AMD64
73 # pragma intrinsic(__readcr8)
74 # pragma intrinsic(__writecr8)
75 # endif
76 # if RT_INLINE_ASM_USES_INTRIN >= 14
77 # pragma intrinsic(__halt)
78 # endif
79 # if RT_INLINE_ASM_USES_INTRIN >= 15
80 # pragma intrinsic(__readeflags)
81 # pragma intrinsic(__writeeflags)
82 # pragma intrinsic(__rdtscp)
83 # endif
84 #endif
85
86
87 /*
88 * Include #pragma aux definitions for Watcom C/C++.
89 */
90 #if defined(__WATCOMC__) && ARCH_BITS == 16
91 # include "asm-amd64-x86-watcom-16.h"
92 #elif defined(__WATCOMC__) && ARCH_BITS == 32
93 # include "asm-amd64-x86-watcom-32.h"
94 #endif
95
96
97 /** @defgroup grp_rt_asm_amd64_x86 AMD64 and x86 Specific ASM Routines
98 * @ingroup grp_rt_asm
99 * @{
100 */
101
102 /** @todo find a more suitable place for these structures? */
103
104 #pragma pack(1)
105 /** IDTR */
106 typedef struct RTIDTR
107 {
108 /** Size of the IDT. */
109 uint16_t cbIdt;
110 /** Address of the IDT. */
111 #if ARCH_BITS != 64
112 uint32_t pIdt;
113 #else
114 uint64_t pIdt;
115 #endif
116 } RTIDTR, *PRTIDTR;
117 #pragma pack()
118
119 #pragma pack(1)
120 /** @internal */
121 typedef struct RTIDTRALIGNEDINT
122 {
123 /** Alignment padding. */
124 uint16_t au16Padding[ARCH_BITS == 64 ? 3 : 1];
125 /** The IDTR structure. */
126 RTIDTR Idtr;
127 } RTIDTRALIGNEDINT;
128 #pragma pack()
129
130 /** Wrapped RTIDTR for preventing misalignment exceptions. */
131 typedef union RTIDTRALIGNED
132 {
133 /** Try to make sure this structure has optimal alignment. */
134 uint64_t auAlignmentHack[ARCH_BITS == 64 ? 2 : 1];
135 /** Aligned structure. */
136 RTIDTRALIGNEDINT s;
137 } RTIDTRALIGNED;
138 AssertCompileSize(RTIDTRALIGNED, ((ARCH_BITS == 64) + 1) * 8);
139 /** Pointer to an RTIDTR alignment wrapper. */
140 typedef RTIDTRALIGNED *PRTIDTRALIGNED;
141
142
143 #pragma pack(1)
144 /** GDTR */
145 typedef struct RTGDTR
146 {
147 /** Size of the GDT. */
148 uint16_t cbGdt;
149 /** Address of the GDT. */
150 #if ARCH_BITS != 64
151 uint32_t pGdt;
152 #else
153 uint64_t pGdt;
154 #endif
155 } RTGDTR, *PRTGDTR;
156 #pragma pack()
157
158 #pragma pack(1)
159 /** @internal */
160 typedef struct RTGDTRALIGNEDINT
161 {
162 /** Alignment padding. */
163 uint16_t au16Padding[ARCH_BITS == 64 ? 3 : 1];
164 /** The GDTR structure. */
165 RTGDTR Gdtr;
166 } RTGDTRALIGNEDINT;
167 #pragma pack()
168
169 /** Wrapped RTGDTR for preventing misalignment exceptions. */
170 typedef union RTGDTRALIGNED
171 {
172 /** Try to make sure this structure has optimal alignment. */
173 uint64_t auAlignmentHack[ARCH_BITS == 64 ? 2 : 1];
174 /** Aligned structure. */
175 RTGDTRALIGNEDINT s;
176 } RTGDTRALIGNED;
177 AssertCompileSize(RTGDTRALIGNED, ((ARCH_BITS == 64) + 1) * 8);
178 /** Pointer to an RTGDTR alignment wrapper. */
179 typedef RTGDTRALIGNED *PRTGDTRALIGNED;
180
181
182 /**
183 * Gets the content of the IDTR CPU register.
184 * @param pIdtr Where to store the IDTR contents.
185 */
186 #if RT_INLINE_ASM_EXTERNAL
187 DECLASM(void) ASMGetIDTR(PRTIDTR pIdtr);
188 #else
189 DECLINLINE(void) ASMGetIDTR(PRTIDTR pIdtr)
190 {
191 # if RT_INLINE_ASM_GNU_STYLE
192 __asm__ __volatile__("sidt %0" : "=m" (*pIdtr));
193 # else
194 __asm
195 {
196 # ifdef RT_ARCH_AMD64
197 mov rax, [pIdtr]
198 sidt [rax]
199 # else
200 mov eax, [pIdtr]
201 sidt [eax]
202 # endif
203 }
204 # endif
205 }
206 #endif
207
208
209 /**
210 * Gets the content of the IDTR.LIMIT CPU register.
211 * @returns IDTR limit.
212 */
213 #if RT_INLINE_ASM_EXTERNAL
214 DECLASM(uint16_t) ASMGetIdtrLimit(void);
215 #else
216 DECLINLINE(uint16_t) ASMGetIdtrLimit(void)
217 {
218 RTIDTRALIGNED TmpIdtr;
219 # if RT_INLINE_ASM_GNU_STYLE
220 __asm__ __volatile__("sidt %0" : "=m" (TmpIdtr.s.Idtr));
221 # else
222 __asm
223 {
224 sidt [TmpIdtr.s.Idtr]
225 }
226 # endif
227 return TmpIdtr.s.Idtr.cbIdt;
228 }
229 #endif
230
231
232 /**
233 * Sets the content of the IDTR CPU register.
234 * @param pIdtr Where to load the IDTR contents from
235 */
236 #if RT_INLINE_ASM_EXTERNAL
237 DECLASM(void) ASMSetIDTR(const RTIDTR *pIdtr);
238 #else
239 DECLINLINE(void) ASMSetIDTR(const RTIDTR *pIdtr)
240 {
241 # if RT_INLINE_ASM_GNU_STYLE
242 __asm__ __volatile__("lidt %0" : : "m" (*pIdtr));
243 # else
244 __asm
245 {
246 # ifdef RT_ARCH_AMD64
247 mov rax, [pIdtr]
248 lidt [rax]
249 # else
250 mov eax, [pIdtr]
251 lidt [eax]
252 # endif
253 }
254 # endif
255 }
256 #endif
257
258
259 /**
260 * Gets the content of the GDTR CPU register.
261 * @param pGdtr Where to store the GDTR contents.
262 */
263 #if RT_INLINE_ASM_EXTERNAL
264 DECLASM(void) ASMGetGDTR(PRTGDTR pGdtr);
265 #else
266 DECLINLINE(void) ASMGetGDTR(PRTGDTR pGdtr)
267 {
268 # if RT_INLINE_ASM_GNU_STYLE
269 __asm__ __volatile__("sgdt %0" : "=m" (*pGdtr));
270 # else
271 __asm
272 {
273 # ifdef RT_ARCH_AMD64
274 mov rax, [pGdtr]
275 sgdt [rax]
276 # else
277 mov eax, [pGdtr]
278 sgdt [eax]
279 # endif
280 }
281 # endif
282 }
283 #endif
284
285
286 /**
287 * Sets the content of the GDTR CPU register.
288 * @param pGdtr Where to load the GDTR contents from
289 */
290 #if RT_INLINE_ASM_EXTERNAL
291 DECLASM(void) ASMSetGDTR(const RTGDTR *pGdtr);
292 #else
293 DECLINLINE(void) ASMSetGDTR(const RTGDTR *pGdtr)
294 {
295 # if RT_INLINE_ASM_GNU_STYLE
296 __asm__ __volatile__("lgdt %0" : : "m" (*pGdtr));
297 # else
298 __asm
299 {
300 # ifdef RT_ARCH_AMD64
301 mov rax, [pGdtr]
302 lgdt [rax]
303 # else
304 mov eax, [pGdtr]
305 lgdt [eax]
306 # endif
307 }
308 # endif
309 }
310 #endif
311
312
313
314 /**
315 * Get the cs register.
316 * @returns cs.
317 */
318 #if RT_INLINE_ASM_EXTERNAL
319 DECLASM(RTSEL) ASMGetCS(void);
320 #else
321 DECLINLINE(RTSEL) ASMGetCS(void)
322 {
323 RTSEL SelCS;
324 # if RT_INLINE_ASM_GNU_STYLE
325 __asm__ __volatile__("movw %%cs, %0\n\t" : "=r" (SelCS));
326 # else
327 __asm
328 {
329 mov ax, cs
330 mov [SelCS], ax
331 }
332 # endif
333 return SelCS;
334 }
335 #endif
336
337
338 /**
339 * Get the DS register.
340 * @returns DS.
341 */
342 #if RT_INLINE_ASM_EXTERNAL
343 DECLASM(RTSEL) ASMGetDS(void);
344 #else
345 DECLINLINE(RTSEL) ASMGetDS(void)
346 {
347 RTSEL SelDS;
348 # if RT_INLINE_ASM_GNU_STYLE
349 __asm__ __volatile__("movw %%ds, %0\n\t" : "=r" (SelDS));
350 # else
351 __asm
352 {
353 mov ax, ds
354 mov [SelDS], ax
355 }
356 # endif
357 return SelDS;
358 }
359 #endif
360
361
362 /**
363 * Get the ES register.
364 * @returns ES.
365 */
366 #if RT_INLINE_ASM_EXTERNAL
367 DECLASM(RTSEL) ASMGetES(void);
368 #else
369 DECLINLINE(RTSEL) ASMGetES(void)
370 {
371 RTSEL SelES;
372 # if RT_INLINE_ASM_GNU_STYLE
373 __asm__ __volatile__("movw %%es, %0\n\t" : "=r" (SelES));
374 # else
375 __asm
376 {
377 mov ax, es
378 mov [SelES], ax
379 }
380 # endif
381 return SelES;
382 }
383 #endif
384
385
386 /**
387 * Get the FS register.
388 * @returns FS.
389 */
390 #if RT_INLINE_ASM_EXTERNAL
391 DECLASM(RTSEL) ASMGetFS(void);
392 #else
393 DECLINLINE(RTSEL) ASMGetFS(void)
394 {
395 RTSEL SelFS;
396 # if RT_INLINE_ASM_GNU_STYLE
397 __asm__ __volatile__("movw %%fs, %0\n\t" : "=r" (SelFS));
398 # else
399 __asm
400 {
401 mov ax, fs
402 mov [SelFS], ax
403 }
404 # endif
405 return SelFS;
406 }
407 #endif
408
409
410 /**
411 * Get the GS register.
412 * @returns GS.
413 */
414 #if RT_INLINE_ASM_EXTERNAL
415 DECLASM(RTSEL) ASMGetGS(void);
416 #else
417 DECLINLINE(RTSEL) ASMGetGS(void)
418 {
419 RTSEL SelGS;
420 # if RT_INLINE_ASM_GNU_STYLE
421 __asm__ __volatile__("movw %%gs, %0\n\t" : "=r" (SelGS));
422 # else
423 __asm
424 {
425 mov ax, gs
426 mov [SelGS], ax
427 }
428 # endif
429 return SelGS;
430 }
431 #endif
432
433
434 /**
435 * Get the SS register.
436 * @returns SS.
437 */
438 #if RT_INLINE_ASM_EXTERNAL
439 DECLASM(RTSEL) ASMGetSS(void);
440 #else
441 DECLINLINE(RTSEL) ASMGetSS(void)
442 {
443 RTSEL SelSS;
444 # if RT_INLINE_ASM_GNU_STYLE
445 __asm__ __volatile__("movw %%ss, %0\n\t" : "=r" (SelSS));
446 # else
447 __asm
448 {
449 mov ax, ss
450 mov [SelSS], ax
451 }
452 # endif
453 return SelSS;
454 }
455 #endif
456
457
458 /**
459 * Get the TR register.
460 * @returns TR.
461 */
462 #if RT_INLINE_ASM_EXTERNAL
463 DECLASM(RTSEL) ASMGetTR(void);
464 #else
465 DECLINLINE(RTSEL) ASMGetTR(void)
466 {
467 RTSEL SelTR;
468 # if RT_INLINE_ASM_GNU_STYLE
469 __asm__ __volatile__("str %w0\n\t" : "=r" (SelTR));
470 # else
471 __asm
472 {
473 str ax
474 mov [SelTR], ax
475 }
476 # endif
477 return SelTR;
478 }
479 #endif
480
481
482 /**
483 * Get the LDTR register.
484 * @returns LDTR.
485 */
486 #if RT_INLINE_ASM_EXTERNAL
487 DECLASM(RTSEL) ASMGetLDTR(void);
488 #else
489 DECLINLINE(RTSEL) ASMGetLDTR(void)
490 {
491 RTSEL SelLDTR;
492 # if RT_INLINE_ASM_GNU_STYLE
493 __asm__ __volatile__("sldt %w0\n\t" : "=r" (SelLDTR));
494 # else
495 __asm
496 {
497 sldt ax
498 mov [SelLDTR], ax
499 }
500 # endif
501 return SelLDTR;
502 }
503 #endif
504
505
506 /**
507 * Get the access rights for the segment selector.
508 *
509 * @returns The access rights on success or UINT32_MAX on failure.
510 * @param uSel The selector value.
511 *
512 * @remarks Using UINT32_MAX for failure is chosen because valid access rights
513 * always have bits 0:7 as 0 (on both Intel & AMD).
514 */
515 #if RT_INLINE_ASM_EXTERNAL
516 DECLASM(uint32_t) ASMGetSegAttr(uint32_t uSel);
517 #else
518 DECLINLINE(uint32_t) ASMGetSegAttr(uint32_t uSel)
519 {
520 uint32_t uAttr;
521 /* LAR only accesses 16 bits of the source operand, but a 32-bit destination
522 register (eax) is required for getting the full 32-bit access rights. */
523 # if RT_INLINE_ASM_GNU_STYLE
524 __asm__ __volatile__("lar %1, %%eax\n\t"
525 "jz done%=\n\t"
526 "movl $0xffffffff, %%eax\n\t"
527 "done%=:\n\t"
528 "movl %%eax, %0\n\t"
529 : "=r" (uAttr)
530 : "r" (uSel)
531 : "cc", "%eax");
532 # else
533 __asm
534 {
535 lar eax, [uSel]
536 jz done
537 mov eax, 0ffffffffh
538 done:
539 mov [uAttr], eax
540 }
541 # endif
542 return uAttr;
543 }
544 #endif
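
/* Usage sketch (editor's illustration, not part of the original header): validating
 * a selector with ASMGetSegAttr().  The attribute layout follows the LAR
 * instruction (bits 0-7 zero, bit 15 = present); fAttr is a name made up for
 * this example and RT_BIT_32 comes from iprt/cdefs.h.
 *
 *      uint32_t const fAttr = ASMGetSegAttr(ASMGetSS());
 *      if (fAttr != UINT32_MAX && (fAttr & RT_BIT_32(15)))
 *      {
 *          ... the SS selector is valid and marked present ...
 *      }
 */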
545
546
547 /**
548 * Get the [RE]FLAGS register.
549 * @returns [RE]FLAGS.
550 */
551 #if RT_INLINE_ASM_EXTERNAL && RT_INLINE_ASM_USES_INTRIN < 15
552 DECLASM(RTCCUINTREG) ASMGetFlags(void);
553 #else
554 DECLINLINE(RTCCUINTREG) ASMGetFlags(void)
555 {
556 RTCCUINTREG uFlags;
557 # if RT_INLINE_ASM_GNU_STYLE
558 # ifdef RT_ARCH_AMD64
559 __asm__ __volatile__("pushfq\n\t"
560 "popq %0\n\t"
561 : "=r" (uFlags));
562 # else
563 __asm__ __volatile__("pushfl\n\t"
564 "popl %0\n\t"
565 : "=r" (uFlags));
566 # endif
567 # elif RT_INLINE_ASM_USES_INTRIN >= 15
568 uFlags = __readeflags();
569 # else
570 __asm
571 {
572 # ifdef RT_ARCH_AMD64
573 pushfq
574 pop [uFlags]
575 # else
576 pushfd
577 pop [uFlags]
578 # endif
579 }
580 # endif
581 return uFlags;
582 }
583 #endif
584
585
586 /**
587 * Set the [RE]FLAGS register.
588 * @param uFlags The new [RE]FLAGS value.
589 */
590 #if RT_INLINE_ASM_EXTERNAL && RT_INLINE_ASM_USES_INTRIN < 15
591 DECLASM(void) ASMSetFlags(RTCCUINTREG uFlags);
592 #else
593 DECLINLINE(void) ASMSetFlags(RTCCUINTREG uFlags)
594 {
595 # if RT_INLINE_ASM_GNU_STYLE
596 # ifdef RT_ARCH_AMD64
597 __asm__ __volatile__("pushq %0\n\t"
598 "popfq\n\t"
599 : : "g" (uFlags));
600 # else
601 __asm__ __volatile__("pushl %0\n\t"
602 "popfl\n\t"
603 : : "g" (uFlags));
604 # endif
605 # elif RT_INLINE_ASM_USES_INTRIN >= 15
606 __writeeflags(uFlags);
607 # else
608 __asm
609 {
610 # ifdef RT_ARCH_AMD64
611 push [uFlags]
612 popfq
613 # else
614 push [uFlags]
615 popfd
616 # endif
617 }
618 # endif
619 }
620 #endif
621
622
623 /**
624 * Modifies the [RE]FLAGS register.
625 * @returns Original value.
626 * @param fAndEfl Flags to keep (applied first).
627 * @param fOrEfl Flags to be set.
628 */
629 #if RT_INLINE_ASM_EXTERNAL && RT_INLINE_ASM_USES_INTRIN < 15
630 DECLASM(RTCCUINTREG) ASMChangeFlags(RTCCUINTREG fAndEfl, RTCCUINTREG fOrEfl);
631 #else
632 DECLINLINE(RTCCUINTREG) ASMChangeFlags(RTCCUINTREG fAndEfl, RTCCUINTREG fOrEfl)
633 {
634 RTCCUINTREG fOldEfl;
635 # if RT_INLINE_ASM_GNU_STYLE
636 # ifdef RT_ARCH_AMD64
637 __asm__ __volatile__("pushfq\n\t"
638 "movq (%%rsp), %0\n\t"
639 "andq %0, %1\n\t"
640 "orq %3, %1\n\t"
641 "mov %1, (%%rsp)\n\t"
642 "popfq\n\t"
643 : "=&r" (fOldEfl),
644 "=r" (fAndEfl)
645 : "1" (fAndEfl),
646 "rn" (fOrEfl) );
647 # else
648 __asm__ __volatile__("pushfl\n\t"
649 "movl (%%esp), %0\n\t"
650 "andl %1, (%%esp)\n\t"
651 "orl %2, (%%esp)\n\t"
652 "popfl\n\t"
653 : "=&r" (fOldEfl)
654 : "rn" (fAndEfl),
655 "rn" (fOrEfl) );
656 # endif
657 # elif RT_INLINE_ASM_USES_INTRIN >= 15
658 fOldEfl = __readeflags();
659 __writeeflags((fOldEfl & fAndEfl) | fOrEfl);
660 # else
661 __asm
662 {
663 # ifdef RT_ARCH_AMD64
664 mov rdx, [fAndEfl]
665 mov rcx, [fOrEfl]
666 pushfq
667 mov rax, [rsp]
668 and rdx, rax
669 or rdx, rcx
670 mov [rsp], rdx
671 popfq
672 mov [fOldEfl], rax
673 # else
674 mov edx, [fAndEfl]
675 mov ecx, [fOrEfl]
676 pushfd
677 mov eax, [esp]
678 and edx, eax
679 or edx, ecx
680 mov [esp], edx
681 popfd
682 mov [fOldEfl], eax
683 # endif
684 }
685 # endif
686 return fOldEfl;
687 }
688 #endif
689
690
691 /**
692 * Modifies the [RE]FLAGS register by ORing in one or more flags.
693 * @returns Original value.
694 * @param fOrEfl The flags to be set (ORed in).
695 */
696 #if RT_INLINE_ASM_EXTERNAL && RT_INLINE_ASM_USES_INTRIN < 15
697 DECLASM(RTCCUINTREG) ASMAddFlags(RTCCUINTREG fOrEfl);
698 #else
699 DECLINLINE(RTCCUINTREG) ASMAddFlags(RTCCUINTREG fOrEfl)
700 {
701 RTCCUINTREG fOldEfl;
702 # if RT_INLINE_ASM_GNU_STYLE
703 # ifdef RT_ARCH_AMD64
704 __asm__ __volatile__("pushfq\n\t"
705 "movq (%%rsp), %0\n\t"
706 "orq %1, (%%rsp)\n\t"
707 "popfq\n\t"
708 : "=&r" (fOldEfl)
709 : "rn" (fOrEfl) );
710 # else
711 __asm__ __volatile__("pushfl\n\t"
712 "movl (%%esp), %0\n\t"
713 "orl %1, (%%esp)\n\t"
714 "popfl\n\t"
715 : "=&r" (fOldEfl)
716 : "rn" (fOrEfl) );
717 # endif
718 # elif RT_INLINE_ASM_USES_INTRIN >= 15
719 fOldEfl = __readeflags();
720 __writeeflags(fOldEfl | fOrEfl);
721 # else
722 __asm
723 {
724 # ifdef RT_ARCH_AMD64
725 mov rcx, [fOrEfl]
726 pushfq
727 mov rdx, [rsp]
728 or [rsp], rcx
729 popfq
730 mov [fOldEfl], rdx
731 # else
732 mov ecx, [fOrEfl]
733 pushfd
734 mov edx, [esp]
735 or [esp], ecx
736 popfd
737 mov [fOldEfl], edx
738 # endif
739 }
740 # endif
741 return fOldEfl;
742 }
743 #endif
744
745
746 /**
747 * Modifies the [RE]FLAGS register by AND'ing out one or more flags.
748 * @returns Original value.
749 * @param fAndEfl The flags to keep.
750 */
751 #if RT_INLINE_ASM_EXTERNAL && RT_INLINE_ASM_USES_INTRIN < 15
752 DECLASM(RTCCUINTREG) ASMClearFlags(RTCCUINTREG fAndEfl);
753 #else
754 DECLINLINE(RTCCUINTREG) ASMClearFlags(RTCCUINTREG fAndEfl)
755 {
756 RTCCUINTREG fOldEfl;
757 # if RT_INLINE_ASM_GNU_STYLE
758 # ifdef RT_ARCH_AMD64
759 __asm__ __volatile__("pushfq\n\t"
760 "movq (%%rsp), %0\n\t"
761 "andq %1, (%%rsp)\n\t"
762 "popfq\n\t"
763 : "=&r" (fOldEfl)
764 : "rn" (fAndEfl) );
765 # else
766 __asm__ __volatile__("pushfl\n\t"
767 "movl (%%esp), %0\n\t"
768 "andl %1, (%%esp)\n\t"
769 "popfl\n\t"
770 : "=&r" (fOldEfl)
771 : "rn" (fAndEfl) );
772 # endif
773 # elif RT_INLINE_ASM_USES_INTRIN >= 15
774 fOldEfl = __readeflags();
775 __writeeflags(fOldEfl & fAndEfl);
776 # else
777 __asm
778 {
779 # ifdef RT_ARCH_AMD64
780 mov rdx, [fAndEfl]
781 pushfq
782 mov rax, [rsp]
783 and [rsp], rdx
784 popfq
785 mov [fOldEfl], rax
786 # else
787 mov edx, [fAndEfl]
788 pushfd
789 mov eax, [esp]
790 and [esp], edx
791 popfd
792 mov [fOldEfl], eax
793 # endif
794 }
795 # endif
796 return fOldEfl;
797 }
798 #endif
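
/* Usage sketch (editor's illustration, not part of the original header): briefly
 * masking EFLAGS.IF with the flag helpers above.  0x200 is X86_EFL_IF; real
 * code would use the constant from iprt/x86.h.
 *
 *      RTCCUINTREG const fSavedEfl = ASMClearFlags(~(RTCCUINTREG)0x200);
 *      ... code that must run with interrupts disabled ...
 *      ASMSetFlags(fSavedEfl);
 */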
799
800
801 /**
802 * Gets the content of the CPU timestamp counter register.
803 *
804 * @returns TSC.
805 */
806 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
807 DECLASM(uint64_t) ASMReadTSC(void);
808 #else
809 DECLINLINE(uint64_t) ASMReadTSC(void)
810 {
811 RTUINT64U u;
812 # if RT_INLINE_ASM_GNU_STYLE
813 __asm__ __volatile__("rdtsc\n\t" : "=a" (u.s.Lo), "=d" (u.s.Hi));
814 # else
815 # if RT_INLINE_ASM_USES_INTRIN
816 u.u = __rdtsc();
817 # else
818 __asm
819 {
820 rdtsc
821 mov [u.s.Lo], eax
822 mov [u.s.Hi], edx
823 }
824 # endif
825 # endif
826 return u.u;
827 }
828 #endif
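
/* Usage sketch (editor's illustration, not part of the original header): simple
 * cycle counting with ASMReadTSC().  RDTSC is not a serializing instruction,
 * so precise measurements normally add serialization around it; that is
 * omitted here.
 *
 *      uint64_t const uTscStart   = ASMReadTSC();
 *      ... code being measured ...
 *      uint64_t const cTscElapsed = ASMReadTSC() - uTscStart;
 */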
829
830
831 /**
832 * Gets the content of the CPU timestamp counter register and the
833 * associated AUX value.
834 *
835 * @returns TSC.
836 * @param puAux Where to store the AUX value.
837 */
838 #if RT_INLINE_ASM_EXTERNAL && RT_INLINE_ASM_USES_INTRIN < 15
839 DECLASM(uint64_t) ASMReadTscWithAux(uint32_t *puAux);
840 #else
841 DECLINLINE(uint64_t) ASMReadTscWithAux(uint32_t *puAux)
842 {
843 RTUINT64U u;
844 # if RT_INLINE_ASM_GNU_STYLE
845 /* rdtscp is not supported by ancient linux build VM of course :-( */
846 /*__asm__ __volatile__("rdtscp\n\t" : "=a" (u.s.Lo), "=d" (u.s.Hi), "=c" (*puAux)); */
847 __asm__ __volatile__(".byte 0x0f,0x01,0xf9\n\t" : "=a" (u.s.Lo), "=d" (u.s.Hi), "=c" (*puAux));
848 # else
849 # if RT_INLINE_ASM_USES_INTRIN >= 15
850 u.u = __rdtscp(puAux);
851 # else
852 __asm
853 {
854 rdtscp
855 mov [u.s.Lo], eax
856 mov [u.s.Hi], edx
857 mov eax, [puAux]
858 mov [eax], ecx
859 }
860 # endif
861 # endif
862 return u.u;
863 }
864 #endif
865
866
867 /**
868 * Performs the cpuid instruction returning all registers.
869 *
870 * @param uOperator CPUID operation (eax).
871 * @param pvEAX Where to store eax.
872 * @param pvEBX Where to store ebx.
873 * @param pvECX Where to store ecx.
874 * @param pvEDX Where to store edx.
875 * @remark We're using void pointers to ease the use of special bitfield structures and such.
876 */
877 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
878 DECLASM(void) ASMCpuId(uint32_t uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX);
879 #else
880 DECLINLINE(void) ASMCpuId(uint32_t uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
881 {
882 # if RT_INLINE_ASM_GNU_STYLE
883 # ifdef RT_ARCH_AMD64
884 RTCCUINTREG uRAX, uRBX, uRCX, uRDX;
885 __asm__ __volatile__ ("cpuid\n\t"
886 : "=a" (uRAX),
887 "=b" (uRBX),
888 "=c" (uRCX),
889 "=d" (uRDX)
890 : "0" (uOperator), "2" (0));
891 *(uint32_t *)pvEAX = (uint32_t)uRAX;
892 *(uint32_t *)pvEBX = (uint32_t)uRBX;
893 *(uint32_t *)pvECX = (uint32_t)uRCX;
894 *(uint32_t *)pvEDX = (uint32_t)uRDX;
895 # else
896 __asm__ __volatile__ ("xchgl %%ebx, %1\n\t"
897 "cpuid\n\t"
898 "xchgl %%ebx, %1\n\t"
899 : "=a" (*(uint32_t *)pvEAX),
900 "=r" (*(uint32_t *)pvEBX),
901 "=c" (*(uint32_t *)pvECX),
902 "=d" (*(uint32_t *)pvEDX)
903 : "0" (uOperator), "2" (0));
904 # endif
905
906 # elif RT_INLINE_ASM_USES_INTRIN
907 int aInfo[4];
908 __cpuid(aInfo, uOperator);
909 *(uint32_t *)pvEAX = aInfo[0];
910 *(uint32_t *)pvEBX = aInfo[1];
911 *(uint32_t *)pvECX = aInfo[2];
912 *(uint32_t *)pvEDX = aInfo[3];
913
914 # else
915 uint32_t uEAX;
916 uint32_t uEBX;
917 uint32_t uECX;
918 uint32_t uEDX;
919 __asm
920 {
921 push ebx
922 mov eax, [uOperator]
923 cpuid
924 mov [uEAX], eax
925 mov [uEBX], ebx
926 mov [uECX], ecx
927 mov [uEDX], edx
928 pop ebx
929 }
930 *(uint32_t *)pvEAX = uEAX;
931 *(uint32_t *)pvEBX = uEBX;
932 *(uint32_t *)pvECX = uECX;
933 *(uint32_t *)pvEDX = uEDX;
934 # endif
935 }
936 #endif
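
/* Usage sketch (editor's illustration, not part of the original header): reading
 * the CPU vendor string with ASMCpuId().  The EBX/EDX/ECX ordering is the
 * CPUID leaf 0 convention; szVendor and uMaxLeaf are names made up for this
 * example.
 *
 *      char     szVendor[13];
 *      uint32_t uMaxLeaf;
 *      ASMCpuId(0, &uMaxLeaf, &szVendor[0], &szVendor[8], &szVendor[4]);
 *      szVendor[12] = '\0';
 */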
937
938
939 /**
940 * Performs the CPUID instruction with EAX and ECX input returning ALL output
941 * registers.
942 *
943 * @param uOperator CPUID operation (eax).
944 * @param uIdxECX ecx index
945 * @param pvEAX Where to store eax.
946 * @param pvEBX Where to store ebx.
947 * @param pvECX Where to store ecx.
948 * @param pvEDX Where to store edx.
949 * @remark We're using void pointers to ease the use of special bitfield structures and such.
950 */
951 #if RT_INLINE_ASM_EXTERNAL || RT_INLINE_ASM_USES_INTRIN
952 DECLASM(void) ASMCpuId_Idx_ECX(uint32_t uOperator, uint32_t uIdxECX, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX);
953 #else
954 DECLINLINE(void) ASMCpuId_Idx_ECX(uint32_t uOperator, uint32_t uIdxECX, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
955 {
956 # if RT_INLINE_ASM_GNU_STYLE
957 # ifdef RT_ARCH_AMD64
958 RTCCUINTREG uRAX, uRBX, uRCX, uRDX;
959 __asm__ ("cpuid\n\t"
960 : "=a" (uRAX),
961 "=b" (uRBX),
962 "=c" (uRCX),
963 "=d" (uRDX)
964 : "0" (uOperator),
965 "2" (uIdxECX));
966 *(uint32_t *)pvEAX = (uint32_t)uRAX;
967 *(uint32_t *)pvEBX = (uint32_t)uRBX;
968 *(uint32_t *)pvECX = (uint32_t)uRCX;
969 *(uint32_t *)pvEDX = (uint32_t)uRDX;
970 # else
971 __asm__ ("xchgl %%ebx, %1\n\t"
972 "cpuid\n\t"
973 "xchgl %%ebx, %1\n\t"
974 : "=a" (*(uint32_t *)pvEAX),
975 "=r" (*(uint32_t *)pvEBX),
976 "=c" (*(uint32_t *)pvECX),
977 "=d" (*(uint32_t *)pvEDX)
978 : "0" (uOperator),
979 "2" (uIdxECX));
980 # endif
981
982 # elif RT_INLINE_ASM_USES_INTRIN
983 int aInfo[4];
984 __cpuidex(aInfo, uOperator, uIdxECX);
985 *(uint32_t *)pvEAX = aInfo[0];
986 *(uint32_t *)pvEBX = aInfo[1];
987 *(uint32_t *)pvECX = aInfo[2];
988 *(uint32_t *)pvEDX = aInfo[3];
989
990 # else
991 uint32_t uEAX;
992 uint32_t uEBX;
993 uint32_t uECX;
994 uint32_t uEDX;
995 __asm
996 {
997 push ebx
998 mov eax, [uOperator]
999 mov ecx, [uIdxECX]
1000 cpuid
1001 mov [uEAX], eax
1002 mov [uEBX], ebx
1003 mov [uECX], ecx
1004 mov [uEDX], edx
1005 pop ebx
1006 }
1007 *(uint32_t *)pvEAX = uEAX;
1008 *(uint32_t *)pvEBX = uEBX;
1009 *(uint32_t *)pvECX = uECX;
1010 *(uint32_t *)pvEDX = uEDX;
1011 # endif
1012 }
1013 #endif
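
/* Usage sketch (editor's illustration, not part of the original header): querying
 * a sub-leaf with ASMCpuId_Idx_ECX().  Leaf 7, sub-leaf 0 holds the structured
 * extended feature flags; the variable names are made up for this example.
 *
 *      uint32_t uEAX, uEBX, uECX, uEDX;
 *      ASMCpuId_Idx_ECX(7, 0, &uEAX, &uEBX, &uECX, &uEDX);
 *      ... uEBX/uECX/uEDX now hold the leaf 7 feature bits ...
 */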
1014
1015
1016 /**
1017 * CPUID variant that initializes all 4 registers before the CPUID instruction.
1018 *
1019 * @returns The EAX result value.
1020 * @param uOperator CPUID operation (eax).
1021 * @param uInitEBX The value to assign EBX prior to the CPUID instruction.
1022 * @param uInitECX The value to assign ECX prior to the CPUID instruction.
1023 * @param uInitEDX The value to assign EDX prior to the CPUID instruction.
1024 * @param pvEAX Where to store eax. Optional.
1025 * @param pvEBX Where to store ebx. Optional.
1026 * @param pvECX Where to store ecx. Optional.
1027 * @param pvEDX Where to store edx. Optional.
1028 */
1029 DECLASM(uint32_t) ASMCpuIdExSlow(uint32_t uOperator, uint32_t uInitEBX, uint32_t uInitECX, uint32_t uInitEDX,
1030 void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX);
1031
1032
1033 /**
1034 * Performs the cpuid instruction returning ecx and edx.
1035 *
1036 * @param uOperator CPUID operation (eax).
1037 * @param pvECX Where to store ecx.
1038 * @param pvEDX Where to store edx.
1039 * @remark We're using void pointers to ease the use of special bitfield structures and such.
1040 */
1041 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1042 DECLASM(void) ASMCpuId_ECX_EDX(uint32_t uOperator, void *pvECX, void *pvEDX);
1043 #else
1044 DECLINLINE(void) ASMCpuId_ECX_EDX(uint32_t uOperator, void *pvECX, void *pvEDX)
1045 {
1046 uint32_t uEBX;
1047 ASMCpuId(uOperator, &uOperator, &uEBX, pvECX, pvEDX);
1048 }
1049 #endif
1050
1051
1052 /**
1053 * Performs the cpuid instruction returning eax.
1054 *
1055 * @param uOperator CPUID operation (eax).
1056 * @returns EAX after cpuid operation.
1057 */
1058 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1059 DECLASM(uint32_t) ASMCpuId_EAX(uint32_t uOperator);
1060 #else
1061 DECLINLINE(uint32_t) ASMCpuId_EAX(uint32_t uOperator)
1062 {
1063 RTCCUINTREG xAX;
1064 # if RT_INLINE_ASM_GNU_STYLE
1065 # ifdef RT_ARCH_AMD64
1066 __asm__ ("cpuid"
1067 : "=a" (xAX)
1068 : "0" (uOperator)
1069 : "rbx", "rcx", "rdx");
1070 # elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
1071 __asm__ ("push %%ebx\n\t"
1072 "cpuid\n\t"
1073 "pop %%ebx\n\t"
1074 : "=a" (xAX)
1075 : "0" (uOperator)
1076 : "ecx", "edx");
1077 # else
1078 __asm__ ("cpuid"
1079 : "=a" (xAX)
1080 : "0" (uOperator)
1081 : "edx", "ecx", "ebx");
1082 # endif
1083
1084 # elif RT_INLINE_ASM_USES_INTRIN
1085 int aInfo[4];
1086 __cpuid(aInfo, uOperator);
1087 xAX = aInfo[0];
1088
1089 # else
1090 __asm
1091 {
1092 push ebx
1093 mov eax, [uOperator]
1094 cpuid
1095 mov [xAX], eax
1096 pop ebx
1097 }
1098 # endif
1099 return (uint32_t)xAX;
1100 }
1101 #endif
1102
1103
1104 /**
1105 * Performs the cpuid instruction returning ebx.
1106 *
1107 * @param uOperator CPUID operation (eax).
1108 * @returns EBX after cpuid operation.
1109 */
1110 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1111 DECLASM(uint32_t) ASMCpuId_EBX(uint32_t uOperator);
1112 #else
1113 DECLINLINE(uint32_t) ASMCpuId_EBX(uint32_t uOperator)
1114 {
1115 RTCCUINTREG xBX;
1116 # if RT_INLINE_ASM_GNU_STYLE
1117 # ifdef RT_ARCH_AMD64
1118 RTCCUINTREG uSpill;
1119 __asm__ ("cpuid"
1120 : "=a" (uSpill),
1121 "=b" (xBX)
1122 : "0" (uOperator)
1123 : "rdx", "rcx");
1124 # elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
1125 __asm__ ("push %%ebx\n\t"
1126 "cpuid\n\t"
1127 "mov %%ebx, %%edx\n\t"
1128 "pop %%ebx\n\t"
1129 : "=a" (uOperator),
1130 "=d" (xBX)
1131 : "0" (uOperator)
1132 : "ecx");
1133 # else
1134 __asm__ ("cpuid"
1135 : "=a" (uOperator),
1136 "=b" (xBX)
1137 : "0" (uOperator)
1138 : "edx", "ecx");
1139 # endif
1140
1141 # elif RT_INLINE_ASM_USES_INTRIN
1142 int aInfo[4];
1143 __cpuid(aInfo, uOperator);
1144 xBX = aInfo[1];
1145
1146 # else
1147 __asm
1148 {
1149 push ebx
1150 mov eax, [uOperator]
1151 cpuid
1152 mov [xBX], ebx
1153 pop ebx
1154 }
1155 # endif
1156 return (uint32_t)xBX;
1157 }
1158 #endif
1159
1160
1161 /**
1162 * Performs the cpuid instruction returning ecx.
1163 *
1164 * @param uOperator CPUID operation (eax).
1165 * @returns ECX after cpuid operation.
1166 */
1167 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1168 DECLASM(uint32_t) ASMCpuId_ECX(uint32_t uOperator);
1169 #else
1170 DECLINLINE(uint32_t) ASMCpuId_ECX(uint32_t uOperator)
1171 {
1172 RTCCUINTREG xCX;
1173 # if RT_INLINE_ASM_GNU_STYLE
1174 # ifdef RT_ARCH_AMD64
1175 RTCCUINTREG uSpill;
1176 __asm__ ("cpuid"
1177 : "=a" (uSpill),
1178 "=c" (xCX)
1179 : "0" (uOperator)
1180 : "rbx", "rdx");
1181 # elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
1182 __asm__ ("push %%ebx\n\t"
1183 "cpuid\n\t"
1184 "pop %%ebx\n\t"
1185 : "=a" (uOperator),
1186 "=c" (xCX)
1187 : "0" (uOperator)
1188 : "edx");
1189 # else
1190 __asm__ ("cpuid"
1191 : "=a" (uOperator),
1192 "=c" (xCX)
1193 : "0" (uOperator)
1194 : "ebx", "edx");
1195
1196 # endif
1197
1198 # elif RT_INLINE_ASM_USES_INTRIN
1199 int aInfo[4];
1200 __cpuid(aInfo, uOperator);
1201 xCX = aInfo[2];
1202
1203 # else
1204 __asm
1205 {
1206 push ebx
1207 mov eax, [uOperator]
1208 cpuid
1209 mov [xCX], ecx
1210 pop ebx
1211 }
1212 # endif
1213 return (uint32_t)xCX;
1214 }
1215 #endif
1216
1217
1218 /**
1219 * Performs the cpuid instruction returning edx.
1220 *
1221 * @param uOperator CPUID operation (eax).
1222 * @returns EDX after cpuid operation.
1223 */
1224 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1225 DECLASM(uint32_t) ASMCpuId_EDX(uint32_t uOperator);
1226 #else
1227 DECLINLINE(uint32_t) ASMCpuId_EDX(uint32_t uOperator)
1228 {
1229 RTCCUINTREG xDX;
1230 # if RT_INLINE_ASM_GNU_STYLE
1231 # ifdef RT_ARCH_AMD64
1232 RTCCUINTREG uSpill;
1233 __asm__ ("cpuid"
1234 : "=a" (uSpill),
1235 "=d" (xDX)
1236 : "0" (uOperator)
1237 : "rbx", "rcx");
1238 # elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
1239 __asm__ ("push %%ebx\n\t"
1240 "cpuid\n\t"
1241 "pop %%ebx\n\t"
1242 : "=a" (uOperator),
1243 "=d" (xDX)
1244 : "0" (uOperator)
1245 : "ecx");
1246 # else
1247 __asm__ ("cpuid"
1248 : "=a" (uOperator),
1249 "=d" (xDX)
1250 : "0" (uOperator)
1251 : "ebx", "ecx");
1252 # endif
1253
1254 # elif RT_INLINE_ASM_USES_INTRIN
1255 int aInfo[4];
1256 __cpuid(aInfo, uOperator);
1257 xDX = aInfo[3];
1258
1259 # else
1260 __asm
1261 {
1262 push ebx
1263 mov eax, [uOperator]
1264 cpuid
1265 mov [xDX], edx
1266 pop ebx
1267 }
1268 # endif
1269 return (uint32_t)xDX;
1270 }
1271 #endif
1272
1273
1274 /**
1275 * Checks if the current CPU supports CPUID.
1276 *
1277 * @returns true if CPUID is supported.
1278 */
1279 #ifdef __WATCOMC__
1280 DECLASM(bool) ASMHasCpuId(void);
1281 #else
1282 DECLINLINE(bool) ASMHasCpuId(void)
1283 {
1284 # ifdef RT_ARCH_AMD64
1285 return true; /* ASSUME that all amd64 compatible CPUs have cpuid. */
1286 # else /* !RT_ARCH_AMD64 */
1287 bool fRet = false;
1288 # if RT_INLINE_ASM_GNU_STYLE
1289 uint32_t u1;
1290 uint32_t u2;
1291 __asm__ ("pushf\n\t"
1292 "pop %1\n\t"
1293 "mov %1, %2\n\t"
1294 "xorl $0x200000, %1\n\t"
1295 "push %1\n\t"
1296 "popf\n\t"
1297 "pushf\n\t"
1298 "pop %1\n\t"
1299 "cmpl %1, %2\n\t"
1300 "setne %0\n\t"
1301 "push %2\n\t"
1302 "popf\n\t"
1303 : "=m" (fRet), "=r" (u1), "=r" (u2));
1304 # else
1305 __asm
1306 {
1307 pushfd
1308 pop eax
1309 mov ebx, eax
1310 xor eax, 0200000h
1311 push eax
1312 popfd
1313 pushfd
1314 pop eax
1315 cmp eax, ebx
1316 setne fRet
1317 push ebx
1318 popfd
1319 }
1320 # endif
1321 return fRet;
1322 # endif /* !RT_ARCH_AMD64 */
1323 }
1324 #endif
1325
1326
1327 /**
1328 * Gets the APIC ID of the current CPU.
1329 *
1330 * @returns the APIC ID.
1331 */
1332 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1333 DECLASM(uint8_t) ASMGetApicId(void);
1334 #else
1335 DECLINLINE(uint8_t) ASMGetApicId(void)
1336 {
1337 RTCCUINTREG xBX;
1338 # if RT_INLINE_ASM_GNU_STYLE
1339 # ifdef RT_ARCH_AMD64
1340 RTCCUINTREG uSpill;
1341 __asm__ __volatile__ ("cpuid"
1342 : "=a" (uSpill),
1343 "=b" (xBX)
1344 : "0" (1)
1345 : "rcx", "rdx");
1346 # elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
1347 RTCCUINTREG uSpill;
1348 __asm__ __volatile__ ("mov %%ebx,%1\n\t"
1349 "cpuid\n\t"
1350 "xchgl %%ebx,%1\n\t"
1351 : "=a" (uSpill),
1352 "=rm" (xBX)
1353 : "0" (1)
1354 : "ecx", "edx");
1355 # else
1356 RTCCUINTREG uSpill;
1357 __asm__ __volatile__ ("cpuid"
1358 : "=a" (uSpill),
1359 "=b" (xBX)
1360 : "0" (1)
1361 : "ecx", "edx");
1362 # endif
1363
1364 # elif RT_INLINE_ASM_USES_INTRIN
1365 int aInfo[4];
1366 __cpuid(aInfo, 1);
1367 xBX = aInfo[1];
1368
1369 # else
1370 __asm
1371 {
1372 push ebx
1373 mov eax, 1
1374 cpuid
1375 mov [xBX], ebx
1376 pop ebx
1377 }
1378 # endif
1379 return (uint8_t)(xBX >> 24);
1380 }
1381 #endif
1382
1383
1384 /**
1385 * Tests if it is a genuine Intel CPU based on the ASMCpuId(0) output.
1386 *
1387 * @returns true/false.
1388 * @param uEBX EBX return from ASMCpuId(0)
1389 * @param uECX ECX return from ASMCpuId(0)
1390 * @param uEDX EDX return from ASMCpuId(0)
1391 */
1392 DECLINLINE(bool) ASMIsIntelCpuEx(uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
1393 {
1394 return uEBX == UINT32_C(0x756e6547)
1395 && uECX == UINT32_C(0x6c65746e)
1396 && uEDX == UINT32_C(0x49656e69);
1397 }
1398
1399
1400 /**
1401 * Tests if this is a genuine Intel CPU.
1402 *
1403 * @returns true/false.
1404 * @remarks ASSUMES that cpuid is supported by the CPU.
1405 */
1406 DECLINLINE(bool) ASMIsIntelCpu(void)
1407 {
1408 uint32_t uEAX, uEBX, uECX, uEDX;
1409 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
1410 return ASMIsIntelCpuEx(uEBX, uECX, uEDX);
1411 }
1412
1413
1414 /**
1415 * Tests if it is an authentic AMD CPU based on the ASMCpuId(0) output.
1416 *
1417 * @returns true/false.
1418 * @param uEBX EBX return from ASMCpuId(0)
1419 * @param uECX ECX return from ASMCpuId(0)
1420 * @param uEDX EDX return from ASMCpuId(0)
1421 */
1422 DECLINLINE(bool) ASMIsAmdCpuEx(uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
1423 {
1424 return uEBX == UINT32_C(0x68747541)
1425 && uECX == UINT32_C(0x444d4163)
1426 && uEDX == UINT32_C(0x69746e65);
1427 }
1428
1429
1430 /**
1431 * Tests if this is an authentic AMD CPU.
1432 *
1433 * @returns true/false.
1434 * @remarks ASSUMES that cpuid is supported by the CPU.
1435 */
1436 DECLINLINE(bool) ASMIsAmdCpu(void)
1437 {
1438 uint32_t uEAX, uEBX, uECX, uEDX;
1439 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
1440 return ASMIsAmdCpuEx(uEBX, uECX, uEDX);
1441 }
1442
1443
1444 /**
1445 * Tests if it is a centaur hauling VIA CPU based on the ASMCpuId(0) output.
1446 *
1447 * @returns true/false.
1448 * @param uEBX EBX return from ASMCpuId(0).
1449 * @param uECX ECX return from ASMCpuId(0).
1450 * @param uEDX EDX return from ASMCpuId(0).
1451 */
1452 DECLINLINE(bool) ASMIsViaCentaurCpuEx(uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
1453 {
1454 return uEBX == UINT32_C(0x746e6543)
1455 && uECX == UINT32_C(0x736c7561)
1456 && uEDX == UINT32_C(0x48727561);
1457 }
1458
1459
1460 /**
1461 * Tests if this is a centaur hauling VIA CPU.
1462 *
1463 * @returns true/false.
1464 * @remarks ASSUMES that cpuid is supported by the CPU.
1465 */
1466 DECLINLINE(bool) ASMIsViaCentaurCpu(void)
1467 {
1468 uint32_t uEAX, uEBX, uECX, uEDX;
1469 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
1470 return ASMIsViaCentaurCpuEx(uEBX, uECX, uEDX);
1471 }
1472
1473
1474 /**
1475 * Checks whether ASMCpuId_EAX(0x00000000) indicates a valid range.
1476 *
1477 *
1478 * @returns true/false.
1479 * @param uEAX The EAX value of CPUID leaf 0x00000000.
1480 *
1481 * @note This only succeeds if there are at least two leaves in the range.
1482 * @remarks The upper range limit is just some half reasonable value we've
1483 * picked out of thin air.
1484 */
1485 DECLINLINE(bool) ASMIsValidStdRange(uint32_t uEAX)
1486 {
1487 return uEAX >= UINT32_C(0x00000001) && uEAX <= UINT32_C(0x000fffff);
1488 }
1489
1490
1491 /**
1492 * Checks whether ASMCpuId_EAX(0x80000000) indicates a valid range.
1493 *
1494 * This only succeeds if there are at least two leaves in the range.
1495 *
1496 * @returns true/false.
1497 * @param uEAX The EAX value of CPUID leaf 0x80000000.
1498 *
1499 * @note This only succeeds if there are at least two leaves in the range.
1500 * @remarks The upper range limit is just some half reasonable value we've
1501 * picked out of thin air.
1502 */
1503 DECLINLINE(bool) ASMIsValidExtRange(uint32_t uEAX)
1504 {
1505 return uEAX >= UINT32_C(0x80000001) && uEAX <= UINT32_C(0x800fffff);
1506 }
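
/* Usage sketch (editor's illustration, not part of the original header): guarding
 * access to the extended CPUID leaves with ASMIsValidExtRange(); uExtMax and
 * fExtFeatEdx are names made up for this example.
 *
 *      uint32_t const uExtMax     = ASMCpuId_EAX(UINT32_C(0x80000000));
 *      uint32_t       fExtFeatEdx = 0;
 *      if (ASMIsValidExtRange(uExtMax))
 *          fExtFeatEdx = ASMCpuId_EDX(UINT32_C(0x80000001));
 */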
1507
1508
1509 /**
1510 * Extracts the CPU family from ASMCpuId(1) or ASMCpuId(0x80000001)
1511 *
1512 * @returns Family.
1513 * @param uEAX EAX return from ASMCpuId(1) or ASMCpuId(0x80000001).
1514 */
1515 DECLINLINE(uint32_t) ASMGetCpuFamily(uint32_t uEAX)
1516 {
1517 return ((uEAX >> 8) & 0xf) == 0xf
1518 ? ((uEAX >> 20) & 0x7f) + 0xf
1519 : ((uEAX >> 8) & 0xf);
1520 }
1521
1522
1523 /**
1524 * Extracts the CPU model from ASMCpuId(1) or ASMCpuId(0x80000001), Intel variant.
1525 *
1526 * @returns Model.
1527 * @param uEAX EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
1528 */
1529 DECLINLINE(uint32_t) ASMGetCpuModelIntel(uint32_t uEAX)
1530 {
1531 return ((uEAX >> 8) & 0xf) == 0xf || (((uEAX >> 8) & 0xf) == 0x6) /* family! */
1532 ? ((uEAX >> 4) & 0xf) | ((uEAX >> 12) & 0xf0)
1533 : ((uEAX >> 4) & 0xf);
1534 }
1535
1536
1537 /**
1538 * Extracts the CPU model from ASMCpuId(1) or ASMCpuId(0x80000001), AMD variant.
1539 *
1540 * @returns Model.
1541 * @param uEAX EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
1542 */
1543 DECLINLINE(uint32_t) ASMGetCpuModelAMD(uint32_t uEAX)
1544 {
1545 return ((uEAX >> 8) & 0xf) == 0xf
1546 ? ((uEAX >> 4) & 0xf) | ((uEAX >> 12) & 0xf0)
1547 : ((uEAX >> 4) & 0xf);
1548 }
1549
1550
1551 /**
1552 * Extracts the CPU model from ASMCpuId(1) or ASMCpuId(0x80000001)
1553 *
1554 * @returns Model.
1555 * @param uEAX EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
1556 * @param fIntel Whether it's an intel CPU. Use ASMIsIntelCpuEx() or ASMIsIntelCpu().
1557 */
1558 DECLINLINE(uint32_t) ASMGetCpuModel(uint32_t uEAX, bool fIntel)
1559 {
1560 return ((uEAX >> 8) & 0xf) == 0xf || (((uEAX >> 8) & 0xf) == 0x6 && fIntel) /* family! */
1561 ? ((uEAX >> 4) & 0xf) | ((uEAX >> 12) & 0xf0)
1562 : ((uEAX >> 4) & 0xf);
1563 }
1564
1565
1566 /**
1567 * Extracts the CPU stepping from ASMCpuId(1) or ASMCpuId(0x80000001)
1568 *
1569 * @returns Stepping.
1570 * @param uEAX EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
1571 */
1572 DECLINLINE(uint32_t) ASMGetCpuStepping(uint32_t uEAX)
1573 {
1574 return uEAX & 0xf;
1575 }
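
/* Usage sketch (editor's illustration, not part of the original header): decoding
 * the CPU signature from CPUID leaf 1 with the helpers above; the variable
 * names are made up for this example.
 *
 *      uint32_t const uSignature = ASMCpuId_EAX(1);
 *      uint32_t const uFamily    = ASMGetCpuFamily(uSignature);
 *      uint32_t const uModel     = ASMGetCpuModel(uSignature, ASMIsIntelCpu());
 *      uint32_t const uStepping  = ASMGetCpuStepping(uSignature);
 */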
1576
1577
1578 /**
1579 * Get cr0.
1580 * @returns cr0.
1581 */
1582 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1583 DECLASM(RTCCUINTXREG) ASMGetCR0(void);
1584 #else
1585 DECLINLINE(RTCCUINTXREG) ASMGetCR0(void)
1586 {
1587 RTCCUINTXREG uCR0;
1588 # if RT_INLINE_ASM_USES_INTRIN
1589 uCR0 = __readcr0();
1590
1591 # elif RT_INLINE_ASM_GNU_STYLE
1592 # ifdef RT_ARCH_AMD64
1593 __asm__ __volatile__("movq %%cr0, %0\t\n" : "=r" (uCR0));
1594 # else
1595 __asm__ __volatile__("movl %%cr0, %0\t\n" : "=r" (uCR0));
1596 # endif
1597 # else
1598 __asm
1599 {
1600 # ifdef RT_ARCH_AMD64
1601 mov rax, cr0
1602 mov [uCR0], rax
1603 # else
1604 mov eax, cr0
1605 mov [uCR0], eax
1606 # endif
1607 }
1608 # endif
1609 return uCR0;
1610 }
1611 #endif
1612
1613
1614 /**
1615 * Sets the CR0 register.
1616 * @param uCR0 The new CR0 value.
1617 */
1618 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1619 DECLASM(void) ASMSetCR0(RTCCUINTXREG uCR0);
1620 #else
1621 DECLINLINE(void) ASMSetCR0(RTCCUINTXREG uCR0)
1622 {
1623 # if RT_INLINE_ASM_USES_INTRIN
1624 __writecr0(uCR0);
1625
1626 # elif RT_INLINE_ASM_GNU_STYLE
1627 # ifdef RT_ARCH_AMD64
1628 __asm__ __volatile__("movq %0, %%cr0\n\t" :: "r" (uCR0));
1629 # else
1630 __asm__ __volatile__("movl %0, %%cr0\n\t" :: "r" (uCR0));
1631 # endif
1632 # else
1633 __asm
1634 {
1635 # ifdef RT_ARCH_AMD64
1636 mov rax, [uCR0]
1637 mov cr0, rax
1638 # else
1639 mov eax, [uCR0]
1640 mov cr0, eax
1641 # endif
1642 }
1643 # endif
1644 }
1645 #endif
1646
1647
1648 /**
1649 * Get cr2.
1650 * @returns cr2.
1651 */
1652 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1653 DECLASM(RTCCUINTXREG) ASMGetCR2(void);
1654 #else
1655 DECLINLINE(RTCCUINTXREG) ASMGetCR2(void)
1656 {
1657 RTCCUINTXREG uCR2;
1658 # if RT_INLINE_ASM_USES_INTRIN
1659 uCR2 = __readcr2();
1660
1661 # elif RT_INLINE_ASM_GNU_STYLE
1662 # ifdef RT_ARCH_AMD64
1663 __asm__ __volatile__("movq %%cr2, %0\t\n" : "=r" (uCR2));
1664 # else
1665 __asm__ __volatile__("movl %%cr2, %0\t\n" : "=r" (uCR2));
1666 # endif
1667 # else
1668 __asm
1669 {
1670 # ifdef RT_ARCH_AMD64
1671 mov rax, cr2
1672 mov [uCR2], rax
1673 # else
1674 mov eax, cr2
1675 mov [uCR2], eax
1676 # endif
1677 }
1678 # endif
1679 return uCR2;
1680 }
1681 #endif
1682
1683
1684 /**
1685 * Sets the CR2 register.
1686 * @param uCR2 The new CR2 value.
1687 */
1688 #if RT_INLINE_ASM_EXTERNAL
1689 DECLASM(void) ASMSetCR2(RTCCUINTXREG uCR2);
1690 #else
1691 DECLINLINE(void) ASMSetCR2(RTCCUINTXREG uCR2)
1692 {
1693 # if RT_INLINE_ASM_GNU_STYLE
1694 # ifdef RT_ARCH_AMD64
1695 __asm__ __volatile__("movq %0, %%cr2\n\t" :: "r" (uCR2));
1696 # else
1697 __asm__ __volatile__("movl %0, %%cr2\n\t" :: "r" (uCR2));
1698 # endif
1699 # else
1700 __asm
1701 {
1702 # ifdef RT_ARCH_AMD64
1703 mov rax, [uCR2]
1704 mov cr2, rax
1705 # else
1706 mov eax, [uCR2]
1707 mov cr2, eax
1708 # endif
1709 }
1710 # endif
1711 }
1712 #endif
1713
1714
1715 /**
1716 * Get cr3.
1717 * @returns cr3.
1718 */
1719 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1720 DECLASM(RTCCUINTXREG) ASMGetCR3(void);
1721 #else
1722 DECLINLINE(RTCCUINTXREG) ASMGetCR3(void)
1723 {
1724 RTCCUINTXREG uCR3;
1725 # if RT_INLINE_ASM_USES_INTRIN
1726 uCR3 = __readcr3();
1727
1728 # elif RT_INLINE_ASM_GNU_STYLE
1729 # ifdef RT_ARCH_AMD64
1730 __asm__ __volatile__("movq %%cr3, %0\t\n" : "=r" (uCR3));
1731 # else
1732 __asm__ __volatile__("movl %%cr3, %0\t\n" : "=r" (uCR3));
1733 # endif
1734 # else
1735 __asm
1736 {
1737 # ifdef RT_ARCH_AMD64
1738 mov rax, cr3
1739 mov [uCR3], rax
1740 # else
1741 mov eax, cr3
1742 mov [uCR3], eax
1743 # endif
1744 }
1745 # endif
1746 return uCR3;
1747 }
1748 #endif
1749
1750
1751 /**
1752 * Sets the CR3 register.
1753 *
1754 * @param uCR3 New CR3 value.
1755 */
1756 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1757 DECLASM(void) ASMSetCR3(RTCCUINTXREG uCR3);
1758 #else
1759 DECLINLINE(void) ASMSetCR3(RTCCUINTXREG uCR3)
1760 {
1761 # if RT_INLINE_ASM_USES_INTRIN
1762 __writecr3(uCR3);
1763
1764 # elif RT_INLINE_ASM_GNU_STYLE
1765 # ifdef RT_ARCH_AMD64
1766 __asm__ __volatile__("movq %0, %%cr3\n\t" : : "r" (uCR3));
1767 # else
1768 __asm__ __volatile__("movl %0, %%cr3\n\t" : : "r" (uCR3));
1769 # endif
1770 # else
1771 __asm
1772 {
1773 # ifdef RT_ARCH_AMD64
1774 mov rax, [uCR3]
1775 mov cr3, rax
1776 # else
1777 mov eax, [uCR3]
1778 mov cr3, eax
1779 # endif
1780 }
1781 # endif
1782 }
1783 #endif
1784
1785
1786 /**
1787 * Reloads the CR3 register.
1788 */
1789 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1790 DECLASM(void) ASMReloadCR3(void);
1791 #else
1792 DECLINLINE(void) ASMReloadCR3(void)
1793 {
1794 # if RT_INLINE_ASM_USES_INTRIN
1795 __writecr3(__readcr3());
1796
1797 # elif RT_INLINE_ASM_GNU_STYLE
1798 RTCCUINTXREG u;
1799 # ifdef RT_ARCH_AMD64
1800 __asm__ __volatile__("movq %%cr3, %0\n\t"
1801 "movq %0, %%cr3\n\t"
1802 : "=r" (u));
1803 # else
1804 __asm__ __volatile__("movl %%cr3, %0\n\t"
1805 "movl %0, %%cr3\n\t"
1806 : "=r" (u));
1807 # endif
1808 # else
1809 __asm
1810 {
1811 # ifdef RT_ARCH_AMD64
1812 mov rax, cr3
1813 mov cr3, rax
1814 # else
1815 mov eax, cr3
1816 mov cr3, eax
1817 # endif
1818 }
1819 # endif
1820 }
1821 #endif
1822
1823
1824 /**
1825 * Get cr4.
1826 * @returns cr4.
1827 */
1828 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1829 DECLASM(RTCCUINTXREG) ASMGetCR4(void);
1830 #else
1831 DECLINLINE(RTCCUINTXREG) ASMGetCR4(void)
1832 {
1833 RTCCUINTXREG uCR4;
1834 # if RT_INLINE_ASM_USES_INTRIN
1835 uCR4 = __readcr4();
1836
1837 # elif RT_INLINE_ASM_GNU_STYLE
1838 # ifdef RT_ARCH_AMD64
1839 __asm__ __volatile__("movq %%cr4, %0\t\n" : "=r" (uCR4));
1840 # else
1841 __asm__ __volatile__("movl %%cr4, %0\t\n" : "=r" (uCR4));
1842 # endif
1843 # else
1844 __asm
1845 {
1846 # ifdef RT_ARCH_AMD64
1847 mov rax, cr4
1848 mov [uCR4], rax
1849 # else
1850 push eax /* just in case */
1851 /*mov eax, cr4*/
1852 _emit 0x0f
1853 _emit 0x20
1854 _emit 0xe0
1855 mov [uCR4], eax
1856 pop eax
1857 # endif
1858 }
1859 # endif
1860 return uCR4;
1861 }
1862 #endif
1863
1864
1865 /**
1866 * Sets the CR4 register.
1867 *
1868 * @param uCR4 New CR4 value.
1869 */
1870 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1871 DECLASM(void) ASMSetCR4(RTCCUINTXREG uCR4);
1872 #else
1873 DECLINLINE(void) ASMSetCR4(RTCCUINTXREG uCR4)
1874 {
1875 # if RT_INLINE_ASM_USES_INTRIN
1876 __writecr4(uCR4);
1877
1878 # elif RT_INLINE_ASM_GNU_STYLE
1879 # ifdef RT_ARCH_AMD64
1880 __asm__ __volatile__("movq %0, %%cr4\n\t" : : "r" (uCR4));
1881 # else
1882 __asm__ __volatile__("movl %0, %%cr4\n\t" : : "r" (uCR4));
1883 # endif
1884 # else
1885 __asm
1886 {
1887 # ifdef RT_ARCH_AMD64
1888 mov rax, [uCR4]
1889 mov cr4, rax
1890 # else
1891 mov eax, [uCR4]
1892 _emit 0x0F
1893 _emit 0x22
1894 _emit 0xE0 /* mov cr4, eax */
1895 # endif
1896 }
1897 # endif
1898 }
1899 #endif
1900
1901
1902 /**
1903 * Get cr8.
1904 * @returns cr8.
1905 * @remark The lock prefix hack for access from non-64-bit modes is NOT used and 0 is returned.
1906 */
1907 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1908 DECLASM(RTCCUINTXREG) ASMGetCR8(void);
1909 #else
1910 DECLINLINE(RTCCUINTXREG) ASMGetCR8(void)
1911 {
1912 # ifdef RT_ARCH_AMD64
1913 RTCCUINTXREG uCR8;
1914 # if RT_INLINE_ASM_USES_INTRIN
1915 uCR8 = __readcr8();
1916
1917 # elif RT_INLINE_ASM_GNU_STYLE
1918 __asm__ __volatile__("movq %%cr8, %0\t\n" : "=r" (uCR8));
1919 # else
1920 __asm
1921 {
1922 mov rax, cr8
1923 mov [uCR8], rax
1924 }
1925 # endif
1926 return uCR8;
1927 # else /* !RT_ARCH_AMD64 */
1928 return 0;
1929 # endif /* !RT_ARCH_AMD64 */
1930 }
1931 #endif
1932
1933
1934 /**
1935 * Get XCR0 (eXtended feature Control Register 0).
1936 * @returns xcr0.
1937 */
1938 DECLASM(uint64_t) ASMGetXcr0(void);
1939
1940 /**
1941 * Sets the XCR0 register.
1942 * @param uXcr0 The new XCR0 value.
1943 */
1944 DECLASM(void) ASMSetXcr0(uint64_t uXcr0);
1945
1946 struct X86XSAVEAREA;
1947 /**
1948 * Save extended CPU state.
1949 * @param pXStateArea Where to save the state.
1950 * @param fComponents Which state components to save.
1951 */
1952 DECLASM(void) ASMXSave(struct X86XSAVEAREA *pXStateArea, uint64_t fComponents);
1953
1954 /**
1955 * Loads extended CPU state.
1956 * @param pXStateArea Where to load the state from.
1957 * @param fComponents Which state components to load.
1958 */
1959 DECLASM(void) ASMXRstor(struct X86XSAVEAREA const *pXStateArea, uint64_t fComponents);
1960
1961
1962 /**
1963 * Enables interrupts (EFLAGS.IF).
1964 */
1965 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1966 DECLASM(void) ASMIntEnable(void);
1967 #else
1968 DECLINLINE(void) ASMIntEnable(void)
1969 {
1970 # if RT_INLINE_ASM_GNU_STYLE
1971 __asm("sti\n");
1972 # elif RT_INLINE_ASM_USES_INTRIN
1973 _enable();
1974 # else
1975 __asm sti
1976 # endif
1977 }
1978 #endif
1979
1980
1981 /**
1982 * Disables interrupts (!EFLAGS.IF).
1983 */
1984 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1985 DECLASM(void) ASMIntDisable(void);
1986 #else
1987 DECLINLINE(void) ASMIntDisable(void)
1988 {
1989 # if RT_INLINE_ASM_GNU_STYLE
1990 __asm("cli\n");
1991 # elif RT_INLINE_ASM_USES_INTRIN
1992 _disable();
1993 # else
1994 __asm cli
1995 # endif
1996 }
1997 #endif
1998
1999
2000 /**
2001 * Disables interrupts and returns previous xFLAGS.
2002 */
2003 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2004 DECLASM(RTCCUINTREG) ASMIntDisableFlags(void);
2005 #else
2006 DECLINLINE(RTCCUINTREG) ASMIntDisableFlags(void)
2007 {
2008 RTCCUINTREG xFlags;
2009 # if RT_INLINE_ASM_GNU_STYLE
2010 # ifdef RT_ARCH_AMD64
2011 __asm__ __volatile__("pushfq\n\t"
2012 "cli\n\t"
2013 "popq %0\n\t"
2014 : "=r" (xFlags));
2015 # else
2016 __asm__ __volatile__("pushfl\n\t"
2017 "cli\n\t"
2018 "popl %0\n\t"
2019 : "=r" (xFlags));
2020 # endif
2021 # elif RT_INLINE_ASM_USES_INTRIN && !defined(RT_ARCH_X86)
2022 xFlags = ASMGetFlags();
2023 _disable();
2024 # else
2025 __asm {
2026 pushfd
2027 cli
2028 pop [xFlags]
2029 }
2030 # endif
2031 return xFlags;
2032 }
2033 #endif
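
/* Usage sketch (editor's illustration, not part of the original header): the
 * usual save/disable/restore pattern built from ASMIntDisableFlags() and
 * ASMSetFlags().
 *
 *      RTCCUINTREG const fSavedFlags = ASMIntDisableFlags();
 *      ... critical section with interrupts disabled ...
 *      ASMSetFlags(fSavedFlags);
 */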
2034
2035
2036 /**
2037 * Are interrupts enabled?
2038 *
2039 * @returns true / false.
2040 */
2041 DECLINLINE(bool) ASMIntAreEnabled(void)
2042 {
2043 RTCCUINTREG uFlags = ASMGetFlags();
2044 return uFlags & 0x200 /* X86_EFL_IF */ ? true : false;
2045 }
2046
2047
2048 /**
2049 * Halts the CPU until interrupted.
2050 */
2051 #if RT_INLINE_ASM_EXTERNAL && RT_INLINE_ASM_USES_INTRIN < 14
2052 DECLASM(void) ASMHalt(void);
2053 #else
2054 DECLINLINE(void) ASMHalt(void)
2055 {
2056 # if RT_INLINE_ASM_GNU_STYLE
2057 __asm__ __volatile__("hlt\n\t");
2058 # elif RT_INLINE_ASM_USES_INTRIN
2059 __halt();
2060 # else
2061 __asm {
2062 hlt
2063 }
2064 # endif
2065 }
2066 #endif
2067
2068
2069 /**
2070 * Reads a machine specific register.
2071 *
2072 * @returns Register content.
2073 * @param uRegister Register to read.
2074 */
2075 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2076 DECLASM(uint64_t) ASMRdMsr(uint32_t uRegister);
2077 #else
2078 DECLINLINE(uint64_t) ASMRdMsr(uint32_t uRegister)
2079 {
2080 RTUINT64U u;
2081 # if RT_INLINE_ASM_GNU_STYLE
2082 __asm__ __volatile__("rdmsr\n\t"
2083 : "=a" (u.s.Lo),
2084 "=d" (u.s.Hi)
2085 : "c" (uRegister));
2086
2087 # elif RT_INLINE_ASM_USES_INTRIN
2088 u.u = __readmsr(uRegister);
2089
2090 # else
2091 __asm
2092 {
2093 mov ecx, [uRegister]
2094 rdmsr
2095 mov [u.s.Lo], eax
2096 mov [u.s.Hi], edx
2097 }
2098 # endif
2099
2100 return u.u;
2101 }
2102 #endif
2103
2104
2105 /**
2106 * Writes a machine specific register.
2107 *
2109 * @param uRegister Register to write to.
2110 * @param u64Val Value to write.
2111 */
2112 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2113 DECLASM(void) ASMWrMsr(uint32_t uRegister, uint64_t u64Val);
2114 #else
2115 DECLINLINE(void) ASMWrMsr(uint32_t uRegister, uint64_t u64Val)
2116 {
2117 RTUINT64U u;
2118
2119 u.u = u64Val;
2120 # if RT_INLINE_ASM_GNU_STYLE
2121 __asm__ __volatile__("wrmsr\n\t"
2122 ::"a" (u.s.Lo),
2123 "d" (u.s.Hi),
2124 "c" (uRegister));
2125
2126 # elif RT_INLINE_ASM_USES_INTRIN
2127 __writemsr(uRegister, u.u);
2128
2129 # else
2130 __asm
2131 {
2132 mov ecx, [uRegister]
2133 mov edx, [u.s.Hi]
2134 mov eax, [u.s.Lo]
2135 wrmsr
2136 }
2137 # endif
2138 }
2139 #endif
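
/* Usage sketch (editor's illustration, not part of the original header):
 * read-modify-write of an MSR with ASMRdMsr()/ASMWrMsr().  0xC0000080 is
 * IA32_EFER and bit 11 is NXE per the architecture manuals; real code would
 * use the MSR_K6_EFER* constants from iprt/x86.h.
 *
 *      uint64_t uEfer = ASMRdMsr(0xC0000080);
 *      uEfer |= RT_BIT_64(11);
 *      ASMWrMsr(0xC0000080, uEfer);
 */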
2140
2141
2142 /**
2143 * Reads a machine specific register, extended version (for AMD).
2144 *
2145 * @returns Register content.
2146 * @param uRegister Register to read.
2147 * @param uXDI RDI/EDI value.
2148 */
2149 #if RT_INLINE_ASM_EXTERNAL
2150 DECLASM(uint64_t) ASMRdMsrEx(uint32_t uRegister, RTCCUINTXREG uXDI);
2151 #else
2152 DECLINLINE(uint64_t) ASMRdMsrEx(uint32_t uRegister, RTCCUINTXREG uXDI)
2153 {
2154 RTUINT64U u;
2155 # if RT_INLINE_ASM_GNU_STYLE
2156 __asm__ __volatile__("rdmsr\n\t"
2157 : "=a" (u.s.Lo),
2158 "=d" (u.s.Hi)
2159 : "c" (uRegister),
2160 "D" (uXDI));
2161
2162 # else
2163 __asm
2164 {
2165 mov ecx, [uRegister]
2166 xchg edi, [uXDI]
2167 rdmsr
2168 mov [u.s.Lo], eax
2169 mov [u.s.Hi], edx
2170 xchg edi, [uXDI]
2171 }
2172 # endif
2173
2174 return u.u;
2175 }
2176 #endif
2177
2178
2179 /**
2180 * Writes a machine specific register, extended version (for AMD).
2181 *
2183 * @param uRegister Register to write to.
2184 * @param uXDI RDI/EDI value.
2185 * @param u64Val Value to write.
2186 */
2187 #if RT_INLINE_ASM_EXTERNAL
2188 DECLASM(void) ASMWrMsrEx(uint32_t uRegister, RTCCUINTXREG uXDI, uint64_t u64Val);
2189 #else
2190 DECLINLINE(void) ASMWrMsrEx(uint32_t uRegister, RTCCUINTXREG uXDI, uint64_t u64Val)
2191 {
2192 RTUINT64U u;
2193
2194 u.u = u64Val;
2195 # if RT_INLINE_ASM_GNU_STYLE
2196 __asm__ __volatile__("wrmsr\n\t"
2197 ::"a" (u.s.Lo),
2198 "d" (u.s.Hi),
2199 "c" (uRegister),
2200 "D" (uXDI));
2201
2202 # else
2203 __asm
2204 {
2205 mov ecx, [uRegister]
2206 xchg edi, [uXDI]
2207 mov edx, [u.s.Hi]
2208 mov eax, [u.s.Lo]
2209 wrmsr
2210 xchg edi, [uXDI]
2211 }
2212 # endif
2213 }
2214 #endif
2215
2216
2217
2218 /**
2219 * Reads low part of a machine specific register.
2220 *
2221 * @returns Register content.
2222 * @param uRegister Register to read.
2223 */
2224 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2225 DECLASM(uint32_t) ASMRdMsr_Low(uint32_t uRegister);
2226 #else
2227 DECLINLINE(uint32_t) ASMRdMsr_Low(uint32_t uRegister)
2228 {
2229 uint32_t u32;
2230 # if RT_INLINE_ASM_GNU_STYLE
2231 __asm__ __volatile__("rdmsr\n\t"
2232 : "=a" (u32)
2233 : "c" (uRegister)
2234 : "edx");
2235
2236 # elif RT_INLINE_ASM_USES_INTRIN
2237 u32 = (uint32_t)__readmsr(uRegister);
2238
2239 # else
2240 __asm
2241 {
2242 mov ecx, [uRegister]
2243 rdmsr
2244 mov [u32], eax
2245 }
2246 # endif
2247
2248 return u32;
2249 }
2250 #endif
2251
2252
2253 /**
2254 * Reads high part of a machine specific register.
2255 *
2256 * @returns Register content.
2257 * @param uRegister Register to read.
2258 */
2259 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2260 DECLASM(uint32_t) ASMRdMsr_High(uint32_t uRegister);
2261 #else
2262 DECLINLINE(uint32_t) ASMRdMsr_High(uint32_t uRegister)
2263 {
2264 uint32_t u32;
2265 # if RT_INLINE_ASM_GNU_STYLE
2266 __asm__ __volatile__("rdmsr\n\t"
2267 : "=d" (u32)
2268 : "c" (uRegister)
2269 : "eax");
2270
2271 # elif RT_INLINE_ASM_USES_INTRIN
2272 u32 = (uint32_t)(__readmsr(uRegister) >> 32);
2273
2274 # else
2275 __asm
2276 {
2277 mov ecx, [uRegister]
2278 rdmsr
2279 mov [u32], edx
2280 }
2281 # endif
2282
2283 return u32;
2284 }
2285 #endif
2286
2287
2288 /**
2289 * Gets dr0.
2290 *
2291 * @returns dr0.
2292 */
2293 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2294 DECLASM(RTCCUINTXREG) ASMGetDR0(void);
2295 #else
2296 DECLINLINE(RTCCUINTXREG) ASMGetDR0(void)
2297 {
2298 RTCCUINTXREG uDR0;
2299 # if RT_INLINE_ASM_USES_INTRIN
2300 uDR0 = __readdr(0);
2301 # elif RT_INLINE_ASM_GNU_STYLE
2302 # ifdef RT_ARCH_AMD64
2303 __asm__ __volatile__("movq %%dr0, %0\n\t" : "=r" (uDR0));
2304 # else
2305 __asm__ __volatile__("movl %%dr0, %0\n\t" : "=r" (uDR0));
2306 # endif
2307 # else
2308 __asm
2309 {
2310 # ifdef RT_ARCH_AMD64
2311 mov rax, dr0
2312 mov [uDR0], rax
2313 # else
2314 mov eax, dr0
2315 mov [uDR0], eax
2316 # endif
2317 }
2318 # endif
2319 return uDR0;
2320 }
2321 #endif
2322
2323
2324 /**
2325 * Gets dr1.
2326 *
2327 * @returns dr1.
2328 */
2329 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2330 DECLASM(RTCCUINTXREG) ASMGetDR1(void);
2331 #else
2332 DECLINLINE(RTCCUINTXREG) ASMGetDR1(void)
2333 {
2334 RTCCUINTXREG uDR1;
2335 # if RT_INLINE_ASM_USES_INTRIN
2336 uDR1 = __readdr(1);
2337 # elif RT_INLINE_ASM_GNU_STYLE
2338 # ifdef RT_ARCH_AMD64
2339 __asm__ __volatile__("movq %%dr1, %0\n\t" : "=r" (uDR1));
2340 # else
2341 __asm__ __volatile__("movl %%dr1, %0\n\t" : "=r" (uDR1));
2342 # endif
2343 # else
2344 __asm
2345 {
2346 # ifdef RT_ARCH_AMD64
2347 mov rax, dr1
2348 mov [uDR1], rax
2349 # else
2350 mov eax, dr1
2351 mov [uDR1], eax
2352 # endif
2353 }
2354 # endif
2355 return uDR1;
2356 }
2357 #endif
2358
2359
2360 /**
2361 * Gets dr2.
2362 *
2363 * @returns dr2.
2364 */
2365 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2366 DECLASM(RTCCUINTXREG) ASMGetDR2(void);
2367 #else
2368 DECLINLINE(RTCCUINTXREG) ASMGetDR2(void)
2369 {
2370 RTCCUINTXREG uDR2;
2371 # if RT_INLINE_ASM_USES_INTRIN
2372 uDR2 = __readdr(2);
2373 # elif RT_INLINE_ASM_GNU_STYLE
2374 # ifdef RT_ARCH_AMD64
2375 __asm__ __volatile__("movq %%dr2, %0\n\t" : "=r" (uDR2));
2376 # else
2377 __asm__ __volatile__("movl %%dr2, %0\n\t" : "=r" (uDR2));
2378 # endif
2379 # else
2380 __asm
2381 {
2382 # ifdef RT_ARCH_AMD64
2383 mov rax, dr2
2384 mov [uDR2], rax
2385 # else
2386 mov eax, dr2
2387 mov [uDR2], eax
2388 # endif
2389 }
2390 # endif
2391 return uDR2;
2392 }
2393 #endif
2394
2395
2396 /**
2397 * Gets dr3.
2398 *
2399 * @returns dr3.
2400 */
2401 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2402 DECLASM(RTCCUINTXREG) ASMGetDR3(void);
2403 #else
2404 DECLINLINE(RTCCUINTXREG) ASMGetDR3(void)
2405 {
2406 RTCCUINTXREG uDR3;
2407 # if RT_INLINE_ASM_USES_INTRIN
2408 uDR3 = __readdr(3);
2409 # elif RT_INLINE_ASM_GNU_STYLE
2410 # ifdef RT_ARCH_AMD64
2411 __asm__ __volatile__("movq %%dr3, %0\n\t" : "=r" (uDR3));
2412 # else
2413 __asm__ __volatile__("movl %%dr3, %0\n\t" : "=r" (uDR3));
2414 # endif
2415 # else
2416 __asm
2417 {
2418 # ifdef RT_ARCH_AMD64
2419 mov rax, dr3
2420 mov [uDR3], rax
2421 # else
2422 mov eax, dr3
2423 mov [uDR3], eax
2424 # endif
2425 }
2426 # endif
2427 return uDR3;
2428 }
2429 #endif
2430
2431
2432 /**
2433 * Gets dr6.
2434 *
2435 * @returns dr6.
2436 */
2437 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2438 DECLASM(RTCCUINTXREG) ASMGetDR6(void);
2439 #else
2440 DECLINLINE(RTCCUINTXREG) ASMGetDR6(void)
2441 {
2442 RTCCUINTXREG uDR6;
2443 # if RT_INLINE_ASM_USES_INTRIN
2444 uDR6 = __readdr(6);
2445 # elif RT_INLINE_ASM_GNU_STYLE
2446 # ifdef RT_ARCH_AMD64
2447 __asm__ __volatile__("movq %%dr6, %0\n\t" : "=r" (uDR6));
2448 # else
2449 __asm__ __volatile__("movl %%dr6, %0\n\t" : "=r" (uDR6));
2450 # endif
2451 # else
2452 __asm
2453 {
2454 # ifdef RT_ARCH_AMD64
2455 mov rax, dr6
2456 mov [uDR6], rax
2457 # else
2458 mov eax, dr6
2459 mov [uDR6], eax
2460 # endif
2461 }
2462 # endif
2463 return uDR6;
2464 }
2465 #endif
2466
2467
2468 /**
2469 * Reads and clears DR6.
2470 *
2471 * @returns DR6.
2472 */
2473 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2474 DECLASM(RTCCUINTXREG) ASMGetAndClearDR6(void);
2475 #else
2476 DECLINLINE(RTCCUINTXREG) ASMGetAndClearDR6(void)
2477 {
2478 RTCCUINTXREG uDR6;
2479 # if RT_INLINE_ASM_USES_INTRIN
2480 uDR6 = __readdr(6);
2481 __writedr(6, 0xffff0ff0U); /* 31-16 and 4-11 are 1's, 12 and 63-32 are zero. */
2482 # elif RT_INLINE_ASM_GNU_STYLE
2483 RTCCUINTXREG uNewValue = 0xffff0ff0U; /* 31-16 and 4-11 are 1's, 12 and 63-32 are zero. */
2484 # ifdef RT_ARCH_AMD64
2485 __asm__ __volatile__("movq %%dr6, %0\n\t"
2486 "movq %1, %%dr6\n\t"
2487 : "=r" (uDR6)
2488 : "r" (uNewValue));
2489 # else
2490 __asm__ __volatile__("movl %%dr6, %0\n\t"
2491 "movl %1, %%dr6\n\t"
2492 : "=r" (uDR6)
2493 : "r" (uNewValue));
2494 # endif
2495 # else
2496 __asm
2497 {
2498 # ifdef RT_ARCH_AMD64
2499 mov rax, dr6
2500 mov [uDR6], rax
2501 mov rcx, rax
2502 mov ecx, 0ffff0ff0h; /* 31-16 and 4-11 are 1's, 12 and 63-32 are zero. */
2503 mov dr6, rcx
2504 # else
2505 mov eax, dr6
2506 mov [uDR6], eax
2507 mov ecx, 0ffff0ff0h; /* 31-16 and 4-11 are 1's, 12 is zero. */
2508 mov dr6, ecx
2509 # endif
2510 }
2511 # endif
2512 return uDR6;
2513 }
2514 #endif
2515
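/*
 * Usage sketch (illustrative): draining the debug status from a #DB handler.
 * Hardware never clears the DR6 status bits itself, so the handler reads them and
 * resets the register to its init value (0xffff0ff0) in one call.  The handler
 * function below is hypothetical.
 *
 *      RTCCUINTXREG uDr6 = ASMGetAndClearDR6();
 *      if (uDr6 & UINT32_C(0x00000001))    // B0: the DR0 breakpoint condition was met
 *          MyHandleBreakpoint0();          // hypothetical
 */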
2516
2517 /**
2518 * Gets dr7.
2519 *
2520 * @returns dr7.
2521 */
2522 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2523 DECLASM(RTCCUINTXREG) ASMGetDR7(void);
2524 #else
2525 DECLINLINE(RTCCUINTXREG) ASMGetDR7(void)
2526 {
2527 RTCCUINTXREG uDR7;
2528 # if RT_INLINE_ASM_USES_INTRIN
2529 uDR7 = __readdr(7);
2530 # elif RT_INLINE_ASM_GNU_STYLE
2531 # ifdef RT_ARCH_AMD64
2532 __asm__ __volatile__("movq %%dr7, %0\n\t" : "=r" (uDR7));
2533 # else
2534 __asm__ __volatile__("movl %%dr7, %0\n\t" : "=r" (uDR7));
2535 # endif
2536 # else
2537 __asm
2538 {
2539 # ifdef RT_ARCH_AMD64
2540 mov rax, dr7
2541 mov [uDR7], rax
2542 # else
2543 mov eax, dr7
2544 mov [uDR7], eax
2545 # endif
2546 }
2547 # endif
2548 return uDR7;
2549 }
2550 #endif
2551
2552
2553 /**
2554 * Sets dr0.
2555 *
2556 * @param uDRVal Debug register value to write
2557 */
2558 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2559 DECLASM(void) ASMSetDR0(RTCCUINTXREG uDRVal);
2560 #else
2561 DECLINLINE(void) ASMSetDR0(RTCCUINTXREG uDRVal)
2562 {
2563 # if RT_INLINE_ASM_USES_INTRIN
2564 __writedr(0, uDRVal);
2565 # elif RT_INLINE_ASM_GNU_STYLE
2566 # ifdef RT_ARCH_AMD64
2567 __asm__ __volatile__("movq %0, %%dr0\n\t" : : "r" (uDRVal));
2568 # else
2569 __asm__ __volatile__("movl %0, %%dr0\n\t" : : "r" (uDRVal));
2570 # endif
2571 # else
2572 __asm
2573 {
2574 # ifdef RT_ARCH_AMD64
2575 mov rax, [uDRVal]
2576 mov dr0, rax
2577 # else
2578 mov eax, [uDRVal]
2579 mov dr0, eax
2580 # endif
2581 }
2582 # endif
2583 }
2584 #endif
2585
2586
2587 /**
2588 * Sets dr1.
2589 *
2590 * @param uDRVal Debug register value to write
2591 */
2592 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2593 DECLASM(void) ASMSetDR1(RTCCUINTXREG uDRVal);
2594 #else
2595 DECLINLINE(void) ASMSetDR1(RTCCUINTXREG uDRVal)
2596 {
2597 # if RT_INLINE_ASM_USES_INTRIN
2598 __writedr(1, uDRVal);
2599 # elif RT_INLINE_ASM_GNU_STYLE
2600 # ifdef RT_ARCH_AMD64
2601 __asm__ __volatile__("movq %0, %%dr1\n\t" : : "r" (uDRVal));
2602 # else
2603 __asm__ __volatile__("movl %0, %%dr1\n\t" : : "r" (uDRVal));
2604 # endif
2605 # else
2606 __asm
2607 {
2608 # ifdef RT_ARCH_AMD64
2609 mov rax, [uDRVal]
2610 mov dr1, rax
2611 # else
2612 mov eax, [uDRVal]
2613 mov dr1, eax
2614 # endif
2615 }
2616 # endif
2617 }
2618 #endif
2619
2620
2621 /**
2622 * Sets dr2.
2623 *
2624 * @param uDRVal Debug register value to write
2625 */
2626 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2627 DECLASM(void) ASMSetDR2(RTCCUINTXREG uDRVal);
2628 #else
2629 DECLINLINE(void) ASMSetDR2(RTCCUINTXREG uDRVal)
2630 {
2631 # if RT_INLINE_ASM_USES_INTRIN
2632 __writedr(2, uDRVal);
2633 # elif RT_INLINE_ASM_GNU_STYLE
2634 # ifdef RT_ARCH_AMD64
2635 __asm__ __volatile__("movq %0, %%dr2\n\t" : : "r" (uDRVal));
2636 # else
2637 __asm__ __volatile__("movl %0, %%dr2\n\t" : : "r" (uDRVal));
2638 # endif
2639 # else
2640 __asm
2641 {
2642 # ifdef RT_ARCH_AMD64
2643 mov rax, [uDRVal]
2644 mov dr2, rax
2645 # else
2646 mov eax, [uDRVal]
2647 mov dr2, eax
2648 # endif
2649 }
2650 # endif
2651 }
2652 #endif
2653
2654
2655 /**
2656 * Sets dr3.
2657 *
2658 * @param uDRVal Debug register value to write
2659 */
2660 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2661 DECLASM(void) ASMSetDR3(RTCCUINTXREG uDRVal);
2662 #else
2663 DECLINLINE(void) ASMSetDR3(RTCCUINTXREG uDRVal)
2664 {
2665 # if RT_INLINE_ASM_USES_INTRIN
2666 __writedr(3, uDRVal);
2667 # elif RT_INLINE_ASM_GNU_STYLE
2668 # ifdef RT_ARCH_AMD64
2669 __asm__ __volatile__("movq %0, %%dr3\n\t" : : "r" (uDRVal));
2670 # else
2671 __asm__ __volatile__("movl %0, %%dr3\n\t" : : "r" (uDRVal));
2672 # endif
2673 # else
2674 __asm
2675 {
2676 # ifdef RT_ARCH_AMD64
2677 mov rax, [uDRVal]
2678 mov dr3, rax
2679 # else
2680 mov eax, [uDRVal]
2681 mov dr3, eax
2682 # endif
2683 }
2684 # endif
2685 }
2686 #endif
2687
2688
2689 /**
2690 * Sets dr6.
2691 *
2692 * @param uDRVal Debug register value to write
2693 */
2694 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2695 DECLASM(void) ASMSetDR6(RTCCUINTXREG uDRVal);
2696 #else
2697 DECLINLINE(void) ASMSetDR6(RTCCUINTXREG uDRVal)
2698 {
2699 # if RT_INLINE_ASM_USES_INTRIN
2700 __writedr(6, uDRVal);
2701 # elif RT_INLINE_ASM_GNU_STYLE
2702 # ifdef RT_ARCH_AMD64
2703 __asm__ __volatile__("movq %0, %%dr6\n\t" : : "r" (uDRVal));
2704 # else
2705 __asm__ __volatile__("movl %0, %%dr6\n\t" : : "r" (uDRVal));
2706 # endif
2707 # else
2708 __asm
2709 {
2710 # ifdef RT_ARCH_AMD64
2711 mov rax, [uDRVal]
2712 mov dr6, rax
2713 # else
2714 mov eax, [uDRVal]
2715 mov dr6, eax
2716 # endif
2717 }
2718 # endif
2719 }
2720 #endif
2721
2722
2723 /**
2724 * Sets dr7.
2725 *
2726 * @param uDRVal Debug register value to write
2727 */
2728 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2729 DECLASM(void) ASMSetDR7(RTCCUINTXREG uDRVal);
2730 #else
2731 DECLINLINE(void) ASMSetDR7(RTCCUINTXREG uDRVal)
2732 {
2733 # if RT_INLINE_ASM_USES_INTRIN
2734 __writedr(7, uDRVal);
2735 # elif RT_INLINE_ASM_GNU_STYLE
2736 # ifdef RT_ARCH_AMD64
2737 __asm__ __volatile__("movq %0, %%dr7\n\t" : : "r" (uDRVal));
2738 # else
2739 __asm__ __volatile__("movl %0, %%dr7\n\t" : : "r" (uDRVal));
2740 # endif
2741 # else
2742 __asm
2743 {
2744 # ifdef RT_ARCH_AMD64
2745 mov rax, [uDRVal]
2746 mov dr7, rax
2747 # else
2748 mov eax, [uDRVal]
2749 mov dr7, eax
2750 # endif
2751 }
2752 # endif
2753 }
2754 #endif
2755
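/*
 * Usage sketch (illustrative): arming a hardware execution breakpoint with the
 * setters above.  With the RW0 and LEN0 fields of DR7 left at zero the breakpoint
 * fires on instruction execution at the address held in DR0; bit 0 (L0) enables it
 * locally.  The target address is hypothetical.
 *
 *      ASMSetDR0((RTCCUINTXREG)(uintptr_t)pvCodeAddr);     // hypothetical address
 *      ASMSetDR7(ASMGetDR7() | UINT32_C(0x00000001));      // bit 0 = L0, local enable
 */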
2756
2757 /**
2758 * Writes an 8-bit unsigned integer to an I/O port, ordered.
2759 *
2760 * @param Port I/O port to write to.
2761 * @param u8 8-bit integer to write.
2762 */
2763 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2764 DECLASM(void) ASMOutU8(RTIOPORT Port, uint8_t u8);
2765 #else
2766 DECLINLINE(void) ASMOutU8(RTIOPORT Port, uint8_t u8)
2767 {
2768 # if RT_INLINE_ASM_GNU_STYLE
2769 __asm__ __volatile__("outb %b1, %w0\n\t"
2770 :: "Nd" (Port),
2771 "a" (u8));
2772
2773 # elif RT_INLINE_ASM_USES_INTRIN
2774 __outbyte(Port, u8);
2775
2776 # else
2777 __asm
2778 {
2779 mov dx, [Port]
2780 mov al, [u8]
2781 out dx, al
2782 }
2783 # endif
2784 }
2785 #endif
2786
2787
2788 /**
2789 * Reads an 8-bit unsigned integer from an I/O port, ordered.
2790 *
2791 * @returns 8-bit integer.
2792 * @param Port I/O port to read from.
2793 */
2794 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2795 DECLASM(uint8_t) ASMInU8(RTIOPORT Port);
2796 #else
2797 DECLINLINE(uint8_t) ASMInU8(RTIOPORT Port)
2798 {
2799 uint8_t u8;
2800 # if RT_INLINE_ASM_GNU_STYLE
2801 __asm__ __volatile__("inb %w1, %b0\n\t"
2802 : "=a" (u8)
2803 : "Nd" (Port));
2804
2805 # elif RT_INLINE_ASM_USES_INTRIN
2806 u8 = __inbyte(Port);
2807
2808 # else
2809 __asm
2810 {
2811 mov dx, [Port]
2812 in al, dx
2813 mov [u8], al
2814 }
2815 # endif
2816 return u8;
2817 }
2818 #endif
2819
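/*
 * Usage sketch (illustrative): the classic index/data access pattern on the
 * CMOS/RTC ports, selecting a register through port 0x70 and reading its value
 * back through port 0x71.  Register 0x0A is RTC status register A; the port
 * numbers are standard PC hardware, not something defined by this header.
 *
 *      ASMOutU8(0x70, 0x0A);               // select CMOS register 0x0A (status A)
 *      uint8_t bStatusA = ASMInU8(0x71);   // read the selected register
 */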
2820
2821 /**
2822 * Writes a 16-bit unsigned integer to an I/O port, ordered.
2823 *
2824 * @param Port I/O port to write to.
2825 * @param u16 16-bit integer to write.
2826 */
2827 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2828 DECLASM(void) ASMOutU16(RTIOPORT Port, uint16_t u16);
2829 #else
2830 DECLINLINE(void) ASMOutU16(RTIOPORT Port, uint16_t u16)
2831 {
2832 # if RT_INLINE_ASM_GNU_STYLE
2833 __asm__ __volatile__("outw %w1, %w0\n\t"
2834 :: "Nd" (Port),
2835 "a" (u16));
2836
2837 # elif RT_INLINE_ASM_USES_INTRIN
2838 __outword(Port, u16);
2839
2840 # else
2841 __asm
2842 {
2843 mov dx, [Port]
2844 mov ax, [u16]
2845 out dx, ax
2846 }
2847 # endif
2848 }
2849 #endif
2850
2851
2852 /**
2853 * Reads a 16-bit unsigned integer from an I/O port, ordered.
2854 *
2855 * @returns 16-bit integer.
2856 * @param Port I/O port to read from.
2857 */
2858 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2859 DECLASM(uint16_t) ASMInU16(RTIOPORT Port);
2860 #else
2861 DECLINLINE(uint16_t) ASMInU16(RTIOPORT Port)
2862 {
2863 uint16_t u16;
2864 # if RT_INLINE_ASM_GNU_STYLE
2865 __asm__ __volatile__("inw %w1, %w0\n\t"
2866 : "=a" (u16)
2867 : "Nd" (Port));
2868
2869 # elif RT_INLINE_ASM_USES_INTRIN
2870 u16 = __inword(Port);
2871
2872 # else
2873 __asm
2874 {
2875 mov dx, [Port]
2876 in ax, dx
2877 mov [u16], ax
2878 }
2879 # endif
2880 return u16;
2881 }
2882 #endif
2883
2884
2885 /**
2886 * Writes a 32-bit unsigned integer to an I/O port, ordered.
2887 *
2888 * @param Port I/O port to write to.
2889 * @param u32 32-bit integer to write.
2890 */
2891 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2892 DECLASM(void) ASMOutU32(RTIOPORT Port, uint32_t u32);
2893 #else
2894 DECLINLINE(void) ASMOutU32(RTIOPORT Port, uint32_t u32)
2895 {
2896 # if RT_INLINE_ASM_GNU_STYLE
2897 __asm__ __volatile__("outl %1, %w0\n\t"
2898 :: "Nd" (Port),
2899 "a" (u32));
2900
2901 # elif RT_INLINE_ASM_USES_INTRIN
2902 __outdword(Port, u32);
2903
2904 # else
2905 __asm
2906 {
2907 mov dx, [Port]
2908 mov eax, [u32]
2909 out dx, eax
2910 }
2911 # endif
2912 }
2913 #endif
2914
2915
2916 /**
2917 * Reads a 32-bit unsigned integer from an I/O port, ordered.
2918 *
2919 * @returns 32-bit integer.
2920 * @param Port I/O port to read from.
2921 */
2922 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2923 DECLASM(uint32_t) ASMInU32(RTIOPORT Port);
2924 #else
2925 DECLINLINE(uint32_t) ASMInU32(RTIOPORT Port)
2926 {
2927 uint32_t u32;
2928 # if RT_INLINE_ASM_GNU_STYLE
2929 __asm__ __volatile__("inl %w1, %0\n\t"
2930 : "=a" (u32)
2931 : "Nd" (Port));
2932
2933 # elif RT_INLINE_ASM_USES_INTRIN
2934 u32 = __indword(Port);
2935
2936 # else
2937 __asm
2938 {
2939 mov dx, [Port]
2940 in eax, dx
2941 mov [u32], eax
2942 }
2943 # endif
2944 return u32;
2945 }
2946 #endif
2947
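/*
 * Usage sketch (illustrative): reading a PCI configuration dword through the
 * legacy 0xCF8/0xCFC mechanism.  uBus, uDev and uFunc are hypothetical; register
 * offset 0 holds the vendor and device IDs.
 *
 *      uint32_t uCfgAddr = UINT32_C(0x80000000)        // enable bit
 *                        | ((uint32_t)uBus  << 16)
 *                        | ((uint32_t)uDev  << 11)
 *                        | ((uint32_t)uFunc <<  8);    // offset 0, dword aligned
 *      ASMOutU32(0xCF8, uCfgAddr);
 *      uint32_t uVendorDevice = ASMInU32(0xCFC);
 */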
2948
2949 /**
2950 * Writes a string of 8-bit unsigned integer items to an I/O port, ordered.
2951 *
2952 * @param Port I/O port to write to.
2953 * @param pau8 Pointer to the string buffer.
2954 * @param c The number of items to write.
2955 */
2956 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2957 DECLASM(void) ASMOutStrU8(RTIOPORT Port, uint8_t const *pau8, size_t c);
2958 #else
2959 DECLINLINE(void) ASMOutStrU8(RTIOPORT Port, uint8_t const *pau8, size_t c)
2960 {
2961 # if RT_INLINE_ASM_GNU_STYLE
2962 __asm__ __volatile__("rep; outsb\n\t"
2963 : "+S" (pau8),
2964 "+c" (c)
2965 : "d" (Port));
2966
2967 # elif RT_INLINE_ASM_USES_INTRIN
2968 __outbytestring(Port, (unsigned char *)pau8, (unsigned long)c);
2969
2970 # else
2971 __asm
2972 {
2973 mov dx, [Port]
2974 mov ecx, [c]
2975 mov eax, [pau8]
2976 xchg esi, eax
2977 rep outsb
2978 xchg esi, eax
2979 }
2980 # endif
2981 }
2982 #endif
2983
2984
2985 /**
2986 * Reads a string of 8-bit unsigned integer items from an I/O port, ordered.
2987 *
2988 * @param Port I/O port to read from.
2989 * @param pau8 Pointer to the string buffer (output).
2990 * @param c The number of items to read.
2991 */
2992 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
2993 DECLASM(void) ASMInStrU8(RTIOPORT Port, uint8_t *pau8, size_t c);
2994 #else
2995 DECLINLINE(void) ASMInStrU8(RTIOPORT Port, uint8_t *pau8, size_t c)
2996 {
2997 # if RT_INLINE_ASM_GNU_STYLE
2998 __asm__ __volatile__("rep; insb\n\t"
2999 : "+D" (pau8),
3000 "+c" (c)
3001 : "d" (Port));
3002
3003 # elif RT_INLINE_ASM_USES_INTRIN
3004 __inbytestring(Port, pau8, (unsigned long)c);
3005
3006 # else
3007 __asm
3008 {
3009 mov dx, [Port]
3010 mov ecx, [c]
3011 mov eax, [pau8]
3012 xchg edi, eax
3013 rep insb
3014 xchg edi, eax
3015 }
3016 # endif
3017 }
3018 #endif
3019
3020
3021 /**
3022 * Writes a string of 16-bit unsigned integer items to an I/O port, ordered.
3023 *
3024 * @param Port I/O port to write to.
3025 * @param pau16 Pointer to the string buffer.
3026 * @param c The number of items to write.
3027 */
3028 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
3029 DECLASM(void) ASMOutStrU16(RTIOPORT Port, uint16_t const *pau16, size_t c);
3030 #else
3031 DECLINLINE(void) ASMOutStrU16(RTIOPORT Port, uint16_t const *pau16, size_t c)
3032 {
3033 # if RT_INLINE_ASM_GNU_STYLE
3034 __asm__ __volatile__("rep; outsw\n\t"
3035 : "+S" (pau16),
3036 "+c" (c)
3037 : "d" (Port));
3038
3039 # elif RT_INLINE_ASM_USES_INTRIN
3040 __outwordstring(Port, (unsigned short *)pau16, (unsigned long)c);
3041
3042 # else
3043 __asm
3044 {
3045 mov dx, [Port]
3046 mov ecx, [c]
3047 mov eax, [pau16]
3048 xchg esi, eax
3049 rep outsw
3050 xchg esi, eax
3051 }
3052 # endif
3053 }
3054 #endif
3055
3056
3057 /**
3058 * Reads a string of 16-bit unsigned integer items from an I/O port, ordered.
3059 *
3060 * @param Port I/O port to read from.
3061 * @param pau16 Pointer to the string buffer (output).
3062 * @param c The number of items to read.
3063 */
3064 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
3065 DECLASM(void) ASMInStrU16(RTIOPORT Port, uint16_t *pau16, size_t c);
3066 #else
3067 DECLINLINE(void) ASMInStrU16(RTIOPORT Port, uint16_t *pau16, size_t c)
3068 {
3069 # if RT_INLINE_ASM_GNU_STYLE
3070 __asm__ __volatile__("rep; insw\n\t"
3071 : "+D" (pau16),
3072 "+c" (c)
3073 : "d" (Port));
3074
3075 # elif RT_INLINE_ASM_USES_INTRIN
3076 __inwordstring(Port, pau16, (unsigned long)c);
3077
3078 # else
3079 __asm
3080 {
3081 mov dx, [Port]
3082 mov ecx, [c]
3083 mov eax, [pau16]
3084 xchg edi, eax
3085 rep insw
3086 xchg edi, eax
3087 }
3088 # endif
3089 }
3090 #endif
3091
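/*
 * Usage sketch (illustrative): a PIO data transfer from the primary ATA data port
 * (0x1F0), pulling one 512-byte sector as 256 16-bit items.  Command setup and
 * status polling are omitted; RT_ELEMENTS is the usual IPRT array-size macro.
 *
 *      uint16_t au16Sector[256];
 *      ASMInStrU16(0x1F0, au16Sector, RT_ELEMENTS(au16Sector));
 */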
3092
3093 /**
3094 * Writes a string of 32-bit unsigned integer items to an I/O port, ordered.
3095 *
3096 * @param Port I/O port to write to.
3097 * @param pau32 Pointer to the string buffer.
3098 * @param c The number of items to write.
3099 */
3100 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
3101 DECLASM(void) ASMOutStrU32(RTIOPORT Port, uint32_t const *pau32, size_t c);
3102 #else
3103 DECLINLINE(void) ASMOutStrU32(RTIOPORT Port, uint32_t const *pau32, size_t c)
3104 {
3105 # if RT_INLINE_ASM_GNU_STYLE
3106 __asm__ __volatile__("rep; outsl\n\t"
3107 : "+S" (pau32),
3108 "+c" (c)
3109 : "d" (Port));
3110
3111 # elif RT_INLINE_ASM_USES_INTRIN
3112 __outdwordstring(Port, (unsigned long *)pau32, (unsigned long)c);
3113
3114 # else
3115 __asm
3116 {
3117 mov dx, [Port]
3118 mov ecx, [c]
3119 mov eax, [pau32]
3120 xchg esi, eax
3121 rep outsd
3122 xchg esi, eax
3123 }
3124 # endif
3125 }
3126 #endif
3127
3128
3129 /**
3130 * Reads a string of 32-bit unsigned integer items from an I/O port, ordered.
3131 *
3132 * @param Port I/O port to read from.
3133 * @param pau32 Pointer to the string buffer (output).
3134 * @param c The number of items to read.
3135 */
3136 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
3137 DECLASM(void) ASMInStrU32(RTIOPORT Port, uint32_t *pau32, size_t c);
3138 #else
3139 DECLINLINE(void) ASMInStrU32(RTIOPORT Port, uint32_t *pau32, size_t c)
3140 {
3141 # if RT_INLINE_ASM_GNU_STYLE
3142 __asm__ __volatile__("rep; insl\n\t"
3143 : "+D" (pau32),
3144 "+c" (c)
3145 : "d" (Port));
3146
3147 # elif RT_INLINE_ASM_USES_INTRIN
3148 __indwordstring(Port, (unsigned long *)pau32, (unsigned long)c);
3149
3150 # else
3151 __asm
3152 {
3153 mov dx, [Port]
3154 mov ecx, [c]
3155 mov eax, [pau32]
3156 xchg edi, eax
3157 rep insd
3158 xchg edi, eax
3159 }
3160 # endif
3161 }
3162 #endif
3163
3164
3165 /**
3166 * Invalidate page.
3167 *
3168 * @param uPtr Address of the page to invalidate.
3169 */
3170 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
3171 DECLASM(void) ASMInvalidatePage(RTCCUINTXREG uPtr);
3172 #else
3173 DECLINLINE(void) ASMInvalidatePage(RTCCUINTXREG uPtr)
3174 {
3175 # if RT_INLINE_ASM_USES_INTRIN
3176 __invlpg((void *)uPtr);
3177
3178 # elif RT_INLINE_ASM_GNU_STYLE
3179 __asm__ __volatile__("invlpg %0\n\t"
3180 : : "m" (*(uint8_t *)(uintptr_t)uPtr));
3181 # else
3182 __asm
3183 {
3184 # ifdef RT_ARCH_AMD64
3185 mov rax, [uPtr]
3186 invlpg [rax]
3187 # else
3188 mov eax, [uPtr]
3189 invlpg [eax]
3190 # endif
3191 }
3192 # endif
3193 }
3194 #endif
3195
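/*
 * Usage sketch (illustrative): after modifying a page-table entry the stale TLB
 * entry for that linear address must be dropped.  The PTE update and the
 * GCPtrPage variable are hypothetical; only the invalidation uses this header.
 *
 *      pPte->u = uNewPteValue;                         // hypothetical PTE update
 *      ASMInvalidatePage((RTCCUINTXREG)GCPtrPage);     // flush the TLB entry
 */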
3196
3197 /**
3198 * Write back the internal caches and invalidate them.
3199 */
3200 #if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
3201 DECLASM(void) ASMWriteBackAndInvalidateCaches(void);
3202 #else
3203 DECLINLINE(void) ASMWriteBackAndInvalidateCaches(void)
3204 {
3205 # if RT_INLINE_ASM_USES_INTRIN
3206 __wbinvd();
3207
3208 # elif RT_INLINE_ASM_GNU_STYLE
3209 __asm__ __volatile__("wbinvd");
3210 # else
3211 __asm
3212 {
3213 wbinvd
3214 }
3215 # endif
3216 }
3217 #endif
3218
3219
3220 /**
3221 * Invalidate internal and (perhaps) external caches without first
3222 * flushing dirty cache lines. Use with extreme care.
3223 */
3224 #if RT_INLINE_ASM_EXTERNAL
3225 DECLASM(void) ASMInvalidateInternalCaches(void);
3226 #else
3227 DECLINLINE(void) ASMInvalidateInternalCaches(void)
3228 {
3229 # if RT_INLINE_ASM_GNU_STYLE
3230 __asm__ __volatile__("invd");
3231 # else
3232 __asm
3233 {
3234 invd
3235 }
3236 # endif
3237 }
3238 #endif
3239
3240
3241 /**
3242 * Memory load/store fence, waits for any pending writes and reads to complete.
3243 * Requires the X86_CPUID_FEATURE_EDX_SSE2 CPUID bit set.
3244 */
3245 DECLINLINE(void) ASMMemoryFenceSSE2(void)
3246 {
3247 #if RT_INLINE_ASM_GNU_STYLE
3248 __asm__ __volatile__ (".byte 0x0f,0xae,0xf0\n\t");
3249 #elif RT_INLINE_ASM_USES_INTRIN
3250 _mm_mfence();
3251 #else
3252 __asm
3253 {
3254 _emit 0x0f
3255 _emit 0xae
3256 _emit 0xf0
3257 }
3258 #endif
3259 }
3260
3261
3262 /**
3263 * Memory store fence, waits for any writes to complete.
3264 * Requires the X86_CPUID_FEATURE_EDX_SSE CPUID bit set.
3265 */
3266 DECLINLINE(void) ASMWriteFenceSSE(void)
3267 {
3268 #if RT_INLINE_ASM_GNU_STYLE
3269 __asm__ __volatile__ (".byte 0x0f,0xae,0xf8\n\t");
3270 #elif RT_INLINE_ASM_USES_INTRIN
3271 _mm_sfence();
3272 #else
3273 __asm
3274 {
3275 _emit 0x0f
3276 _emit 0xae
3277 _emit 0xf8
3278 }
3279 #endif
3280 }
3281
3282
3283 /**
3284 * Memory load fence, waits for any pending reads to complete.
3285 * Requires the X86_CPUID_FEATURE_EDX_SSE2 CPUID bit set.
3286 */
3287 DECLINLINE(void) ASMReadFenceSSE2(void)
3288 {
3289 #if RT_INLINE_ASM_GNU_STYLE
3290 __asm__ __volatile__ (".byte 0x0f,0xae,0xe8\n\t");
3291 #elif RT_INLINE_ASM_USES_INTRIN
3292 _mm_lfence();
3293 #else
3294 __asm
3295 {
3296 _emit 0x0f
3297 _emit 0xae
3298 _emit 0xe8
3299 }
3300 #endif
3301 }
3302
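/*
 * Usage sketch (illustrative): a store-then-flag producer paired with a consumer
 * that checks the flag before reading the payload.  Ordinary x86 stores and loads
 * are already strongly ordered, so these fences mainly matter when weakly ordered
 * accesses (non-temporal stores, write-combining memory) are in play, which is the
 * usual reason to reach for them.  The shared variables are hypothetical.
 *
 *      // producer (needs SSE)
 *      g_uPayload = uValue;
 *      ASMWriteFenceSSE();
 *      g_fReady = true;
 *
 *      // consumer (needs SSE2)
 *      while (!g_fReady)
 *          ;
 *      ASMReadFenceSSE2();
 *      uValue = g_uPayload;
 */
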
3303 #if !defined(_MSC_VER) || !defined(RT_ARCH_AMD64)
3304
3305 /**
3306 * Clears the AC bit in the EFLAGS register.
3307 * Requires the X86_CPUID_STEXT_FEATURE_EBX_SMAP CPUID bit set.
3308 * Must be executed in ring-0 (R0).
3309 */
3310 DECLINLINE(void) ASMClearAC(void)
3311 {
3312 #if RT_INLINE_ASM_GNU_STYLE
3313 __asm__ __volatile__ (".byte 0x0f,0x01,0xca\n\t");
3314 #else
3315 __asm
3316 {
3317 _emit 0x0f
3318 _emit 0x01
3319 _emit 0xca
3320 }
3321 #endif
3322 }
3323
3324
3325 /**
3326 * Sets the AC bit in the EFLAGS register.
3327 * Requires the X86_CPUID_STEXT_FEATURE_EBX_SMAP CPUID bit set.
3328 * Must be executed in ring-0 (R0).
3329 */
3330 DECLINLINE(void) ASMSetAC(void)
3331 {
3332 #if RT_INLINE_ASM_GNU_STYLE
3333 __asm__ __volatile__ (".byte 0x0f,0x01,0xcb\n\t");
3334 #else
3335 __asm
3336 {
3337 _emit 0x0f
3338 _emit 0x01
3339 _emit 0xcb
3340 }
3341 #endif
3342 }
3343
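/*
 * Usage sketch (illustrative): with CR4.SMAP enabled, ring-0 code must raise
 * EFLAGS.AC around deliberate accesses to user-mode pages and clear it again right
 * away.  The user pointer and the access are hypothetical.
 *
 *      ASMSetAC();                 // stac: permit supervisor access to user pages
 *      bValue = *pbUserMem;        // hypothetical user-memory access
 *      ASMClearAC();               // clac: re-arm SMAP
 */
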
3344 #endif /* !defined(_MSC_VER) || !defined(RT_ARCH_AMD64) */
3345
3346 /** @} */
3347 #endif
3348