/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/unwind.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/processor-flags.h>
#include <asm/arch_hooks.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/unwind.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/io.h>
#include <asm/traps.h>

#include "mach_traps.h"
#include "cpu/mcheck/mce.h"
DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

asmlinkage int system_call(void);

/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
gate_desc idt_table[256]
	__attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };

static int ignore_nmis;
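
/*
 * Re-enable interrupts only if the trapped context had them enabled:
 * IF in the saved flags tells us whether the CPU was interruptible when
 * the exception was raised, so a fault taken from an interrupts-off
 * region stays interrupts-off.
 */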
static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}
static void __kprobes
die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
	if (!user_mode_vm(regs))
		die(str, regs, err);
}
/*
 * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
 * invalid offset set (the LAZY one) and the faulting thread has
 * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS,
 * we set the offset field correctly and return 1.
 */
static int lazy_iobitmap_copy(void)
{
	struct thread_struct *thread;
	struct tss_struct *tss;
	int cpu;

	cpu = get_cpu();
	tss = &per_cpu(init_tss, cpu);
	thread = &current->thread;

	if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
	    thread->io_bitmap_ptr) {
		memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
		       thread->io_bitmap_max);
		/*
		 * If the previously set map was extending to higher ports
		 * than the current one, pad extra space with 0xff (no access).
		 */
		if (thread->io_bitmap_max < tss->io_bitmap_max) {
			memset((char *) tss->io_bitmap +
				thread->io_bitmap_max, 0xff,
				tss->io_bitmap_max - thread->io_bitmap_max);
		}
		tss->io_bitmap_max = thread->io_bitmap_max;
		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
		tss->io_bitmap_owner = thread;
		put_cpu();

		return 1;
	}
	put_cpu();

	return 0;
}
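
/*
 * do_general_protection() below calls lazy_iobitmap_copy() first: an
 * I/O instruction that faulted only because the bitmap had not been
 * copied into this CPU's TSS yet is simply restarted once the copy is
 * done, instead of being reported as a real #GP.
 */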
static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (regs->flags & X86_VM_MASK) {
		/*
		 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < 6)
			goto vm86_trap;
		goto trap_signal;
	}

	if (!user_mode(regs))
		goto kernel_trap;

trap_signal:
	/*
	 * We want error_code and trap_no set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up. die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults. See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	if (info)
		force_sig_info(signr, info, tsk);
	else
		force_sig(signr, tsk);
	return;

kernel_trap:
	if (!fixup_exception(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		die(str, regs, error_code);
	}
	return;

vm86_trap:
	if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
			     error_code, trapnr))
		goto trap_signal;
	return;
}
#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
							== NOTIFY_STOP)	\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, NULL);		\
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)		\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	siginfo_t info;							\
	info.si_signo = signr;						\
	info.si_errno = 0;						\
	info.si_code = sicode;						\
	info.si_addr = (void __user *)siaddr;				\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
							== NOTIFY_STOP)	\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, &info);		\
}
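
/*
 * For example, DO_ERROR(4, SIGSEGV, "overflow", overflow) below expands
 * to a handler do_overflow() that runs the DIE_TRAP notifier chain,
 * conditionally re-enables interrupts, and hands off to do_trap() with
 * SIGSEGV and no extra siginfo; the DO_ERROR_INFO variant additionally
 * fills in a siginfo_t with the given si_code and fault address.
 */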
DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
DO_ERROR(4, SIGSEGV, "overflow", overflow)
DO_ERROR(5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
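
/*
 * The vectors missing from the list above (1 #DB, 2 NMI, 3 #BP, 7 #NM,
 * 8 #DF, 13 #GP, 14 #PF, 16 #MF, 18 #MC, 19 #XF) all need more than
 * the generic treatment and get hand-written handlers: most are later
 * in this file, the double fault goes through a task gate, and the
 * page fault handler lives in mm/fault.c.
 */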
dotraplinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;

	conditional_sti(regs);

	if (lazy_iobitmap_copy()) {
		/* restart the faulting instruction */
		return;
	}

	if (regs->flags & X86_VM_MASK)
		goto gp_in_vm86;

	tsk = current;
	if (!user_mode(regs))
		goto gp_in_kernel;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
			printk_ratelimit()) {
		printk(KERN_INFO
			"%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}

	force_sig(SIGSEGV, tsk);
	return;

gp_in_vm86:
	local_irq_enable();
	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
	return;

gp_in_kernel:
	if (fixup_exception(regs))
		return;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;
	if (notify_die(DIE_GPF, "general protection fault", regs,
				error_code, 13, SIGSEGV) == NOTIFY_STOP)
		return;
	die("general protection fault", regs, error_code);
}
static notrace __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs *regs)
{
	printk(KERN_EMERG
		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		reason, smp_processor_id());

	printk(KERN_EMERG
		"You have some hardware problem, likely on the PCI bus.\n");

#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

	/* Clear and disable the memory parity error line. */
	clear_mem_error(reason);
}
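
/*
 * The "reason" byte handled below is the NMI status read from I/O port
 * 0x61; on PC/AT-compatible hardware, writing bit 3 of the same port
 * masks and clears the latched IOCHK condition, so the handler sets it,
 * waits, and clears it again to re-arm the line.
 */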
static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);

	i = 2000;
	while (--i)
		udelay(1000);

	reason &= ~8;
	outb(reason, 0x61);
}
static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;
#ifdef CONFIG_MCA
	/*
	 * Might actually be able to figure out what the guilty party
	 * is:
	 */
	if (MCA_bus) {
		mca_handle_nmi();
		return;
	}
#endif
	printk(KERN_EMERG
		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		reason, smp_processor_id());

	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}
static DEFINE_SPINLOCK(nmi_print_lock);

void notrace __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
		return;

	spin_lock(&nmi_print_lock);
	/*
	 * We are in trouble anyway, lets at least try
	 * to get a message out:
	 */
	bust_spinlocks(1);
	printk(KERN_EMERG "%s", str);
	printk(" on CPU%d, ip %08lx, registers:\n",
		smp_processor_id(), regs->ip);
	show_registers(regs);
	if (do_panic)
		panic("Non maskable interrupt");
	console_silent();
	spin_unlock(&nmi_print_lock);
	bust_spinlocks(0);

	/*
	 * If we are in kernel we are probably nested up pretty bad
	 * and might as well get out now while we still can:
	 */
	if (!user_mode_vm(regs)) {
		current->thread.trap_no = 2;
		crash_kexec(regs);
	}

	do_exit(SIGSEGV);
}
static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int cpu;

	cpu = smp_processor_id();

	/* Only the BSP gets external NMIs from the system. */
	if (!cpu)
		reason = get_nmi_reason();

	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
								== NOTIFY_STOP)
			return;
#ifdef CONFIG_X86_LOCAL_APIC
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog_tick(regs, reason))
			return;
		if (!do_nmi_callback(regs, cpu))
			unknown_nmi_error(reason, regs);
#else
		unknown_nmi_error(reason, regs);
#endif

		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;

	/* AK: following checks seem to be broken on modern chipsets. FIXME */
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
	/*
	 * Reassert NMI in case it became active meanwhile
	 * as it's edge-triggered:
	 */
	reassert_nmi();
}
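
/*
 * In the NMI reason byte read above, bit 7 reports a memory parity /
 * SERR condition and bit 6 an IOCHK error, which is why 0xc0 selects
 * the "known hardware source" path and 0x80/0x40 pick the specific
 * handler.
 */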
dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
	int cpu;

	nmi_enter();

	cpu = smp_processor_id();

	++nmi_count(cpu);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();
}

void stop_nmi(void)
{
	acpi_nmi_disable();
	ignore_nmis++;
}

void restart_nmi(void)
{
	ignore_nmis--;
	acpi_nmi_enable();
}
dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_KPROBES
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;
#endif

	conditional_sti(regs);

	if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;

	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
}
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	unsigned int condition;
	int si_code;

	get_debugreg(condition, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
	tsk->thread.debugctlmsr = 0;

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
						SIGTRAP) == NOTIFY_STOP)
		return;

	/* It's safe to allow irq's after DR6 has been saved */
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg7)
			goto clear_dr7;
	}

	if (regs->flags & X86_VM_MASK)
		goto debug_vm86;

	/* Save debug status register where ptrace can see it */
	tsk->thread.debugreg6 = condition;

	/*
	 * Single-stepping through TF: make sure we ignore any events in
	 * kernel space (but re-enable TF when returning to user mode).
	 */
	if (condition & DR_STEP) {
		/*
		 * We already checked v86 mode above, so we can
		 * check for kernel mode by just checking the CPL
		 * of CS.
		 */
		if (!user_mode(regs))
			goto clear_TF_reenable;
	}

	si_code = get_si_code((unsigned long)condition);
	/* Ok, finally something we can handle */
	send_sigtrap(tsk, regs, error_code, si_code);

	/*
	 * Disable additional traps. They'll be re-enabled when
	 * the signal is delivered.
	 */
clear_dr7:
	set_debugreg(0, 7);
	return;

debug_vm86:
	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
	return;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->flags &= ~X86_EFLAGS_TF;
	return;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *ip)
{
	struct task_struct *task;
	siginfo_t info;
	unsigned short cwd, swd;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = ip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status.  0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit.  We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (swd & ~cwd & 0x3f) {
	case 0x000: /* No unmasked exception */
		return;
	default: /* Multiple exceptions */
		break;
	case 0x001: /* Invalid Op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}
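
/*
 * Worked example: an x87 divide by zero with the ZE mask bit clear in
 * the control word leaves ZE (0x04) set in the status word, so
 * swd & ~cwd & 0x3f == 0x004 and the signal is queued as FPE_FLTDIV.
 */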
dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
	ignore_fpu_irq = 1;
	math_error((void __user *)regs->ip);
}
static void simd_math_error(void __user *ip)
{
	struct task_struct *task;
	siginfo_t info;
	unsigned short mxcsr;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = ip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register.  Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
	case 0x000:
	default:
		break;
	case 0x001: /* Invalid Op */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}
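
/*
 * MXCSR keeps the exception masks in bits 7-12 and the sticky flag bits
 * in bits 0-5, so the shift by 7 lines the masks up with the flags.
 * E.g. an unmasked SIMD divide by zero has ZM (bit 9) clear and ZE
 * (bit 2) set, so the switch value is 0x004 and si_code FPE_FLTDIV.
 */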
dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);

	if (cpu_has_xmm) {
		/* Handle SIMD FPU exceptions on PIII+ processors. */
		ignore_fpu_irq = 1;
		simd_math_error((void __user *)regs->ip);
		return;
	}
	/*
	 * Handle strange cache flush from user space exception
	 * in all other cases.  This is undocumented behaviour.
	 */
	if (regs->flags & X86_VM_MASK) {
		handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
		return;
	}
	current->thread.trap_no = 19;
	current->thread.error_code = error_code;
	die_if_kernel("cache flush denied", regs, error_code);
	force_sig(SIGSEGV, current);
}
dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
#if 0
	/* No need to warn about this any longer. */
	printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}
unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
{
	struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());
	unsigned long base = (kesp - uesp) & -THREAD_SIZE;
	unsigned long new_kesp = kesp - base;
	unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
	__u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];

	/* Set up base for espfix segment */
	desc &= 0x00f0ff0000000000ULL;
	desc |=	((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
		((((__u64)base) << 32) & 0xff00000000000000ULL) |
		((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
		(lim_pages & 0xffff);
	*(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;

	return new_kesp;
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = thread->task;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	clts();				/* Allow maths ops (or we recurse) */
	restore_fpu(tsk);
	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
	tsk->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);
#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
	printk(KERN_EMERG
		"math-emulation not enabled and no coprocessor found.\n");
	printk(KERN_EMERG "killing %s.\n", current->comm);
	force_sig(SIGFPE, current);
	schedule();
}

#endif /* CONFIG_MATH_EMULATION */
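
/*
 * Device-not-available (#NM, vector 7) has two very different causes:
 * with CR0.EM set there is no usable FPU and the instruction must be
 * emulated (or the task killed, per math_emulate() above); otherwise
 * the fault just means a lazy FPU context switch, and
 * math_state_restore() reloads the state after clts() clears CR0.TS.
 */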
dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error)
{
	if (read_cr0() & X86_CR0_EM) {
		conditional_sti(regs);
		math_emulate(0);
	} else {
		math_state_restore(); /* interrupts still off */
		conditional_sti(regs);
	}
}
#ifdef CONFIG_X86_MCE
dotraplinkage void __kprobes do_machine_check(struct pt_regs *regs, long error)
{
	conditional_sti(regs);
	machine_check_vector(regs, error);
}
#endif
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;

	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = NULL;
	if (notify_die(DIE_TRAP, "iret exception",
			regs, error_code, 32, SIGILL) == NOTIFY_STOP)
		return;
	do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
}
void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(0, &divide_error);
	set_intr_gate(1, &debug);
	set_intr_gate(2, &nmi);
	set_system_intr_gate(3, &int3); /* int3 can be called from all */
	set_system_intr_gate(4, &overflow); /* int4 can be called from all */
	set_intr_gate(5, &bounds);
	set_intr_gate(6, &invalid_op);
	set_intr_gate(7, &device_not_available);
	set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
	set_intr_gate(9, &coprocessor_segment_overrun);
	set_intr_gate(10, &invalid_TSS);
	set_intr_gate(11, &segment_not_present);
	set_intr_gate(12, &stack_segment);
	set_intr_gate(13, &general_protection);
	set_intr_gate(14, &page_fault);
	set_intr_gate(15, &spurious_interrupt_bug);
	set_intr_gate(16, &coprocessor_error);
	set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate(18, &machine_check);
#endif
	set_intr_gate(19, &simd_coprocessor_error);

	if (cpu_has_fxsr) {
		printk(KERN_INFO "Enabling fast FPU save and restore... ");
		set_in_cr4(X86_CR4_OSFXSR);
		printk("done.\n");
	}
	if (cpu_has_xmm) {
		printk(KERN_INFO
			"Enabling unmasked SIMD FPU exception support... ");
		set_in_cr4(X86_CR4_OSXMMEXCPT);
		printk("done.\n");
	}

	set_system_gate(SYSCALL_VECTOR, &system_call);

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

	set_bit(SYSCALL_VECTOR, used_vectors);

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	trap_init_hook();
}