arch/x86/lib/delay_32.c
/*
 * Precise Delay Loops for i386
 *
 * Copyright (C) 1993 Linus Torvalds
 * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * The __delay function must _NOT_ be inlined as its execution time
 * depends wildly on alignment on many x86 processors. The additional
 * jump magic is needed to get the timing stable on all the CPUs
 * we have to worry about.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/preempt.h>
#include <linux/delay.h>

#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/timer.h>

#ifdef CONFIG_SMP
# include <asm/smp.h>
#endif

/* simple loop based delay: */
static void delay_loop(unsigned long loops)
{
        int d0;

        __asm__ __volatile__(
                "\tjmp 1f\n"
                ".align 16\n"
                "1:\tjmp 2f\n"
                ".align 16\n"
                "2:\tdecl %0\n\tjns 2b"
                :"=&a" (d0)
                :"0" (loops));
}
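
/*
 * Note (not part of the original source): the asm in delay_loop() is
 * roughly the counted spin "decrement loops until it goes negative",
 * i.e. on the order of 'loops' iterations of a decl/jns pair.  The two
 * ".align 16" directives plus the jump-over-jump padding ensure the
 * loop body at label 2 starts on a 16-byte boundary no matter where
 * the function itself was placed, which is what keeps the timing
 * stable (see the comment at the top of the file).
 */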

/* TSC based delay: */
static void delay_tsc(unsigned long loops)
{
        unsigned long bclock, now;

        preempt_disable();              /* TSC's are per-cpu */
        rdtscl(bclock);
        do {
                rep_nop();
                rdtscl(now);
        } while ((now-bclock) < loops);
        preempt_enable();
}
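
/*
 * Note (not part of the original source): rep_nop() emits the
 * "rep; nop" (PAUSE) hint, relaxing the spin for the benefit of a
 * hyperthreaded sibling, and preemption stays disabled for the whole
 * wait because the TSC is read on the local CPU; being migrated to a
 * CPU whose TSC is not synchronized could make (now - bclock) jump by
 * an arbitrary amount.
 */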

/*
 * Since we calibrate only once at boot, this
 * function pointer should be set once at boot and not changed
 */
static void (*delay_fn)(unsigned long) = delay_loop;

void use_tsc_delay(void)
{
        delay_fn = delay_tsc;
}

int read_current_timer(unsigned long *timer_val)
{
        if (delay_fn == delay_tsc) {
                rdtscl(*timer_val);
                return 0;
        }
        return -1;
}

void __delay(unsigned long loops)
{
        delay_fn(loops);
}
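
/*
 * Note (not part of the original source): __delay() is the common entry
 * point that the higher-level udelay()/ndelay() helpers eventually
 * reach through __const_udelay() below; per the comment at the top of
 * the file it is deliberately kept a real, out-of-line function so its
 * timing does not depend on the caller's code alignment.
 */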

inline void __const_udelay(unsigned long xloops)
{
        int d0;

        xloops *= 4;
        __asm__("mull %0"
                :"=d" (xloops), "=&a" (d0)
                :"1" (xloops), "0"
                (cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4)));

        __delay(++xloops);
}
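
/*
 * Informal sketch of the math above (not part of the original source):
 * callers pass in xloops ~= usecs * 2^32 / 10^6 (see __udelay below).
 * The mull leaves the 64-bit product xloops * loops_per_jiffy * (HZ/4)
 * in %edx:%eax and keeps only the high 32 bits, i.e. divides by 2^32,
 * while the earlier "xloops *= 4" cancels the /4 (the split presumably
 * keeps loops_per_jiffy * HZ from overflowing 32 bits).  Net result:
 *
 *      loops ~= usecs * loops_per_jiffy * HZ / 10^6
 *
 * which is what __delay() expects, measured in the same calibrated
 * units as loops_per_jiffy; the ++xloops rounds up by one loop so the
 * delay never comes up short.
 */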

void __udelay(unsigned long usecs)
{
        __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}

void __ndelay(unsigned long nsecs)
{
        __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
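
/*
 * Note (not part of the original source): 0x10c7 = 4295 ~= 2^32 / 10^6
 * and 0x5 = 5 ~= 2^32 / 10^9, both rounded up so the resulting delay
 * can only overshoot, never undershoot, the requested time (rounding
 * 4.29... up to 5 means __ndelay() overshoots by roughly 16%).  Code
 * rarely calls these directly; a driver writes e.g. udelay(10) or
 * ndelay(500) and the wrappers in the delay headers end up here.
 */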

EXPORT_SYMBOL(__delay);
EXPORT_SYMBOL(__const_udelay);
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__ndelay);