/* calibrate.c: default delay calibration
 *
 * Excised from init/main.c
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/percpu.h>

unsigned long lpj_fine;
unsigned long preset_lpj;
static int __init lpj_setup(char *str)
{
        preset_lpj = simple_strtoul(str, NULL, 0);
        return 1;
}

__setup("lpj=", lpj_setup);
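
/*
 * Note: "lpj=" on the kernel command line (for example "lpj=4997120"; the
 * value is purely illustrative) stores the given loops-per-jiffy count in
 * preset_lpj, which calibrate_delay() below uses to skip the runtime
 * calibration.
 */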

#ifdef ARCH_HAS_READ_CURRENT_TIMER

/* This routine uses the read_current_timer() routine and gets the
 * loops per jiffy directly, instead of guessing it using delay().
 * Also, this code tries to handle non-maskable asynchronous events
 * (like SMIs).
 */
#define DELAY_CALIBRATION_TICKS         ((HZ < 100) ? 1 : (HZ/100))
#define MAX_DIRECT_CALIBRATION_RETRIES  5
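/*
 * Note: the direct measurement window below spans DELAY_CALIBRATION_TICKS
 * jiffies, i.e. roughly 10ms (HZ=1000 gives 10 ticks, HZ=100 gives 1 tick;
 * for HZ below 100 it is clamped to a single, longer tick).
 */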

static unsigned long calibrate_delay_direct(void)
{
        unsigned long pre_start, start, post_start;
        unsigned long pre_end, end, post_end;
        unsigned long start_jiffies;
        unsigned long timer_rate_min, timer_rate_max;
        unsigned long good_timer_sum = 0;
        unsigned long good_timer_count = 0;
        unsigned long measured_times[MAX_DIRECT_CALIBRATION_RETRIES];
        int max = -1; /* index of measured_times with max/min values or not set */
        int min = -1;
        int i;

        if (read_current_timer(&pre_start) < 0)
                return 0;

        /*
         * A simple loop like
         *      while (jiffies < start_jiffies + 1)
         *              start = read_current_timer();
         * will not do, as we don't really know whether the jiffy switch
         * happened first or the timer value was read first, and some
         * asynchronous event between the two would introduce errors in lpj.
         *
         * So instead we do:
         * 1. pre_start  <- while we are sure the jiffy switch hasn't happened
         * 2. check for the jiffy switch
         * 3. start      <- timer value, read before or after the jiffy switch
         * 4. post_start <- once we are sure the jiffy switch has happened
         *
         * Note that we don't know the order of 2 and 3. By looking at the
         * difference between post_start and pre_start we can tell whether
         * any asynchronous event happened in between.
         */

        for (i = 0; i < MAX_DIRECT_CALIBRATION_RETRIES; i++) {
                pre_start = 0;
                read_current_timer(&start);
                start_jiffies = jiffies;
                while (time_before_eq(jiffies, start_jiffies + 1)) {
                        pre_start = start;
                        read_current_timer(&start);
                }
                read_current_timer(&post_start);

                pre_end = 0;
                end = post_start;
                while (time_before_eq(jiffies, start_jiffies + 1 +
                                       DELAY_CALIBRATION_TICKS)) {
                        pre_end = end;
                        read_current_timer(&end);
                }
                read_current_timer(&post_end);

                timer_rate_max = (post_end - pre_start) /
                                        DELAY_CALIBRATION_TICKS;
                timer_rate_min = (pre_end - post_start) /
                                        DELAY_CALIBRATION_TICKS;
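
                /*
                 * timer_rate_max and timer_rate_min bracket the true timer
                 * rate: _max is computed over the widest possible window
                 * (pre_start .. post_end), _min over the narrowest one
                 * (post_start .. pre_end).
                 */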

                /*
                 * If the upper limit and lower limit of the timer_rate is
                 * >= 12.5% apart, redo calibration.
                 */
                if (start >= post_end)
                        printk(KERN_NOTICE "calibrate_delay_direct() ignoring "
                                        "timer_rate as we had a TSC wrap around"
                                        " start=%lu >=post_end=%lu\n",
                                start, post_end);
                if (start < post_end && pre_start != 0 && pre_end != 0 &&
                    (timer_rate_max - timer_rate_min) < (timer_rate_max >> 3)) {
                        good_timer_count++;
                        good_timer_sum += timer_rate_max;
                        measured_times[i] = timer_rate_max;
                        if (max < 0 || timer_rate_max > measured_times[max])
                                max = i;
                        if (min < 0 || timer_rate_max < measured_times[min])
                                min = i;
                } else
                        measured_times[i] = 0;
        }

        /*
         * Find the maximum & minimum - if they differ too much throw out the
         * one with the largest difference from the mean and try again...
         */
        while (good_timer_count > 1) {
                unsigned long estimate;
                unsigned long maxdiff;

                /* compute the estimate */
                estimate = (good_timer_sum / good_timer_count);
                maxdiff = estimate >> 3;

                /* if range is within 12% let's take it */
                if ((measured_times[max] - measured_times[min]) < maxdiff)
                        return estimate;

                /* ok - drop the worse value and try again... */
                good_timer_sum = 0;
                good_timer_count = 0;
                if ((measured_times[max] - estimate) <
                                (estimate - measured_times[min])) {
                        printk(KERN_NOTICE "calibrate_delay_direct() dropping "
                                        "min bogoMips estimate %d = %lu\n",
                                min, measured_times[min]);
                        measured_times[min] = 0;
                        min = max;
                } else {
                        printk(KERN_NOTICE "calibrate_delay_direct() dropping "
                                        "max bogoMips estimate %d = %lu\n",
                                max, measured_times[max]);
                        measured_times[max] = 0;
                        max = min;
                }

                for (i = 0; i < MAX_DIRECT_CALIBRATION_RETRIES; i++) {
                        if (measured_times[i] == 0)
                                continue;
                        good_timer_count++;
                        good_timer_sum += measured_times[i];
                        if (measured_times[i] < measured_times[min])
                                min = i;
                        if (measured_times[i] > measured_times[max])
                                max = i;
                }
        }

        printk(KERN_NOTICE "calibrate_delay_direct() failed to get a good "
               "estimate for loops_per_jiffy.\nProbably due to long platform "
               "interrupts. Consider using \"lpj=\" boot option.\n");
        return 0;
}
#else
static unsigned long calibrate_delay_direct(void)
{
        return 0;
}
#endif

/*
 * This is the number of bits of precision for the loops_per_jiffy.  Each
 * time we refine our estimate after the first takes 1.5/HZ seconds, so try
 * to start with a good estimate.
 * For the boot cpu we can skip the delay calibration and assign it a value
 * calculated based on the timer frequency.
 * For the rest of the CPUs we cannot assume that the timer frequency is the
 * same as the cpu frequency, hence do the calibration for those.
 */
#define LPS_PREC 8

static unsigned long calibrate_delay_converge(void)
{
        /* First stage - slowly accelerate to find initial bounds */
        unsigned long lpj, lpj_base, ticks, loopadd, loopadd_base, chop_limit;
        int trials = 0, band = 0, trial_in_band = 0;

        lpj = (1<<12);

        /* wait for "start of" clock tick */
        ticks = jiffies;
        while (ticks == jiffies)
                ; /* nothing */
        /* Go .. */
        ticks = jiffies;
        do {
                if (++trial_in_band == (1<<band)) {
                        ++band;
                        trial_in_band = 0;
                }
                __delay(lpj * band);
                trials += band;
        } while (ticks == jiffies);
        /*
         * We overshot, so retreat to a clear underestimate. Then estimate
         * the largest likely undershoot. This defines our chop bounds.
         */
        trials -= band;
        loopadd_base = lpj * band;
        lpj_base = lpj * trials;
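
        /*
         * At this point lpj * trials loops are known to fit within one tick
         * (the overshooting final pass was subtracted out above), and the
         * undershoot is at most that final pass's delay, lpj * band.
         */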

recalibrate:
        lpj = lpj_base;
        loopadd = loopadd_base;

        /*
         * Do a binary approximation to get lpj set to
         * equal one clock (up to LPS_PREC bits)
         */
        chop_limit = lpj >> LPS_PREC;
        while (loopadd > chop_limit) {
                lpj += loopadd;
                ticks = jiffies;
                while (ticks == jiffies)
                        ; /* nothing */
                ticks = jiffies;
                __delay(lpj);
                if (jiffies != ticks)   /* longer than 1 tick */
                        lpj -= loopadd;
                loopadd >>= 1;
        }
        /*
         * If we incremented every single time possible, presume we've
         * massively underestimated initially, and retry with a higher
         * start, and larger range. (Only seen on x86_64, due to SMIs)
         */
        if (lpj + loopadd * 2 == lpj_base + loopadd_base * 2) {
                lpj_base = lpj;
                loopadd_base <<= 2;
                goto recalibrate;
        }

        return lpj;
}

static DEFINE_PER_CPU(unsigned long, cpu_loops_per_jiffy) = { 0 };

/*
 * Check if cpu calibration delay is already known. For example,
 * some processors with multi-core sockets may have all cores
 * with the same calibration delay.
 *
 * Architectures should override this function if a faster calibration
 * method is available.
 */
unsigned long __attribute__((weak)) calibrate_delay_is_known(void)
{
        return 0;
}

/*
 * Indicate the cpu delay calibration is done. This can be used by
 * architectures to stop accepting delay timer registrations after this point.
 */
void __attribute__((weak)) calibration_delay_done(void)
{
}

void calibrate_delay(void)
{
        unsigned long lpj;
        static bool printed;
        int this_cpu = smp_processor_id();

        if (per_cpu(cpu_loops_per_jiffy, this_cpu)) {
                lpj = per_cpu(cpu_loops_per_jiffy, this_cpu);
                if (!printed)
                        pr_info("Calibrating delay loop (skipped) "
                                "already calibrated this CPU");
        } else if (preset_lpj) {
                lpj = preset_lpj;
                if (!printed)
                        pr_info("Calibrating delay loop (skipped) "
                                "preset value.. ");
        } else if ((!printed) && lpj_fine) {
                lpj = lpj_fine;
                pr_info("Calibrating delay loop (skipped), "
                        "value calculated using timer frequency.. ");
        } else if ((lpj = calibrate_delay_is_known())) {
                ;
        } else if ((lpj = calibrate_delay_direct()) != 0) {
                if (!printed)
                        pr_info("Calibrating delay using timer "
                                "specific routine.. ");
        } else {
                if (!printed)
                        pr_info("Calibrating delay loop... ");
                lpj = calibrate_delay_converge();
        }
        per_cpu(cpu_loops_per_jiffy, this_cpu) = lpj;
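        /*
         * BogoMIPS = delay loops per second / 500000 = lpj * HZ / 500000;
         * the two divisions below print its integer part and two decimal
         * places without resorting to floating point.
         */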
        if (!printed)
                pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
                        lpj/(500000/HZ),
                        (lpj/(5000/HZ)) % 100, lpj);

        loops_per_jiffy = lpj;
        printed = true;

        calibration_delay_done();
}