1/*
2 * linux/kernel/time/clocksource.c
3 *
4 * This file contains the functions which manage clocksource drivers.
5 *
6 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 * TODO WishList:
23 * o Allow clocksource drivers to be unregistered
24 */
25
26#include <linux/device.h>
27#include <linux/clocksource.h>
28#include <linux/init.h>
29#include <linux/module.h>
30#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
31#include <linux/tick.h>
32#include <linux/kthread.h>
33
34#include "tick-internal.h"
35#include "timekeeping_internal.h"
36
37/**
38 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
39 * @mult: pointer to mult variable
40 * @shift: pointer to shift variable
41 * @from: frequency to convert from
42 * @to: frequency to convert to
43 * @maxsec: guaranteed runtime conversion range in seconds
44 *
45 * The function evaluates the shift/mult pair for the scaled math
46 * operations of clocksources and clockevents.
47 *
48 * @to and @from are frequency values in HZ. For clock sources @to is
49 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
50 * event @to is the counter frequency and @from is NSEC_PER_SEC.
51 *
52 * The @maxsec conversion range argument controls the time frame in
53 * seconds which must be covered by the runtime conversion with the
54 * calculated mult and shift factors. This guarantees that no 64bit
55 * overflow happens when the input value of the conversion is
56 * multiplied with the calculated mult factor. Larger ranges may
57 * reduce the conversion accuracy by choosing smaller mult and shift
58 * factors.
59 */
60void
61clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
62{
63 u64 tmp;
64	u32 sft, sftacc = 32;
65
66 /*
67 * Calculate the shift factor which is limiting the conversion
68 * range:
69 */
70 tmp = ((u64)maxsec * from) >> 32;
71 while (tmp) {
72		tmp >>= 1;
73 sftacc--;
74 }
75
76 /*
77 * Find the conversion shift/mult pair which has the best
78 * accuracy and fits the maxsec conversion range:
79 */
80 for (sft = 32; sft > 0; sft--) {
81 tmp = (u64) to << sft;
82 tmp += from / 2;
83 do_div(tmp, from);
84 if ((tmp >> sftacc) == 0)
85 break;
86 }
87 *mult = tmp;
88 *shift = sft;
89}
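
/*
 * Worked example (hypothetical 10 MHz counter, purely illustrative):
 * clocks_calc_mult_shift(&mult, &shift, 10000000, NSEC_PER_SEC, 600)
 * first derives sftacc = 31 from the 600 s * 10 MHz cycle budget, then
 * walks sft down from 32 until the candidate mult fits below 2^31. It
 * settles on mult = 1677721600 and shift = 24, i.e.
 *
 *	ns = (cycles * 1677721600) >> 24 == cycles * 100
 *
 * exactly 100 ns per cycle, while 600 s worth of cycles still multiply
 * without overflowing the 64 bit intermediate product.
 */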
90
91/*[Clocksource internal variables]---------
92 * curr_clocksource:
93 * currently selected clocksource.
94 * clocksource_list:
95 * linked list with the registered clocksources
96 * clocksource_mutex:
97 * protects manipulations to curr_clocksource and the clocksource_list
98 * override_name:
99 * Name of the user-specified clocksource.
100 */
101static struct clocksource *curr_clocksource;
102static LIST_HEAD(clocksource_list);
103static DEFINE_MUTEX(clocksource_mutex);
104static char override_name[CS_NAME_LEN];
105static int finished_booting;
106
107#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
108static void clocksource_watchdog_work(struct work_struct *work);
109static void clocksource_select(void);
110
111static LIST_HEAD(watchdog_list);
112static struct clocksource *watchdog;
113static struct timer_list watchdog_timer;
114static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
115static DEFINE_SPINLOCK(watchdog_lock);
116static int watchdog_running;
117static atomic_t watchdog_reset_pending;
118
119static int clocksource_watchdog_kthread(void *data);
120static void __clocksource_change_rating(struct clocksource *cs, int rating);
121
122/*
123 * Interval: 0.5sec Threshold: 0.0625s
124 */
125#define WATCHDOG_INTERVAL (HZ >> 1)
126#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
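
/*
 * Put differently (assuming the defaults above): the watchdog samples
 * roughly every 0.5 s, and a clocksource whose measured interval deviates
 * from the watchdog's by more than 62.5 ms over that window (a frequency
 * error on the order of 12.5%) is declared unstable.
 */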
127
128static void clocksource_watchdog_work(struct work_struct *work)
129{
130 /*
131 * If kthread_run fails the next watchdog scan over the
132 * watchdog_list will find the unstable clock again.
133 */
134 kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
135}
136
137static void __clocksource_unstable(struct clocksource *cs)
138{
139 cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
140 cs->flags |= CLOCK_SOURCE_UNSTABLE;
141 if (finished_booting)
142 schedule_work(&watchdog_work);
143}
144
145static void clocksource_unstable(struct clocksource *cs, int64_t delta)
146{
147 printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
148 cs->name, delta);
149 __clocksource_unstable(cs);
150}
151
152/**
153 * clocksource_mark_unstable - mark clocksource unstable via watchdog
154 * @cs: clocksource to be marked unstable
155 *
156 * This function is called instead of clocksource_change_rating from
157 * cpu hotplug code to avoid a deadlock between the clocksource mutex
158 * and the cpu hotplug mutex. It defers the update of the clocksource
159 * to the watchdog thread.
160 */
161void clocksource_mark_unstable(struct clocksource *cs)
162{
163 unsigned long flags;
164
165 spin_lock_irqsave(&watchdog_lock, flags);
166 if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
167 if (list_empty(&cs->wd_list))
168 list_add(&cs->wd_list, &watchdog_list);
169 __clocksource_unstable(cs);
170 }
171 spin_unlock_irqrestore(&watchdog_lock, flags);
172}
173
174static void clocksource_watchdog(unsigned long data)
175{
176 struct clocksource *cs;
177 cycle_t csnow, wdnow, delta;
178 int64_t wd_nsec, cs_nsec;
179 int next_cpu, reset_pending;
180
181 spin_lock(&watchdog_lock);
182 if (!watchdog_running)
183 goto out;
184
185 reset_pending = atomic_read(&watchdog_reset_pending);
186
187 list_for_each_entry(cs, &watchdog_list, wd_list) {
188
189 /* Clocksource already marked unstable? */
190 if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
191 if (finished_booting)
192 schedule_work(&watchdog_work);
193 continue;
194 }
195
196 local_irq_disable();
197 csnow = cs->read(cs);
198 wdnow = watchdog->read(watchdog);
199 local_irq_enable();
200
201 /* Clocksource initialized ? */
202 if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
203 atomic_read(&watchdog_reset_pending)) {
204 cs->flags |= CLOCK_SOURCE_WATCHDOG;
205 cs->wd_last = wdnow;
206 cs->cs_last = csnow;
207 continue;
208 }
209
210 delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
211 wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
212 watchdog->shift);
213
214 delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
215 cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
216 cs->cs_last = csnow;
217 cs->wd_last = wdnow;
218
219 if (atomic_read(&watchdog_reset_pending))
220 continue;
221
222 /* Check the deviation from the watchdog clocksource. */
223 if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
224 clocksource_unstable(cs, cs_nsec - wd_nsec);
225 continue;
226 }
227
228 if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
229 (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
230 (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
231 /* Mark it valid for high-res. */
232 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
233
234 /*
235 * clocksource_done_booting() will sort it if
236 * finished_booting is not set yet.
237 */
238 if (!finished_booting)
239 continue;
240
241 /*
242 * If this is not the current clocksource let
243 * the watchdog thread reselect it. Due to the
244 * change to high res this clocksource might
245 * be preferred now. If it is the current
246 * clocksource let the tick code know about
247 * that change.
248 */
249 if (cs != curr_clocksource) {
250 cs->flags |= CLOCK_SOURCE_RESELECT;
251 schedule_work(&watchdog_work);
252 } else {
253 tick_clock_notify();
254 }
255 }
256 }
257
258 /*
259 * We only clear the watchdog_reset_pending, when we did a
260 * full cycle through all clocksources.
261 */
262 if (reset_pending)
263 atomic_dec(&watchdog_reset_pending);
264
265 /*
266 * Cycle through CPUs to check if the CPUs stay synchronized
267 * to each other.
268 */
269 next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
270 if (next_cpu >= nr_cpu_ids)
271 next_cpu = cpumask_first(cpu_online_mask);
272 watchdog_timer.expires += WATCHDOG_INTERVAL;
273 add_timer_on(&watchdog_timer, next_cpu);
274out:
275 spin_unlock(&watchdog_lock);
276}
277
278static inline void clocksource_start_watchdog(void)
279{
280 if (watchdog_running || !watchdog || list_empty(&watchdog_list))
281 return;
282 init_timer(&watchdog_timer);
283 watchdog_timer.function = clocksource_watchdog;
284 watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
285 add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
286 watchdog_running = 1;
287}
288
289static inline void clocksource_stop_watchdog(void)
290{
291 if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
292 return;
293 del_timer(&watchdog_timer);
294 watchdog_running = 0;
295}
296
297static inline void clocksource_reset_watchdog(void)
298{
299 struct clocksource *cs;
300
301 list_for_each_entry(cs, &watchdog_list, wd_list)
302 cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
303}
304
305static void clocksource_resume_watchdog(void)
306{
307 atomic_inc(&watchdog_reset_pending);
308}
309
310static void clocksource_enqueue_watchdog(struct clocksource *cs)
311{
312 unsigned long flags;
313
314 spin_lock_irqsave(&watchdog_lock, flags);
315 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
316 /* cs is a clocksource to be watched. */
317 list_add(&cs->wd_list, &watchdog_list);
318 cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
319 } else {
320 /* cs is a watchdog. */
321 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
322 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
323 /* Pick the best watchdog. */
324 if (!watchdog || cs->rating > watchdog->rating) {
325 watchdog = cs;
326 /* Reset watchdog cycles */
327 clocksource_reset_watchdog();
328 }
329 }
330 /* Check if the watchdog timer needs to be started. */
331 clocksource_start_watchdog();
332 spin_unlock_irqrestore(&watchdog_lock, flags);
333}
334
335static void clocksource_dequeue_watchdog(struct clocksource *cs)
336{
337 unsigned long flags;
338
339 spin_lock_irqsave(&watchdog_lock, flags);
340 if (cs != watchdog) {
341 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
342 /* cs is a watched clocksource. */
343 list_del_init(&cs->wd_list);
344 /* Check if the watchdog timer needs to be stopped. */
345 clocksource_stop_watchdog();
346 }
347 }
348 spin_unlock_irqrestore(&watchdog_lock, flags);
349}
350
351static int __clocksource_watchdog_kthread(void)
352{
353 struct clocksource *cs, *tmp;
354 unsigned long flags;
355 LIST_HEAD(unstable);
356 int select = 0;
357
358 spin_lock_irqsave(&watchdog_lock, flags);
359 list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
360 if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
361 list_del_init(&cs->wd_list);
362 list_add(&cs->wd_list, &unstable);
363 select = 1;
364 }
365 if (cs->flags & CLOCK_SOURCE_RESELECT) {
366 cs->flags &= ~CLOCK_SOURCE_RESELECT;
367 select = 1;
368 }
369 }
370 /* Check if the watchdog timer needs to be stopped. */
371 clocksource_stop_watchdog();
372 spin_unlock_irqrestore(&watchdog_lock, flags);
373
374 /* Needs to be done outside of watchdog lock */
375 list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
376 list_del_init(&cs->wd_list);
377 __clocksource_change_rating(cs, 0);
378 }
379 return select;
380}
381
382static int clocksource_watchdog_kthread(void *data)
383{
384 mutex_lock(&clocksource_mutex);
385 if (__clocksource_watchdog_kthread())
386 clocksource_select();
387 mutex_unlock(&clocksource_mutex);
388 return 0;
389}
390
391static bool clocksource_is_watchdog(struct clocksource *cs)
392{
393 return cs == watchdog;
394}
395
396#else /* CONFIG_CLOCKSOURCE_WATCHDOG */
397
398static void clocksource_enqueue_watchdog(struct clocksource *cs)
399{
400 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
401 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
402}
403
404static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
405static inline void clocksource_resume_watchdog(void) { }
406static inline int __clocksource_watchdog_kthread(void) { return 0; }
407static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
408void clocksource_mark_unstable(struct clocksource *cs) { }
409
410#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
411
412/**
413 * clocksource_suspend - suspend the clocksource(s)
414 */
415void clocksource_suspend(void)
416{
417 struct clocksource *cs;
418
419 list_for_each_entry_reverse(cs, &clocksource_list, list)
420 if (cs->suspend)
421 cs->suspend(cs);
422}
423
424/**
425 * clocksource_resume - resume the clocksource(s)
426 */
427void clocksource_resume(void)
428{
429 struct clocksource *cs;
430
431 list_for_each_entry(cs, &clocksource_list, list)
432 if (cs->resume)
433 cs->resume(cs);
434
435 clocksource_resume_watchdog();
436}
437
438/**
439 * clocksource_touch_watchdog - Update watchdog
440 *
441 * Update the watchdog after exception contexts such as kgdb so as not
442 * to incorrectly trip the watchdog. This might fail when the kernel
443 * was stopped in code which holds watchdog_lock.
444 */
445void clocksource_touch_watchdog(void)
446{
447 clocksource_resume_watchdog();
448}
449
450/**
451 * clocksource_max_adjustment - Returns max adjustment amount
452 * @cs: Pointer to clocksource
453 *
454 */
455static u32 clocksource_max_adjustment(struct clocksource *cs)
456{
457 u64 ret;
458 /*
459	 * We won't try to correct for more than 11% adjustments (110,000 ppm).
460 */
461 ret = (u64)cs->mult * 11;
462	do_div(ret, 100);
463 return (u32)ret;
464}
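
/*
 * Example (reusing the hypothetical mult = 1677721600 from the
 * clocks_calc_mult_shift example): maxadj = 1677721600 * 11 / 100
 * = 184549376, i.e. NTP-style frequency correction of this clocksource
 * is bounded to roughly +/- 11% of its nominal rate.
 */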
465
466/**
467 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
468 * @mult: cycle to nanosecond multiplier
469 * @shift: cycle to nanosecond divisor (power of two)
470 * @maxadj: maximum adjustment value to mult (~11%)
471 * @mask: bitmask for two's complement subtraction of non 64 bit counters
472 *
473 * NOTE: This function includes a safety margin of 50%, so that bad clock values
474 * can be detected.
475 */
476u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask)
477{
478 u64 max_nsecs, max_cycles;
479
480 /*
481 * Calculate the maximum number of cycles that we can pass to the
482 * cyc2ns() function without overflowing a 64-bit result.
483 */
484 max_cycles = ULLONG_MAX;
485 do_div(max_cycles, mult+maxadj);
486
487 /*
488	 * The actual maximum number of cycles for which we can defer the
489	 * clocksource is determined by the minimum of max_cycles and mask.
490 * Note: Here we subtract the maxadj to make sure we don't sleep for
491 * too long if there's a large negative adjustment.
492 */
493 max_cycles = min(max_cycles, mask);
494 max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);
495
496 /* Return 50% of the actual maximum, so we can detect bad values */
497 max_nsecs >>= 1;
498
499 return max_nsecs;
500}
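
/*
 * Worked example (hypothetical 32 bit counter at 10 MHz: mult = 1677721600,
 * shift = 24, maxadj = 184549376, mask = CLOCKSOURCE_MASK(32)):
 * 2^64 / (mult + maxadj) is ~9.9e9 cycles, but the 32 bit mask limits
 * max_cycles to 4294967295. Converting that with (mult - maxadj) gives
 * ~382 s, and the 50% margin halves it to ~191 s, the longest stretch
 * this clocksource may go unread before a wrap could slip by undetected.
 */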
501
502/**
503 * clocksource_max_deferment - Returns max time the clocksource should be deferred
504 * @cs: Pointer to clocksource
505 *
506 */
507static u64 clocksource_max_deferment(struct clocksource *cs)
508{
509 u64 max_nsecs;
510
511 max_nsecs = clocks_calc_max_nsecs(cs->mult, cs->shift, cs->maxadj,
512 cs->mask);
513 return max_nsecs;
514}
515
516#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
517
518static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
519{
520 struct clocksource *cs;
521
522 if (!finished_booting || list_empty(&clocksource_list))
523 return NULL;
524
525 /*
526 * We pick the clocksource with the highest rating. If oneshot
527 * mode is active, we pick the highres valid clocksource with
528 * the best rating.
529 */
530 list_for_each_entry(cs, &clocksource_list, list) {
531 if (skipcur && cs == curr_clocksource)
532 continue;
533 if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
534 continue;
535 return cs;
536 }
537 return NULL;
538}
539
540static void __clocksource_select(bool skipcur)
541{
542 bool oneshot = tick_oneshot_mode_active();
543 struct clocksource *best, *cs;
544
545 /* Find the best suitable clocksource */
546 best = clocksource_find_best(oneshot, skipcur);
547 if (!best)
548 return;
549
550 /* Check for the override clocksource. */
551 list_for_each_entry(cs, &clocksource_list, list) {
552 if (skipcur && cs == curr_clocksource)
553 continue;
554 if (strcmp(cs->name, override_name) != 0)
555 continue;
556 /*
557 * Check to make sure we don't switch to a non-highres
558 * capable clocksource if the tick code is in oneshot
559 * mode (highres or nohz)
560 */
561 if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
562 /* Override clocksource cannot be used. */
563 printk(KERN_WARNING "Override clocksource %s is not "
564 "HRT compatible. Cannot switch while in "
565 "HRT/NOHZ mode\n", cs->name);
566 override_name[0] = 0;
567 } else
568 /* Override clocksource can be used. */
569 best = cs;
570 break;
571 }
572
573 if (curr_clocksource != best && !timekeeping_notify(best)) {
574 pr_info("Switched to clocksource %s\n", best->name);
575 curr_clocksource = best;
576 }
577}
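
/*
 * Example: writing "jiffies" (which never carries
 * CLOCK_SOURCE_VALID_FOR_HRES) into override_name while the tick layer
 * runs in oneshot mode triggers the warning above and clears the
 * override; the best hres-capable clocksource stays selected.
 */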
578
579/**
580 * clocksource_select - Select the best clocksource available
581 *
582 * Private function. Must hold clocksource_mutex when called.
583 *
584 * Select the clocksource with the best rating, or the clocksource,
585 * which is selected by userspace override.
586 */
587static void clocksource_select(void)
588{
589 return __clocksource_select(false);
590}
591
592static void clocksource_select_fallback(void)
593{
594 return __clocksource_select(true);
595}
596
597#else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
598
599static inline void clocksource_select(void) { }
600static inline void clocksource_select_fallback(void) { }
601
602#endif
603
604/*
605 * clocksource_done_booting - Called near the end of core bootup
606 *
607 * Hack to avoid lots of clocksource churn at boot time.
608 * We use fs_initcall because we want this to start before
609 * device_initcall but after subsys_initcall.
610 */
611static int __init clocksource_done_booting(void)
612{
613 mutex_lock(&clocksource_mutex);
614 curr_clocksource = clocksource_default_clock();
615 finished_booting = 1;
616 /*
617 * Run the watchdog first to eliminate unstable clock sources
618 */
619 __clocksource_watchdog_kthread();
620 clocksource_select();
621 mutex_unlock(&clocksource_mutex);
622 return 0;
623}
624fs_initcall(clocksource_done_booting);
625
626/*
627 * Enqueue the clocksource sorted by rating
628 */
629static void clocksource_enqueue(struct clocksource *cs)
630{
631 struct list_head *entry = &clocksource_list;
632 struct clocksource *tmp;
633
634 list_for_each_entry(tmp, &clocksource_list, list)
635		/* Keep track of the place where to insert */
636 if (tmp->rating >= cs->rating)
637 entry = &tmp->list;
638 list_add(&cs->list, entry);
639}
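
/*
 * Example ordering (typical x86 ratings, for illustration only): after
 * registering tsc (300), hpet (250) and acpi_pm (200) in any order,
 * clocksource_list reads tsc -> hpet -> acpi_pm, so
 * clocksource_find_best() can simply return the first acceptable entry.
 */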
640
641/**
642 * __clocksource_updatefreq_scale - Used to update clocksource with new freq
643 * @cs: clocksource to be registered
644 * @scale: Scale factor multiplied against freq to get clocksource hz
645 * @freq: clocksource frequency (cycles per second) divided by scale
646 *
647 * This should only be called from the clocksource->enable() method.
648 *
649 * This *SHOULD NOT* be called directly! Please use the
650 * clocksource_updatefreq_hz() or clocksource_updatefreq_khz() helper functions.
651 */
652void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
653{
654 u64 sec;
655 /*
656 * Calc the maximum number of seconds which we can run before
657 * wrapping around. For clocksources which have a mask > 32bit
658 * we need to limit the max sleep time to have a good
659 * conversion precision. 10 minutes is still a reasonable
660 * amount. That results in a shift value of 24 for a
661 * clocksource with mask >= 40bit and f >= 4GHz. That maps to
662 * ~ 0.06ppm granularity for NTP.
663 */
664 sec = cs->mask;
665 do_div(sec, freq);
666 do_div(sec, scale);
667 if (!sec)
668 sec = 1;
669 else if (sec > 600 && cs->mask > UINT_MAX)
670 sec = 600;
671
672 clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
673 NSEC_PER_SEC / scale, sec * scale);
674
675 /*
676 * Ensure clocksources that have large 'mult' values don't overflow
677 * when adjusted.
678 */
679 cs->maxadj = clocksource_max_adjustment(cs);
680 while ((cs->mult + cs->maxadj < cs->mult)
681 || (cs->mult - cs->maxadj > cs->mult)) {
682 cs->mult >>= 1;
683 cs->shift--;
684 cs->maxadj = clocksource_max_adjustment(cs);
685 }
686
687 cs->max_idle_ns = clocksource_max_deferment(cs);
688}
689EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
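
/*
 * Example of the clamp above (hypothetical 56 bit counter at 100 MHz):
 * mask / freq is ~7.2e8 seconds, so with mask > UINT_MAX the range is
 * clamped to 600 s before calling clocks_calc_mult_shift(), which then
 * settles on mult = 167772160 and shift = 24 (exactly 10 ns per cycle)
 * instead of trading precision for a conversion range nobody needs.
 */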
690
691/**
692 * __clocksource_register_scale - Used to install new clocksources
693 * @cs: clocksource to be registered
694 * @scale: Scale factor multiplied against freq to get clocksource hz
695 * @freq: clocksource frequency (cycles per second) divided by scale
696 *
697 * Returns -EBUSY if registration fails, zero otherwise.
698 *
699 * This *SHOULD NOT* be called directly! Please use the
700 * clocksource_register_hz() or clocksource_register_khz() helper functions.
701 */
702int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
703{
704
705 /* Initialize mult/shift and max_idle_ns */
706 __clocksource_updatefreq_scale(cs, scale, freq);
707
708 /* Add clocksource to the clocksource list */
709 mutex_lock(&clocksource_mutex);
710 clocksource_enqueue(cs);
711 clocksource_enqueue_watchdog(cs);
712 clocksource_select();
713 mutex_unlock(&clocksource_mutex);
714 return 0;
715}
716EXPORT_SYMBOL_GPL(__clocksource_register_scale);
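
/*
 * Registration sketch (hypothetical driver, illustrative only): a
 * memory-mapped free-running 32 bit counter at 10 MHz would normally go
 * through the clocksource_register_hz() wrapper, which passes scale = 1
 * and the raw frequency to __clocksource_register_scale():
 *
 *	static cycle_t example_read(struct clocksource *cs)
 *	{
 *		return (cycle_t)readl(example_counter_base);
 *	}
 *
 *	static struct clocksource example_cs = {
 *		.name	= "example",
 *		.rating	= 200,
 *		.read	= example_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	clocksource_register_hz(&example_cs, 10000000);
 */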
717
718
719/**
720 * clocksource_register - Used to install new clocksources
721 * @cs: clocksource to be registered
722 *
723 * Returns -EBUSY if registration fails, zero otherwise.
724 */
725int clocksource_register(struct clocksource *cs)
726{
727 /* calculate max adjustment for given mult/shift */
728 cs->maxadj = clocksource_max_adjustment(cs);
729 WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
730 "Clocksource %s might overflow on 11%% adjustment\n",
731 cs->name);
732
733 /* calculate max idle time permitted for this clocksource */
734 cs->max_idle_ns = clocksource_max_deferment(cs);
735
736 mutex_lock(&clocksource_mutex);
737 clocksource_enqueue(cs);
738 clocksource_enqueue_watchdog(cs);
739 clocksource_select();
740 mutex_unlock(&clocksource_mutex);
741 return 0;
742}
743EXPORT_SYMBOL(clocksource_register);
744
745static void __clocksource_change_rating(struct clocksource *cs, int rating)
746{
747 list_del(&cs->list);
748 cs->rating = rating;
749 clocksource_enqueue(cs);
750}
751
752/**
753 * clocksource_change_rating - Change the rating of a registered clocksource
754 * @cs: clocksource to be changed
755 * @rating: new rating
756 */
757void clocksource_change_rating(struct clocksource *cs, int rating)
758{
759 mutex_lock(&clocksource_mutex);
760 __clocksource_change_rating(cs, rating);
761 clocksource_select();
762 mutex_unlock(&clocksource_mutex);
763}
764EXPORT_SYMBOL(clocksource_change_rating);
765
766/*
767 * Unbind clocksource @cs. Called with clocksource_mutex held
768 */
769static int clocksource_unbind(struct clocksource *cs)
770{
771 /*
772 * I really can't convince myself to support this on hardware
773 * designed by lobotomized monkeys.
774 */
775 if (clocksource_is_watchdog(cs))
776 return -EBUSY;
777
778 if (cs == curr_clocksource) {
779 /* Select and try to install a replacement clock source */
780 clocksource_select_fallback();
781 if (curr_clocksource == cs)
782 return -EBUSY;
783 }
784 clocksource_dequeue_watchdog(cs);
785 list_del_init(&cs->list);
786 return 0;
787}
788
789/**
790 * clocksource_unregister - remove a registered clocksource
791 * @cs: clocksource to be unregistered
792 */
793int clocksource_unregister(struct clocksource *cs)
794{
795 int ret = 0;
796
797 mutex_lock(&clocksource_mutex);
798 if (!list_empty(&cs->list))
799 ret = clocksource_unbind(cs);
800 mutex_unlock(&clocksource_mutex);
801 return ret;
802}
803EXPORT_SYMBOL(clocksource_unregister);
804
805#ifdef CONFIG_SYSFS
806/**
807 * sysfs_show_current_clocksources - sysfs interface for current clocksource
808 * @dev: unused
809 * @attr: unused
810 * @buf: char buffer to be filled with clocksource list
811 *
812 * Provides sysfs interface for listing current clocksource.
813 */
814static ssize_t
815sysfs_show_current_clocksources(struct device *dev,
816 struct device_attribute *attr, char *buf)
817{
818 ssize_t count = 0;
819
820 mutex_lock(&clocksource_mutex);
821 count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
822 mutex_unlock(&clocksource_mutex);
823
824 return count;
825}
826
827ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
828{
829 size_t ret = cnt;
830
831 /* strings from sysfs write are not 0 terminated! */
832 if (!cnt || cnt >= CS_NAME_LEN)
833 return -EINVAL;
834
835	/* strip off \n: */
836 if (buf[cnt-1] == '\n')
837 cnt--;
838 if (cnt > 0)
839 memcpy(dst, buf, cnt);
840 dst[cnt] = 0;
841 return ret;
842}
843
844/**
845 * sysfs_override_clocksource - interface for manually overriding clocksource
846 * @dev: unused
847 * @attr: unused
848 * @buf: name of override clocksource
849 * @count: length of buffer
850 *
851 * Takes input from sysfs interface for manually overriding the default
852 * clocksource selection.
853 */
854static ssize_t sysfs_override_clocksource(struct device *dev,
855 struct device_attribute *attr,
856 const char *buf, size_t count)
857{
858 ssize_t ret;
859
860 mutex_lock(&clocksource_mutex);
861
862 ret = sysfs_get_uname(buf, override_name, count);
863 if (ret >= 0)
864 clocksource_select();
865
866 mutex_unlock(&clocksource_mutex);
867
868 return ret;
869}
870
871/**
872 * sysfs_unbind_clocksource - interface for manually unbinding clocksource
873 * @dev: unused
874 * @attr: unused
875 * @buf: unused
876 * @count: length of buffer
877 *
878 * Takes input from sysfs interface for manually unbinding a clocksource.
879 */
880static ssize_t sysfs_unbind_clocksource(struct device *dev,
881 struct device_attribute *attr,
882 const char *buf, size_t count)
883{
884 struct clocksource *cs;
885 char name[CS_NAME_LEN];
886 ssize_t ret;
887
888 ret = sysfs_get_uname(buf, name, count);
889 if (ret < 0)
890 return ret;
891
892 ret = -ENODEV;
893 mutex_lock(&clocksource_mutex);
894 list_for_each_entry(cs, &clocksource_list, list) {
895 if (strcmp(cs->name, name))
896 continue;
897 ret = clocksource_unbind(cs);
898 break;
899 }
900 mutex_unlock(&clocksource_mutex);
901
902 return ret ? ret : count;
903}
904
905/**
906 * sysfs_show_available_clocksources - sysfs interface for listing clocksources
907 * @dev: unused
908 * @attr: unused
909 * @buf: char buffer to be filled with clocksource list
910 *
911 * Provides sysfs interface for listing registered clocksources
912 */
913static ssize_t
914sysfs_show_available_clocksources(struct device *dev,
915 struct device_attribute *attr,
916 char *buf)
917{
918 struct clocksource *src;
919 ssize_t count = 0;
920
921 mutex_lock(&clocksource_mutex);
922 list_for_each_entry(src, &clocksource_list, list) {
923 /*
924 * Don't show non-HRES clocksource if the tick code is
925 * in one shot mode (highres=on or nohz=on)
926 */
927 if (!tick_oneshot_mode_active() ||
928 (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
929 count += snprintf(buf + count,
930 max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
931 "%s ", src->name);
932 }
933 mutex_unlock(&clocksource_mutex);
934
935 count += snprintf(buf + count,
936 max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");
937
938 return count;
939}
940
941/*
942 * Sysfs setup bits:
943 */
944static DEVICE_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
945 sysfs_override_clocksource);
946
947static DEVICE_ATTR(unbind_clocksource, 0200, NULL, sysfs_unbind_clocksource);
948
949static DEVICE_ATTR(available_clocksource, 0444,
950 sysfs_show_available_clocksources, NULL);
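
/*
 * These attributes appear under the clocksource system device, e.g.
 * (assuming a standard sysfs mount):
 *
 *	# cat /sys/devices/system/clocksource/clocksource0/available_clocksource
 *	tsc hpet acpi_pm
 *	# echo hpet > /sys/devices/system/clocksource/clocksource0/current_clocksource
 *	# cat /sys/devices/system/clocksource/clocksource0/current_clocksource
 *	hpet
 */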
951
952static struct bus_type clocksource_subsys = {
953 .name = "clocksource",
954 .dev_name = "clocksource",
955};
956
957static struct device device_clocksource = {
958 .id = 0,
959 .bus = &clocksource_subsys,
960};
961
962static int __init init_clocksource_sysfs(void)
963{
964 int error = subsys_system_register(&clocksource_subsys, NULL);
965
966 if (!error)
967 error = device_register(&device_clocksource);
968 if (!error)
969 error = device_create_file(
970 &device_clocksource,
971 &dev_attr_current_clocksource);
972 if (!error)
973 error = device_create_file(&device_clocksource,
974 &dev_attr_unbind_clocksource);
975 if (!error)
976 error = device_create_file(
977 &device_clocksource,
978 &dev_attr_available_clocksource);
979 return error;
980}
981
982device_initcall(init_clocksource_sysfs);
983#endif /* CONFIG_SYSFS */
984
985/**
986 * boot_override_clocksource - boot clock override
987 * @str: override name
988 *
989 * Takes a clocksource= boot argument and uses it
990 * as the clocksource override name.
991 */
992static int __init boot_override_clocksource(char* str)
993{
994 mutex_lock(&clocksource_mutex);
995 if (str)
996 strlcpy(override_name, str, sizeof(override_name));
997 mutex_unlock(&clocksource_mutex);
998 return 1;
999}
1000
1001__setup("clocksource=", boot_override_clocksource);
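
/*
 * Example: booting with "clocksource=acpi_pm" on the kernel command line
 * records the name in override_name before any clocksource registers, so
 * clocksource_select() switches to acpi_pm as soon as it is registered
 * and usable in the current tick mode.
 */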
1002
1003/**
1004 * boot_override_clock - Compatibility layer for deprecated boot option
1005 * @str: override name
1006 *
1007 * DEPRECATED! Takes a clock= boot argument and uses it
1008 * as the clocksource override name
1009 */
1010static int __init boot_override_clock(char* str)
1011{
1012 if (!strcmp(str, "pmtmr")) {
1013 printk("Warning: clock=pmtmr is deprecated. "
1014 "Use clocksource=acpi_pm.\n");
1015 return boot_override_clocksource("acpi_pm");
1016 }
1017 printk("Warning! clock= boot option is deprecated. "
1018 "Use clocksource=xyz\n");
1019 return boot_override_clocksource(str);
1020}
1021
1022__setup("clock=", boot_override_clock);