/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/device.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
/* Protection for unbind operations */
static DEFINE_MUTEX(clockevents_mutex);

struct ce_unbind {
	struct clock_event_device *ce;
	int res;
};

static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
			bool ismax)
{
	u64 clc = (u64) latch << evt->shift;
	u64 rnd;

	if (unlikely(!evt->mult)) {
		evt->mult = 1;
		WARN_ON(1);
	}
	rnd = (u64) evt->mult - 1;

	/*
	 * Upper bound sanity check. If the backwards conversion does
	 * not equal latch, we know that the above shift overflowed.
	 */
	if ((clc >> evt->shift) != (u64)latch)
		clc = ~0ULL;

	/*
	 * Scaled math oddities:
	 *
	 * For mult <= (1 << shift) we can safely add mult - 1 to
	 * prevent integer rounding loss. So the backwards conversion
	 * from nsec to device ticks will be correct.
	 *
	 * For mult > (1 << shift), i.e. device frequency is > 1GHz we
	 * need to be careful. Adding mult - 1 will result in a value
	 * which when converted back to device ticks can be larger
	 * than latch by up to (mult - 1) >> shift. For the min_delta
	 * calculation we still want to apply this in order to stay
	 * above the minimum device ticks limit. For the upper limit
	 * we would end up with a latch value larger than the upper
	 * limit of the device, so we omit the add to stay below the
	 * device upper boundary.
	 *
	 * Also omit the add if it would overflow the u64 boundary.
	 */
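	/*
	 * Illustrative example (hypothetical numbers): for a device whose
	 * mult/shift pair approximates freq/NSEC_PER_SEC = 1/1000 (i.e. a
	 * 1 MHz timer), a latch of one device tick converts to roughly
	 * (1 << shift) / mult ~= 1000 ns. Adding rnd = mult - 1 before the
	 * division below merely rounds the result up, so that converting
	 * those nanoseconds back to ticks does not drop below latch (for
	 * mult <= 1 << shift), as described above.
	 */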
	if ((~0ULL - clc > rnd) &&
	    (!ismax || evt->mult <= (1ULL << evt->shift)))
		clc += rnd;

	do_div(clc, evt->mult);

	/* Deltas less than 1usec are pointless noise */
	return clc > 1000 ? clc : 1000;
}

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch:	value to convert
 * @evt:	pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
	return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);

static int __clockevents_set_mode(struct clock_event_device *dev,
				  enum clock_event_mode mode)
{
	/* Transition with legacy set_mode() callback */
	if (dev->set_mode) {
		/* Legacy callback doesn't support new modes */
		if (mode > CLOCK_EVT_MODE_RESUME)
			return -ENOSYS;
		dev->set_mode(mode, dev);
		return 0;
	}

	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
		return 0;

	/* Transition with new mode-specific callbacks */
	switch (mode) {
	case CLOCK_EVT_MODE_UNUSED:
		/*
		 * This is an internal state, which is guaranteed to go from
		 * SHUTDOWN to UNUSED. No driver interaction required.
		 */
		return 0;

	case CLOCK_EVT_MODE_SHUTDOWN:
		return dev->set_mode_shutdown(dev);

	case CLOCK_EVT_MODE_PERIODIC:
		/* Core internal bug */
		if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
			return -ENOSYS;
		return dev->set_mode_periodic(dev);

	case CLOCK_EVT_MODE_ONESHOT:
		/* Core internal bug */
		if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
			return -ENOSYS;
		return dev->set_mode_oneshot(dev);

	case CLOCK_EVT_MODE_RESUME:
		/* Optional callback */
		if (dev->set_mode_resume)
			return dev->set_mode_resume(dev);
		else
			return 0;

	default:
		return -ENOSYS;
	}
}

/**
 * clockevents_set_mode - set the operating mode of a clock event device
 * @dev:	device to modify
 * @mode:	new mode
 *
 * Must be called with interrupts disabled !
 */
void clockevents_set_mode(struct clock_event_device *dev,
			  enum clock_event_mode mode)
{
	if (dev->mode != mode) {
		if (__clockevents_set_mode(dev, mode))
			return;

		dev->mode = mode;

		/*
		 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
		 * on it, so fix it up and emit a warning:
		 */
		if (mode == CLOCK_EVT_MODE_ONESHOT) {
			if (unlikely(!dev->mult)) {
				dev->mult = 1;
				WARN_ON(1);
			}
		}
	}
}

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev:	device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
	dev->next_event.tv64 = KTIME_MAX;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffie */
#define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)
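/*
 * Example: with HZ = 250 (a common config value) the limit above evaluates
 * to 1000000000 / 250 = 4000000 ns, i.e. one tick period.
 */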

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev:	device to increase the minimum delta
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
	/* Nothing to do if we already reached the limit */
	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
		printk_deferred(KERN_WARNING
				"CE: Reprogramming failure. Giving up\n");
		dev->next_event.tv64 = KTIME_MAX;
		return -ETIME;
	}

	if (dev->min_delta_ns < 5000)
		dev->min_delta_ns = 5000;
	else
		dev->min_delta_ns += dev->min_delta_ns >> 1;

	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
		dev->min_delta_ns = MIN_DELTA_LIMIT;

	printk_deferred(KERN_WARNING
			"CE: %s increased min_delta_ns to %llu nsec\n",
			dev->name ? dev->name : "?",
			(unsigned long long) dev->min_delta_ns);
	return 0;
}
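/*
 * Illustrative progression (hypothetical numbers): a device that keeps
 * failing to reprogram has its min_delta_ns bumped along roughly
 * 5000 -> 7500 -> 11250 -> 16875 -> ... ns (each step adds 50%), until the
 * value is clamped to MIN_DELTA_LIMIT and further failures give up with
 * -ETIME.
 */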

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;
	int i;

	for (i = 0;;) {
		delta = dev->min_delta_ns;
		dev->next_event = ktime_add_ns(ktime_get(), delta);

		if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
			return 0;

		dev->retries++;
		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
		if (dev->set_next_event((unsigned long) clc, dev) == 0)
			return 0;

		if (++i > 2) {
			/*
			 * We tried 3 times to program the device with the
			 * given min_delta_ns. Try to increase the minimum
			 * delta, if that fails as well get out of here.
			 */
			if (clockevents_increase_min_delta(dev))
				return -ETIME;
			i = 0;
		}
	}
}

#else /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;

	delta = dev->min_delta_ns;
	dev->next_event = ktime_add_ns(ktime_get(), delta);

	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
		return 0;

	dev->retries++;
	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	return dev->set_next_event((unsigned long) clc, dev);
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev:	device to program
 * @expires:	absolute expiry time (monotonic clock)
 * @force:	program minimum delay if expires can not be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
			      bool force)
{
	unsigned long long clc;
	int64_t delta;
	int rc;

	if (unlikely(expires.tv64 < 0)) {
		WARN_ON_ONCE(1);
		return -ETIME;
	}

	dev->next_event = expires;

	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
		return 0;

	/* Shortcut for clockevent devices that can deal with ktime. */
	if (dev->features & CLOCK_EVT_FEAT_KTIME)
		return dev->set_next_ktime(expires, dev);

	delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
	if (delta <= 0)
		return force ? clockevents_program_min_delta(dev) : -ETIME;

	delta = min(delta, (int64_t) dev->max_delta_ns);
	delta = max(delta, (int64_t) dev->min_delta_ns);

	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	rc = dev->set_next_event((unsigned long) clc, dev);

	return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}
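/*
 * Illustrative example (hypothetical numbers): for a 1 MHz device whose
 * mult/shift pair approximates freq/NSEC_PER_SEC = 1/1000, a requested
 * delta of 1000000 ns is clamped into [min_delta_ns, max_delta_ns] and
 * then converted above to roughly (1000000 * mult) >> shift ~= 1000
 * device ticks before being handed to set_next_event().
 */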

/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
	struct clock_event_device *dev;

	while (!list_empty(&clockevents_released)) {
		dev = list_entry(clockevents_released.next,
				 struct clock_event_device, list);
		list_del(&dev->list);
		list_add(&dev->list, &clockevent_devices);
		tick_check_new_device(dev);
	}
}

/*
 * Try to install a replacement clock event device
 */
static int clockevents_replace(struct clock_event_device *ced)
{
	struct clock_event_device *dev, *newdev = NULL;

	list_for_each_entry(dev, &clockevent_devices, list) {
		if (dev == ced || dev->mode != CLOCK_EVT_MODE_UNUSED)
			continue;

		if (!tick_check_replacement(newdev, dev))
			continue;

		if (!try_module_get(dev->owner))
			continue;

		if (newdev)
			module_put(newdev->owner);
		newdev = dev;
	}
	if (newdev) {
		tick_install_replacement(newdev);
		list_del_init(&ced->list);
	}
	return newdev ? 0 : -EBUSY;
}

/*
 * Called with clockevents_mutex and clockevents_lock held
 */
static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
{
	/* Fast track. Device is unused */
	if (ced->mode == CLOCK_EVT_MODE_UNUSED) {
		list_del_init(&ced->list);
		return 0;
	}

	return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
}

/*
 * SMP function call to unbind a device
 */
static void __clockevents_unbind(void *arg)
{
	struct ce_unbind *cu = arg;
	int res;

	raw_spin_lock(&clockevents_lock);
	res = __clockevents_try_unbind(cu->ce, smp_processor_id());
	if (res == -EAGAIN)
		res = clockevents_replace(cu->ce);
	cu->res = res;
	raw_spin_unlock(&clockevents_lock);
}

/*
 * Issues smp function call to unbind a per cpu device. Called with
 * clockevents_mutex held.
 */
static int clockevents_unbind(struct clock_event_device *ced, int cpu)
{
	struct ce_unbind cu = { .ce = ced, .res = -ENODEV };

	smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
	return cu.res;
}

/*
 * Unbind a clockevents device.
 */
int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
{
	int ret;

	mutex_lock(&clockevents_mutex);
	ret = clockevents_unbind(ced, cpu);
	mutex_unlock(&clockevents_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(clockevents_unbind_device);

/* Sanity check of mode transition callbacks */
static int clockevents_sanity_check(struct clock_event_device *dev)
{
	/* Legacy set_mode() callback */
	if (dev->set_mode) {
		/* We shouldn't be supporting new modes now */
		WARN_ON(dev->set_mode_periodic || dev->set_mode_oneshot ||
			dev->set_mode_shutdown || dev->set_mode_resume);
		return 0;
	}

	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
		return 0;

	/* New mode-specific callbacks */
	if (!dev->set_mode_shutdown)
		return -EINVAL;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !dev->set_mode_periodic)
		return -EINVAL;

	if ((dev->features & CLOCK_EVT_FEAT_ONESHOT) &&
	    !dev->set_mode_oneshot)
		return -EINVAL;

	return 0;
}

/**
 * clockevents_register_device - register a clock event device
 * @dev:	device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
	unsigned long flags;

	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
	BUG_ON(clockevents_sanity_check(dev));

	if (!dev->cpumask) {
		WARN_ON(num_possible_cpus() > 1);
		dev->cpumask = cpumask_of(smp_processor_id());
	}

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	list_add(&dev->list, &clockevent_devices);
	tick_check_new_device(dev);
	clockevents_notify_released();

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

void clockevents_config(struct clock_event_device *dev, u32 freq)
{
	u64 sec;

	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return;

	/*
	 * Calculate the maximum number of seconds we can sleep. Limit
	 * to 10 minutes for hardware which can program more than
	 * 32bit ticks so we still get reasonable conversion values.
	 */
	sec = dev->max_delta_ticks;
	do_div(sec, freq);
	if (!sec)
		sec = 1;
	else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
		sec = 600;

	clockevents_calc_mult_shift(dev, freq, sec);
	dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
	dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}
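/*
 * Illustrative example (hypothetical device): a 1 MHz timer with a full
 * 32 bit counter has max_delta_ticks = 0xffffffff, so sec works out to
 * about 4294; since max_delta_ticks does not exceed UINT_MAX the 600
 * second cap is not applied, and mult/shift are then sized for that range
 * before min/max_delta_ticks are converted to nanoseconds.
 */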

/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev:	device to register
 * @freq:	The clock frequency
 * @min_delta:	The minimum clock ticks to program in oneshot mode
 * @max_delta:	The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
				     u32 freq, unsigned long min_delta,
				     unsigned long max_delta)
{
	dev->min_delta_ticks = min_delta;
	dev->max_delta_ticks = max_delta;
	clockevents_config(dev, freq);
	clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);
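/*
 * Hypothetical usage sketch (all names and numbers below are made up for
 * illustration): a timer driver typically fills in the callbacks matching
 * its features and registers once, e.g.
 *
 *	static struct clock_event_device my_clockevent = {
 *		.name			= "my-timer",
 *		.features		= CLOCK_EVT_FEAT_ONESHOT,
 *		.rating			= 300,
 *		.set_next_event		= my_timer_set_next_event,
 *		.set_mode_shutdown	= my_timer_shutdown,
 *		.set_mode_oneshot	= my_timer_set_oneshot,
 *	};
 *
 *	my_clockevent.cpumask = cpumask_of(smp_processor_id());
 *	clockevents_config_and_register(&my_clockevent, 1000000, 0xf,
 *					0x7fffffff);
 */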

int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	clockevents_config(dev, freq);

	if (dev->mode == CLOCK_EVT_MODE_ONESHOT)
		return clockevents_program_event(dev, dev->next_event, false);

	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return __clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);

	return 0;
}

/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev:	device to modify
 * @freq:	new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events. If called for the broadcast device the core takes
 * care of serialization.
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = tick_broadcast_update_freq(dev, freq);
	if (ret == -ENODEV)
		ret = __clockevents_update_freq(dev, freq);
	local_irq_restore(flags);
	return ret;
}

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old:	device to release (can be NULL)
 * @new:	device to request (can be NULL)
 *
 * Called from the notifier chain. clockevents_lock is held already
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	unsigned long flags;

	local_irq_save(flags);
	/*
	 * Caller releases a clock event device. We queue it into the
	 * released list and do a notify add later.
	 */
	if (old) {
		module_put(old->owner);
		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
		list_del(&old->list);
		list_add(&old->list, &clockevents_released);
	}

	if (new) {
		BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
		clockevents_shutdown(new);
	}
	local_irq_restore(flags);
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
	struct clock_event_device *dev;

	list_for_each_entry_reverse(dev, &clockevent_devices, list)
		if (dev->suspend)
			dev->suspend(dev);
}

/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
	struct clock_event_device *dev;

	list_for_each_entry(dev, &clockevent_devices, list)
		if (dev->resume)
			dev->resume(dev);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS
/**
 * clockevents_notify - notification about relevant events
 * Returns 0 on success, any other value on error
 */
int clockevents_notify(unsigned long reason, void *arg)
{
	struct clock_event_device *dev, *tmp;
	unsigned long flags;
	int cpu, ret = 0;

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	switch (reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		tick_broadcast_on_off(reason, arg);
		break;

	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
		ret = tick_broadcast_oneshot_control(reason);
		break;

	case CLOCK_EVT_NOTIFY_CPU_DYING:
		tick_handover_do_timer(arg);
		break;

	case CLOCK_EVT_NOTIFY_SUSPEND:
		tick_suspend();
		tick_suspend_broadcast();
		break;

	case CLOCK_EVT_NOTIFY_RESUME:
		tick_resume();
		break;

	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		tick_shutdown_broadcast_oneshot(arg);
		tick_shutdown_broadcast(arg);
		tick_shutdown(arg);
		/*
		 * Unregister the clock event devices which were
		 * released from the users in the notify chain.
		 */
		list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
			list_del(&dev->list);
		/*
		 * Now check whether the CPU has left unused per cpu devices
		 */
		cpu = *((int *)arg);
		list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
			if (cpumask_test_cpu(cpu, dev->cpumask) &&
			    cpumask_weight(dev->cpumask) == 1 &&
			    !tick_is_broadcast_device(dev)) {
				BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
				list_del(&dev->list);
			}
		}
		break;
	default:
		break;
	}
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(clockevents_notify);

#ifdef CONFIG_SYSFS
struct bus_type clockevents_subsys = {
	.name		= "clockevents",
	.dev_name	= "clockevent",
};

static DEFINE_PER_CPU(struct device, tick_percpu_dev);
static struct tick_device *tick_get_tick_dev(struct device *dev);

static ssize_t sysfs_show_current_tick_dev(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct tick_device *td;
	ssize_t count = 0;

	raw_spin_lock_irq(&clockevents_lock);
	td = tick_get_tick_dev(dev);
	if (td && td->evtdev)
		count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name);
	raw_spin_unlock_irq(&clockevents_lock);
	return count;
}
static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL);

/* We don't support the abomination of removable broadcast devices */
static ssize_t sysfs_unbind_tick_dev(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	char name[CS_NAME_LEN];
	ssize_t ret = sysfs_get_uname(buf, name, count);
	struct clock_event_device *ce;

	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clockevents_mutex);
	raw_spin_lock_irq(&clockevents_lock);
	list_for_each_entry(ce, &clockevent_devices, list) {
		if (!strcmp(ce->name, name)) {
			ret = __clockevents_try_unbind(ce, dev->id);
			break;
		}
	}
	raw_spin_unlock_irq(&clockevents_lock);
	/*
	 * We hold clockevents_mutex, so ce can't go away
	 */
	if (ret == -EAGAIN)
		ret = clockevents_unbind(ce, dev->id);
	mutex_unlock(&clockevents_mutex);
	return ret ? ret : count;
}
static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static struct device tick_bc_dev = {
	.init_name	= "broadcast",
	.id		= 0,
	.bus		= &clockevents_subsys,
};

static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return dev == &tick_bc_dev ? tick_get_broadcast_device() :
		&per_cpu(tick_cpu_device, dev->id);
}

static __init int tick_broadcast_init_sysfs(void)
{
	int err = device_register(&tick_bc_dev);

	if (!err)
		err = device_create_file(&tick_bc_dev, &dev_attr_current_device);
	return err;
}
#else
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return &per_cpu(tick_cpu_device, dev->id);
}
static inline int tick_broadcast_init_sysfs(void) { return 0; }
#endif

static int __init tick_init_sysfs(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct device *dev = &per_cpu(tick_percpu_dev, cpu);
		int err;

		dev->id = cpu;
		dev->bus = &clockevents_subsys;
		err = device_register(dev);
		if (!err)
			err = device_create_file(dev, &dev_attr_current_device);
		if (!err)
			err = device_create_file(dev, &dev_attr_unbind_device);
		if (err)
			return err;
	}
	return tick_broadcast_init_sysfs();
}

static int __init clockevents_init_sysfs(void)
{
	int err = subsys_system_register(&clockevents_subsys, NULL);

	if (!err)
		err = tick_init_sysfs();
	return err;
}
device_initcall(clockevents_init_sysfs);
#endif /* SYSFS */

#endif /* GENERIC_CLOCK_EVENTS */