/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/device.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
/* Protection for unbind operations */
static DEFINE_MUTEX(clockevents_mutex);

struct ce_unbind {
	struct clock_event_device *ce;
	int res;
};

static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
			bool ismax)
{
	u64 clc = (u64) latch << evt->shift;
	u64 rnd;

	if (unlikely(!evt->mult)) {
		evt->mult = 1;
		WARN_ON(1);
	}
	rnd = (u64) evt->mult - 1;

	/*
	 * Upper bound sanity check. If the backwards conversion does
	 * not equal latch, we know that the above shift overflowed.
	 */
	if ((clc >> evt->shift) != (u64)latch)
		clc = ~0ULL;

	/*
	 * Scaled math oddities:
	 *
	 * For mult <= (1 << shift) we can safely add mult - 1 to
	 * prevent integer rounding loss. So the backwards conversion
	 * from nsec to device ticks will be correct.
	 *
	 * For mult > (1 << shift), i.e. device frequency is > 1GHz we
	 * need to be careful. Adding mult - 1 will result in a value
	 * which when converted back to device ticks can be larger
	 * than latch by up to (mult - 1) >> shift. For the min_delta
	 * calculation we still want to apply this in order to stay
	 * above the minimum device ticks limit. For the upper limit
	 * we would end up with a latch value larger than the upper
	 * limit of the device, so we omit the add to stay below the
	 * device upper boundary.
	 *
	 * Also omit the add if it would overflow the u64 boundary.
	 */
	if ((~0ULL - clc > rnd) &&
	    (!ismax || evt->mult <= (1ULL << evt->shift)))
		clc += rnd;

	do_div(clc, evt->mult);

	/* Deltas less than 1usec are pointless noise */
	return clc > 1000 ? clc : 1000;
}

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch: value to convert
 * @evt: pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
	return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);
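
/*
 * Illustrative numbers (not from this file): for a hypothetical 1MHz
 * device, clockevents_calc_mult_shift() might pick shift = 31 and
 * mult = 2147484 (~2^31 / 1000), so that cyc = ns * mult >> shift.
 * cev_delta2ns() inverts that conversion: latch = 100 ticks gives
 * clc = ((100 << 31) + mult - 1) / mult ~= 100000, i.e. the expected
 * 100us for 100 ticks at 1MHz.
 */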

static int __clockevents_switch_state(struct clock_event_device *dev,
				      enum clock_event_state state)
{
	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
		return 0;

	/* Transition with new state-specific callbacks */
	switch (state) {
	case CLOCK_EVT_STATE_DETACHED:
		/* The clockevent device is getting replaced. Shut it down. */

	case CLOCK_EVT_STATE_SHUTDOWN:
		if (dev->set_state_shutdown)
			return dev->set_state_shutdown(dev);
		return 0;

	case CLOCK_EVT_STATE_PERIODIC:
		/* Core internal bug */
		if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
			return -ENOSYS;
		if (dev->set_state_periodic)
			return dev->set_state_periodic(dev);
		return 0;

	case CLOCK_EVT_STATE_ONESHOT:
		/* Core internal bug */
		if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
			return -ENOSYS;
		if (dev->set_state_oneshot)
			return dev->set_state_oneshot(dev);
		return 0;

	case CLOCK_EVT_STATE_ONESHOT_STOPPED:
		/* Core internal bug */
		if (WARN_ONCE(!clockevent_state_oneshot(dev),
			      "Current state: %d\n",
			      clockevent_get_state(dev)))
			return -EINVAL;

		if (dev->set_state_oneshot_stopped)
			return dev->set_state_oneshot_stopped(dev);
		else
			return -ENOSYS;

	default:
		return -ENOSYS;
	}
}
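
/*
 * For reference: the only transition this function itself polices is
 * that ONESHOT_STOPPED may only be entered from ONESHOT (the WARN_ONCE()
 * above); PERIODIC and ONESHOT additionally require the corresponding
 * CLOCK_EVT_FEAT_* bit. All other sequencing is the responsibility of
 * the tick core calling in here.
 */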

/**
 * clockevents_switch_state - set the operating state of a clock event device
 * @dev: device to modify
 * @state: new state
 *
 * Must be called with interrupts disabled !
 */
void clockevents_switch_state(struct clock_event_device *dev,
			      enum clock_event_state state)
{
	if (clockevent_get_state(dev) != state) {
		if (__clockevents_switch_state(dev, state))
			return;

		clockevent_set_state(dev, state);

		/*
		 * A nsec2cyc multiplier of 0 is invalid and we'd crash
		 * on it, so fix it up and emit a warning:
		 */
		if (clockevent_state_oneshot(dev)) {
			if (unlikely(!dev->mult)) {
				dev->mult = 1;
				WARN_ON(1);
			}
		}
	}
}

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev: device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
	dev->next_event = KTIME_MAX;
}

/**
 * clockevents_tick_resume - Resume the tick device before using it again
 * @dev: device to resume
 */
int clockevents_tick_resume(struct clock_event_device *dev)
{
	int ret = 0;

	if (dev->tick_resume)
		ret = dev->tick_resume(dev);

	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffie */
#define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev: device to increase the minimum delta
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
	/* Nothing to do if we already reached the limit */
	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
		printk_deferred(KERN_WARNING
				"CE: Reprogramming failure. Giving up\n");
		dev->next_event = KTIME_MAX;
		return -ETIME;
	}

	if (dev->min_delta_ns < 5000)
		dev->min_delta_ns = 5000;
	else
		dev->min_delta_ns += dev->min_delta_ns >> 1;

	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
		dev->min_delta_ns = MIN_DELTA_LIMIT;

	printk_deferred(KERN_WARNING
			"CE: %s increased min_delta_ns to %llu nsec\n",
			dev->name ? dev->name : "?",
			(unsigned long long) dev->min_delta_ns);
	return 0;
}
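
/*
 * Worked example (illustrative numbers): starting from a small
 * min_delta_ns, repeated failures bump the minimum to 5000, then
 * 7500, 11250, ... nsec (1.5x growth per step), until it is capped
 * at MIN_DELTA_LIMIT; with HZ=250 that cap is NSEC_PER_SEC / 250 =
 * 4000000 nsec, i.e. one jiffy.
 */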

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev: device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;
	int i;

	for (i = 0;;) {
		delta = dev->min_delta_ns;
		dev->next_event = ktime_add_ns(ktime_get(), delta);

		if (clockevent_state_shutdown(dev))
			return 0;

		dev->retries++;
		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
		if (dev->set_next_event((unsigned long) clc, dev) == 0)
			return 0;

		if (++i > 2) {
			/*
			 * We tried 3 times to program the device with the
			 * given min_delta_ns. Try to increase the minimum
			 * delta, if that fails as well get out of here.
			 */
			if (clockevents_increase_min_delta(dev))
				return -ETIME;
			i = 0;
		}
	}
}

#else /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev: device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta = 0;
	int i;

	for (i = 0; i < 10; i++) {
		delta += dev->min_delta_ns;
		dev->next_event = ktime_add_ns(ktime_get(), delta);

		if (clockevent_state_shutdown(dev))
			return 0;

		dev->retries++;
		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
		if (dev->set_next_event((unsigned long) clc, dev) == 0)
			return 0;
	}
	return -ETIME;
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */
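
/*
 * Note the contrast with the MIN_ADJUST variant above: without
 * CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST the retry loop grows delta
 * linearly (1x, 2x, ... 10x min_delta_ns) within a single call and
 * gives up after 10 attempts, but never adjusts min_delta_ns itself.
 */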

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev: device to program
 * @expires: absolute expiry time (monotonic clock)
 * @force: program minimum delay if expires cannot be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
			      bool force)
{
	unsigned long long clc;
	int64_t delta;
	int rc;

	if (unlikely(expires < 0)) {
		WARN_ON_ONCE(1);
		return -ETIME;
	}

	dev->next_event = expires;

	if (clockevent_state_shutdown(dev))
		return 0;

	/* We must be in ONESHOT state here */
	WARN_ONCE(!clockevent_state_oneshot(dev), "Current state: %d\n",
		  clockevent_get_state(dev));

	/* Shortcut for clockevent devices that can deal with ktime. */
	if (dev->features & CLOCK_EVT_FEAT_KTIME)
		return dev->set_next_ktime(expires, dev);

	delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
	if (delta <= 0)
		return force ? clockevents_program_min_delta(dev) : -ETIME;

	delta = min(delta, (int64_t) dev->max_delta_ns);
	delta = max(delta, (int64_t) dev->min_delta_ns);

	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	rc = dev->set_next_event((unsigned long) clc, dev);

	return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}
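
/*
 * Illustrative walk-through (numbers are made up): with expires 150us
 * in the future, min_delta_ns = 1000 and a max_delta_ns of several
 * seconds, delta stays at 150000. For the 1MHz example device above
 * (mult ~= 2^31 / 1000, shift = 31) that yields
 * clc = 150000 * mult >> shift ~= 150 ticks.
 */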

/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
	struct clock_event_device *dev;

	while (!list_empty(&clockevents_released)) {
		dev = list_entry(clockevents_released.next,
				 struct clock_event_device, list);
		list_del(&dev->list);
		list_add(&dev->list, &clockevent_devices);
		tick_check_new_device(dev);
	}
}

/*
 * Try to install a replacement clock event device
 */
static int clockevents_replace(struct clock_event_device *ced)
{
	struct clock_event_device *dev, *newdev = NULL;

	list_for_each_entry(dev, &clockevent_devices, list) {
		if (dev == ced || !clockevent_state_detached(dev))
			continue;

		if (!tick_check_replacement(newdev, dev))
			continue;

		if (!try_module_get(dev->owner))
			continue;

		if (newdev)
			module_put(newdev->owner);
		newdev = dev;
	}
	if (newdev) {
		tick_install_replacement(newdev);
		list_del_init(&ced->list);
	}
	return newdev ? 0 : -EBUSY;
}

/*
 * Called with clockevents_mutex and clockevents_lock held
 */
static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
{
	/* Fast track. Device is unused */
	if (clockevent_state_detached(ced)) {
		list_del_init(&ced->list);
		return 0;
	}

	return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
}

/*
 * SMP function call to unbind a device
 */
static void __clockevents_unbind(void *arg)
{
	struct ce_unbind *cu = arg;
	int res;

	raw_spin_lock(&clockevents_lock);
	res = __clockevents_try_unbind(cu->ce, smp_processor_id());
	if (res == -EAGAIN)
		res = clockevents_replace(cu->ce);
	cu->res = res;
	raw_spin_unlock(&clockevents_lock);
}

/*
 * Issues smp function call to unbind a per cpu device. Called with
 * clockevents_mutex held.
 */
static int clockevents_unbind(struct clock_event_device *ced, int cpu)
{
	struct ce_unbind cu = { .ce = ced, .res = -ENODEV };

	smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
	return cu.res;
}

/*
 * Unbind a clockevents device.
 */
int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
{
	int ret;

	mutex_lock(&clockevents_mutex);
	ret = clockevents_unbind(ced, cpu);
	mutex_unlock(&clockevents_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(clockevents_unbind_device);
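
/*
 * Example (hypothetical driver teardown, for illustration only): a
 * driver removing its per cpu devices would unbind them first:
 *
 *	for_each_online_cpu(cpu)
 *		clockevents_unbind_device(per_cpu_ptr(drv->ce, cpu), cpu);
 *
 * -EBUSY here means the device is still in use and no replacement
 * device was available.
 */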

/**
 * clockevents_register_device - register a clock event device
 * @dev: device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
	unsigned long flags;

	/* Initialize state to DETACHED */
	clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);

	if (!dev->cpumask) {
		WARN_ON(num_possible_cpus() > 1);
		dev->cpumask = cpumask_of(smp_processor_id());
	}

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	list_add(&dev->list, &clockevent_devices);
	tick_check_new_device(dev);
	clockevents_notify_released();

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

static void clockevents_config(struct clock_event_device *dev, u32 freq)
{
	u64 sec;

	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return;

	/*
	 * Calculate the maximum number of seconds we can sleep. Limit
	 * to 10 minutes for hardware which can program more than
	 * 32bit ticks so we still get reasonable conversion values.
	 */
	sec = dev->max_delta_ticks;
	do_div(sec, freq);
	if (!sec)
		sec = 1;
	else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
		sec = 600;

	clockevents_calc_mult_shift(dev, freq, sec);
	dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
	dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}
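
/*
 * Worked example (illustrative): a 1MHz device with a full 32bit
 * counter has max_delta_ticks = 0xffffffff, so sec = 0xffffffff /
 * 1000000 = 4294. Since max_delta_ticks does not exceed UINT_MAX the
 * 600 second clamp does not apply; mult/shift are then sized for a
 * ~4294 second range before the delta2ns bounds are derived.
 */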

/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev: device to register
 * @freq: The clock frequency
 * @min_delta: The minimum clock ticks to program in oneshot mode
 * @max_delta: The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
				     u32 freq, unsigned long min_delta,
				     unsigned long max_delta)
{
	dev->min_delta_ticks = min_delta;
	dev->max_delta_ticks = max_delta;
	clockevents_config(dev, freq);
	clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);
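
/*
 * Example (a hypothetical driver sketch, not part of this file): a
 * oneshot capable timer ticking at 1MHz with a 32bit counter would
 * typically be registered like this. All names and callbacks below
 * are assumptions for illustration only.
 */
#if 0
static int example_set_next_event(unsigned long cycles,
				  struct clock_event_device *ced)
{
	/* Program the hardware comparator 'cycles' ticks ahead */
	return 0;
}

static int example_shutdown(struct clock_event_device *ced)
{
	/* Stop the hardware timer */
	return 0;
}

static struct clock_event_device example_ce = {
	.name			= "example-timer",
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 300,
	.set_next_event		= example_set_next_event,
	.set_state_shutdown	= example_shutdown,
};

static void example_timer_init(void)
{
	example_ce.cpumask = cpumask_of(smp_processor_id());
	/* 1MHz, min 2 ticks, max 2^32 - 1 ticks */
	clockevents_config_and_register(&example_ce, 1000000, 2, 0xffffffff);
}
#endif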

int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	clockevents_config(dev, freq);

	if (clockevent_state_oneshot(dev))
		return clockevents_program_event(dev, dev->next_event, false);

	if (clockevent_state_periodic(dev))
		return __clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);

	return 0;
}

/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev: device to modify
 * @freq: new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events. If called for the broadcast device the core takes
 * care of serialization.
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = tick_broadcast_update_freq(dev, freq);
	if (ret == -ENODEV)
		ret = __clockevents_update_freq(dev, freq);
	local_irq_restore(flags);
	return ret;
}
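
/*
 * Usage sketch (hypothetical, for illustration): a timer driver whose
 * input clock just changed rate would call this on the CPU the device
 * belongs to, e.g.:
 *
 *	clockevents_update_freq(this_cpu_ptr(drv->ce), new_rate);
 *
 * The core then rescales mult/shift via clockevents_config() and
 * reprograms next_event with the new conversion factors.
 */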

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old: device to release (can be NULL)
 * @new: device to request (can be NULL)
 *
 * Called from various tick functions with clockevents_lock held and
 * interrupts disabled.
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	/*
	 * Caller releases a clock event device. We queue it into the
	 * released list and do a notify add later.
	 */
	if (old) {
		module_put(old->owner);
		clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED);
		list_del(&old->list);
		list_add(&old->list, &clockevents_released);
	}

	if (new) {
		BUG_ON(!clockevent_state_detached(new));
		clockevents_shutdown(new);
	}
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
	struct clock_event_device *dev;

	list_for_each_entry_reverse(dev, &clockevent_devices, list)
		if (dev->suspend && !clockevent_state_detached(dev))
			dev->suspend(dev);
}

/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
	struct clock_event_device *dev;

	list_for_each_entry(dev, &clockevent_devices, list)
		if (dev->resume && !clockevent_state_detached(dev))
			dev->resume(dev);
}

#ifdef CONFIG_HOTPLUG_CPU
/**
 * tick_cleanup_dead_cpu - Cleanup the tick and clockevents of a dead cpu
 */
void tick_cleanup_dead_cpu(int cpu)
{
	struct clock_event_device *dev, *tmp;
	unsigned long flags;

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	tick_shutdown_broadcast_oneshot(cpu);
	tick_shutdown_broadcast(cpu);
	tick_shutdown(cpu);
	/*
	 * Unregister the clock event devices which were
	 * released from the users in the notify chain.
	 */
	list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
		list_del(&dev->list);
	/*
	 * Now check whether the CPU has left unused per cpu devices
	 */
	list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
		if (cpumask_test_cpu(cpu, dev->cpumask) &&
		    cpumask_weight(dev->cpumask) == 1 &&
		    !tick_is_broadcast_device(dev)) {
			BUG_ON(!clockevent_state_detached(dev));
			list_del(&dev->list);
		}
	}
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
#endif

#ifdef CONFIG_SYSFS
static struct bus_type clockevents_subsys = {
	.name		= "clockevents",
	.dev_name	= "clockevent",
};

static DEFINE_PER_CPU(struct device, tick_percpu_dev);
static struct tick_device *tick_get_tick_dev(struct device *dev);

static ssize_t sysfs_show_current_tick_dev(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct tick_device *td;
	ssize_t count = 0;

	raw_spin_lock_irq(&clockevents_lock);
	td = tick_get_tick_dev(dev);
	if (td && td->evtdev)
		count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name);
	raw_spin_unlock_irq(&clockevents_lock);
	return count;
}
static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL);

/* We don't support the abomination of removable broadcast devices */
static ssize_t sysfs_unbind_tick_dev(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	char name[CS_NAME_LEN];
	ssize_t ret = sysfs_get_uname(buf, name, count);
	struct clock_event_device *ce;

	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clockevents_mutex);
	raw_spin_lock_irq(&clockevents_lock);
	list_for_each_entry(ce, &clockevent_devices, list) {
		if (!strcmp(ce->name, name)) {
			ret = __clockevents_try_unbind(ce, dev->id);
			break;
		}
	}
	raw_spin_unlock_irq(&clockevents_lock);
	/*
	 * We hold clockevents_mutex, so ce can't go away
	 */
	if (ret == -EAGAIN)
		ret = clockevents_unbind(ce, dev->id);
	mutex_unlock(&clockevents_mutex);
	return ret ? ret : count;
}
static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev);
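
/*
 * The attributes above surface under /sys/devices/system/clockevents/,
 * one node per cpu plus, when broadcast is configured, a "broadcast"
 * node. Example shell usage (device names vary by platform):
 *
 *	# cat /sys/devices/system/clockevents/clockevent0/current_device
 *	lapic
 *	# echo hpet > /sys/devices/system/clockevents/clockevent0/unbind_device
 */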

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static struct device tick_bc_dev = {
	.init_name	= "broadcast",
	.id		= 0,
	.bus		= &clockevents_subsys,
};

static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return dev == &tick_bc_dev ? tick_get_broadcast_device() :
		&per_cpu(tick_cpu_device, dev->id);
}

static __init int tick_broadcast_init_sysfs(void)
{
	int err = device_register(&tick_bc_dev);

	if (!err)
		err = device_create_file(&tick_bc_dev, &dev_attr_current_device);
	return err;
}
#else
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return &per_cpu(tick_cpu_device, dev->id);
}
static inline int tick_broadcast_init_sysfs(void) { return 0; }
#endif

static int __init tick_init_sysfs(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct device *dev = &per_cpu(tick_percpu_dev, cpu);
		int err;

		dev->id = cpu;
		dev->bus = &clockevents_subsys;
		err = device_register(dev);
		if (!err)
			err = device_create_file(dev, &dev_attr_current_device);
		if (!err)
			err = device_create_file(dev, &dev_attr_unbind_device);
		if (err)
			return err;
	}
	return tick_broadcast_init_sysfs();
}

static int __init clockevents_init_sysfs(void)
{
	int err = subsys_system_register(&clockevents_subsys, NULL);

	if (!err)
		err = tick_init_sysfs();
	return err;
}
device_initcall(clockevents_init_sysfs);
#endif /* SYSFS */