/*
 * linux/kernel/time/tick-broadcast-hrtimer.c
 * This file emulates a local clock event device
 * via a pseudo clock device.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/clockchips.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"

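/* The hrtimer which backs the emulated (pseudo) broadcast clock event device. */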
static struct hrtimer bctimer;

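/*
 * set_state_shutdown handler of the broadcast device: try to stop the
 * backing hrtimer (see the note below on why it is not a hard cancel).
 */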
static int bc_shutdown(struct clock_event_device *evt)
{
        /*
         * Note, we cannot cancel the timer here as we might
         * run into the following live lock scenario:
         *
         * cpu 0                cpu1
         * lock(broadcast_lock);
         *                      hrtimer_interrupt()
         *                      bc_handler()
         *                         tick_handle_oneshot_broadcast();
         *                              lock(broadcast_lock);
         * hrtimer_cancel()
         *   wait_for_callback()
         */
        hrtimer_try_to_cancel(&bctimer);
        return 0;
}

/*
 * This is called from the guts of the broadcast code when the cpu
 * which is about to enter idle has the earliest broadcast timer event.
 */
static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
{
        int bc_moved;
        /*
         * We try to cancel the timer first. If the callback is in
         * flight on some other cpu then we let it handle it. If we
         * were able to cancel the timer nothing can rearm it as we
         * own broadcast_lock.
         *
         * However we can also be called from the event handler of
         * ce_broadcast_hrtimer itself when it expires. We cannot
         * restart the timer because we are in the callback, but we
         * can set the expiry time and let the callback return
         * HRTIMER_RESTART.
         *
         * Since we are in the idle loop at this point and because
         * hrtimer_{start/cancel} functions call into tracing,
         * calls to these functions must be bound within RCU_NONIDLE.
         */
        RCU_NONIDLE({
                bc_moved = hrtimer_try_to_cancel(&bctimer) >= 0;
                if (bc_moved)
                        hrtimer_start(&bctimer, expires,
                                      HRTIMER_MODE_ABS_PINNED);});
        if (bc_moved) {
                /* Bind the "device" to the cpu */
                bc->bound_on = smp_processor_id();
        } else if (bc->bound_on == smp_processor_id()) {
                hrtimer_set_expires(&bctimer, expires);
        }
        return 0;
}

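/*
 * The pseudo clock event device registered with the clockevents core;
 * its events are generated by bctimer via bc_handler() below.
 */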
static struct clock_event_device ce_broadcast_hrtimer = {
        .name                   = "bc_hrtimer",
        .set_state_shutdown     = bc_shutdown,
        .set_next_ktime         = bc_set_next,
        .features               = CLOCK_EVT_FEAT_ONESHOT |
                                  CLOCK_EVT_FEAT_KTIME |
                                  CLOCK_EVT_FEAT_HRTIMER,
        .rating                 = 0,
        .bound_on               = -1,
        .min_delta_ns           = 1,
        .max_delta_ns           = KTIME_MAX,
        .min_delta_ticks        = 1,
        .max_delta_ticks        = ULONG_MAX,
        .mult                   = 1,
        .shift                  = 0,
        .cpumask                = cpu_all_mask,
};

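/*
 * Expiry handler of bctimer: hand the event to the broadcast code and
 * restart the timer when the device is still in oneshot mode and a
 * further event is pending (its expiry was updated via bc_set_next()).
 */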
static enum hrtimer_restart bc_handler(struct hrtimer *t)
{
        ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);

        if (clockevent_state_oneshot(&ce_broadcast_hrtimer))
                if (ce_broadcast_hrtimer.next_event.tv64 != KTIME_MAX)
                        return HRTIMER_RESTART;

        return HRTIMER_NORESTART;
}

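/*
 * Set up the backing hrtimer and register the pseudo clock event device
 * with the clockevents core.
 */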
void tick_setup_hrtimer_broadcast(void)
{
        hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        bctimer.function = bc_handler;
        clockevents_register_device(&ce_broadcast_hrtimer);
}