/* kernel/time/clockevents.c */
/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>

#include "tick-internal.h"
/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
/* Devices released from the notifier chain, pending re-add */
static LIST_HEAD(clockevents_released);
/* Protection for the above lists */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
28/**
29 * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
30 * @latch: value to convert
31 * @evt: pointer to clock event device descriptor
32 *
33 * Math helper, returns latch value converted to nanoseconds (bound checked)
34 */
97813f2f 35u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
d316c57f 36{
97813f2f 37 u64 clc = (u64) latch << evt->shift;
d316c57f 38
45fe4fe1
IM
39 if (unlikely(!evt->mult)) {
40 evt->mult = 1;
41 WARN_ON(1);
42 }
43
d316c57f
TG
44 do_div(clc, evt->mult);
45 if (clc < 1000)
46 clc = 1000;
97813f2f
JH
47 if (clc > KTIME_MAX)
48 clc = KTIME_MAX;
d316c57f 49
97813f2f 50 return clc;
d316c57f 51}
c81fc2c3 52EXPORT_SYMBOL_GPL(clockevent_delta2ns);
d316c57f
TG
53
54/**
55 * clockevents_set_mode - set the operating mode of a clock event device
56 * @dev: device to modify
57 * @mode: new mode
58 *
59 * Must be called with interrupts disabled !
60 */
61void clockevents_set_mode(struct clock_event_device *dev,
62 enum clock_event_mode mode)
63{
64 if (dev->mode != mode) {
65 dev->set_mode(mode, dev);
66 dev->mode = mode;
2d68259d
MD
67
68 /*
69 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
70 * on it, so fix it up and emit a warning:
71 */
72 if (mode == CLOCK_EVT_MODE_ONESHOT) {
73 if (unlikely(!dev->mult)) {
74 dev->mult = 1;
75 WARN_ON(1);
76 }
77 }
d316c57f
TG
78 }
79}
80
2344abbc
TG
81/**
82 * clockevents_shutdown - shutdown the device and clear next_event
83 * @dev: device to shutdown
84 */
85void clockevents_shutdown(struct clock_event_device *dev)
86{
87 clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
88 dev->next_event.tv64 = KTIME_MAX;
89}
90
d1748302
MS
91#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
92
93/* Limit min_delta to a jiffie */
94#define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ)
95
96/**
97 * clockevents_increase_min_delta - raise minimum delta of a clock event device
98 * @dev: device to increase the minimum delta
99 *
100 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
101 */
102static int clockevents_increase_min_delta(struct clock_event_device *dev)
103{
104 /* Nothing to do if we already reached the limit */
105 if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
106 printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n");
107 dev->next_event.tv64 = KTIME_MAX;
108 return -ETIME;
109 }
110
111 if (dev->min_delta_ns < 5000)
112 dev->min_delta_ns = 5000;
113 else
114 dev->min_delta_ns += dev->min_delta_ns >> 1;
115
116 if (dev->min_delta_ns > MIN_DELTA_LIMIT)
117 dev->min_delta_ns = MIN_DELTA_LIMIT;
118
119 printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
120 dev->name ? dev->name : "?",
121 (unsigned long long) dev->min_delta_ns);
122 return 0;
123}
124
125/**
126 * clockevents_program_min_delta - Set clock event device to the minimum delay.
127 * @dev: device to program
128 *
129 * Returns 0 on success, -ETIME when the retry loop failed.
130 */
131static int clockevents_program_min_delta(struct clock_event_device *dev)
132{
133 unsigned long long clc;
134 int64_t delta;
135 int i;
136
137 for (i = 0;;) {
138 delta = dev->min_delta_ns;
139 dev->next_event = ktime_add_ns(ktime_get(), delta);
140
141 if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
142 return 0;
143
144 dev->retries++;
145 clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
146 if (dev->set_next_event((unsigned long) clc, dev) == 0)
147 return 0;
148
149 if (++i > 2) {
150 /*
151 * We tried 3 times to program the device with the
152 * given min_delta_ns. Try to increase the minimum
153 * delta, if that fails as well get out of here.
154 */
155 if (clockevents_increase_min_delta(dev))
156 return -ETIME;
157 i = 0;
158 }
159 }
160}
161
162#else /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */
163
164/**
165 * clockevents_program_min_delta - Set clock event device to the minimum delay.
166 * @dev: device to program
167 *
168 * Returns 0 on success, -ETIME when the retry loop failed.
169 */
170static int clockevents_program_min_delta(struct clock_event_device *dev)
171{
172 unsigned long long clc;
173 int64_t delta;
174
175 delta = dev->min_delta_ns;
176 dev->next_event = ktime_add_ns(ktime_get(), delta);
177
178 if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
179 return 0;
180
181 dev->retries++;
182 clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
183 return dev->set_next_event((unsigned long) clc, dev);
184}
185
186#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */
187
d316c57f
TG
188/**
189 * clockevents_program_event - Reprogram the clock event device.
d1748302 190 * @dev: device to program
d316c57f 191 * @expires: absolute expiry time (monotonic clock)
d1748302 192 * @force: program minimum delay if expires can not be set
d316c57f
TG
193 *
194 * Returns 0 on success, -ETIME when the event is in the past.
195 */
196int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
d1748302 197 bool force)
d316c57f
TG
198{
199 unsigned long long clc;
200 int64_t delta;
d1748302 201 int rc;
d316c57f 202
167b1de3
TG
203 if (unlikely(expires.tv64 < 0)) {
204 WARN_ON_ONCE(1);
205 return -ETIME;
206 }
207
d316c57f
TG
208 dev->next_event = expires;
209
210 if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
211 return 0;
212
65516f8a
MS
213 /* Shortcut for clockevent devices that can deal with ktime. */
214 if (dev->features & CLOCK_EVT_FEAT_KTIME)
215 return dev->set_next_ktime(expires, dev);
216
d1748302
MS
217 delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
218 if (delta <= 0)
219 return force ? clockevents_program_min_delta(dev) : -ETIME;
d316c57f 220
d1748302
MS
221 delta = min(delta, (int64_t) dev->max_delta_ns);
222 delta = max(delta, (int64_t) dev->min_delta_ns);
d316c57f 223
d1748302
MS
224 clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
225 rc = dev->set_next_event((unsigned long) clc, dev);
226
227 return (rc && force) ? clockevents_program_min_delta(dev) : rc;
d316c57f
TG
228}
229
d316c57f 230/*
3eb05676 231 * Called after a notify add to make devices available which were
d316c57f
TG
232 * released from the notifier call.
233 */
234static void clockevents_notify_released(void)
235{
236 struct clock_event_device *dev;
237
238 while (!list_empty(&clockevents_released)) {
239 dev = list_entry(clockevents_released.next,
240 struct clock_event_device, list);
241 list_del(&dev->list);
242 list_add(&dev->list, &clockevent_devices);
7172a286 243 tick_check_new_device(dev);
d316c57f
TG
244 }
245}
246
247/**
248 * clockevents_register_device - register a clock event device
249 * @dev: device to register
250 */
251void clockevents_register_device(struct clock_event_device *dev)
252{
f833bab8
SS
253 unsigned long flags;
254
d316c57f 255 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
1b054b67
TG
256 if (!dev->cpumask) {
257 WARN_ON(num_possible_cpus() > 1);
258 dev->cpumask = cpumask_of(smp_processor_id());
259 }
320ab2b0 260
b5f91da0 261 raw_spin_lock_irqsave(&clockevents_lock, flags);
d316c57f
TG
262
263 list_add(&dev->list, &clockevent_devices);
7172a286 264 tick_check_new_device(dev);
d316c57f
TG
265 clockevents_notify_released();
266
b5f91da0 267 raw_spin_unlock_irqrestore(&clockevents_lock, flags);
d316c57f 268}
c81fc2c3 269EXPORT_SYMBOL_GPL(clockevents_register_device);
d316c57f 270
e5400321 271void clockevents_config(struct clock_event_device *dev, u32 freq)
57f0fcbe 272{
c0e299b1 273 u64 sec;
57f0fcbe
TG
274
275 if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
276 return;
277
278 /*
279 * Calculate the maximum number of seconds we can sleep. Limit
280 * to 10 minutes for hardware which can program more than
281 * 32bit ticks so we still get reasonable conversion values.
282 */
283 sec = dev->max_delta_ticks;
284 do_div(sec, freq);
285 if (!sec)
286 sec = 1;
287 else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
288 sec = 600;
289
290 clockevents_calc_mult_shift(dev, freq, sec);
291 dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev);
292 dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev);
293}
294
295/**
296 * clockevents_config_and_register - Configure and register a clock event device
297 * @dev: device to register
298 * @freq: The clock frequency
299 * @min_delta: The minimum clock ticks to program in oneshot mode
300 * @max_delta: The maximum clock ticks to program in oneshot mode
301 *
302 * min/max_delta can be 0 for devices which do not support oneshot mode.
303 */
304void clockevents_config_and_register(struct clock_event_device *dev,
305 u32 freq, unsigned long min_delta,
306 unsigned long max_delta)
307{
308 dev->min_delta_ticks = min_delta;
309 dev->max_delta_ticks = max_delta;
310 clockevents_config(dev, freq);
311 clockevents_register_device(dev);
312}
c35ef95c 313EXPORT_SYMBOL_GPL(clockevents_config_and_register);
57f0fcbe 314
80b816b7
TG
315/**
316 * clockevents_update_freq - Update frequency and reprogram a clock event device.
317 * @dev: device to modify
318 * @freq: new device frequency
319 *
320 * Reconfigure and reprogram a clock event device in oneshot
321 * mode. Must be called on the cpu for which the device delivers per
322 * cpu timer events with interrupts disabled! Returns 0 on success,
323 * -ETIME when the event is in the past.
324 */
325int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
326{
327 clockevents_config(dev, freq);
328
329 if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
330 return 0;
331
d1748302 332 return clockevents_program_event(dev, dev->next_event, false);
80b816b7
TG
333}
334
/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}
342/**
343 * clockevents_exchange_device - release and request clock devices
344 * @old: device to release (can be NULL)
345 * @new: device to request (can be NULL)
346 *
347 * Called from the notifier chain. clockevents_lock is held already
348 */
349void clockevents_exchange_device(struct clock_event_device *old,
350 struct clock_event_device *new)
351{
352 unsigned long flags;
353
354 local_irq_save(flags);
355 /*
356 * Caller releases a clock event device. We queue it into the
357 * released list and do a notify add later.
358 */
359 if (old) {
ccf33d68 360 module_put(old->owner);
d316c57f
TG
361 clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
362 list_del(&old->list);
363 list_add(&old->list, &clockevents_released);
364 }
365
366 if (new) {
367 BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
2344abbc 368 clockevents_shutdown(new);
d316c57f
TG
369 }
370 local_irq_restore(flags);
371}
372
adc78e6b
RW
373/**
374 * clockevents_suspend - suspend clock devices
375 */
376void clockevents_suspend(void)
377{
378 struct clock_event_device *dev;
379
380 list_for_each_entry_reverse(dev, &clockevent_devices, list)
381 if (dev->suspend)
382 dev->suspend(dev);
383}
384
385/**
386 * clockevents_resume - resume clock devices
387 */
388void clockevents_resume(void)
389{
390 struct clock_event_device *dev;
391
392 list_for_each_entry(dev, &clockevent_devices, list)
393 if (dev->resume)
394 dev->resume(dev);
395}
396
#ifdef CONFIG_GENERIC_CLOCKEVENTS
/**
 * clockevents_notify - notification about relevant events
 * @reason:	CLOCK_EVT_NOTIFY_* event code
 * @arg:	event dependent argument (e.g. cpu number for CPU_DEAD)
 */
void clockevents_notify(unsigned long reason, void *arg)
{
	struct clock_event_device *dev, *tmp;
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	switch (reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		tick_broadcast_on_off(reason, arg);
		break;

	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
		tick_broadcast_oneshot_control(reason);
		break;

	case CLOCK_EVT_NOTIFY_CPU_DYING:
		tick_handover_do_timer(arg);
		break;

	case CLOCK_EVT_NOTIFY_SUSPEND:
		tick_suspend();
		tick_suspend_broadcast();
		break;

	case CLOCK_EVT_NOTIFY_RESUME:
		tick_resume();
		break;

	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		tick_shutdown_broadcast_oneshot(arg);
		tick_shutdown_broadcast(arg);
		tick_shutdown(arg);
		/*
		 * Unregister the clock event devices which were
		 * released from the users in the notify chain.
		 */
		list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
			list_del(&dev->list);
		/*
		 * Now check whether the CPU has left unused per cpu devices
		 */
		cpu = *((int *)arg);
		list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
			if (cpumask_test_cpu(cpu, dev->cpumask) &&
			    cpumask_weight(dev->cpumask) == 1 &&
			    !tick_is_broadcast_device(dev)) {
				BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
				list_del(&dev->list);
			}
		}
		break;
	default:
		break;
	}
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_notify);
#endif