1 /* $Id: timer-r0drv-linux.c $ */
3 * IPRT - Timers, Ring-0 Driver, Linux.
7 * Copyright (C) 2006-2017 Oracle Corporation
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
28 /*********************************************************************************************************************************
30 *********************************************************************************************************************************/
31 #include "the-linux-kernel.h"
32 #include "internal/iprt.h"
34 #include <iprt/timer.h>
35 #include <iprt/time.h>
37 #include <iprt/cpuset.h>
38 #include <iprt/spinlock.h>
41 #include <iprt/assert.h>
42 #include <iprt/alloc.h>
44 #include "internal/magics.h"
46 /** @def RTTIMER_LINUX_WITH_HRTIMER
47 * Whether to use high resolution timers. */
48 #if !defined(RTTIMER_LINUX_WITH_HRTIMER) \
49 && defined(IPRT_LINUX_HAS_HRTIMER)
50 # define RTTIMER_LINUX_WITH_HRTIMER
53 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
54 # define mod_timer_pinned mod_timer
55 # define HRTIMER_MODE_ABS_PINNED HRTIMER_MODE_ABS
59 /*********************************************************************************************************************************
60 * Structures and Typedefs *
61 *********************************************************************************************************************************/
/**
 * Timer state machine.
 *
 * This is used to try handle the issues with MP events and
 * timers that runs on all CPUs. It's relatively nasty :-/
 */
typedef enum RTTIMERLNXSTATE
{
    /** Stopped. */
    RTTIMERLNXSTATE_STOPPED = 0,
    /** Transient state; next ACTIVE. */
    RTTIMERLNXSTATE_STARTING,
    /** Transient state; next ACTIVE. (not really necessary) */
    RTTIMERLNXSTATE_MP_STARTING,
    /** Active. */
    RTTIMERLNXSTATE_ACTIVE,
    /** Active and in callback; next ACTIVE, STOPPED or CALLBACK_DESTROYING. */
    RTTIMERLNXSTATE_CALLBACK,
    /** Stopped while in the callback; next STOPPED. */
    RTTIMERLNXSTATE_CB_STOPPING,
    /** Restarted while in the callback; next ACTIVE, STOPPED, DESTROYING. */
    RTTIMERLNXSTATE_CB_RESTARTING,
    /** The callback shall destroy the timer; next STOPPED. */
    RTTIMERLNXSTATE_CB_DESTROYING,
    /** Transient state; next STOPPED. */
    RTTIMERLNXSTATE_STOPPING,
    /** Transient state; next STOPPED. */
    RTTIMERLNXSTATE_MP_STOPPING,
    /** The usual 32-bit hack. */
    RTTIMERLNXSTATE_32BIT_HACK = 0x7fffffff
} RTTIMERLNXSTATE;
98 typedef struct RTTIMERLNXSUBTIMER
100 /** Timer specific data. */
103 #if defined(RTTIMER_LINUX_WITH_HRTIMER)
104 /** High resolution timer. */
107 /** The linux timer structure. */
108 struct hrtimer LnxTimer
;
111 /** Standard timer. */
114 /** The linux timer structure. */
115 struct timer_list LnxTimer
;
116 /** The start of the current run (ns).
117 * This is used to calculate when the timer ought to fire the next time. */
119 /** The u64NextTS in jiffies. */
120 unsigned long ulNextJiffies
;
121 /** Set when starting or changing the timer so that u64StartTs
122 * and u64NextTS gets reinitialized (eliminating some jitter). */
123 bool volatile fFirstAfterChg
;
126 /** The current tick number. */
128 /** Restart the single shot timer at this specific time.
129 * Used when a single shot timer is restarted from the callback. */
130 uint64_t volatile uNsRestartAt
;
131 /** Pointer to the parent timer. */
133 /** The current sub-timer state. */
134 RTTIMERLNXSTATE
volatile enmState
;
135 } RTTIMERLNXSUBTIMER
;
136 /** Pointer to a linux sub-timer. */
137 typedef RTTIMERLNXSUBTIMER
*PRTTIMERLNXSUBTIMER
;
141 * The internal representation of an Linux timer handle.
143 typedef struct RTTIMER
146 * This is RTTIMER_MAGIC, but changes to something else before the timer
147 * is destroyed to indicate clearly that thread should exit. */
148 uint32_t volatile u32Magic
;
149 /** Spinlock synchronizing the fSuspended and MP event handling.
150 * This is NIL_RTSPINLOCK if cCpus == 1. */
151 RTSPINLOCK hSpinlock
;
152 /** Flag indicating that the timer is suspended. */
153 bool volatile fSuspended
;
154 /** Whether the timer must run on one specific CPU or not. */
157 /** Whether the timer must run on all CPUs or not. */
159 #endif /* else: All -> specific on non-SMP kernels */
160 /** Whether it is a high resolution timer or a standard one. */
162 /** The id of the CPU it must run on if fSpecificCpu is set. */
164 /** The number of CPUs this timer should run on. */
168 /** User argument. */
170 /** The timer interval. 0 if one-shot. */
171 uint64_t volatile u64NanoInterval
;
172 /** This is set to the number of jiffies between ticks if the interval is
173 * an exact number of jiffies. (Standard timers only.) */
174 unsigned long volatile cJiffies
;
175 /** The change interval spinlock for standard timers only. */
176 spinlock_t ChgIntLock
;
177 /** Workqueue item for delayed destruction. */
178 RTR0LNXWORKQUEUEITEM DtorWorkqueueItem
;
180 * Normally there is just one, but for RTTIMER_FLAGS_CPU_ALL this will contain
181 * an entry for all possible cpus. In that case the index will be the same as
182 * for the RTCpuSet. */
183 RTTIMERLNXSUBTIMER aSubTimers
[1];
/**
 * A rtTimerLinuxStartOnCpu and rtTimerLinuxStartOnCpu argument package.
 */
typedef struct RTTIMERLINUXSTARTONCPUARGS
{
    /** The current time (RTTimeSystemNanoTS). */
    uint64_t                u64Now;
    /** When to start firing (delta). */
    uint64_t                u64First;
} RTTIMERLINUXSTARTONCPUARGS;
/** Pointer to a rtTimerLinuxStartOnCpu argument package. */
typedef RTTIMERLINUXSTARTONCPUARGS *PRTTIMERLINUXSTARTONCPUARGS;
201 /*********************************************************************************************************************************
202 * Internal Functions *
203 *********************************************************************************************************************************/
#ifdef CONFIG_SMP
/* Forward declaration: the MP event handler is defined after the workers it uses. */
static DECLCALLBACK(void) rtTimerLinuxMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser);
#endif
#if 0 /* hacking: enable to route RTTIMERLNX_LOG output thru the 0x504 log backdoor port. */
#define DEBUG_HACKING
#include <iprt/string.h>
#include <iprt/asm-amd64-x86.h>
/* Formats a message prefixed with the current CPU id and writes it to I/O port 0x504. */
static void myLogBackdoorPrintf(const char *pszFormat, ...)
{
    char        szTmp[256];
    va_list     args;
    size_t      cb;

    cb = RTStrPrintf(szTmp, sizeof(szTmp) - 10, "%d: ", RTMpCpuId());
    va_start(args, pszFormat);
    cb += RTStrPrintfV(&szTmp[cb], sizeof(szTmp) - cb, pszFormat, args);
    va_end(args);
    ASMOutStrU8(0x504, (uint8_t *)&szTmp[0], cb);
}
# define RTAssertMsg1Weak(pszExpr, uLine, pszFile, pszFunction) \
    myLogBackdoorPrintf("\n!!Guest Assertion failed!!\n%s(%d) %s\n%s\n", uLine, pszFile, pszFunction, (pszExpr))
# define RTAssertMsg2Weak myLogBackdoorPrintf
# define RTTIMERLNX_LOG(a) myLogBackdoorPrintf a
#else
# define RTTIMERLNX_LOG(a) do { } while (0)
#endif
236 DECLINLINE(void) rtTimerLnxSetState(RTTIMERLNXSTATE
volatile *penmState
, RTTIMERLNXSTATE enmNewState
)
239 RTTIMERLNX_LOG(("set %d -> %d\n", *penmState
, enmNewState
));
241 ASMAtomicWriteU32((uint32_t volatile *)penmState
, enmNewState
);
246 * Sets the state if it has a certain value.
248 * @return true if xchg was done.
249 * @return false if xchg wasn't done.
252 #define rtTimerLnxCmpXchgState(penmState, enmNewState, enmCurState) rtTimerLnxCmpXchgStateDebug(penmState, enmNewState, enmCurState, __LINE__)
253 static bool rtTimerLnxCmpXchgStateDebug(RTTIMERLNXSTATE
volatile *penmState
, RTTIMERLNXSTATE enmNewState
,
254 RTTIMERLNXSTATE enmCurState
, uint32_t uLine
)
256 RTTIMERLNXSTATE enmOldState
= enmCurState
;
257 bool fRc
= ASMAtomicCmpXchgExU32((uint32_t volatile *)penmState
, enmNewState
, enmCurState
, (uint32_t *)&enmOldState
);
258 RTTIMERLNX_LOG(("cxg %d -> %d - %d at %u\n", enmOldState
, enmNewState
, fRc
, uLine
));
262 DECLINLINE(bool) rtTimerLnxCmpXchgState(RTTIMERLNXSTATE
volatile *penmState
, RTTIMERLNXSTATE enmNewState
,
263 RTTIMERLNXSTATE enmCurState
)
265 return ASMAtomicCmpXchgU32((uint32_t volatile *)penmState
, enmNewState
, enmCurState
);
273 DECLINLINE(RTTIMERLNXSTATE
) rtTimerLnxGetState(RTTIMERLNXSTATE
volatile *penmState
)
275 return (RTTIMERLNXSTATE
)ASMAtomicUoReadU32((uint32_t volatile *)penmState
);
#ifdef RTTIMER_LINUX_WITH_HRTIMER

/**
 * Converts a nano second time stamp to ktime_t.
 *
 * ASSUMES RTTimeSystemNanoTS() is implemented using ktime_get_ts().
 *
 * @returns ktime_t.
 * @param   cNanoSecs   Nanoseconds.
 */
DECLINLINE(ktime_t) rtTimerLnxNanoToKt(uint64_t cNanoSecs)
{
    /* With some luck the compiler optimizes the division out of this... (Bet it doesn't.) */
    return ktime_set(cNanoSecs / 1000000000, cNanoSecs % 1000000000);
}

/**
 * Converts ktime_t to a nano second time stamp.
 *
 * ASSUMES RTTimeSystemNanoTS() is implemented using ktime_get_ts().
 *
 * @returns nano second time stamp.
 * @param   Kt          ktime_t.
 */
DECLINLINE(uint64_t) rtTimerLnxKtToNano(ktime_t Kt)
{
    return ktime_to_ns(Kt);
}

#endif /* RTTIMER_LINUX_WITH_HRTIMER */
310 * Converts a nano second interval to jiffies.
313 * @param cNanoSecs Nanoseconds.
315 DECLINLINE(unsigned long) rtTimerLnxNanoToJiffies(uint64_t cNanoSecs
)
317 /* this can be made even better... */
318 if (cNanoSecs
> (uint64_t)TICK_NSEC
* MAX_JIFFY_OFFSET
)
319 return MAX_JIFFY_OFFSET
;
321 if (RT_LIKELY(cNanoSecs
<= UINT32_MAX
))
322 return ((uint32_t)cNanoSecs
+ (TICK_NSEC
-1)) / TICK_NSEC
;
324 return (cNanoSecs
+ (TICK_NSEC
-1)) / TICK_NSEC
;
329 * Starts a sub-timer (RTTimerStart).
331 * @param pSubTimer The sub-timer to start.
332 * @param u64Now The current timestamp (RTTimeSystemNanoTS()).
333 * @param u64First The interval from u64Now to the first time the timer should fire.
334 * @param fPinned true = timer pinned to a specific CPU,
335 * false = timer can migrate between CPUs
336 * @param fHighRes Whether the user requested a high resolution timer or not.
337 * @param enmOldState The old timer state.
339 static void rtTimerLnxStartSubTimer(PRTTIMERLNXSUBTIMER pSubTimer
, uint64_t u64Now
, uint64_t u64First
,
340 bool fPinned
, bool fHighRes
)
343 * Calc when it should start firing.
345 uint64_t u64NextTS
= u64Now
+ u64First
;
347 pSubTimer
->u
.Std
.u64NextTS
= u64NextTS
;
348 RTTIMERLNX_LOG(("startsubtimer %p\n", pSubTimer
->pParent
));
350 pSubTimer
->iTick
= 0;
352 #ifdef RTTIMER_LINUX_WITH_HRTIMER
354 hrtimer_start(&pSubTimer
->u
.Hr
.LnxTimer
, rtTimerLnxNanoToKt(u64NextTS
),
355 fPinned
? HRTIMER_MODE_ABS_PINNED
: HRTIMER_MODE_ABS
);
359 unsigned long cJiffies
= !u64First
? 0 : rtTimerLnxNanoToJiffies(u64First
);
360 pSubTimer
->u
.Std
.ulNextJiffies
= jiffies
+ cJiffies
;
361 pSubTimer
->u
.Std
.fFirstAfterChg
= true;
365 # if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
366 mod_timer(&pSubTimer
->u
.Std
.LnxTimer
, pSubTimer
->u
.Std
.ulNextJiffies
);
368 mod_timer_pinned(&pSubTimer
->u
.Std
.LnxTimer
, pSubTimer
->u
.Std
.ulNextJiffies
);
373 mod_timer(&pSubTimer
->u
.Std
.LnxTimer
, pSubTimer
->u
.Std
.ulNextJiffies
);
376 /* Be a bit careful here since we could be racing the callback. */
377 if (!rtTimerLnxCmpXchgState(&pSubTimer
->enmState
, RTTIMERLNXSTATE_ACTIVE
, RTTIMERLNXSTATE_STARTING
))
378 rtTimerLnxCmpXchgState(&pSubTimer
->enmState
, RTTIMERLNXSTATE_ACTIVE
, RTTIMERLNXSTATE_MP_STARTING
);
383 * Stops a sub-timer (RTTimerStart and rtTimerLinuxMpEvent()).
385 * The caller has already changed the state, so we will not be in a callback
386 * situation wrt to the calling thread.
388 * @param pSubTimer The sub-timer.
389 * @param fHighRes Whether the user requested a high resolution timer or not.
391 static void rtTimerLnxStopSubTimer(PRTTIMERLNXSUBTIMER pSubTimer
, bool fHighRes
)
393 RTTIMERLNX_LOG(("stopsubtimer %p %d\n", pSubTimer
->pParent
, fHighRes
));
394 #ifdef RTTIMER_LINUX_WITH_HRTIMER
397 /* There is no equivalent to del_timer in the hrtimer API,
398 hrtimer_cancel() == del_timer_sync(). Just like the WARN_ON in
399 del_timer_sync() asserts, waiting for a timer callback to complete
400 is deadlock prone, so don't do it. */
401 int rc
= hrtimer_try_to_cancel(&pSubTimer
->u
.Hr
.LnxTimer
);
404 hrtimer_start(&pSubTimer
->u
.Hr
.LnxTimer
, ktime_set(KTIME_SEC_MAX
, 0), HRTIMER_MODE_ABS
);
405 hrtimer_try_to_cancel(&pSubTimer
->u
.Hr
.LnxTimer
);
410 del_timer(&pSubTimer
->u
.Std
.LnxTimer
);
412 rtTimerLnxSetState(&pSubTimer
->enmState
, RTTIMERLNXSTATE_STOPPED
);
417 * Used by RTTimerDestroy and rtTimerLnxCallbackDestroy to do the actual work.
419 * @param pTimer The timer in question.
421 static void rtTimerLnxDestroyIt(PRTTIMER pTimer
)
423 RTSPINLOCK hSpinlock
= pTimer
->hSpinlock
;
425 Assert(pTimer
->fSuspended
);
426 RTTIMERLNX_LOG(("destroyit %p\n", pTimer
));
429 * Remove the MP notifications first because it'll reduce the risk of
430 * us overtaking any MP event that might theoretically be racing us here.
433 if ( pTimer
->cCpus
> 1
434 && hSpinlock
!= NIL_RTSPINLOCK
)
436 int rc
= RTMpNotificationDeregister(rtTimerLinuxMpEvent
, pTimer
);
439 #endif /* CONFIG_SMP */
442 * Invalidate the handle.
444 ASMAtomicWriteU32(&pTimer
->u32Magic
, ~RTTIMER_MAGIC
);
447 * Make sure all timers have stopped executing since we're stopping them in
448 * an asynchronous manner up in rtTimerLnxStopSubTimer.
450 iCpu
= pTimer
->cCpus
;
453 #ifdef RTTIMER_LINUX_WITH_HRTIMER
454 if (pTimer
->fHighRes
)
455 hrtimer_cancel(&pTimer
->aSubTimers
[iCpu
].u
.Hr
.LnxTimer
);
458 del_timer_sync(&pTimer
->aSubTimers
[iCpu
].u
.Std
.LnxTimer
);
462 * Finally, free the resources.
464 RTMemFreeEx(pTimer
, RT_OFFSETOF(RTTIMER
, aSubTimers
[pTimer
->cCpus
]));
465 if (hSpinlock
!= NIL_RTSPINLOCK
)
466 RTSpinlockDestroy(hSpinlock
);
471 * Workqueue callback (no DECLCALLBACK!) for deferred destruction.
473 * @param pWork Pointer to the DtorWorkqueueItem member of our timer
476 static void rtTimerLnxDestroyDeferred(RTR0LNXWORKQUEUEITEM
*pWork
)
478 PRTTIMER pTimer
= RT_FROM_MEMBER(pWork
, RTTIMER
, DtorWorkqueueItem
);
479 rtTimerLnxDestroyIt(pTimer
);
484 * Called when the timer was destroyed by the callback function.
486 * @param pTimer The timer.
487 * @param pSubTimer The sub-timer which we're handling, the state of this
488 * will be RTTIMERLNXSTATE_CALLBACK_DESTROYING.
490 static void rtTimerLnxCallbackDestroy(PRTTIMER pTimer
, PRTTIMERLNXSUBTIMER pSubTimer
)
493 * If it's an omni timer, the last dude does the destroying.
495 if (pTimer
->cCpus
> 1)
497 uint32_t iCpu
= pTimer
->cCpus
;
498 RTSpinlockAcquire(pTimer
->hSpinlock
);
500 Assert(pSubTimer
->enmState
== RTTIMERLNXSTATE_CB_DESTROYING
);
501 rtTimerLnxSetState(&pSubTimer
->enmState
, RTTIMERLNXSTATE_STOPPED
);
504 if (rtTimerLnxGetState(&pTimer
->aSubTimers
[iCpu
].enmState
) != RTTIMERLNXSTATE_STOPPED
)
506 RTSpinlockRelease(pTimer
->hSpinlock
);
510 RTSpinlockRelease(pTimer
->hSpinlock
);
514 * Destroying a timer from the callback is unsafe since the callout code
515 * might be touching the timer structure upon return (hrtimer does!). So,
516 * we have to defer the actual destruction to the IRPT workqueue.
518 rtR0LnxWorkqueuePush(&pTimer
->DtorWorkqueueItem
, rtTimerLnxDestroyDeferred
);
#ifdef CONFIG_SMP
/**
 * Deal with a sub-timer that has migrated.
 *
 * @param   pTimer      The timer.
 * @param   pSubTimer   The sub-timer.
 */
static void rtTimerLnxCallbackHandleMigration(PRTTIMER pTimer, PRTTIMERLNXSUBTIMER pSubTimer)
{
    RTTIMERLNXSTATE enmState;
    if (pTimer->cCpus > 1)
        RTSpinlockAcquire(pTimer->hSpinlock);

    do
    {
        enmState = rtTimerLnxGetState(&pSubTimer->enmState);
        switch (enmState)
        {
            case RTTIMERLNXSTATE_STOPPING:
            case RTTIMERLNXSTATE_MP_STOPPING:
                enmState = RTTIMERLNXSTATE_STOPPED;
                /* fallthrough */
            case RTTIMERLNXSTATE_STOPPED:
                break;

            default:
                AssertMsgFailed(("%d\n", enmState));
                /* fallthrough */
            case RTTIMERLNXSTATE_STARTING:
            case RTTIMERLNXSTATE_MP_STARTING:
            case RTTIMERLNXSTATE_ACTIVE:
            case RTTIMERLNXSTATE_CALLBACK:
            case RTTIMERLNXSTATE_CB_STOPPING:
            case RTTIMERLNXSTATE_CB_RESTARTING:
                if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, enmState))
                    enmState = RTTIMERLNXSTATE_STOPPED;
                break;

            case RTTIMERLNXSTATE_CB_DESTROYING:
            {
                if (pTimer->cCpus > 1)
                    RTSpinlockRelease(pTimer->hSpinlock);
                rtTimerLnxCallbackDestroy(pTimer, pSubTimer);
                return;
            }
        }
    } while (enmState != RTTIMERLNXSTATE_STOPPED);

    if (pTimer->cCpus > 1)
        RTSpinlockRelease(pTimer->hSpinlock);
}
#endif /* CONFIG_SMP */
576 * The slow path of rtTimerLnxChangeToCallbackState.
578 * @returns true if changed successfully, false if not.
579 * @param pSubTimer The sub-timer.
581 static bool rtTimerLnxChangeToCallbackStateSlow(PRTTIMERLNXSUBTIMER pSubTimer
)
585 RTTIMERLNXSTATE enmState
= rtTimerLnxGetState(&pSubTimer
->enmState
);
588 case RTTIMERLNXSTATE_ACTIVE
:
589 case RTTIMERLNXSTATE_STARTING
:
590 case RTTIMERLNXSTATE_MP_STARTING
:
591 if (rtTimerLnxCmpXchgState(&pSubTimer
->enmState
, RTTIMERLNXSTATE_CALLBACK
, enmState
))
595 case RTTIMERLNXSTATE_CALLBACK
:
596 case RTTIMERLNXSTATE_CB_STOPPING
:
597 case RTTIMERLNXSTATE_CB_RESTARTING
:
598 case RTTIMERLNXSTATE_CB_DESTROYING
:
599 AssertMsgFailed(("%d\n", enmState
));
609 * Tries to change the sub-timer state to 'callback'.
611 * @returns true if changed successfully, false if not.
612 * @param pSubTimer The sub-timer.
614 DECLINLINE(bool) rtTimerLnxChangeToCallbackState(PRTTIMERLNXSUBTIMER pSubTimer
)
616 if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer
->enmState
, RTTIMERLNXSTATE_CALLBACK
, RTTIMERLNXSTATE_ACTIVE
)))
618 return rtTimerLnxChangeToCallbackStateSlow(pSubTimer
);
#ifdef RTTIMER_LINUX_WITH_HRTIMER
/**
 * Timer callback function for high resolution timers.
 *
 * @returns HRTIMER_NORESTART or HRTIMER_RESTART depending on whether it's a
 *          one-shot or interval timer.
 * @param   pHrTimer    Pointer to the sub-timer structure.
 */
static enum hrtimer_restart rtTimerLinuxHrCallback(struct hrtimer *pHrTimer)
{
    PRTTIMERLNXSUBTIMER pSubTimer = RT_FROM_MEMBER(pHrTimer, RTTIMERLNXSUBTIMER, u.Hr.LnxTimer);
    PRTTIMER            pTimer    = pSubTimer->pParent;

    RTTIMERLNX_LOG(("hrcallback %p\n", pTimer));
    if (RT_UNLIKELY(!rtTimerLnxChangeToCallbackState(pSubTimer)))
        return HRTIMER_NORESTART;

#ifdef CONFIG_SMP
    /*
     * Check for unwanted migration.
     */
    if (pTimer->fAllCpus || pTimer->fSpecificCpu)
    {
        RTCPUID idCpu = RTMpCpuId();
        if (RT_UNLIKELY(  pTimer->fAllCpus
                        ? (RTCPUID)(pSubTimer - &pTimer->aSubTimers[0]) != idCpu
                        : pTimer->idCpu != idCpu))
        {
            rtTimerLnxCallbackHandleMigration(pTimer, pSubTimer);
            return HRTIMER_NORESTART;
        }
    }
#endif

    if (pTimer->u64NanoInterval)
    {
        /*
         * Periodic timer, run it and update the native timer afterwards so
         * we can handle RTTimerStop and RTTimerChangeInterval from the
         * callback as well as a racing control thread.
         */
        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
        hrtimer_add_expires_ns(&pSubTimer->u.Hr.LnxTimer, ASMAtomicReadU64(&pTimer->u64NanoInterval));
        if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_CALLBACK)))
            return HRTIMER_RESTART;
    }
    else
    {
        /*
         * One shot timer (no omni), stop it before dispatching it.
         * Allow RTTimerStart as well as RTTimerDestroy to be called from
         * the callback.
         */
        ASMAtomicWriteBool(&pTimer->fSuspended, true);
        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
        if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_CALLBACK)))
            return HRTIMER_NORESTART;
    }

    /*
     * Some state change occurred while we were in the callback routine.
     */
    for (;;)
    {
        RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pSubTimer->enmState);
        switch (enmState)
        {
            case RTTIMERLNXSTATE_CB_DESTROYING:
                rtTimerLnxCallbackDestroy(pTimer, pSubTimer);
                return HRTIMER_NORESTART;

            case RTTIMERLNXSTATE_CB_STOPPING:
                if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_CB_STOPPING))
                    return HRTIMER_NORESTART;
                break;

            case RTTIMERLNXSTATE_CB_RESTARTING:
                if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_CB_RESTARTING))
                {
                    pSubTimer->iTick = 0;
                    hrtimer_set_expires(&pSubTimer->u.Hr.LnxTimer, rtTimerLnxNanoToKt(pSubTimer->uNsRestartAt));
                    return HRTIMER_RESTART;
                }
                break;

            default:
                AssertMsgFailed(("%d\n", enmState));
                return HRTIMER_NORESTART;
        }
        ASMNopPause();
    }
}
#endif /* RTTIMER_LINUX_WITH_HRTIMER */
718 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
720 * Timer callback for kernels 4.15 and later
722 static void rtTimerLinuxStdCallback(struct timer_list
*t
)
724 PRTTIMERLNXSUBTIMER pSubTimer
= from_timer(pSubTimer
, t
, u
.Std
.LnxTimer
);
727 * Timer callback function for standard timers.
729 * @param ulUser Address of the sub-timer structure.
731 static void rtTimerLinuxStdCallback(unsigned long ulUser
)
733 PRTTIMERLNXSUBTIMER pSubTimer
= (PRTTIMERLNXSUBTIMER
)ulUser
;
735 PRTTIMER pTimer
= pSubTimer
->pParent
;
737 RTTIMERLNX_LOG(("stdcallback %p\n", pTimer
));
738 if (RT_UNLIKELY(!rtTimerLnxChangeToCallbackState(pSubTimer
)))
743 * Check for unwanted migration.
745 if (pTimer
->fAllCpus
|| pTimer
->fSpecificCpu
)
747 RTCPUID idCpu
= RTMpCpuId();
748 if (RT_UNLIKELY( pTimer
->fAllCpus
749 ? (RTCPUID
)(pSubTimer
- &pTimer
->aSubTimers
[0]) != idCpu
750 : pTimer
->idCpu
!= idCpu
))
752 rtTimerLnxCallbackHandleMigration(pTimer
, pSubTimer
);
758 if (pTimer
->u64NanoInterval
)
761 * Interval timer, calculate the next timeout.
763 * The first time around, we'll re-adjust the u.Std.u64NextTS to
764 * try prevent some jittering if we were started at a bad time.
766 const uint64_t iTick
= ++pSubTimer
->iTick
;
767 uint64_t u64NanoInterval
;
768 unsigned long cJiffies
;
769 unsigned long flFlags
;
771 spin_lock_irqsave(&pTimer
->ChgIntLock
, flFlags
);
772 u64NanoInterval
= pTimer
->u64NanoInterval
;
773 cJiffies
= pTimer
->cJiffies
;
774 if (RT_UNLIKELY(pSubTimer
->u
.Std
.fFirstAfterChg
))
776 pSubTimer
->u
.Std
.fFirstAfterChg
= false;
777 pSubTimer
->u
.Std
.u64NextTS
= RTTimeSystemNanoTS();
778 pSubTimer
->u
.Std
.ulNextJiffies
= jiffies
;
780 spin_unlock_irqrestore(&pTimer
->ChgIntLock
, flFlags
);
782 pSubTimer
->u
.Std
.u64NextTS
+= u64NanoInterval
;
785 pSubTimer
->u
.Std
.ulNextJiffies
+= cJiffies
;
786 /* Prevent overflows when the jiffies counter wraps around.
787 * Special thanks to Ken Preslan for helping debugging! */
788 while (time_before(pSubTimer
->u
.Std
.ulNextJiffies
, jiffies
))
790 pSubTimer
->u
.Std
.ulNextJiffies
+= cJiffies
;
791 pSubTimer
->u
.Std
.u64NextTS
+= u64NanoInterval
;
796 const uint64_t u64NanoTS
= RTTimeSystemNanoTS();
797 while (pSubTimer
->u
.Std
.u64NextTS
< u64NanoTS
)
798 pSubTimer
->u
.Std
.u64NextTS
+= u64NanoInterval
;
799 pSubTimer
->u
.Std
.ulNextJiffies
= jiffies
+ rtTimerLnxNanoToJiffies(pSubTimer
->u
.Std
.u64NextTS
- u64NanoTS
);
803 * Run the timer and re-arm it unless the state changed .
805 * We must re-arm it afterwards as we're not in a position to undo this .
806 * operation if for instance someone stopped or destroyed us while we .
807 * were in the callback. (Linux takes care of any races here.)
809 pTimer
->pfnTimer(pTimer
, pTimer
->pvUser
, iTick
);
810 if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer
->enmState
, RTTIMERLNXSTATE_ACTIVE
, RTTIMERLNXSTATE_CALLBACK
)))
813 if (pTimer
->fSpecificCpu
|| pTimer
->fAllCpus
)
815 # if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
816 mod_timer(&pSubTimer
->u
.Std
.LnxTimer
, pSubTimer
->u
.Std
.ulNextJiffies
);
818 mod_timer_pinned(&pSubTimer
->u
.Std
.LnxTimer
, pSubTimer
->u
.Std
.ulNextJiffies
);
823 mod_timer(&pSubTimer
->u
.Std
.LnxTimer
, pSubTimer
->u
.Std
.ulNextJiffies
);
830 * One shot timer, stop it before dispatching it.
831 * Allow RTTimerStart as well as RTTimerDestroy to be called from
834 ASMAtomicWriteBool(&pTimer
->fSuspended
, true);
835 pTimer
->pfnTimer(pTimer
, pTimer
->pvUser
, ++pSubTimer
->iTick
);
836 if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer
->enmState
, RTTIMERLNXSTATE_STOPPED
, RTTIMERLNXSTATE_CALLBACK
)))
841 * Some state change occurred while we were in the callback routine.
845 RTTIMERLNXSTATE enmState
= rtTimerLnxGetState(&pSubTimer
->enmState
);
848 case RTTIMERLNXSTATE_CB_DESTROYING
:
849 rtTimerLnxCallbackDestroy(pTimer
, pSubTimer
);
852 case RTTIMERLNXSTATE_CB_STOPPING
:
853 if (rtTimerLnxCmpXchgState(&pSubTimer
->enmState
, RTTIMERLNXSTATE_STOPPED
, RTTIMERLNXSTATE_CB_STOPPING
))
857 case RTTIMERLNXSTATE_CB_RESTARTING
:
858 if (rtTimerLnxCmpXchgState(&pSubTimer
->enmState
, RTTIMERLNXSTATE_ACTIVE
, RTTIMERLNXSTATE_CB_RESTARTING
))
862 unsigned long flFlags
;
864 spin_lock_irqsave(&pTimer
->ChgIntLock
, flFlags
);
865 u64NextTS
= pSubTimer
->uNsRestartAt
;
866 u64NanoTS
= RTTimeSystemNanoTS();
867 pSubTimer
->iTick
= 0;
868 pSubTimer
->u
.Std
.u64NextTS
= u64NextTS
;
869 pSubTimer
->u
.Std
.fFirstAfterChg
= true;
870 pSubTimer
->u
.Std
.ulNextJiffies
= u64NextTS
> u64NanoTS
871 ? jiffies
+ rtTimerLnxNanoToJiffies(u64NextTS
- u64NanoTS
)
873 spin_unlock_irqrestore(&pTimer
->ChgIntLock
, flFlags
);
876 if (pTimer
->fSpecificCpu
|| pTimer
->fAllCpus
)
878 # if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
879 mod_timer(&pSubTimer
->u
.Std
.LnxTimer
, pSubTimer
->u
.Std
.ulNextJiffies
);
881 mod_timer_pinned(&pSubTimer
->u
.Std
.LnxTimer
, pSubTimer
->u
.Std
.ulNextJiffies
);
886 mod_timer(&pSubTimer
->u
.Std
.LnxTimer
, pSubTimer
->u
.Std
.ulNextJiffies
);
892 AssertMsgFailed(("%d\n", enmState
));
903 * Per-cpu callback function (RTMpOnAll/RTMpOnSpecific).
905 * @param idCpu The current CPU.
906 * @param pvUser1 Pointer to the timer.
907 * @param pvUser2 Pointer to the argument structure.
909 static DECLCALLBACK(void) rtTimerLnxStartAllOnCpu(RTCPUID idCpu
, void *pvUser1
, void *pvUser2
)
911 PRTTIMERLINUXSTARTONCPUARGS pArgs
= (PRTTIMERLINUXSTARTONCPUARGS
)pvUser2
;
912 PRTTIMER pTimer
= (PRTTIMER
)pvUser1
;
913 Assert(idCpu
< pTimer
->cCpus
);
914 rtTimerLnxStartSubTimer(&pTimer
->aSubTimers
[idCpu
], pArgs
->u64Now
, pArgs
->u64First
, true /*fPinned*/, pTimer
->fHighRes
);
919 * Worker for RTTimerStart() that takes care of the ugly bits.
921 * @returns RTTimerStart() return value.
922 * @param pTimer The timer.
923 * @param pArgs The argument structure.
925 static int rtTimerLnxOmniStart(PRTTIMER pTimer
, PRTTIMERLINUXSTARTONCPUARGS pArgs
)
933 * Prepare all the sub-timers for the startup and then flag the timer
934 * as a whole as non-suspended, make sure we get them all before
935 * clearing fSuspended as the MP handler will be waiting on this
936 * should something happen while we're looping.
938 RTSpinlockAcquire(pTimer
->hSpinlock
);
940 /* Just make it a omni timer restriction that no stop/start races are allowed. */
941 for (iCpu
= 0; iCpu
< pTimer
->cCpus
; iCpu
++)
942 if (rtTimerLnxGetState(&pTimer
->aSubTimers
[iCpu
].enmState
) != RTTIMERLNXSTATE_STOPPED
)
944 RTSpinlockRelease(pTimer
->hSpinlock
);
945 return VERR_TIMER_BUSY
;
950 RTMpGetOnlineSet(&OnlineSet
);
951 for (iCpu
= 0; iCpu
< pTimer
->cCpus
; iCpu
++)
953 Assert(pTimer
->aSubTimers
[iCpu
].enmState
!= RTTIMERLNXSTATE_MP_STOPPING
);
954 rtTimerLnxSetState(&pTimer
->aSubTimers
[iCpu
].enmState
,
955 RTCpuSetIsMember(&OnlineSet
, iCpu
)
956 ? RTTIMERLNXSTATE_STARTING
957 : RTTIMERLNXSTATE_STOPPED
);
959 } while (!RTCpuSetIsEqual(&OnlineSet
, RTMpGetOnlineSet(&OnlineSet2
)));
961 ASMAtomicWriteBool(&pTimer
->fSuspended
, false);
963 RTSpinlockRelease(pTimer
->hSpinlock
);
966 * Start them (can't find any exported function that allows me to
967 * do this without the cross calls).
969 pArgs
->u64Now
= RTTimeSystemNanoTS();
970 rc2
= RTMpOnAll(rtTimerLnxStartAllOnCpu
, pTimer
, pArgs
);
971 AssertRC(rc2
); /* screw this if it fails. */
974 * Reset the sub-timers who didn't start up (ALL CPUs case).
976 RTSpinlockAcquire(pTimer
->hSpinlock
);
978 for (iCpu
= 0; iCpu
< pTimer
->cCpus
; iCpu
++)
979 if (rtTimerLnxCmpXchgState(&pTimer
->aSubTimers
[iCpu
].enmState
, RTTIMERLNXSTATE_STOPPED
, RTTIMERLNXSTATE_STARTING
))
981 /** @todo very odd case for a rainy day. Cpus that temporarily went offline while
982 * we were between calls needs to nudged as the MP handler will ignore events for
983 * them because of the STARTING state. This is an extremely unlikely case - not that
984 * that means anything in my experience... ;-) */
985 RTTIMERLNX_LOG(("what!? iCpu=%u -> didn't start\n", iCpu
));
988 RTSpinlockRelease(pTimer
->hSpinlock
);
995 * Worker for RTTimerStop() that takes care of the ugly SMP bits.
997 * @returns true if there was any active callbacks, false if not.
998 * @param pTimer The timer (valid).
999 * @param fForDestroy Whether this is for RTTimerDestroy or not.
1001 static bool rtTimerLnxOmniStop(PRTTIMER pTimer
, bool fForDestroy
)
1003 bool fActiveCallbacks
= false;
1005 RTTIMERLNXSTATE enmState
;
1009 * Mark the timer as suspended and flag all timers as stopping, except
1010 * for those being stopped by an MP event.
1012 RTSpinlockAcquire(pTimer
->hSpinlock
);
1014 ASMAtomicWriteBool(&pTimer
->fSuspended
, true);
1015 for (iCpu
= 0; iCpu
< pTimer
->cCpus
; iCpu
++)
1019 enmState
= rtTimerLnxGetState(&pTimer
->aSubTimers
[iCpu
].enmState
);
1020 if ( enmState
== RTTIMERLNXSTATE_STOPPED
1021 || enmState
== RTTIMERLNXSTATE_MP_STOPPING
)
1023 if ( enmState
== RTTIMERLNXSTATE_CALLBACK
1024 || enmState
== RTTIMERLNXSTATE_CB_STOPPING
1025 || enmState
== RTTIMERLNXSTATE_CB_RESTARTING
)
1027 Assert(enmState
!= RTTIMERLNXSTATE_CB_STOPPING
|| fForDestroy
);
1028 if (rtTimerLnxCmpXchgState(&pTimer
->aSubTimers
[iCpu
].enmState
,
1029 !fForDestroy
? RTTIMERLNXSTATE_CB_STOPPING
: RTTIMERLNXSTATE_CB_DESTROYING
,
1032 fActiveCallbacks
= true;
1038 Assert(enmState
== RTTIMERLNXSTATE_ACTIVE
);
1039 if (rtTimerLnxCmpXchgState(&pTimer
->aSubTimers
[iCpu
].enmState
, RTTIMERLNXSTATE_STOPPING
, enmState
))
1046 RTSpinlockRelease(pTimer
->hSpinlock
);
1049 * Do the actual stopping. Fortunately, this doesn't require any IPIs.
1050 * Unfortunately it cannot be done synchronously.
1052 for (iCpu
= 0; iCpu
< pTimer
->cCpus
; iCpu
++)
1053 if (rtTimerLnxGetState(&pTimer
->aSubTimers
[iCpu
].enmState
) == RTTIMERLNXSTATE_STOPPING
)
1054 rtTimerLnxStopSubTimer(&pTimer
->aSubTimers
[iCpu
], pTimer
->fHighRes
);
1056 return fActiveCallbacks
;
/**
 * Per-cpu callback function (RTMpOnSpecific) used by rtTimerLinuxMpEvent()
 * to start a sub-timer on a cpu that just have come online.
 *
 * @param   idCpu       The current CPU.
 * @param   pvUser1     Pointer to the timer.
 * @param   pvUser2     Pointer to the argument structure.
 */
static DECLCALLBACK(void) rtTimerLinuxMpStartOnCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PRTTIMERLINUXSTARTONCPUARGS pArgs = (PRTTIMERLINUXSTARTONCPUARGS)pvUser2;
    PRTTIMER pTimer = (PRTTIMER)pvUser1;
    RTSPINLOCK hSpinlock;
    Assert(idCpu < pTimer->cCpus);

    /*
     * We have to be kind of careful here as we might be racing RTTimerStop
     * (and/or RTTimerDestroy, thus the paranoia.
     */
    /* Snapshot the spinlock handle first; RTTimerDestroy may invalidate the
       timer concurrently, so both the handle and the magic are re-checked. */
    hSpinlock = pTimer->hSpinlock;
    if (    hSpinlock != NIL_RTSPINLOCK
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        RTSpinlockAcquire(hSpinlock);

        /* Re-check suspension and the magic under the lock before touching
           the sub-timer state. */
        if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
            &&  pTimer->u32Magic == RTTIMER_MAGIC)
        {
            /* We're sane and the timer is not suspended yet. */
            PRTTIMERLNXSUBTIMER pSubTimer = &pTimer->aSubTimers[idCpu];
            /* Only start if the sub-timer is really STOPPED; any other state
               means a concurrent start/stop/destroy got here first. */
            if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_MP_STARTING, RTTIMERLNXSTATE_STOPPED))
                rtTimerLnxStartSubTimer(pSubTimer, pArgs->u64Now, pArgs->u64First, true /*fPinned*/, pTimer->fHighRes);
        }

        RTSpinlockRelease(hSpinlock);
    }
}
/**
 * MP event notification callback.
 *
 * Starts/stops the per-cpu sub-timer of an omni timer when a CPU comes
 * online or goes offline.
 *
 * @param   enmEvent    The event.
 * @param   idCpu       The cpu it applies to.
 * @param   pvUser      The timer.
 */
static DECLCALLBACK(void) rtTimerLinuxMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser)
{
    PRTTIMER            pTimer    = (PRTTIMER)pvUser;
    PRTTIMERLNXSUBTIMER pSubTimer = &pTimer->aSubTimers[idCpu];
    RTSPINLOCK          hSpinlock;

    Assert(idCpu < pTimer->cCpus);

    /*
     * Some initial paranoia.
     */
    /* Bail out if the timer is being/has been destroyed (magic cleared) or
       the spinlock was never created / already torn down. */
    if (pTimer->u32Magic != RTTIMER_MAGIC)
        return;
    hSpinlock = pTimer->hSpinlock;
    if (hSpinlock == NIL_RTSPINLOCK)
        return;

    RTSpinlockAcquire(hSpinlock);

    /* Only act while the timer is running and still valid. */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        switch (enmEvent)
        {
            /*
             * Try do it without leaving the spin lock, but if we have to, retake it
             * when we're on the right cpu.
             */
            case RTMPEVENT_ONLINE:
                if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_MP_STARTING, RTTIMERLNXSTATE_STOPPED))
                {
                    RTTIMERLINUXSTARTONCPUARGS Args;
                    Args.u64Now = RTTimeSystemNanoTS();
                    Args.u64First = 0; /* start at the next interval boundary */

                    if (RTMpCpuId() == idCpu)
                        rtTimerLnxStartSubTimer(pSubTimer, Args.u64Now, Args.u64First, true /*fPinned*/, pTimer->fHighRes);
                    else
                    {
                        /* Wrong CPU: back out the MP_STARTING state, drop the
                           lock, and restart via an IPI to the target cpu
                           (rtTimerLinuxMpStartOnCpu re-validates everything). */
                        rtTimerLnxSetState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED); /* we'll recheck it. */
                        RTSpinlockRelease(hSpinlock);

                        RTMpOnSpecific(idCpu, rtTimerLinuxMpStartOnCpu, pTimer, &Args);
                        return; /* we've left the spinlock */
                    }
                }
                break;

            /*
             * The CPU is (going) offline, make sure the sub-timer is stopped.
             *
             * Linux will migrate it to a different CPU, but we don't want this. The
             * timer function is checking for this.
             */
            case RTMPEVENT_OFFLINE:
            {
                RTTIMERLNXSTATE enmState;
                /* Spin until the sub-timer reaches a stable state we can act on. */
                while (   (enmState = rtTimerLnxGetState(&pSubTimer->enmState)) == RTTIMERLNXSTATE_ACTIVE
                       || enmState == RTTIMERLNXSTATE_CALLBACK
                       || enmState == RTTIMERLNXSTATE_CB_RESTARTING)
                {
                    if (enmState == RTTIMERLNXSTATE_ACTIVE)
                    {
                        if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_MP_STOPPING, RTTIMERLNXSTATE_ACTIVE))
                        {
                            /* rtTimerLnxStopSubTimer may sleep, so the lock
                               must be dropped first. */
                            RTSpinlockRelease(hSpinlock);

                            rtTimerLnxStopSubTimer(pSubTimer, pTimer->fHighRes);
                            return; /* we've left the spinlock */
                        }
                    }
                    /* In a callback: flag it to stop itself when it returns. */
                    else if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_CB_STOPPING, enmState))
                        break;

                    /* State not stable, try again. */
                }
                break;
            }
        }
    }

    RTSpinlockRelease(hSpinlock);
}
1192 #endif /* CONFIG_SMP */
1196 * Callback function use by RTTimerStart via RTMpOnSpecific to start a timer
1197 * running on a specific CPU.
1199 * @param idCpu The current CPU.
1200 * @param pvUser1 Pointer to the timer.
1201 * @param pvUser2 Pointer to the argument structure.
1203 static DECLCALLBACK(void) rtTimerLnxStartOnSpecificCpu(RTCPUID idCpu
, void *pvUser1
, void *pvUser2
)
1205 PRTTIMERLINUXSTARTONCPUARGS pArgs
= (PRTTIMERLINUXSTARTONCPUARGS
)pvUser2
;
1206 PRTTIMER pTimer
= (PRTTIMER
)pvUser1
;
1208 rtTimerLnxStartSubTimer(&pTimer
->aSubTimers
[0], pArgs
->u64Now
, pArgs
->u64First
, true /*fPinned*/, pTimer
->fHighRes
);
1212 RTDECL(int) RTTimerStart(PRTTIMER pTimer
, uint64_t u64First
)
1214 RTTIMERLINUXSTARTONCPUARGS Args
;
1216 IPRT_LINUX_SAVE_EFL_AC();
1221 AssertPtrReturn(pTimer
, VERR_INVALID_HANDLE
);
1222 AssertReturn(pTimer
->u32Magic
== RTTIMER_MAGIC
, VERR_INVALID_HANDLE
);
1224 if (!ASMAtomicUoReadBool(&pTimer
->fSuspended
))
1225 return VERR_TIMER_ACTIVE
;
1226 RTTIMERLNX_LOG(("start %p cCpus=%d\n", pTimer
, pTimer
->cCpus
));
1228 Args
.u64First
= u64First
;
1233 if (pTimer
->fAllCpus
)
1235 rc2
= rtTimerLnxOmniStart(pTimer
, &Args
);
1236 IPRT_LINUX_RESTORE_EFL_AC();
1242 * Simple timer - Pretty straight forward if it wasn't for restarting.
1244 Args
.u64Now
= RTTimeSystemNanoTS();
1245 ASMAtomicWriteU64(&pTimer
->aSubTimers
[0].uNsRestartAt
, Args
.u64Now
+ u64First
);
1248 RTTIMERLNXSTATE enmState
= rtTimerLnxGetState(&pTimer
->aSubTimers
[0].enmState
);
1251 case RTTIMERLNXSTATE_STOPPED
:
1252 if (rtTimerLnxCmpXchgState(&pTimer
->aSubTimers
[0].enmState
, RTTIMERLNXSTATE_STARTING
, RTTIMERLNXSTATE_STOPPED
))
1254 ASMAtomicWriteBool(&pTimer
->fSuspended
, false);
1255 if (!pTimer
->fSpecificCpu
)
1256 rtTimerLnxStartSubTimer(&pTimer
->aSubTimers
[0], Args
.u64Now
, Args
.u64First
,
1257 false /*fPinned*/, pTimer
->fHighRes
);
1260 rc2
= RTMpOnSpecific(pTimer
->idCpu
, rtTimerLnxStartOnSpecificCpu
, pTimer
, &Args
);
1261 if (RT_FAILURE(rc2
))
1263 /* Suspend it, the cpu id is probably invalid or offline. */
1264 ASMAtomicWriteBool(&pTimer
->fSuspended
, true);
1265 rtTimerLnxSetState(&pTimer
->aSubTimers
[0].enmState
, RTTIMERLNXSTATE_STOPPED
);
1269 IPRT_LINUX_RESTORE_EFL_AC();
1270 return VINF_SUCCESS
;
1274 case RTTIMERLNXSTATE_CALLBACK
:
1275 case RTTIMERLNXSTATE_CB_STOPPING
:
1276 if (rtTimerLnxCmpXchgState(&pTimer
->aSubTimers
[0].enmState
, RTTIMERLNXSTATE_CB_RESTARTING
, enmState
))
1278 ASMAtomicWriteBool(&pTimer
->fSuspended
, false);
1279 IPRT_LINUX_RESTORE_EFL_AC();
1280 return VINF_SUCCESS
;
1285 AssertMsgFailed(("%d\n", enmState
));
1286 IPRT_LINUX_RESTORE_EFL_AC();
1287 return VERR_INTERNAL_ERROR_4
;
1292 RT_EXPORT_SYMBOL(RTTimerStart
);
1296 * Common worker for RTTimerStop and RTTimerDestroy.
1298 * @returns true if there was any active callbacks, false if not.
1299 * @param pTimer The timer to stop.
1300 * @param fForDestroy Whether it's RTTimerDestroy calling or not.
1302 static bool rtTimerLnxStop(PRTTIMER pTimer
, bool fForDestroy
)
1304 RTTIMERLNX_LOG(("lnxstop %p %d\n", pTimer
, fForDestroy
));
1309 if (pTimer
->fAllCpus
)
1310 return rtTimerLnxOmniStop(pTimer
, fForDestroy
);
1316 ASMAtomicWriteBool(&pTimer
->fSuspended
, true);
1319 RTTIMERLNXSTATE enmState
= rtTimerLnxGetState(&pTimer
->aSubTimers
[0].enmState
);
1322 case RTTIMERLNXSTATE_ACTIVE
:
1323 if (rtTimerLnxCmpXchgState(&pTimer
->aSubTimers
[0].enmState
, RTTIMERLNXSTATE_STOPPING
, RTTIMERLNXSTATE_ACTIVE
))
1325 rtTimerLnxStopSubTimer(&pTimer
->aSubTimers
[0], pTimer
->fHighRes
);
1330 case RTTIMERLNXSTATE_CALLBACK
:
1331 case RTTIMERLNXSTATE_CB_RESTARTING
:
1332 case RTTIMERLNXSTATE_CB_STOPPING
:
1333 Assert(enmState
!= RTTIMERLNXSTATE_CB_STOPPING
|| fForDestroy
);
1334 if (rtTimerLnxCmpXchgState(&pTimer
->aSubTimers
[0].enmState
,
1335 !fForDestroy
? RTTIMERLNXSTATE_CB_STOPPING
: RTTIMERLNXSTATE_CB_DESTROYING
,
1340 case RTTIMERLNXSTATE_STOPPED
:
1341 return VINF_SUCCESS
;
1343 case RTTIMERLNXSTATE_CB_DESTROYING
:
1344 AssertMsgFailed(("enmState=%d pTimer=%p\n", enmState
, pTimer
));
1348 case RTTIMERLNXSTATE_STARTING
:
1349 case RTTIMERLNXSTATE_MP_STARTING
:
1350 case RTTIMERLNXSTATE_STOPPING
:
1351 case RTTIMERLNXSTATE_MP_STOPPING
:
1352 AssertMsgFailed(("enmState=%d pTimer=%p\n", enmState
, pTimer
));
1356 /* State not stable, try again. */
1362 RTDECL(int) RTTimerStop(PRTTIMER pTimer
)
1367 IPRT_LINUX_SAVE_EFL_AC();
1368 AssertPtrReturn(pTimer
, VERR_INVALID_HANDLE
);
1369 AssertReturn(pTimer
->u32Magic
== RTTIMER_MAGIC
, VERR_INVALID_HANDLE
);
1370 RTTIMERLNX_LOG(("stop %p\n", pTimer
));
1372 if (ASMAtomicUoReadBool(&pTimer
->fSuspended
))
1373 return VERR_TIMER_SUSPENDED
;
1375 rtTimerLnxStop(pTimer
, false /*fForDestroy*/);
1377 IPRT_LINUX_RESTORE_EFL_AC();
1378 return VINF_SUCCESS
;
1380 RT_EXPORT_SYMBOL(RTTimerStop
);
1383 RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer
, uint64_t u64NanoInterval
)
1385 unsigned long cJiffies
;
1386 unsigned long flFlags
;
1387 IPRT_LINUX_SAVE_EFL_AC();
1392 AssertPtrReturn(pTimer
, VERR_INVALID_HANDLE
);
1393 AssertReturn(pTimer
->u32Magic
== RTTIMER_MAGIC
, VERR_INVALID_HANDLE
);
1394 AssertReturn(u64NanoInterval
, VERR_INVALID_PARAMETER
);
1395 AssertReturn(u64NanoInterval
< UINT64_MAX
/ 8, VERR_INVALID_PARAMETER
);
1396 AssertReturn(pTimer
->u64NanoInterval
, VERR_INVALID_STATE
);
1397 RTTIMERLNX_LOG(("change %p %llu\n", pTimer
, u64NanoInterval
));
1399 #ifdef RTTIMER_LINUX_WITH_HRTIMER
1401 * For the high resolution timers it is easy since we don't care so much
1402 * about when it is applied to the sub-timers.
1404 if (pTimer
->fHighRes
)
1406 ASMAtomicWriteU64(&pTimer
->u64NanoInterval
, u64NanoInterval
);
1407 IPRT_LINUX_RESTORE_EFL_AC();
1408 return VINF_SUCCESS
;
1413 * Standard timers have a bit more complicated way of calculating
1414 * their interval and such. So, forget omni timers for now.
1416 if (pTimer
->cCpus
> 1)
1417 return VERR_NOT_SUPPORTED
;
1419 cJiffies
= u64NanoInterval
/ RTTimerGetSystemGranularity();
1420 if (cJiffies
* RTTimerGetSystemGranularity() != u64NanoInterval
)
1423 spin_lock_irqsave(&pTimer
->ChgIntLock
, flFlags
);
1424 pTimer
->aSubTimers
[0].u
.Std
.fFirstAfterChg
= true;
1425 pTimer
->cJiffies
= cJiffies
;
1426 ASMAtomicWriteU64(&pTimer
->u64NanoInterval
, u64NanoInterval
);
1427 spin_unlock_irqrestore(&pTimer
->ChgIntLock
, flFlags
);
1428 IPRT_LINUX_RESTORE_EFL_AC();
1429 return VINF_SUCCESS
;
1431 RT_EXPORT_SYMBOL(RTTimerChangeInterval
);
RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
{
    bool fCanDestroy;
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * Validate. It's ok to pass NULL pointer.
     */
    if (pTimer == /*NIL_RTTIMER*/ NULL)
        return VINF_SUCCESS;
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
    RTTIMERLNX_LOG(("destroy %p\n", pTimer));
/** @todo We should invalidate the magic here! */

    /*
     * Stop the timer if it's still active, then destroy it if we can.
     */
    if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
        fCanDestroy = rtTimerLnxStop(pTimer, true /*fForDestroy*/);
    else
    {
        /* Already suspended: sweep the sub-timers for in-flight callbacks and
           flag them for deferred destruction if found. */
        uint32_t iCpu = pTimer->cCpus;
        if (pTimer->cCpus > 1)
            RTSpinlockAcquire(pTimer->hSpinlock);

        fCanDestroy = true;
        while (iCpu-- > 0)
        {
            RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState);
            switch (enmState)
            {
                /* Callback still running: mark it CB_DESTROYING so it frees
                   the timer itself when it returns. */
                case RTTIMERLNXSTATE_CALLBACK:
                case RTTIMERLNXSTATE_CB_RESTARTING:
                case RTTIMERLNXSTATE_CB_STOPPING:
                    if (!rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState, RTTIMERLNXSTATE_CB_DESTROYING, enmState))
                        fCanDestroy = false;
                    break;
                case RTTIMERLNXSTATE_CB_DESTROYING:
                    /* Double destroy? Should never be seen here. */
                    AssertMsgFailed(("%d\n", enmState));
                    fCanDestroy = false;
                    break;
                default:
                    break;
            }
        }

        if (pTimer->cCpus > 1)
            RTSpinlockRelease(pTimer->hSpinlock);
    }

    if (fCanDestroy)
    {
        /* For paranoid reasons, defer actually destroying the semaphore when
           in atomic or interrupt context. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 32)
        if (in_atomic() || in_interrupt())
#else
        if (in_interrupt())
#endif
            rtR0LnxWorkqueuePush(&pTimer->DtorWorkqueueItem, rtTimerLnxDestroyDeferred);
        else
            rtTimerLnxDestroyIt(pTimer);
    }

    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTTimerDestroy);
RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
{
    PRTTIMER pTimer;
    RTCPUID  iCpu;
    unsigned cCpus;
    int      rc;
    IPRT_LINUX_SAVE_EFL_AC();

    rtR0LnxWorkqueueFlush();                /* for 2.4 */
    *ppTimer = NULL;

    /*
     * Validate flags.
     */
    if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
    {
        IPRT_LINUX_RESTORE_EFL_AC();
        return VERR_INVALID_PARAMETER;
    }
    /* A specific-cpu (non-omni) timer needs a cpu that can actually exist. */
    if (    (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
        &&  (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
        &&  !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
    {
        IPRT_LINUX_RESTORE_EFL_AC();
        return VERR_CPU_NOT_FOUND;
    }

    /*
     * Allocate the timer handler.
     */
    cCpus = 1;
#ifdef CONFIG_SMP
    if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
    {
        cCpus = RTMpGetMaxCpuId() + 1;
        Assert(cCpus <= RTCPUSET_MAX_CPUS); /* On linux we have a 1:1 relationship between cpuid and set index. */
        AssertReturnStmt(u64NanoInterval, IPRT_LINUX_RESTORE_EFL_AC(), VERR_NOT_IMPLEMENTED); /* We don't implement single shot on all cpus, sorry. */
    }
#endif

    /* One RTTIMER with a flexible-array-style tail of cCpus sub-timers. */
    rc = RTMemAllocEx(RT_OFFSETOF(RTTIMER, aSubTimers[cCpus]), 0,
                      RTMEMALLOCEX_FLAGS_ZEROED | RTMEMALLOCEX_FLAGS_ANY_CTX_FREE, (void **)&pTimer);
    if (RT_FAILURE(rc))
    {
        IPRT_LINUX_RESTORE_EFL_AC();
        return rc;
    }

    /*
     * Initialize it.
     */
    pTimer->u32Magic     = RTTIMER_MAGIC;
    pTimer->hSpinlock    = NIL_RTSPINLOCK;
    pTimer->fSuspended   = true;
    pTimer->fHighRes     = !!(fFlags & RTTIMER_FLAGS_HIGH_RES);
#ifdef CONFIG_SMP
    pTimer->fSpecificCpu = (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL;
    pTimer->fAllCpus     = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
    pTimer->idCpu        = pTimer->fSpecificCpu
                         ? RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)
                         : NIL_RTCPUID;
#else
    pTimer->fSpecificCpu = !!(fFlags & RTTIMER_FLAGS_CPU_SPECIFIC);
    pTimer->idCpu        = RTMpCpuId();
#endif
    pTimer->cCpus        = cCpus;
    pTimer->pfnTimer     = pfnTimer;
    pTimer->pvUser       = pvUser;
    pTimer->u64NanoInterval = u64NanoInterval;
    /* cJiffies == 0 means the interval isn't a whole number of jiffies and the
       std-timer callback must do nanosecond bookkeeping instead. */
    pTimer->cJiffies     = u64NanoInterval / RTTimerGetSystemGranularity();
    if (pTimer->cJiffies * RTTimerGetSystemGranularity() != u64NanoInterval)
        pTimer->cJiffies = 0;
    spin_lock_init(&pTimer->ChgIntLock);

    for (iCpu = 0; iCpu < cCpus; iCpu++)
    {
#ifdef RTTIMER_LINUX_WITH_HRTIMER
        if (pTimer->fHighRes)
        {
            hrtimer_init(&pTimer->aSubTimers[iCpu].u.Hr.LnxTimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
            pTimer->aSubTimers[iCpu].u.Hr.LnxTimer.function = rtTimerLinuxHrCallback;
        }
        else
#endif
        {
            /* Standard kernel timer; the init API changed over the 4.8/4.15
               kernel releases (pinned init, then timer_setup + TIMER_PINNED). */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
            timer_setup(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer, rtTimerLinuxStdCallback, TIMER_PINNED);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
            init_timer_pinned(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer);
#else
            init_timer(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
            /* Pre-4.15 API passes the sub-timer via the .data cookie. */
            pTimer->aSubTimers[iCpu].u.Std.LnxTimer.data     = (unsigned long)&pTimer->aSubTimers[iCpu];
            pTimer->aSubTimers[iCpu].u.Std.LnxTimer.function = rtTimerLinuxStdCallback;
#endif
            pTimer->aSubTimers[iCpu].u.Std.LnxTimer.expires  = jiffies;
            pTimer->aSubTimers[iCpu].u.Std.u64NextTS         = 0;
        }
        pTimer->aSubTimers[iCpu].iTick    = 0;
        pTimer->aSubTimers[iCpu].pParent  = pTimer;
        pTimer->aSubTimers[iCpu].enmState = RTTIMERLNXSTATE_STOPPED;
    }

#ifdef CONFIG_SMP
    /*
     * If this is running on ALL cpus, we'll have to register a callback
     * for MP events (so timers can be started/stopped on cpus going
     * online/offline). We also create the spinlock for synchronizing
     * stop/start/mp-event.
     */
    if (cCpus > 1)
    {
        /* NOTE(review): this inner 'rc' shadows the function-scope 'rc'. */
        int rc = RTSpinlockCreate(&pTimer->hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "RTTimerLnx");
        if (RT_SUCCESS(rc))
            rc = RTMpNotificationRegister(rtTimerLinuxMpEvent, pTimer);
        else
            pTimer->hSpinlock = NIL_RTSPINLOCK;
        if (RT_FAILURE(rc))
        {
            RTTimerDestroy(pTimer);
            IPRT_LINUX_RESTORE_EFL_AC();
            return rc;
        }
    }
#endif /* CONFIG_SMP */

    RTTIMERLNX_LOG(("create %p hires=%d fFlags=%#x cCpus=%u\n", pTimer, pTimer->fHighRes, fFlags, cCpus));
    *ppTimer = pTimer;
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTTimerCreateEx);
RTDECL(uint32_t) RTTimerGetSystemGranularity(void)
{
#if 0 /** @todo Not sure if this is what we want or not... Add new API for
       *        querying the resolution of the high res timers? */
    struct timespec Ts;
    int rc;
    IPRT_LINUX_SAVE_EFL_AC();
    rc = hrtimer_get_res(CLOCK_MONOTONIC, &Ts);
    IPRT_LINUX_RESTORE_EFL_AC();
    if (!rc)
        return Ts.tv_sec * RT_NS_1SEC + Ts.tv_nsec;
#endif
    /* Resolution of the standard kernel timer wheel: one jiffy, in nanoseconds. */
    return RT_NS_1SEC / HZ; /* ns */
}
RT_EXPORT_SYMBOL(RTTimerGetSystemGranularity);
1666 RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request
, uint32_t *pu32Granted
)
1668 RT_NOREF_PV(u32Request
); RT_NOREF_PV(*pu32Granted
);
1669 return VERR_NOT_SUPPORTED
;
1671 RT_EXPORT_SYMBOL(RTTimerRequestSystemGranularity
);
/**
 * Stub: granularity requests are never granted on Linux, so there is
 * nothing to release.
 *
 * @returns VERR_NOT_SUPPORTED.
 * @param   u32Granted      The granularity to release (ignored).
 */
RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
{
    RT_NOREF_PV(u32Granted);
    return VERR_NOT_SUPPORTED;
}
RT_EXPORT_SYMBOL(RTTimerReleaseSystemGranularity);
/**
 * Reports whether high resolution (hrtimer-backed) timers are available,
 * i.e. whether this build was compiled with RTTIMER_LINUX_WITH_HRTIMER.
 */
RTDECL(bool) RTTimerCanDoHighResolution(void)
{
#ifdef RTTIMER_LINUX_WITH_HRTIMER
    return true;
#else
    return false;
#endif
}
RT_EXPORT_SYMBOL(RTTimerCanDoHighResolution);