/* $Id: mp-r0drv-linux.c $ */
/** @file
 * IPRT - Multiprocessor, Ring-0 Driver, Linux.
 */

/*
 * Copyright (C) 2008-2016 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */
/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
31 #include "the-linux-kernel.h"
32 #include "internal/iprt.h"
35 #include <iprt/cpuset.h>
38 #include <iprt/thread.h>
39 #include "r0drv/mp-r0drv.h"
42 RTDECL(RTCPUID
) RTMpCpuId(void)
44 return smp_processor_id();
46 RT_EXPORT_SYMBOL(RTMpCpuId
);
49 RTDECL(int) RTMpCurSetIndex(void)
51 return smp_processor_id();
53 RT_EXPORT_SYMBOL(RTMpCurSetIndex
);
56 RTDECL(int) RTMpCurSetIndexAndId(PRTCPUID pidCpu
)
58 return *pidCpu
= smp_processor_id();
60 RT_EXPORT_SYMBOL(RTMpCurSetIndexAndId
);
63 RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu
)
65 return idCpu
< RTCPUSET_MAX_CPUS
&& idCpu
< NR_CPUS
? (int)idCpu
: -1;
67 RT_EXPORT_SYMBOL(RTMpCpuIdToSetIndex
);
70 RTDECL(RTCPUID
) RTMpCpuIdFromSetIndex(int iCpu
)
72 return iCpu
< NR_CPUS
? (RTCPUID
)iCpu
: NIL_RTCPUID
;
74 RT_EXPORT_SYMBOL(RTMpCpuIdFromSetIndex
);
77 RTDECL(RTCPUID
) RTMpGetMaxCpuId(void)
79 return NR_CPUS
- 1; //???
81 RT_EXPORT_SYMBOL(RTMpGetMaxCpuId
);
84 RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu
)
86 #if defined(CONFIG_SMP)
87 if (RT_UNLIKELY(idCpu
>= NR_CPUS
))
90 # if defined(cpu_possible)
91 return cpu_possible(idCpu
);
93 return idCpu
< (RTCPUID
)smp_num_cpus
;
96 return idCpu
== RTMpCpuId();
99 RT_EXPORT_SYMBOL(RTMpIsCpuPossible
);
102 RTDECL(PRTCPUSET
) RTMpGetSet(PRTCPUSET pSet
)
107 idCpu
= RTMpGetMaxCpuId();
110 if (RTMpIsCpuPossible(idCpu
))
111 RTCpuSetAdd(pSet
, idCpu
);
112 } while (idCpu
-- > 0);
115 RT_EXPORT_SYMBOL(RTMpGetSet
);
118 RTDECL(RTCPUID
) RTMpGetCount(void)
121 # if defined(CONFIG_HOTPLUG_CPU) /* introduced & uses cpu_present */
122 return num_present_cpus();
123 # elif defined(num_possible_cpus)
124 return num_possible_cpus();
125 # elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
130 return RTCpuSetCount(&Set
);
136 RT_EXPORT_SYMBOL(RTMpGetCount
);
139 RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu
)
142 if (RT_UNLIKELY(idCpu
>= NR_CPUS
))
145 return cpu_online(idCpu
);
147 return cpu_online_map
& RT_BIT_64(idCpu
);
150 return idCpu
== RTMpCpuId();
153 RT_EXPORT_SYMBOL(RTMpIsCpuOnline
);
156 RTDECL(PRTCPUSET
) RTMpGetOnlineSet(PRTCPUSET pSet
)
162 idCpu
= RTMpGetMaxCpuId();
165 if (RTMpIsCpuOnline(idCpu
))
166 RTCpuSetAdd(pSet
, idCpu
);
167 } while (idCpu
-- > 0);
170 RTCpuSetAdd(pSet
, RTMpCpuId());
174 RT_EXPORT_SYMBOL(RTMpGetOnlineSet
);
177 RTDECL(RTCPUID
) RTMpGetOnlineCount(void)
180 # if defined(num_online_cpus)
181 return num_online_cpus();
184 RTMpGetOnlineSet(&Set
);
185 return RTCpuSetCount(&Set
);
191 RT_EXPORT_SYMBOL(RTMpGetOnlineCount
);
194 RTDECL(bool) RTMpIsCpuWorkPending(void)
196 /** @todo (not used on non-Windows platforms yet). */
199 RT_EXPORT_SYMBOL(RTMpIsCpuWorkPending
);
203 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER.
205 * @param pvInfo Pointer to the RTMPARGS package.
207 static void rtmpLinuxWrapper(void *pvInfo
)
209 PRTMPARGS pArgs
= (PRTMPARGS
)pvInfo
;
210 ASMAtomicIncU32(&pArgs
->cHits
);
211 pArgs
->pfnWorker(RTMpCpuId(), pArgs
->pvUser1
, pArgs
->pvUser2
);
216 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER, does hit
217 * increment after calling the worker.
219 * @param pvInfo Pointer to the RTMPARGS package.
221 static void rtmpLinuxWrapperPostInc(void *pvInfo
)
223 PRTMPARGS pArgs
= (PRTMPARGS
)pvInfo
;
224 pArgs
->pfnWorker(RTMpCpuId(), pArgs
->pvUser1
, pArgs
->pvUser2
);
225 ASMAtomicIncU32(&pArgs
->cHits
);
230 * Wrapper between the native linux all-cpu callbacks and PFNRTWORKER.
232 * @param pvInfo Pointer to the RTMPARGS package.
234 static void rtmpLinuxAllWrapper(void *pvInfo
)
236 PRTMPARGS pArgs
= (PRTMPARGS
)pvInfo
;
237 PRTCPUSET pWorkerSet
= pArgs
->pWorkerSet
;
238 RTCPUID idCpu
= RTMpCpuId();
239 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD
));
241 if (RTCpuSetIsMember(pWorkerSet
, idCpu
))
243 pArgs
->pfnWorker(idCpu
, pArgs
->pvUser1
, pArgs
->pvUser2
);
244 RTCpuSetDel(pWorkerSet
, idCpu
);
249 RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker
, void *pvUser1
, void *pvUser2
)
251 IPRT_LINUX_SAVE_EFL_AC();
258 RTTHREADPREEMPTSTATE PreemptState
= RTTHREADPREEMPTSTATE_INITIALIZER
;
260 Args
.pfnWorker
= pfnWorker
;
261 Args
.pvUser1
= pvUser1
;
262 Args
.pvUser2
= pvUser2
;
263 Args
.idCpu
= NIL_RTCPUID
;
266 RTThreadPreemptDisable(&PreemptState
);
267 RTMpGetOnlineSet(&OnlineSet
);
268 Args
.pWorkerSet
= &OnlineSet
;
271 if (RTCpuSetCount(&OnlineSet
) > 1)
273 /* Fire the function on all other CPUs without waiting for completion. */
274 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
275 rc
= smp_call_function(rtmpLinuxAllWrapper
, &Args
, 0 /* wait */);
277 rc
= smp_call_function(rtmpLinuxAllWrapper
, &Args
, 0 /* retry */, 0 /* wait */);
279 Assert(!rc
); NOREF(rc
);
282 /* Fire the function on this CPU. */
283 Args
.pfnWorker(idCpu
, Args
.pvUser1
, Args
.pvUser2
);
284 RTCpuSetDel(Args
.pWorkerSet
, idCpu
);
286 /* Wait for all of them finish. */
288 while (!RTCpuSetIsEmpty(Args
.pWorkerSet
))
290 /* Periodically check if any CPU in the wait set has gone offline, if so update the wait set. */
293 RTCPUSET OnlineSetNow
;
294 RTMpGetOnlineSet(&OnlineSetNow
);
295 RTCpuSetAnd(Args
.pWorkerSet
, &OnlineSetNow
);
303 RTThreadPreemptRestore(&PreemptState
);
304 IPRT_LINUX_RESTORE_EFL_AC();
307 RT_EXPORT_SYMBOL(RTMpOnAll
);
310 RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker
, void *pvUser1
, void *pvUser2
)
312 IPRT_LINUX_SAVE_EFL_AC();
316 RTTHREADPREEMPTSTATE PreemptState
= RTTHREADPREEMPTSTATE_INITIALIZER
;
317 Args
.pfnWorker
= pfnWorker
;
318 Args
.pvUser1
= pvUser1
;
319 Args
.pvUser2
= pvUser2
;
320 Args
.idCpu
= NIL_RTCPUID
;
323 RTThreadPreemptDisable(&PreemptState
);
324 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
325 rc
= smp_call_function(rtmpLinuxWrapper
, &Args
, 1 /* wait */);
326 #else /* older kernels */
327 rc
= smp_call_function(rtmpLinuxWrapper
, &Args
, 0 /* retry */, 1 /* wait */);
328 #endif /* older kernels */
329 RTThreadPreemptRestore(&PreemptState
);
331 Assert(rc
== 0); NOREF(rc
);
332 IPRT_LINUX_RESTORE_EFL_AC();
335 RT_EXPORT_SYMBOL(RTMpOnOthers
);
338 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
340 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER
341 * employed by RTMpOnPair on older kernels that lacks smp_call_function_many.
343 * @param pvInfo Pointer to the RTMPARGS package.
345 static void rtMpLinuxOnPairWrapper(void *pvInfo
)
347 PRTMPARGS pArgs
= (PRTMPARGS
)pvInfo
;
348 RTCPUID idCpu
= RTMpCpuId();
350 if ( idCpu
== pArgs
->idCpu
351 || idCpu
== pArgs
->idCpu2
)
353 pArgs
->pfnWorker(idCpu
, pArgs
->pvUser1
, pArgs
->pvUser2
);
354 ASMAtomicIncU32(&pArgs
->cHits
);
360 RTDECL(int) RTMpOnPair(RTCPUID idCpu1
, RTCPUID idCpu2
, uint32_t fFlags
, PFNRTMPWORKER pfnWorker
, void *pvUser1
, void *pvUser2
)
362 IPRT_LINUX_SAVE_EFL_AC();
364 RTTHREADPREEMPTSTATE PreemptState
= RTTHREADPREEMPTSTATE_INITIALIZER
;
366 AssertReturn(idCpu1
!= idCpu2
, VERR_INVALID_PARAMETER
);
367 AssertReturn(!(fFlags
& RTMPON_F_VALID_MASK
), VERR_INVALID_FLAGS
);
370 * Check that both CPUs are online before doing the broadcast call.
372 RTThreadPreemptDisable(&PreemptState
);
373 if ( RTMpIsCpuOnline(idCpu1
)
374 && RTMpIsCpuOnline(idCpu2
))
377 * Use the smp_call_function variant taking a cpu mask where available,
378 * falling back on broadcast with filter. Slight snag if one of the
379 * CPUs is the one we're running on, we must do the call and the post
380 * call wait ourselves.
382 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
383 cpumask_t DstCpuMask
;
385 RTCPUID idCpuSelf
= RTMpCpuId();
386 bool const fCallSelf
= idCpuSelf
== idCpu1
|| idCpuSelf
== idCpu2
;
388 Args
.pfnWorker
= pfnWorker
;
389 Args
.pvUser1
= pvUser1
;
390 Args
.pvUser2
= pvUser2
;
392 Args
.idCpu2
= idCpu2
;
395 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
396 cpumask_clear(&DstCpuMask
);
397 cpumask_set_cpu(idCpu1
, &DstCpuMask
);
398 cpumask_set_cpu(idCpu2
, &DstCpuMask
);
399 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
400 cpus_clear(DstCpuMask
);
401 cpu_set(idCpu1
, DstCpuMask
);
402 cpu_set(idCpu2
, DstCpuMask
);
405 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
406 smp_call_function_many(&DstCpuMask
, rtmpLinuxWrapperPostInc
, &Args
, !fCallSelf
/* wait */);
408 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
409 rc
= smp_call_function_many(&DstCpuMask
, rtmpLinuxWrapperPostInc
, &Args
, !fCallSelf
/* wait */);
410 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
411 rc
= smp_call_function_mask(DstCpuMask
, rtmpLinuxWrapperPostInc
, &Args
, !fCallSelf
/* wait */);
412 #else /* older kernels */
413 rc
= smp_call_function(rtMpLinuxOnPairWrapper
, &Args
, 0 /* retry */, !fCallSelf
/* wait */);
414 #endif /* older kernels */
417 /* Call ourselves if necessary and wait for the other party to be done. */
421 rtmpLinuxWrapper(&Args
);
422 while (ASMAtomicReadU32(&Args
.cHits
) < 2)
424 if ((cLoops
& 0x1ff) == 0 && !RTMpIsCpuOnline(idCpuSelf
== idCpu1
? idCpu2
: idCpu1
))
431 Assert(Args
.cHits
<= 2);
434 else if (Args
.cHits
== 1)
435 rc
= VERR_NOT_ALL_CPUS_SHOWED
;
436 else if (Args
.cHits
== 0)
437 rc
= VERR_CPU_OFFLINE
;
442 * A CPU must be present to be considered just offline.
444 else if ( RTMpIsCpuPresent(idCpu1
)
445 && RTMpIsCpuPresent(idCpu2
))
446 rc
= VERR_CPU_OFFLINE
;
448 rc
= VERR_CPU_NOT_FOUND
;
449 RTThreadPreemptRestore(&PreemptState
);;
450 IPRT_LINUX_RESTORE_EFL_AC();
453 RT_EXPORT_SYMBOL(RTMpOnPair
);
456 RTDECL(bool) RTMpOnPairIsConcurrentExecSupported(void)
460 RT_EXPORT_SYMBOL(RTMpOnPairIsConcurrentExecSupported
);
463 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
465 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER
466 * employed by RTMpOnSpecific on older kernels that lacks smp_call_function_single.
468 * @param pvInfo Pointer to the RTMPARGS package.
470 static void rtmpOnSpecificLinuxWrapper(void *pvInfo
)
472 PRTMPARGS pArgs
= (PRTMPARGS
)pvInfo
;
473 RTCPUID idCpu
= RTMpCpuId();
475 if (idCpu
== pArgs
->idCpu
)
477 pArgs
->pfnWorker(idCpu
, pArgs
->pvUser1
, pArgs
->pvUser2
);
478 ASMAtomicIncU32(&pArgs
->cHits
);
484 RTDECL(int) RTMpOnSpecific(RTCPUID idCpu
, PFNRTMPWORKER pfnWorker
, void *pvUser1
, void *pvUser2
)
486 IPRT_LINUX_SAVE_EFL_AC();
490 RTTHREADPREEMPTSTATE PreemptState
= RTTHREADPREEMPTSTATE_INITIALIZER
;
491 Args
.pfnWorker
= pfnWorker
;
492 Args
.pvUser1
= pvUser1
;
493 Args
.pvUser2
= pvUser2
;
497 if (!RTMpIsCpuPossible(idCpu
))
498 return VERR_CPU_NOT_FOUND
;
500 RTThreadPreemptDisable(&PreemptState
);
501 if (idCpu
!= RTMpCpuId())
503 if (RTMpIsCpuOnline(idCpu
))
505 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
506 rc
= smp_call_function_single(idCpu
, rtmpLinuxWrapper
, &Args
, 1 /* wait */);
507 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
508 rc
= smp_call_function_single(idCpu
, rtmpLinuxWrapper
, &Args
, 0 /* retry */, 1 /* wait */);
509 #else /* older kernels */
510 rc
= smp_call_function(rtmpOnSpecificLinuxWrapper
, &Args
, 0 /* retry */, 1 /* wait */);
511 #endif /* older kernels */
513 rc
= Args
.cHits
? VINF_SUCCESS
: VERR_CPU_OFFLINE
;
516 rc
= VERR_CPU_OFFLINE
;
520 rtmpLinuxWrapper(&Args
);
523 RTThreadPreemptRestore(&PreemptState
);;
526 IPRT_LINUX_RESTORE_EFL_AC();
529 RT_EXPORT_SYMBOL(RTMpOnSpecific
);
532 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
534 * Dummy callback used by RTMpPokeCpu.
536 * @param pvInfo Ignored.
538 static void rtmpLinuxPokeCpuCallback(void *pvInfo
)
545 RTDECL(int) RTMpPokeCpu(RTCPUID idCpu
)
547 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
549 IPRT_LINUX_SAVE_EFL_AC();
551 if (!RTMpIsCpuPossible(idCpu
))
552 return VERR_CPU_NOT_FOUND
;
553 if (!RTMpIsCpuOnline(idCpu
))
554 return VERR_CPU_OFFLINE
;
556 # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
557 rc
= smp_call_function_single(idCpu
, rtmpLinuxPokeCpuCallback
, NULL
, 0 /* wait */);
558 # elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
559 rc
= smp_call_function_single(idCpu
, rtmpLinuxPokeCpuCallback
, NULL
, 0 /* retry */, 0 /* wait */);
560 # else /* older kernels */
562 # endif /* older kernels */
565 IPRT_LINUX_RESTORE_EFL_AC();
568 #else /* older kernels */
569 /* no unicast here? */
570 return VERR_NOT_SUPPORTED
;
571 #endif /* older kernels */
573 RT_EXPORT_SYMBOL(RTMpPokeCpu
);
576 RTDECL(bool) RTMpOnAllIsConcurrentSafe(void)
580 RT_EXPORT_SYMBOL(RTMpOnAllIsConcurrentSafe
);