1 /* $Id: mp-r0drv-linux.c $ */
3 * IPRT - Multiprocessor, Ring-0 Driver, Linux.
7 * Copyright (C) 2008-2016 Oracle Corporation
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
28 /*********************************************************************************************************************************
30 *********************************************************************************************************************************/
31 #include "the-linux-kernel.h"
32 #include "internal/iprt.h"
35 #include <iprt/cpuset.h>
38 #include <iprt/thread.h>
39 #include "r0drv/mp-r0drv.h"
/* Effective width of the kernel's CPU mask: prefer the runtime-sized
   nr_cpumask_bits when the kernel provides it, otherwise the compile-time
   NR_CPUS limit. */
#ifdef nr_cpumask_bits
# define VBOX_NR_CPUMASK_BITS nr_cpumask_bits
#else
# define VBOX_NR_CPUMASK_BITS NR_CPUS
#endif
47 RTDECL(RTCPUID
) RTMpCpuId(void)
49 return smp_processor_id();
51 RT_EXPORT_SYMBOL(RTMpCpuId
);
54 RTDECL(int) RTMpCurSetIndex(void)
56 return smp_processor_id();
58 RT_EXPORT_SYMBOL(RTMpCurSetIndex
);
61 RTDECL(int) RTMpCurSetIndexAndId(PRTCPUID pidCpu
)
63 return *pidCpu
= smp_processor_id();
65 RT_EXPORT_SYMBOL(RTMpCurSetIndexAndId
);
68 RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu
)
70 return idCpu
< RTCPUSET_MAX_CPUS
&& idCpu
< VBOX_NR_CPUMASK_BITS
? (int)idCpu
: -1;
72 RT_EXPORT_SYMBOL(RTMpCpuIdToSetIndex
);
75 RTDECL(RTCPUID
) RTMpCpuIdFromSetIndex(int iCpu
)
77 return iCpu
< VBOX_NR_CPUMASK_BITS
? (RTCPUID
)iCpu
: NIL_RTCPUID
;
79 RT_EXPORT_SYMBOL(RTMpCpuIdFromSetIndex
);
82 RTDECL(RTCPUID
) RTMpGetMaxCpuId(void)
84 return VBOX_NR_CPUMASK_BITS
- 1; //???
86 RT_EXPORT_SYMBOL(RTMpGetMaxCpuId
);
89 RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu
)
91 #if defined(CONFIG_SMP)
92 if (RT_UNLIKELY(idCpu
>= VBOX_NR_CPUMASK_BITS
))
95 # if defined(cpu_possible)
96 return cpu_possible(idCpu
);
98 return idCpu
< (RTCPUID
)smp_num_cpus
;
101 return idCpu
== RTMpCpuId();
104 RT_EXPORT_SYMBOL(RTMpIsCpuPossible
);
107 RTDECL(PRTCPUSET
) RTMpGetSet(PRTCPUSET pSet
)
112 idCpu
= RTMpGetMaxCpuId();
115 if (RTMpIsCpuPossible(idCpu
))
116 RTCpuSetAdd(pSet
, idCpu
);
117 } while (idCpu
-- > 0);
120 RT_EXPORT_SYMBOL(RTMpGetSet
);
123 RTDECL(RTCPUID
) RTMpGetCount(void)
126 # if defined(CONFIG_HOTPLUG_CPU) /* introduced & uses cpu_present */
127 return num_present_cpus();
128 # elif defined(num_possible_cpus)
129 return num_possible_cpus();
130 # elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
135 return RTCpuSetCount(&Set
);
141 RT_EXPORT_SYMBOL(RTMpGetCount
);
144 RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu
)
147 if (RT_UNLIKELY(idCpu
>= VBOX_NR_CPUMASK_BITS
))
150 return cpu_online(idCpu
);
152 return cpu_online_map
& RT_BIT_64(idCpu
);
155 return idCpu
== RTMpCpuId();
158 RT_EXPORT_SYMBOL(RTMpIsCpuOnline
);
161 RTDECL(PRTCPUSET
) RTMpGetOnlineSet(PRTCPUSET pSet
)
167 idCpu
= RTMpGetMaxCpuId();
170 if (RTMpIsCpuOnline(idCpu
))
171 RTCpuSetAdd(pSet
, idCpu
);
172 } while (idCpu
-- > 0);
175 RTCpuSetAdd(pSet
, RTMpCpuId());
179 RT_EXPORT_SYMBOL(RTMpGetOnlineSet
);
182 RTDECL(RTCPUID
) RTMpGetOnlineCount(void)
185 # if defined(num_online_cpus)
186 return num_online_cpus();
189 RTMpGetOnlineSet(&Set
);
190 return RTCpuSetCount(&Set
);
196 RT_EXPORT_SYMBOL(RTMpGetOnlineCount
);
199 RTDECL(bool) RTMpIsCpuWorkPending(void)
201 /** @todo (not used on non-Windows platforms yet). */
204 RT_EXPORT_SYMBOL(RTMpIsCpuWorkPending
);
208 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER.
210 * @param pvInfo Pointer to the RTMPARGS package.
212 static void rtmpLinuxWrapper(void *pvInfo
)
214 PRTMPARGS pArgs
= (PRTMPARGS
)pvInfo
;
215 ASMAtomicIncU32(&pArgs
->cHits
);
216 pArgs
->pfnWorker(RTMpCpuId(), pArgs
->pvUser1
, pArgs
->pvUser2
);
221 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER, does hit
222 * increment after calling the worker.
224 * @param pvInfo Pointer to the RTMPARGS package.
226 static void rtmpLinuxWrapperPostInc(void *pvInfo
)
228 PRTMPARGS pArgs
= (PRTMPARGS
)pvInfo
;
229 pArgs
->pfnWorker(RTMpCpuId(), pArgs
->pvUser1
, pArgs
->pvUser2
);
230 ASMAtomicIncU32(&pArgs
->cHits
);
235 * Wrapper between the native linux all-cpu callbacks and PFNRTWORKER.
237 * @param pvInfo Pointer to the RTMPARGS package.
239 static void rtmpLinuxAllWrapper(void *pvInfo
)
241 PRTMPARGS pArgs
= (PRTMPARGS
)pvInfo
;
242 PRTCPUSET pWorkerSet
= pArgs
->pWorkerSet
;
243 RTCPUID idCpu
= RTMpCpuId();
244 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD
));
246 if (RTCpuSetIsMember(pWorkerSet
, idCpu
))
248 pArgs
->pfnWorker(idCpu
, pArgs
->pvUser1
, pArgs
->pvUser2
);
249 RTCpuSetDel(pWorkerSet
, idCpu
);
254 RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker
, void *pvUser1
, void *pvUser2
)
256 IPRT_LINUX_SAVE_EFL_AC();
263 RTTHREADPREEMPTSTATE PreemptState
= RTTHREADPREEMPTSTATE_INITIALIZER
;
265 Args
.pfnWorker
= pfnWorker
;
266 Args
.pvUser1
= pvUser1
;
267 Args
.pvUser2
= pvUser2
;
268 Args
.idCpu
= NIL_RTCPUID
;
271 RTThreadPreemptDisable(&PreemptState
);
272 RTMpGetOnlineSet(&OnlineSet
);
273 Args
.pWorkerSet
= &OnlineSet
;
276 if (RTCpuSetCount(&OnlineSet
) > 1)
278 /* Fire the function on all other CPUs without waiting for completion. */
279 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
280 rc
= smp_call_function(rtmpLinuxAllWrapper
, &Args
, 0 /* wait */);
282 rc
= smp_call_function(rtmpLinuxAllWrapper
, &Args
, 0 /* retry */, 0 /* wait */);
284 Assert(!rc
); NOREF(rc
);
287 /* Fire the function on this CPU. */
288 Args
.pfnWorker(idCpu
, Args
.pvUser1
, Args
.pvUser2
);
289 RTCpuSetDel(Args
.pWorkerSet
, idCpu
);
291 /* Wait for all of them finish. */
293 while (!RTCpuSetIsEmpty(Args
.pWorkerSet
))
295 /* Periodically check if any CPU in the wait set has gone offline, if so update the wait set. */
298 RTCPUSET OnlineSetNow
;
299 RTMpGetOnlineSet(&OnlineSetNow
);
300 RTCpuSetAnd(Args
.pWorkerSet
, &OnlineSetNow
);
308 RTThreadPreemptRestore(&PreemptState
);
309 IPRT_LINUX_RESTORE_EFL_AC();
312 RT_EXPORT_SYMBOL(RTMpOnAll
);
315 RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker
, void *pvUser1
, void *pvUser2
)
317 IPRT_LINUX_SAVE_EFL_AC();
321 RTTHREADPREEMPTSTATE PreemptState
= RTTHREADPREEMPTSTATE_INITIALIZER
;
322 Args
.pfnWorker
= pfnWorker
;
323 Args
.pvUser1
= pvUser1
;
324 Args
.pvUser2
= pvUser2
;
325 Args
.idCpu
= NIL_RTCPUID
;
328 RTThreadPreemptDisable(&PreemptState
);
329 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
330 rc
= smp_call_function(rtmpLinuxWrapper
, &Args
, 1 /* wait */);
331 #else /* older kernels */
332 rc
= smp_call_function(rtmpLinuxWrapper
, &Args
, 0 /* retry */, 1 /* wait */);
333 #endif /* older kernels */
334 RTThreadPreemptRestore(&PreemptState
);
336 Assert(rc
== 0); NOREF(rc
);
337 IPRT_LINUX_RESTORE_EFL_AC();
340 RT_EXPORT_SYMBOL(RTMpOnOthers
);
343 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
345 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER
346 * employed by RTMpOnPair on older kernels that lacks smp_call_function_many.
348 * @param pvInfo Pointer to the RTMPARGS package.
350 static void rtMpLinuxOnPairWrapper(void *pvInfo
)
352 PRTMPARGS pArgs
= (PRTMPARGS
)pvInfo
;
353 RTCPUID idCpu
= RTMpCpuId();
355 if ( idCpu
== pArgs
->idCpu
356 || idCpu
== pArgs
->idCpu2
)
358 pArgs
->pfnWorker(idCpu
, pArgs
->pvUser1
, pArgs
->pvUser2
);
359 ASMAtomicIncU32(&pArgs
->cHits
);
365 RTDECL(int) RTMpOnPair(RTCPUID idCpu1
, RTCPUID idCpu2
, uint32_t fFlags
, PFNRTMPWORKER pfnWorker
, void *pvUser1
, void *pvUser2
)
367 IPRT_LINUX_SAVE_EFL_AC();
369 RTTHREADPREEMPTSTATE PreemptState
= RTTHREADPREEMPTSTATE_INITIALIZER
;
371 AssertReturn(idCpu1
!= idCpu2
, VERR_INVALID_PARAMETER
);
372 AssertReturn(!(fFlags
& RTMPON_F_VALID_MASK
), VERR_INVALID_FLAGS
);
375 * Check that both CPUs are online before doing the broadcast call.
377 RTThreadPreemptDisable(&PreemptState
);
378 if ( RTMpIsCpuOnline(idCpu1
)
379 && RTMpIsCpuOnline(idCpu2
))
382 * Use the smp_call_function variant taking a cpu mask where available,
383 * falling back on broadcast with filter. Slight snag if one of the
384 * CPUs is the one we're running on, we must do the call and the post
385 * call wait ourselves.
387 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
388 cpumask_t DstCpuMask
;
390 RTCPUID idCpuSelf
= RTMpCpuId();
391 bool const fCallSelf
= idCpuSelf
== idCpu1
|| idCpuSelf
== idCpu2
;
393 Args
.pfnWorker
= pfnWorker
;
394 Args
.pvUser1
= pvUser1
;
395 Args
.pvUser2
= pvUser2
;
397 Args
.idCpu2
= idCpu2
;
400 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
401 cpumask_clear(&DstCpuMask
);
402 cpumask_set_cpu(idCpu1
, &DstCpuMask
);
403 cpumask_set_cpu(idCpu2
, &DstCpuMask
);
404 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
405 cpus_clear(DstCpuMask
);
406 cpu_set(idCpu1
, DstCpuMask
);
407 cpu_set(idCpu2
, DstCpuMask
);
410 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
411 smp_call_function_many(&DstCpuMask
, rtmpLinuxWrapperPostInc
, &Args
, !fCallSelf
/* wait */);
413 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
414 rc
= smp_call_function_mask(DstCpuMask
, rtmpLinuxWrapperPostInc
, &Args
, !fCallSelf
/* wait */);
415 #else /* older kernels */
416 rc
= smp_call_function(rtMpLinuxOnPairWrapper
, &Args
, 0 /* retry */, !fCallSelf
/* wait */);
417 #endif /* older kernels */
420 /* Call ourselves if necessary and wait for the other party to be done. */
424 rtmpLinuxWrapper(&Args
);
425 while (ASMAtomicReadU32(&Args
.cHits
) < 2)
427 if ((cLoops
& 0x1ff) == 0 && !RTMpIsCpuOnline(idCpuSelf
== idCpu1
? idCpu2
: idCpu1
))
434 Assert(Args
.cHits
<= 2);
437 else if (Args
.cHits
== 1)
438 rc
= VERR_NOT_ALL_CPUS_SHOWED
;
439 else if (Args
.cHits
== 0)
440 rc
= VERR_CPU_OFFLINE
;
445 * A CPU must be present to be considered just offline.
447 else if ( RTMpIsCpuPresent(idCpu1
)
448 && RTMpIsCpuPresent(idCpu2
))
449 rc
= VERR_CPU_OFFLINE
;
451 rc
= VERR_CPU_NOT_FOUND
;
452 RTThreadPreemptRestore(&PreemptState
);;
453 IPRT_LINUX_RESTORE_EFL_AC();
456 RT_EXPORT_SYMBOL(RTMpOnPair
);
459 RTDECL(bool) RTMpOnPairIsConcurrentExecSupported(void)
463 RT_EXPORT_SYMBOL(RTMpOnPairIsConcurrentExecSupported
);
466 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
468 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER
469 * employed by RTMpOnSpecific on older kernels that lacks smp_call_function_single.
471 * @param pvInfo Pointer to the RTMPARGS package.
473 static void rtmpOnSpecificLinuxWrapper(void *pvInfo
)
475 PRTMPARGS pArgs
= (PRTMPARGS
)pvInfo
;
476 RTCPUID idCpu
= RTMpCpuId();
478 if (idCpu
== pArgs
->idCpu
)
480 pArgs
->pfnWorker(idCpu
, pArgs
->pvUser1
, pArgs
->pvUser2
);
481 ASMAtomicIncU32(&pArgs
->cHits
);
487 RTDECL(int) RTMpOnSpecific(RTCPUID idCpu
, PFNRTMPWORKER pfnWorker
, void *pvUser1
, void *pvUser2
)
489 IPRT_LINUX_SAVE_EFL_AC();
493 RTTHREADPREEMPTSTATE PreemptState
= RTTHREADPREEMPTSTATE_INITIALIZER
;
494 Args
.pfnWorker
= pfnWorker
;
495 Args
.pvUser1
= pvUser1
;
496 Args
.pvUser2
= pvUser2
;
500 if (!RTMpIsCpuPossible(idCpu
))
501 return VERR_CPU_NOT_FOUND
;
503 RTThreadPreemptDisable(&PreemptState
);
504 if (idCpu
!= RTMpCpuId())
506 if (RTMpIsCpuOnline(idCpu
))
508 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
509 rc
= smp_call_function_single(idCpu
, rtmpLinuxWrapper
, &Args
, 1 /* wait */);
510 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
511 rc
= smp_call_function_single(idCpu
, rtmpLinuxWrapper
, &Args
, 0 /* retry */, 1 /* wait */);
512 #else /* older kernels */
513 rc
= smp_call_function(rtmpOnSpecificLinuxWrapper
, &Args
, 0 /* retry */, 1 /* wait */);
514 #endif /* older kernels */
516 rc
= Args
.cHits
? VINF_SUCCESS
: VERR_CPU_OFFLINE
;
519 rc
= VERR_CPU_OFFLINE
;
523 rtmpLinuxWrapper(&Args
);
526 RTThreadPreemptRestore(&PreemptState
);;
529 IPRT_LINUX_RESTORE_EFL_AC();
532 RT_EXPORT_SYMBOL(RTMpOnSpecific
);
535 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
537 * Dummy callback used by RTMpPokeCpu.
539 * @param pvInfo Ignored.
541 static void rtmpLinuxPokeCpuCallback(void *pvInfo
)
548 RTDECL(int) RTMpPokeCpu(RTCPUID idCpu
)
550 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
552 IPRT_LINUX_SAVE_EFL_AC();
554 if (!RTMpIsCpuPossible(idCpu
))
555 return VERR_CPU_NOT_FOUND
;
556 if (!RTMpIsCpuOnline(idCpu
))
557 return VERR_CPU_OFFLINE
;
559 # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
560 rc
= smp_call_function_single(idCpu
, rtmpLinuxPokeCpuCallback
, NULL
, 0 /* wait */);
561 # elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
562 rc
= smp_call_function_single(idCpu
, rtmpLinuxPokeCpuCallback
, NULL
, 0 /* retry */, 0 /* wait */);
563 # else /* older kernels */
565 # endif /* older kernels */
568 IPRT_LINUX_RESTORE_EFL_AC();
571 #else /* older kernels */
572 /* no unicast here? */
573 return VERR_NOT_SUPPORTED
;
574 #endif /* older kernels */
576 RT_EXPORT_SYMBOL(RTMpPokeCpu
);
579 RTDECL(bool) RTMpOnAllIsConcurrentSafe(void)
583 RT_EXPORT_SYMBOL(RTMpOnAllIsConcurrentSafe
);