]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - ubuntu/vbox/r0drv/linux/mp-r0drv-linux.c
UBUNTU: ubuntu: vbox -- Update to 5.1.14-dfsg-1
[mirror_ubuntu-zesty-kernel.git] / ubuntu / vbox / r0drv / linux / mp-r0drv-linux.c
CommitLineData
39adb5c3
TG
1/* $Id: mp-r0drv-linux.c $ */
2/** @file
3 * IPRT - Multiprocessor, Ring-0 Driver, Linux.
4 */
5
6/*
7 * Copyright (C) 2008-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "the-linux-kernel.h"
32#include "internal/iprt.h"
33
34#include <iprt/mp.h>
35#include <iprt/cpuset.h>
36#include <iprt/err.h>
37#include <iprt/asm.h>
38#include <iprt/thread.h>
39#include "r0drv/mp-r0drv.h"
40
5410645a
SF
/* Number of bits a kernel cpumask can hold.  Newer kernels export
   nr_cpumask_bits (runtime-sized when CONFIG_CPUMASK_OFFSTACK); older ones
   only have the compile-time NR_CPUS limit. */
#ifdef nr_cpumask_bits
# define VBOX_NR_CPUMASK_BITS   nr_cpumask_bits
#else
# define VBOX_NR_CPUMASK_BITS   NR_CPUS
#endif
39adb5c3
TG
46
47RTDECL(RTCPUID) RTMpCpuId(void)
48{
49 return smp_processor_id();
50}
51RT_EXPORT_SYMBOL(RTMpCpuId);
52
53
54RTDECL(int) RTMpCurSetIndex(void)
55{
56 return smp_processor_id();
57}
58RT_EXPORT_SYMBOL(RTMpCurSetIndex);
59
60
61RTDECL(int) RTMpCurSetIndexAndId(PRTCPUID pidCpu)
62{
63 return *pidCpu = smp_processor_id();
64}
65RT_EXPORT_SYMBOL(RTMpCurSetIndexAndId);
66
67
68RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
69{
5410645a 70 return idCpu < RTCPUSET_MAX_CPUS && idCpu < VBOX_NR_CPUMASK_BITS ? (int)idCpu : -1;
39adb5c3
TG
71}
72RT_EXPORT_SYMBOL(RTMpCpuIdToSetIndex);
73
74
75RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
76{
5410645a 77 return iCpu < VBOX_NR_CPUMASK_BITS ? (RTCPUID)iCpu : NIL_RTCPUID;
39adb5c3
TG
78}
79RT_EXPORT_SYMBOL(RTMpCpuIdFromSetIndex);
80
81
/**
 * Gets the highest CPU identifier that may be returned by RTMpCpuId.
 *
 * @returns The max CPU ID the kernel cpumask can represent.
 *          NOTE(review): the original carried a '//???' here, questioning
 *          whether RTCPUSET_MAX_CPUS should bound this too — confirm.
 */
RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
{
    return VBOX_NR_CPUMASK_BITS - 1; /* highest representable cpumask bit */
}
RT_EXPORT_SYMBOL(RTMpGetMaxCpuId);
87
88
/**
 * Checks whether a CPU with the given ID can possibly exist in the system.
 *
 * @returns true if the CPU may exist, false otherwise.
 * @param   idCpu   The CPU identifier.
 */
RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
{
#if defined(CONFIG_SMP)
    /* IDs beyond the cpumask capacity can never be valid. */
    if (RT_UNLIKELY(idCpu >= VBOX_NR_CPUMASK_BITS))
        return false;

# if defined(cpu_possible)
    return cpu_possible(idCpu);
# else /* < 2.5.29 */
    return idCpu < (RTCPUID)smp_num_cpus;
# endif
#else
    /* Uniprocessor kernel: the only possible CPU is the current one. */
    return idCpu == RTMpCpuId();
#endif
}
RT_EXPORT_SYMBOL(RTMpIsCpuPossible);
105
106
107RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
108{
109 RTCPUID idCpu;
110
111 RTCpuSetEmpty(pSet);
112 idCpu = RTMpGetMaxCpuId();
113 do
114 {
115 if (RTMpIsCpuPossible(idCpu))
116 RTCpuSetAdd(pSet, idCpu);
117 } while (idCpu-- > 0);
118 return pSet;
119}
120RT_EXPORT_SYMBOL(RTMpGetSet);
121
122
/**
 * Gets the number of CPUs in the system.
 *
 * @returns The present CPU count where detectable, otherwise the possible
 *          count; 1 on uniprocessor kernels.
 */
RTDECL(RTCPUID) RTMpGetCount(void)
{
#ifdef CONFIG_SMP
# if defined(CONFIG_HOTPLUG_CPU) /* introduced & uses cpu_present */
    return num_present_cpus();
# elif defined(num_possible_cpus)
    return num_possible_cpus();
# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
    return smp_num_cpus;
# else
    /* Fallback for old kernels: enumerate the possible set and count it. */
    RTCPUSET Set;
    RTMpGetSet(&Set);
    return RTCpuSetCount(&Set);
# endif
#else
    return 1;
#endif
}
RT_EXPORT_SYMBOL(RTMpGetCount);
142
143
/**
 * Checks whether the given CPU is currently online.
 *
 * @returns true if online, false if offline or out of range.
 * @param   idCpu   The CPU identifier.
 */
RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
{
#ifdef CONFIG_SMP
    /* Reject IDs the kernel cpumask cannot represent. */
    if (RT_UNLIKELY(idCpu >= VBOX_NR_CPUMASK_BITS))
        return false;
# ifdef cpu_online
    return cpu_online(idCpu);
# else /* 2.4: */
    return cpu_online_map & RT_BIT_64(idCpu);
# endif
#else
    /* Uniprocessor kernel: only the current CPU, and it is online. */
    return idCpu == RTMpCpuId();
#endif
}
RT_EXPORT_SYMBOL(RTMpIsCpuOnline);
159
160
161RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
162{
163#ifdef CONFIG_SMP
164 RTCPUID idCpu;
165
166 RTCpuSetEmpty(pSet);
167 idCpu = RTMpGetMaxCpuId();
168 do
169 {
170 if (RTMpIsCpuOnline(idCpu))
171 RTCpuSetAdd(pSet, idCpu);
172 } while (idCpu-- > 0);
173#else
174 RTCpuSetEmpty(pSet);
175 RTCpuSetAdd(pSet, RTMpCpuId());
176#endif
177 return pSet;
178}
179RT_EXPORT_SYMBOL(RTMpGetOnlineSet);
180
181
/**
 * Gets the number of CPUs that are currently online.
 *
 * @returns The online CPU count; 1 on uniprocessor kernels.
 */
RTDECL(RTCPUID) RTMpGetOnlineCount(void)
{
#ifdef CONFIG_SMP
# if defined(num_online_cpus)
    return num_online_cpus();
# else
    /* Fallback for old kernels: enumerate the online set and count it. */
    RTCPUSET Set;
    RTMpGetOnlineSet(&Set);
    return RTCpuSetCount(&Set);
# endif
#else
    return 1;
#endif
}
RT_EXPORT_SYMBOL(RTMpGetOnlineCount);
197
198
/**
 * Checks whether the current CPU has pending multiprocessor work.
 *
 * @returns Always false on Linux.
 */
RTDECL(bool) RTMpIsCpuWorkPending(void)
{
    /** @todo (not used on non-Windows platforms yet). */
    return false;
}
RT_EXPORT_SYMBOL(RTMpIsCpuWorkPending);
205
206
207/**
208 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER.
209 *
210 * @param pvInfo Pointer to the RTMPARGS package.
211 */
212static void rtmpLinuxWrapper(void *pvInfo)
213{
214 PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
215 ASMAtomicIncU32(&pArgs->cHits);
216 pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
217}
218
219
220/**
221 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER, does hit
222 * increment after calling the worker.
223 *
224 * @param pvInfo Pointer to the RTMPARGS package.
225 */
226static void rtmpLinuxWrapperPostInc(void *pvInfo)
227{
228 PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
229 pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
230 ASMAtomicIncU32(&pArgs->cHits);
231}
232
233
234/**
235 * Wrapper between the native linux all-cpu callbacks and PFNRTWORKER.
236 *
237 * @param pvInfo Pointer to the RTMPARGS package.
238 */
239static void rtmpLinuxAllWrapper(void *pvInfo)
240{
241 PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
242 PRTCPUSET pWorkerSet = pArgs->pWorkerSet;
243 RTCPUID idCpu = RTMpCpuId();
244 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
245
246 if (RTCpuSetIsMember(pWorkerSet, idCpu))
247 {
248 pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
249 RTCpuSetDel(pWorkerSet, idCpu);
250 }
251}
252
253
/**
 * Executes pfnWorker on every online CPU, waiting for all to complete.
 *
 * @returns VINF_SUCCESS.
 * @param   pfnWorker   The worker callback.
 * @param   pvUser1     First user argument for the worker.
 * @param   pvUser2     Second user argument for the worker.
 */
RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    IPRT_LINUX_SAVE_EFL_AC();
    int rc;
    RTMPARGS Args;
    RTCPUSET OnlineSet;
    RTCPUID idCpu;
    uint32_t cLoops;

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;

    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = NIL_RTCPUID;
    Args.cHits = 0;

    /* Preemption stays disabled for the whole broadcast so the online set
       snapshot and our own CPU ID remain stable. */
    RTThreadPreemptDisable(&PreemptState);
    RTMpGetOnlineSet(&OnlineSet);
    Args.pWorkerSet = &OnlineSet;
    idCpu = RTMpCpuId();

    if (RTCpuSetCount(&OnlineSet) > 1)
    {
        /* Fire the function on all other CPUs without waiting for completion. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
        rc = smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* wait */);
#else
        rc = smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* retry */, 0 /* wait */);
#endif
        Assert(!rc); NOREF(rc);
    }

    /* Fire the function on this CPU. */
    Args.pfnWorker(idCpu, Args.pvUser1, Args.pvUser2);
    RTCpuSetDel(Args.pWorkerSet, idCpu);

    /* Wait for all of them finish.  Each CPU removes itself from the worker
       set in rtmpLinuxAllWrapper when done. */
    cLoops = 64000;
    while (!RTCpuSetIsEmpty(Args.pWorkerSet))
    {
        /* Periodically check if any CPU in the wait set has gone offline, if so update the wait set. */
        if (!cLoops--)
        {
            RTCPUSET OnlineSetNow;
            RTMpGetOnlineSet(&OnlineSetNow);
            RTCpuSetAnd(Args.pWorkerSet, &OnlineSetNow);

            cLoops = 64000;
        }

        ASMNopPause();
    }

    RTThreadPreemptRestore(&PreemptState);
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMpOnAll);
313
314
/**
 * Executes pfnWorker on every online CPU except the calling one, waiting for
 * all to complete.
 *
 * @returns VINF_SUCCESS.
 * @param   pfnWorker   The worker callback.
 * @param   pvUser1     First user argument for the worker.
 * @param   pvUser2     Second user argument for the worker.
 */
RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    IPRT_LINUX_SAVE_EFL_AC();
    int rc;
    RTMPARGS Args;

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = NIL_RTCPUID;
    Args.cHits = 0;

    /* smp_call_function targets all CPUs but the caller; wait=1 makes it
       synchronous so no post-call spin is needed here. */
    RTThreadPreemptDisable(&PreemptState);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
    rc = smp_call_function(rtmpLinuxWrapper, &Args, 1 /* wait */);
#else /* older kernels */
    rc = smp_call_function(rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
#endif /* older kernels */
    RTThreadPreemptRestore(&PreemptState);

    Assert(rc == 0); NOREF(rc);
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMpOnOthers);
341
342
343#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
344/**
345 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER
346 * employed by RTMpOnPair on older kernels that lacks smp_call_function_many.
347 *
348 * @param pvInfo Pointer to the RTMPARGS package.
349 */
350static void rtMpLinuxOnPairWrapper(void *pvInfo)
351{
352 PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
353 RTCPUID idCpu = RTMpCpuId();
354
355 if ( idCpu == pArgs->idCpu
356 || idCpu == pArgs->idCpu2)
357 {
358 pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
359 ASMAtomicIncU32(&pArgs->cHits);
360 }
361}
362#endif
363
364
365RTDECL(int) RTMpOnPair(RTCPUID idCpu1, RTCPUID idCpu2, uint32_t fFlags, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
366{
367 IPRT_LINUX_SAVE_EFL_AC();
368 int rc;
369 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
370
371 AssertReturn(idCpu1 != idCpu2, VERR_INVALID_PARAMETER);
372 AssertReturn(!(fFlags & RTMPON_F_VALID_MASK), VERR_INVALID_FLAGS);
373
374 /*
375 * Check that both CPUs are online before doing the broadcast call.
376 */
377 RTThreadPreemptDisable(&PreemptState);
378 if ( RTMpIsCpuOnline(idCpu1)
379 && RTMpIsCpuOnline(idCpu2))
380 {
381 /*
382 * Use the smp_call_function variant taking a cpu mask where available,
383 * falling back on broadcast with filter. Slight snag if one of the
384 * CPUs is the one we're running on, we must do the call and the post
385 * call wait ourselves.
386 */
387#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
388 cpumask_t DstCpuMask;
389#endif
390 RTCPUID idCpuSelf = RTMpCpuId();
391 bool const fCallSelf = idCpuSelf == idCpu1 || idCpuSelf == idCpu2;
392 RTMPARGS Args;
393 Args.pfnWorker = pfnWorker;
394 Args.pvUser1 = pvUser1;
395 Args.pvUser2 = pvUser2;
396 Args.idCpu = idCpu1;
397 Args.idCpu2 = idCpu2;
398 Args.cHits = 0;
399
400#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
401 cpumask_clear(&DstCpuMask);
402 cpumask_set_cpu(idCpu1, &DstCpuMask);
403 cpumask_set_cpu(idCpu2, &DstCpuMask);
404#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
405 cpus_clear(DstCpuMask);
406 cpu_set(idCpu1, DstCpuMask);
407 cpu_set(idCpu2, DstCpuMask);
408#endif
409
b94f26ec 410#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
39adb5c3
TG
411 smp_call_function_many(&DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
412 rc = 0;
39adb5c3
TG
413#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
414 rc = smp_call_function_mask(DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
415#else /* older kernels */
416 rc = smp_call_function(rtMpLinuxOnPairWrapper, &Args, 0 /* retry */, !fCallSelf /* wait */);
417#endif /* older kernels */
418 Assert(rc == 0);
419
420 /* Call ourselves if necessary and wait for the other party to be done. */
421 if (fCallSelf)
422 {
423 uint32_t cLoops = 0;
424 rtmpLinuxWrapper(&Args);
425 while (ASMAtomicReadU32(&Args.cHits) < 2)
426 {
427 if ((cLoops & 0x1ff) == 0 && !RTMpIsCpuOnline(idCpuSelf == idCpu1 ? idCpu2 : idCpu1))
428 break;
429 cLoops++;
430 ASMNopPause();
431 }
432 }
433
434 Assert(Args.cHits <= 2);
435 if (Args.cHits == 2)
436 rc = VINF_SUCCESS;
437 else if (Args.cHits == 1)
438 rc = VERR_NOT_ALL_CPUS_SHOWED;
439 else if (Args.cHits == 0)
440 rc = VERR_CPU_OFFLINE;
441 else
442 rc = VERR_CPU_IPE_1;
443 }
444 /*
445 * A CPU must be present to be considered just offline.
446 */
447 else if ( RTMpIsCpuPresent(idCpu1)
448 && RTMpIsCpuPresent(idCpu2))
449 rc = VERR_CPU_OFFLINE;
450 else
451 rc = VERR_CPU_NOT_FOUND;
452 RTThreadPreemptRestore(&PreemptState);;
453 IPRT_LINUX_RESTORE_EFL_AC();
454 return rc;
455}
456RT_EXPORT_SYMBOL(RTMpOnPair);
457
458
/**
 * Indicates whether RTMpOnPair supports running the worker concurrently on
 * both CPUs (RTMPON_F_CONCURRENT_EXEC).
 *
 * @returns Always true on Linux.
 */
RTDECL(bool) RTMpOnPairIsConcurrentExecSupported(void)
{
    return true;
}
RT_EXPORT_SYMBOL(RTMpOnPairIsConcurrentExecSupported);
464
465
466#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
467/**
468 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER
469 * employed by RTMpOnSpecific on older kernels that lacks smp_call_function_single.
470 *
471 * @param pvInfo Pointer to the RTMPARGS package.
472 */
473static void rtmpOnSpecificLinuxWrapper(void *pvInfo)
474{
475 PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
476 RTCPUID idCpu = RTMpCpuId();
477
478 if (idCpu == pArgs->idCpu)
479 {
480 pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
481 ASMAtomicIncU32(&pArgs->cHits);
482 }
483}
484#endif
485
486
487RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
488{
489 IPRT_LINUX_SAVE_EFL_AC();
490 int rc;
491 RTMPARGS Args;
492
493 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
494 Args.pfnWorker = pfnWorker;
495 Args.pvUser1 = pvUser1;
496 Args.pvUser2 = pvUser2;
497 Args.idCpu = idCpu;
498 Args.cHits = 0;
499
500 if (!RTMpIsCpuPossible(idCpu))
501 return VERR_CPU_NOT_FOUND;
502
503 RTThreadPreemptDisable(&PreemptState);
504 if (idCpu != RTMpCpuId())
505 {
506 if (RTMpIsCpuOnline(idCpu))
507 {
508#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
509 rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 1 /* wait */);
510#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
511 rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
512#else /* older kernels */
513 rc = smp_call_function(rtmpOnSpecificLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
514#endif /* older kernels */
515 Assert(rc == 0);
516 rc = Args.cHits ? VINF_SUCCESS : VERR_CPU_OFFLINE;
517 }
518 else
519 rc = VERR_CPU_OFFLINE;
520 }
521 else
522 {
523 rtmpLinuxWrapper(&Args);
524 rc = VINF_SUCCESS;
525 }
526 RTThreadPreemptRestore(&PreemptState);;
527
528 NOREF(rc);
529 IPRT_LINUX_RESTORE_EFL_AC();
530 return rc;
531}
532RT_EXPORT_SYMBOL(RTMpOnSpecific);
533
534
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
/**
 * Dummy callback used by RTMpPokeCpu.
 *
 * The IPI delivery itself is the point; the callback does nothing.
 *
 * @param   pvInfo      Ignored.
 */
static void rtmpLinuxPokeCpuCallback(void *pvInfo)
{
    NOREF(pvInfo);
}
#endif
546
547
/**
 * Pokes the specified CPU with an IPI (no-op callback, no wait).
 *
 * @returns VINF_SUCCESS on success, VERR_CPU_NOT_FOUND / VERR_CPU_OFFLINE on
 *          bad target, VERR_NOT_SUPPORTED on kernels < 2.6.19.
 * @param   idCpu       The CPU to poke.
 */
RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
    int rc;
    IPRT_LINUX_SAVE_EFL_AC();

    /* Early exits; nothing was changed yet, so no EFL/AC restore needed. */
    if (!RTMpIsCpuPossible(idCpu))
        return VERR_CPU_NOT_FOUND;
    if (!RTMpIsCpuOnline(idCpu))
        return VERR_CPU_OFFLINE;

    /* wait=0: fire and forget, the callback does nothing anyway. */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
    rc = smp_call_function_single(idCpu, rtmpLinuxPokeCpuCallback, NULL, 0 /* wait */);
# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
    rc = smp_call_function_single(idCpu, rtmpLinuxPokeCpuCallback, NULL, 0 /* retry */, 0 /* wait */);
# else /* older kernels */
#  error oops
# endif /* older kernels */
    NOREF(rc);
    Assert(rc == 0);
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;

#else /* older kernels */
    /* no unicast here? */
    return VERR_NOT_SUPPORTED;
#endif /* older kernels */
}
RT_EXPORT_SYMBOL(RTMpPokeCpu);
577
578
/**
 * Indicates whether RTMpOnAll runs the worker concurrently on all CPUs.
 *
 * @returns Always true on Linux (smp_call_function broadcasts in parallel).
 */
RTDECL(bool) RTMpOnAllIsConcurrentSafe(void)
{
    return true;
}
RT_EXPORT_SYMBOL(RTMpOnAllIsConcurrentSafe);
584