/* Source: ubuntu/vbox/vboxguest/r0drv/linux/mp-r0drv-linux.c (Ubuntu bionic kernel mirror, vbox 5.2.0-dfsg-2). */
1/* $Id: mp-r0drv-linux.c $ */
2/** @file
3 * IPRT - Multiprocessor, Ring-0 Driver, Linux.
4 */
5
6/*
 * Copyright (C) 2008-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "the-linux-kernel.h"
32#include "internal/iprt.h"
33
34#include <iprt/mp.h>
35#include <iprt/cpuset.h>
36#include <iprt/err.h>
37#include <iprt/asm.h>
38#include <iprt/thread.h>
39#include "r0drv/mp-r0drv.h"
40
41#ifdef nr_cpumask_bits
42# define VBOX_NR_CPUMASK_BITS nr_cpumask_bits
43#else
44# define VBOX_NR_CPUMASK_BITS NR_CPUS
45#endif
46
47RTDECL(RTCPUID) RTMpCpuId(void)
48{
49 return smp_processor_id();
50}
51RT_EXPORT_SYMBOL(RTMpCpuId);
52
53
54RTDECL(int) RTMpCurSetIndex(void)
55{
56 return smp_processor_id();
57}
58RT_EXPORT_SYMBOL(RTMpCurSetIndex);
59
60
61RTDECL(int) RTMpCurSetIndexAndId(PRTCPUID pidCpu)
62{
63 return *pidCpu = smp_processor_id();
64}
65RT_EXPORT_SYMBOL(RTMpCurSetIndexAndId);
66
67
68RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
69{
70 return idCpu < RTCPUSET_MAX_CPUS && idCpu < VBOX_NR_CPUMASK_BITS ? (int)idCpu : -1;
71}
72RT_EXPORT_SYMBOL(RTMpCpuIdToSetIndex);
73
74
75RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
76{
77 return iCpu < VBOX_NR_CPUMASK_BITS ? (RTCPUID)iCpu : NIL_RTCPUID;
78}
79RT_EXPORT_SYMBOL(RTMpCpuIdFromSetIndex);
80
81
82RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
83{
84 return VBOX_NR_CPUMASK_BITS - 1; //???
85}
86RT_EXPORT_SYMBOL(RTMpGetMaxCpuId);
87
88
/**
 * Checks whether the given CPU can possibly exist in the system.
 *
 * @returns true if the CPU is possible, false otherwise.
 * @param   idCpu   The CPU identifier to check.
 */
RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
{
#if defined(CONFIG_SMP)
    /* Ids beyond the cpumask bit width can never be valid. */
    if (RT_UNLIKELY(idCpu >= VBOX_NR_CPUMASK_BITS))
        return false;

# if defined(cpu_possible)
    return cpu_possible(idCpu);
# else /* < 2.5.29 */
    /* Very old kernels: anything below the boot-time CPU count is possible. */
    return idCpu < (RTCPUID)smp_num_cpus;
# endif
#else
    /* Uniprocessor kernel: only the (sole) current CPU exists. */
    return idCpu == RTMpCpuId();
#endif
}
RT_EXPORT_SYMBOL(RTMpIsCpuPossible);
105
106
107RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
108{
109 RTCPUID idCpu;
110
111 RTCpuSetEmpty(pSet);
112 idCpu = RTMpGetMaxCpuId();
113 do
114 {
115 if (RTMpIsCpuPossible(idCpu))
116 RTCpuSetAdd(pSet, idCpu);
117 } while (idCpu-- > 0);
118 return pSet;
119}
120RT_EXPORT_SYMBOL(RTMpGetSet);
121
122
/**
 * Gets the number of CPUs in the system (present, not necessarily online).
 *
 * @returns The CPU count; 1 on non-SMP kernels.
 */
RTDECL(RTCPUID) RTMpGetCount(void)
{
#ifdef CONFIG_SMP
# if defined(CONFIG_HOTPLUG_CPU) /* introduced & uses cpu_present */
    return num_present_cpus();
# elif defined(num_possible_cpus)
    return num_possible_cpus();
# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
    return smp_num_cpus;
# else
    /* No suitable kernel helper; count the possible-CPU set ourselves. */
    RTCPUSET Set;
    RTMpGetSet(&Set);
    return RTCpuSetCount(&Set);
# endif
#else
    return 1;
#endif
}
RT_EXPORT_SYMBOL(RTMpGetCount);
142
143
/**
 * Checks whether the given CPU is currently online.
 *
 * @returns true if online, false if offline or out of range.
 * @param   idCpu   The CPU identifier to check.
 */
RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
{
#ifdef CONFIG_SMP
    if (RT_UNLIKELY(idCpu >= VBOX_NR_CPUMASK_BITS))
        return false;
# ifdef cpu_online
    return cpu_online(idCpu);
# else /* 2.4: */
    /* Ancient kernels expose the online CPUs as a plain bit mask. */
    return cpu_online_map & RT_BIT_64(idCpu);
# endif
#else
    /* Uniprocessor kernel: only the current CPU can be online. */
    return idCpu == RTMpCpuId();
#endif
}
RT_EXPORT_SYMBOL(RTMpIsCpuOnline);
159
160
/**
 * Gets the set of CPUs that are currently online.
 *
 * @returns pSet.
 * @param   pSet    Where to store the online CPU set.
 */
RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
{
#ifdef CONFIG_SMP
    RTCPUID idCpu;

    RTCpuSetEmpty(pSet);
    idCpu = RTMpGetMaxCpuId();
    /* Walk from the highest id down to zero, collecting online CPUs. */
    do
    {
        if (RTMpIsCpuOnline(idCpu))
            RTCpuSetAdd(pSet, idCpu);
    } while (idCpu-- > 0);
#else
    /* Uniprocessor: the only CPU is by definition online. */
    RTCpuSetEmpty(pSet);
    RTCpuSetAdd(pSet, RTMpCpuId());
#endif
    return pSet;
}
RT_EXPORT_SYMBOL(RTMpGetOnlineSet);
180
181
/**
 * Gets the number of CPUs that are currently online.
 *
 * @returns The online CPU count; 1 on non-SMP kernels.
 */
RTDECL(RTCPUID) RTMpGetOnlineCount(void)
{
#ifdef CONFIG_SMP
# if defined(num_online_cpus)
    return num_online_cpus();
# else
    /* No num_online_cpus(); count the online set ourselves. */
    RTCPUSET Set;
    RTMpGetOnlineSet(&Set);
    return RTCpuSetCount(&Set);
# endif
#else
    return 1;
#endif
}
RT_EXPORT_SYMBOL(RTMpGetOnlineCount);
197
198
199RTDECL(bool) RTMpIsCpuWorkPending(void)
200{
201 /** @todo (not used on non-Windows platforms yet). */
202 return false;
203}
204RT_EXPORT_SYMBOL(RTMpIsCpuWorkPending);
205
206
/**
 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER.
 *
 * Increments the hit counter *before* invoking the worker; callers that
 * need "worker finished" semantics must use rtmpLinuxWrapperPostInc.
 *
 * @param   pvInfo      Pointer to the RTMPARGS package.
 */
static void rtmpLinuxWrapper(void *pvInfo)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
    ASMAtomicIncU32(&pArgs->cHits);
    pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
}
218
219
/**
 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER, does hit
 * increment after calling the worker.
 *
 * The post-increment ordering lets a CPU polling cHits know that the worker
 * has actually completed, not merely started (see RTMpOnPair's wait loop).
 *
 * @param   pvInfo      Pointer to the RTMPARGS package.
 */
static void rtmpLinuxWrapperPostInc(void *pvInfo)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
    pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
    ASMAtomicIncU32(&pArgs->cHits);
}
232
233
/**
 * Wrapper between the native linux all-cpu callbacks and PFNRTWORKER.
 *
 * Runs the worker only if this CPU is still a member of the caller's wait
 * set, then removes itself from that set so the initiating CPU (spinning in
 * RTMpOnAll) can tell when every CPU has finished.
 *
 * @param   pvInfo      Pointer to the RTMPARGS package.
 */
static void rtmpLinuxAllWrapper(void *pvInfo)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
    PRTCPUSET pWorkerSet = pArgs->pWorkerSet;
    RTCPUID idCpu = RTMpCpuId();
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    if (RTCpuSetIsMember(pWorkerSet, idCpu))
    {
        pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
        RTCpuSetDel(pWorkerSet, idCpu);
    }
}
252
253
/**
 * Executes pfnWorker on every online CPU, including the calling one, and
 * waits for all of them to finish.
 *
 * The IPI to the other CPUs is fired without waiting; completion is tracked
 * through the shared worker set which each CPU removes itself from (see
 * rtmpLinuxAllWrapper).  CPUs that go offline before running the worker are
 * periodically pruned from the wait set so the loop cannot hang.
 *
 * @returns VINF_SUCCESS.
 * @param   pfnWorker   The worker function to run on each CPU.
 * @param   pvUser1     First user argument for the worker.
 * @param   pvUser2     Second user argument for the worker.
 */
RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    IPRT_LINUX_SAVE_EFL_AC();
    int rc;
    RTMPARGS Args;
    RTCPUSET OnlineSet;
    RTCPUID idCpu;
    uint32_t cLoops;

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;

    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = NIL_RTCPUID;
    Args.cHits = 0;

    /* Snapshot the online set with preemption off so we stay on this CPU. */
    RTThreadPreemptDisable(&PreemptState);
    RTMpGetOnlineSet(&OnlineSet);
    Args.pWorkerSet = &OnlineSet;
    idCpu = RTMpCpuId();

    if (RTCpuSetCount(&OnlineSet) > 1)
    {
        /* Fire the function on all other CPUs without waiting for completion. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
        rc = smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* wait */);
#else
        rc = smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* retry */, 0 /* wait */);
#endif
        Assert(!rc); NOREF(rc);
    }

    /* Fire the function on this CPU. */
    Args.pfnWorker(idCpu, Args.pvUser1, Args.pvUser2);
    RTCpuSetDel(Args.pWorkerSet, idCpu);

    /* Wait for all of them finish. */
    cLoops = 64000;
    while (!RTCpuSetIsEmpty(Args.pWorkerSet))
    {
        /* Periodically check if any CPU in the wait set has gone offline, if so update the wait set. */
        if (!cLoops--)
        {
            RTCPUSET OnlineSetNow;
            RTMpGetOnlineSet(&OnlineSetNow);
            RTCpuSetAnd(Args.pWorkerSet, &OnlineSetNow);

            cLoops = 64000;
        }

        ASMNopPause();
    }

    RTThreadPreemptRestore(&PreemptState);
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMpOnAll);
313
314
/**
 * Executes pfnWorker on every online CPU except the calling one, waiting
 * for all of them to complete before returning.
 *
 * @returns VINF_SUCCESS.
 * @param   pfnWorker   The worker function to run on the other CPUs.
 * @param   pvUser1     First user argument for the worker.
 * @param   pvUser2     Second user argument for the worker.
 */
RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    IPRT_LINUX_SAVE_EFL_AC();
    int rc;
    RTMPARGS Args;

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = NIL_RTCPUID;
    Args.cHits = 0;

    /* Disable preemption so "others" stays well defined for the duration. */
    RTThreadPreemptDisable(&PreemptState);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
    rc = smp_call_function(rtmpLinuxWrapper, &Args, 1 /* wait */);
#else /* older kernels */
    rc = smp_call_function(rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
#endif /* older kernels */
    RTThreadPreemptRestore(&PreemptState);

    Assert(rc == 0); NOREF(rc);
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMpOnOthers);
341
342
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
/**
 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER
 * employed by RTMpOnPair on older kernels that lacks smp_call_function_many.
 *
 * The call is broadcast to all CPUs; only the two targeted ones run the
 * worker and bump the hit counter.
 *
 * @param   pvInfo      Pointer to the RTMPARGS package.
 */
static void rtMpLinuxOnPairWrapper(void *pvInfo)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
    RTCPUID idCpu = RTMpCpuId();

    if (   idCpu == pArgs->idCpu
        || idCpu == pArgs->idCpu2)
    {
        pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
        ASMAtomicIncU32(&pArgs->cHits);
    }
}
#endif
363
364
365RTDECL(int) RTMpOnPair(RTCPUID idCpu1, RTCPUID idCpu2, uint32_t fFlags, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
366{
367 IPRT_LINUX_SAVE_EFL_AC();
368 int rc;
369 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
370
371 AssertReturn(idCpu1 != idCpu2, VERR_INVALID_PARAMETER);
372 AssertReturn(!(fFlags & RTMPON_F_VALID_MASK), VERR_INVALID_FLAGS);
373
374 /*
375 * Check that both CPUs are online before doing the broadcast call.
376 */
377 RTThreadPreemptDisable(&PreemptState);
378 if ( RTMpIsCpuOnline(idCpu1)
379 && RTMpIsCpuOnline(idCpu2))
380 {
381 /*
382 * Use the smp_call_function variant taking a cpu mask where available,
383 * falling back on broadcast with filter. Slight snag if one of the
384 * CPUs is the one we're running on, we must do the call and the post
385 * call wait ourselves.
386 */
387#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
388 /* 2.6.28 introduces CONFIG_CPUMASK_OFFSTACK */
389 cpumask_var_t DstCpuMask;
390#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
391 cpumask_t DstCpuMask;
392#endif
393 RTCPUID idCpuSelf = RTMpCpuId();
394 bool const fCallSelf = idCpuSelf == idCpu1 || idCpuSelf == idCpu2;
395 RTMPARGS Args;
396 Args.pfnWorker = pfnWorker;
397 Args.pvUser1 = pvUser1;
398 Args.pvUser2 = pvUser2;
399 Args.idCpu = idCpu1;
400 Args.idCpu2 = idCpu2;
401 Args.cHits = 0;
402
403#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
404 if (!zalloc_cpumask_var(&DstCpuMask, GFP_KERNEL))
405 return VERR_NO_MEMORY;
406 cpumask_set_cpu(idCpu1, DstCpuMask);
407 cpumask_set_cpu(idCpu2, DstCpuMask);
408#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
409 if (!alloc_cpumask_var(&DstCpuMask, GFP_KERNEL))
410 return VERR_NO_MEMORY;
411 cpumask_clear(DstCpuMask);
412 cpumask_set_cpu(idCpu1, DstCpuMask);
413 cpumask_set_cpu(idCpu2, DstCpuMask);
414#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
415 cpus_clear(DstCpuMask);
416 cpu_set(idCpu1, DstCpuMask);
417 cpu_set(idCpu2, DstCpuMask);
418#endif
419
420#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
421 smp_call_function_many(DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
422 rc = 0;
423#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
424 rc = smp_call_function_mask(DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
425#else /* older kernels */
426 rc = smp_call_function(rtMpLinuxOnPairWrapper, &Args, 0 /* retry */, !fCallSelf /* wait */);
427#endif /* older kernels */
428 Assert(rc == 0);
429
430 /* Call ourselves if necessary and wait for the other party to be done. */
431 if (fCallSelf)
432 {
433 uint32_t cLoops = 0;
434 rtmpLinuxWrapper(&Args);
435 while (ASMAtomicReadU32(&Args.cHits) < 2)
436 {
437 if ((cLoops & 0x1ff) == 0 && !RTMpIsCpuOnline(idCpuSelf == idCpu1 ? idCpu2 : idCpu1))
438 break;
439 cLoops++;
440 ASMNopPause();
441 }
442 }
443
444 Assert(Args.cHits <= 2);
445 if (Args.cHits == 2)
446 rc = VINF_SUCCESS;
447 else if (Args.cHits == 1)
448 rc = VERR_NOT_ALL_CPUS_SHOWED;
449 else if (Args.cHits == 0)
450 rc = VERR_CPU_OFFLINE;
451 else
452 rc = VERR_CPU_IPE_1;
453
454#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
455 free_cpumask_var(DstCpuMask);
456#endif
457 }
458 /*
459 * A CPU must be present to be considered just offline.
460 */
461 else if ( RTMpIsCpuPresent(idCpu1)
462 && RTMpIsCpuPresent(idCpu2))
463 rc = VERR_CPU_OFFLINE;
464 else
465 rc = VERR_CPU_NOT_FOUND;
466 RTThreadPreemptRestore(&PreemptState);;
467 IPRT_LINUX_RESTORE_EFL_AC();
468 return rc;
469}
470RT_EXPORT_SYMBOL(RTMpOnPair);
471
472
473RTDECL(bool) RTMpOnPairIsConcurrentExecSupported(void)
474{
475 return true;
476}
477RT_EXPORT_SYMBOL(RTMpOnPairIsConcurrentExecSupported);
478
479
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
/**
 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER
 * employed by RTMpOnSpecific on older kernels that lacks smp_call_function_single.
 *
 * The call is broadcast to every CPU; only the targeted one runs the worker
 * and bumps the hit counter (which RTMpOnSpecific uses to detect success).
 *
 * @param   pvInfo      Pointer to the RTMPARGS package.
 */
static void rtmpOnSpecificLinuxWrapper(void *pvInfo)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
    RTCPUID idCpu = RTMpCpuId();

    if (idCpu == pArgs->idCpu)
    {
        pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
        ASMAtomicIncU32(&pArgs->cHits);
    }
}
#endif
499
500
501RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
502{
503 IPRT_LINUX_SAVE_EFL_AC();
504 int rc;
505 RTMPARGS Args;
506
507 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
508 Args.pfnWorker = pfnWorker;
509 Args.pvUser1 = pvUser1;
510 Args.pvUser2 = pvUser2;
511 Args.idCpu = idCpu;
512 Args.cHits = 0;
513
514 if (!RTMpIsCpuPossible(idCpu))
515 return VERR_CPU_NOT_FOUND;
516
517 RTThreadPreemptDisable(&PreemptState);
518 if (idCpu != RTMpCpuId())
519 {
520 if (RTMpIsCpuOnline(idCpu))
521 {
522#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
523 rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 1 /* wait */);
524#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
525 rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
526#else /* older kernels */
527 rc = smp_call_function(rtmpOnSpecificLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
528#endif /* older kernels */
529 Assert(rc == 0);
530 rc = Args.cHits ? VINF_SUCCESS : VERR_CPU_OFFLINE;
531 }
532 else
533 rc = VERR_CPU_OFFLINE;
534 }
535 else
536 {
537 rtmpLinuxWrapper(&Args);
538 rc = VINF_SUCCESS;
539 }
540 RTThreadPreemptRestore(&PreemptState);;
541
542 NOREF(rc);
543 IPRT_LINUX_RESTORE_EFL_AC();
544 return rc;
545}
546RT_EXPORT_SYMBOL(RTMpOnSpecific);
547
548
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
/**
 * Dummy callback used by RTMpPokeCpu; the IPI delivery itself is the point,
 * so there is nothing to do here.
 *
 * @param   pvInfo      Ignored.
 */
static void rtmpLinuxPokeCpuCallback(void *pvInfo)
{
    NOREF(pvInfo);
}
#endif
560
561
562RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
563{
564#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
565 int rc;
566 IPRT_LINUX_SAVE_EFL_AC();
567
568 if (!RTMpIsCpuPossible(idCpu))
569 return VERR_CPU_NOT_FOUND;
570 if (!RTMpIsCpuOnline(idCpu))
571 return VERR_CPU_OFFLINE;
572
573# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
574 rc = smp_call_function_single(idCpu, rtmpLinuxPokeCpuCallback, NULL, 0 /* wait */);
575# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
576 rc = smp_call_function_single(idCpu, rtmpLinuxPokeCpuCallback, NULL, 0 /* retry */, 0 /* wait */);
577# else /* older kernels */
578# error oops
579# endif /* older kernels */
580 NOREF(rc);
581 Assert(rc == 0);
582 IPRT_LINUX_RESTORE_EFL_AC();
583 return VINF_SUCCESS;
584
585#else /* older kernels */
586 /* no unicast here? */
587 return VERR_NOT_SUPPORTED;
588#endif /* older kernels */
589}
590RT_EXPORT_SYMBOL(RTMpPokeCpu);
591
592
593RTDECL(bool) RTMpOnAllIsConcurrentSafe(void)
594{
595 return true;
596}
597RT_EXPORT_SYMBOL(RTMpOnAllIsConcurrentSafe);
598