/* $Id: mpnotification-r0drv.c $ */
/** @file
 * IPRT - Multiprocessor, Ring-0 Driver, Event Notifications.
 */

/*
 * Copyright (C) 2008-2016 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include <iprt/mp.h>
#include "internal/iprt.h"

#include <iprt/asm.h>
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
# include <iprt/asm-amd64-x86.h>
#endif
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/mem.h>
#include <iprt/spinlock.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include "r0drv/mp-r0drv.h"

/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * Notification registration record tracking
 * RTMpNotificationRegister() calls.
 */
typedef struct RTMPNOTIFYREG
{
    /** Pointer to the next record. */
    struct RTMPNOTIFYREG * volatile pNext;
    /** The callback. */
    PFNRTMPNOTIFICATION             pfnCallback;
    /** The user argument. */
    void                           *pvUser;
    /** Bit mask indicating whether we've done this callback or not. */
    uint8_t                         bmDone[sizeof(void *)];
} RTMPNOTIFYREG;
/** Pointer to a registration record. */
typedef RTMPNOTIFYREG *PRTMPNOTIFYREG;


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** The spinlock protecting the list. */
static RTSPINLOCK volatile      g_hRTMpNotifySpinLock = NIL_RTSPINLOCK;
/** List of callbacks, in registration order. */
static PRTMPNOTIFYREG volatile  g_pRTMpCallbackHead = NULL;
/** The current done bit. */
static uint32_t volatile        g_iRTMpDoneBit;
/** The list generation.
 * This is increased whenever the list has been modified. The callback routine
 * makes use of this to avoid having to restart at the list head after each callback. */
static uint32_t volatile        g_iRTMpGeneration;



/**
 * This is called by the native code.
 *
 * @param   enmEvent    The event.
 * @param   idCpu       The CPU id the event applies to.
 */
DECLHIDDEN(void) rtMpNotificationDoCallbacks(RTMPEVENT enmEvent, RTCPUID idCpu)
{
    PRTMPNOTIFYREG  pCur;
    RTSPINLOCK      hSpinlock;

    /*
     * This is a little bit tricky as we cannot be holding the spinlock
     * while calling the callback. This means that the list might change
     * while we're walking it, and that multiple events might be running
     * concurrently (depending on the OS).
     *
     * So, the first measure is to employ a per-record bitmask (bmDone),
     * where we'll use a bit that rotates for each call to this function
     * to indicate which records have been processed. This will take care
     * of both changes to the list and a reasonable amount of concurrent
     * events.
     *
     * In order to avoid having to restart the list walk for every
     * callback we make, we'll make use of a list generation number that is
     * incremented every time the list is changed. So, if it remains
     * unchanged over a callback we can safely continue the iteration.
     */
    uint32_t iDone = ASMAtomicIncU32(&g_iRTMpDoneBit);
    iDone %= RT_SIZEOFMEMB(RTMPNOTIFYREG, bmDone) * 8;

    hSpinlock = g_hRTMpNotifySpinLock;
    if (hSpinlock == NIL_RTSPINLOCK)
        return;
    RTSpinlockAcquire(hSpinlock);

    /* Clear the bit. */
    for (pCur = g_pRTMpCallbackHead; pCur; pCur = pCur->pNext)
        ASMAtomicBitClear(&pCur->bmDone[0], iDone);

    /* Iterate the records and perform the callbacks. */
    do
    {
        uint32_t const iGeneration = ASMAtomicUoReadU32(&g_iRTMpGeneration);

        pCur = g_pRTMpCallbackHead;
        while (pCur)
        {
            if (!ASMAtomicBitTestAndSet(&pCur->bmDone[0], iDone))
            {
                PFNRTMPNOTIFICATION pfnCallback = pCur->pfnCallback;
                void               *pvUser      = pCur->pvUser;
                pCur = pCur->pNext;
                RTSpinlockRelease(g_hRTMpNotifySpinLock);

                pfnCallback(enmEvent, idCpu, pvUser);

                /* Carefully reacquire the lock here, see RTR0MpNotificationTerm(). */
                hSpinlock = g_hRTMpNotifySpinLock;
                if (hSpinlock == NIL_RTSPINLOCK)
                    return;
                RTSpinlockAcquire(hSpinlock);
                if (ASMAtomicUoReadU32(&g_iRTMpGeneration) != iGeneration)
                    break;
            }
            else
                pCur = pCur->pNext;
        }
    } while (pCur);

    RTSpinlockRelease(hSpinlock);
}


RTDECL(int) RTMpNotificationRegister(PFNRTMPNOTIFICATION pfnCallback, void *pvUser)
{
    PRTMPNOTIFYREG pCur;
    PRTMPNOTIFYREG pNew;

    /*
     * Validation.
     */
    AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
    AssertReturn(g_hRTMpNotifySpinLock != NIL_RTSPINLOCK, VERR_WRONG_ORDER);
    RT_ASSERT_PREEMPTIBLE();

    RTSpinlockAcquire(g_hRTMpNotifySpinLock);
    for (pCur = g_pRTMpCallbackHead; pCur; pCur = pCur->pNext)
        if (   pCur->pvUser == pvUser
            && pCur->pfnCallback == pfnCallback)
            break;
    RTSpinlockRelease(g_hRTMpNotifySpinLock);
    AssertMsgReturn(!pCur, ("pCur=%p pfnCallback=%p pvUser=%p\n", pCur, pfnCallback, pvUser), VERR_ALREADY_EXISTS);

    /*
     * Allocate a new record and attempt to insert it.
     */
    pNew = (PRTMPNOTIFYREG)RTMemAlloc(sizeof(*pNew));
    if (!pNew)
        return VERR_NO_MEMORY;

    pNew->pNext       = NULL;
    pNew->pfnCallback = pfnCallback;
    pNew->pvUser      = pvUser;
    memset(&pNew->bmDone[0], 0xff, sizeof(pNew->bmDone));
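    /* Note: every done bit starts out set, so a notification walk that is
       already in progress treats this record as handled; the new callback is
       not invoked for an event raised before it was registered. */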

    RTSpinlockAcquire(g_hRTMpNotifySpinLock);

    pCur = g_pRTMpCallbackHead;
    if (!pCur)
        g_pRTMpCallbackHead = pNew;
    else
    {
        for (pCur = g_pRTMpCallbackHead; ; pCur = pCur->pNext)
            if (   pCur->pvUser == pvUser
                && pCur->pfnCallback == pfnCallback)
                break;
            else if (!pCur->pNext)
            {
                pCur->pNext = pNew;
                pCur = NULL;
                break;
            }
    }

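    /* Bump the generation so that a walk in progress in
       rtMpNotificationDoCallbacks notices the list changed and restarts
       from the head. */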
    ASMAtomicIncU32(&g_iRTMpGeneration);

    RTSpinlockRelease(g_hRTMpNotifySpinLock);

    /* Duplicate? Free the unused new record and fail. */
    if (pCur)
    {
        RTMemFree(pNew);
        AssertMsgFailedReturn(("pCur=%p pfnCallback=%p pvUser=%p\n", pCur, pfnCallback, pvUser), VERR_ALREADY_EXISTS);
    }

    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMpNotificationRegister);


RTDECL(int) RTMpNotificationDeregister(PFNRTMPNOTIFICATION pfnCallback, void *pvUser)
{
    PRTMPNOTIFYREG pPrev;
    PRTMPNOTIFYREG pCur;

    /*
     * Validation.
     */
    AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
    AssertReturn(g_hRTMpNotifySpinLock != NIL_RTSPINLOCK, VERR_WRONG_ORDER);
    RT_ASSERT_INTS_ON();

    /*
     * Find and unlink the record from the list.
     */
    RTSpinlockAcquire(g_hRTMpNotifySpinLock);
    pPrev = NULL;
    for (pCur = g_pRTMpCallbackHead; pCur; pCur = pCur->pNext)
    {
        if (   pCur->pvUser == pvUser
            && pCur->pfnCallback == pfnCallback)
            break;
        pPrev = pCur;
    }
    if (pCur)
    {
        if (pPrev)
            pPrev->pNext = pCur->pNext;
        else
            g_pRTMpCallbackHead = pCur->pNext;
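        /* Bump the generation so that a walk in progress restarts from the
           head rather than continuing through a pointer that may now be stale. */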
        ASMAtomicIncU32(&g_iRTMpGeneration);
    }
    RTSpinlockRelease(g_hRTMpNotifySpinLock);

    if (!pCur)
        return VERR_NOT_FOUND;

    /*
     * Invalidate and free the record.
     */
    pCur->pNext = NULL;
    pCur->pfnCallback = NULL;
    RTMemFree(pCur);

    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMpNotificationDeregister);


DECLHIDDEN(int) rtR0MpNotificationInit(void)
{
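    /* An interrupt-safe spinlock is used here, presumably because the native
       code may deliver MP events (and thus call rtMpNotificationDoCallbacks)
       from interrupt context on some hosts. */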
    int rc = RTSpinlockCreate((PRTSPINLOCK)&g_hRTMpNotifySpinLock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "RTR0Mp");
    if (RT_SUCCESS(rc))
    {
        rc = rtR0MpNotificationNativeInit();
        if (RT_SUCCESS(rc))
            return rc;

        RTSpinlockDestroy(g_hRTMpNotifySpinLock);
        g_hRTMpNotifySpinLock = NIL_RTSPINLOCK;
    }
    return rc;
}


DECLHIDDEN(void) rtR0MpNotificationTerm(void)
{
    PRTMPNOTIFYREG  pHead;
    RTSPINLOCK      hSpinlock = g_hRTMpNotifySpinLock;
    AssertReturnVoid(hSpinlock != NIL_RTSPINLOCK);

    rtR0MpNotificationNativeTerm();

    /* pick up the list and the spinlock. */
    RTSpinlockAcquire(hSpinlock);
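    /* Writing NIL to the global handle below tells rtMpNotificationDoCallbacks,
       which re-reads the handle after every callback, to bail out rather than
       touch the spinlock we are about to destroy. */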
    ASMAtomicWriteHandle(&g_hRTMpNotifySpinLock, NIL_RTSPINLOCK);
    pHead = g_pRTMpCallbackHead;
    g_pRTMpCallbackHead = NULL;
    ASMAtomicIncU32(&g_iRTMpGeneration);
    RTSpinlockRelease(hSpinlock);

    /* free the list. */
    while (pHead)
    {
        PRTMPNOTIFYREG pFree = pHead;
        pHead = pHead->pNext;

        pFree->pNext       = NULL;
        pFree->pfnCallback = NULL;
        RTMemFree(pFree);
    }

    RTSpinlockDestroy(hSpinlock);
}
